Merge "Update ART for LLVM merge up to r187914." into klp-dev
diff --git a/compiler/Android.mk b/compiler/Android.mk
index fec1e11..f81b460 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -75,8 +75,7 @@
 	llvm/runtime_support_builder_arm.cc \
 	llvm/runtime_support_builder_thumb2.cc \
 	llvm/runtime_support_builder_x86.cc \
-	stubs/portable/stubs.cc \
-	stubs/quick/stubs.cc \
+	trampolines/trampoline_compiler.cc \
 	utils/arm/assembler_arm.cc \
 	utils/arm/managed_register_arm.cc \
 	utils/assembler.cc \
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 1ee29cb..60e638c 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -209,8 +209,8 @@
 
 void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
   DCHECK(inst->Opcode() == Instruction::RETURN_VOID);
-  // Are we compiling a constructor ?
-  if ((unit_.GetAccessFlags() & kAccConstructor) == 0) {
+  // Are we compiling a non-clinit constructor?
+  if (!unit_.IsConstructor() || unit_.IsStatic()) {
     return;
   }
   // Do we need a constructor barrier ?
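Note: the rewrite above does more than tidy the flag test. kAccConstructor is set on both instance constructors (<init>) and static initializers (<clinit>), and the constructor barrier only applies to instance constructors, so <clinit> now takes the early return as well. A sketch of the DexCompilationUnit predicates the new condition relies on (the method names come from this diff; the access-flag tests are the standard idiom, shown as an assumption rather than verbatim ART source):

    bool DexCompilationUnit::IsConstructor() const {
      return (access_flags_ & kAccConstructor) != 0;  // true for <init> and <clinit>
    }

    bool DexCompilationUnit::IsStatic() const {
      return (access_flags_ & kAccStatic) != 0;  // among constructors, true only for <clinit>
    }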
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 745e43d..2d8e24f 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -432,7 +432,7 @@
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
   LoadValueDirectFixed(rl_src, r0);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
                rARM_LR);
   // Materialize a pointer to the fill data image
   NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
@@ -488,7 +488,7 @@
   OpRegImm(kOpCmp, r1, 0);
   OpIT(kCondNe, "T");
   // Go expensive route - artLockObjectFromCode(self, obj);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, rARM_LR);
   MarkSafepointPC(call_inst);
@@ -519,7 +519,7 @@
   OpIT(kCondEq, "EE");
   StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
   // Go expensive route - UnlockObjectFromCode(obj);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, rARM_LR);
   MarkSafepointPC(call_inst);
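Two mechanical changes from the hunks above repeat throughout the rest of this patch: the entry points drop their "FromCode" suffix (pLockObjectFromCode becomes pLockObject, and so on), and QUICK_ENTRYPOINT_OFFSET now yields a ThreadOffset instead of a bare int, so any site feeding the result to a raw displacement parameter such as LoadWordDisp adds .Int32Value(). A plausible post-patch expansion of the macro (illustrative; the real definition lives in thread.h and is not part of this diff):

    #define QUICK_ENTRYPOINT_OFFSET(x) \
        ThreadOffset(OFFSETOF_MEMBER(Thread, quick_entrypoints_) + \
                     OFFSETOF_MEMBER(QuickEntryPoints, x))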
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 1599941..f1ccfa0 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -28,7 +28,7 @@
     // Required for target - codegen helpers.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                           int s_reg);
@@ -153,12 +153,12 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
     RegLocation ArgLoc(RegLocation loc);
     LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
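The signature changes in this header are the point of the ThreadOffset type: a thread-relative byte offset becomes a distinct type that can no longer be confused with a register number or an ordinary immediate at these interfaces. Assuming it follows the Offset pattern used elsewhere in ART, a minimal equivalent is:

    class ThreadOffset {
     public:
      explicit ThreadOffset(size_t val) : val_(val) {}  // explicit: no silent int conversion
      int32_t Int32Value() const { return static_cast<int32_t>(val_); }
     private:
      size_t val_;
    };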
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 9db1016..c258019 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -498,7 +498,7 @@
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void ArmMir2Lir::OpTlsCmp(int offset, int val) {
+void ArmMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
@@ -665,7 +665,7 @@
      */
     RegLocation rl_result;
     if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
-      int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
+      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
       FlushAllRegs();
       CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
       rl_result = GetReturnWide(false);
@@ -956,7 +956,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 6f37798..47d3d97 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -714,8 +714,8 @@
   FreeTemp(r3);
 }
 
-int ArmMir2Lir::LoadHelper(int offset) {
-  LoadWordDisp(rARM_SELF, offset, rARM_LR);
+int ArmMir2Lir::LoadHelper(ThreadOffset offset) {
+  LoadWordDisp(rARM_SELF, offset.Int32Value(), rARM_LR);
   return rARM_LR;
 }
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index afc8a66..c63de69 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1029,7 +1029,7 @@
   return res;
 }
 
-LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
   return NULL;
 }
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index c9780fa..5f6f3d5 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -17,6 +17,7 @@
 #include "dex/compiler_internals.h"
 #include "dex_file-inl.h"
 #include "gc_map.h"
+#include "mapping_table.h"
 #include "mir_to_lir-inl.h"
 #include "verifier/dex_gc_map.h"
 #include "verifier/method_verifier.h"
@@ -515,15 +516,35 @@
     }
   }
   if (kIsDebugBuild) {
-    DCHECK(VerifyCatchEntries());
+    CHECK(VerifyCatchEntries());
   }
-  combined_mapping_table_.push_back(pc2dex_mapping_table_.size() +
-                                        dex2pc_mapping_table_.size());
-  combined_mapping_table_.push_back(pc2dex_mapping_table_.size());
-  combined_mapping_table_.insert(combined_mapping_table_.end(), pc2dex_mapping_table_.begin(),
-                                 pc2dex_mapping_table_.end());
-  combined_mapping_table_.insert(combined_mapping_table_.end(), dex2pc_mapping_table_.begin(),
-                                 dex2pc_mapping_table_.end());
+  CHECK_EQ(pc2dex_mapping_table_.size() & 1, 0U);
+  CHECK_EQ(dex2pc_mapping_table_.size() & 1, 0U);
+  uint32_t total_entries = (pc2dex_mapping_table_.size() + dex2pc_mapping_table_.size()) / 2;
+  uint32_t pc2dex_entries = pc2dex_mapping_table_.size() / 2;
+  encoded_mapping_table_.PushBack(total_entries);
+  encoded_mapping_table_.PushBack(pc2dex_entries);
+  encoded_mapping_table_.InsertBack(pc2dex_mapping_table_.begin(), pc2dex_mapping_table_.end());
+  encoded_mapping_table_.InsertBack(dex2pc_mapping_table_.begin(), dex2pc_mapping_table_.end());
+  if (kIsDebugBuild) {
+    // Verify the encoded table holds the expected data.
+    MappingTable table(&encoded_mapping_table_.GetData()[0]);
+    CHECK_EQ(table.TotalSize(), total_entries);
+    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
+    CHECK_EQ(table.DexToPcSize(), dex2pc_mapping_table_.size() / 2);
+    MappingTable::PcToDexIterator it = table.PcToDexBegin();
+    for (uint32_t i = 0; i < pc2dex_mapping_table_.size(); ++i, ++it) {
+      CHECK_EQ(pc2dex_mapping_table_.at(i), it.NativePcOffset());
+      ++i;
+      CHECK_EQ(pc2dex_mapping_table_.at(i), it.DexPc());
+    }
+    MappingTable::DexToPcIterator it2 = table.DexToPcBegin();
+    for (uint32_t i = 0; i < dex2pc_mapping_table_.size(); ++i, ++it2) {
+      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.NativePcOffset());
+      ++i;
+      CHECK_EQ(dex2pc_mapping_table_.at(i), it2.DexPc());
+    }
+  }
 }
 
 class NativePcToReferenceMapBuilder {
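The hunk above replaces the raw uint32_t combined_mapping_table_ with a LEB128-encoded byte stream: a two-value header (total entry count, then the pc-to-dex entry count) followed by the two tables, which the new MappingTable wrapper decodes for the debug-build round-trip check. For reference, unsigned LEB128 packs seven bits per byte, least-significant group first, with the high bit marking continuation; a standalone encoder matching what UnsignedLeb128EncodingVector::PushBack presumably emits:

    #include <cstdint>
    #include <vector>

    void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
      while (value >= 0x80) {                                       // more than seven bits remain
        out->push_back(static_cast<uint8_t>((value & 0x7f) | 0x80));  // set continuation bit
        value >>= 7;
      }
      out->push_back(static_cast<uint8_t>(value));                  // final byte, high bit clear
    }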
@@ -980,28 +1001,35 @@
 
 CompiledMethod* Mir2Lir::GetCompiledMethod() {
   // Combine vmap tables - core regs, then fp regs - into vmap_table
-  std::vector<uint16_t> vmap_table;
+  std::vector<uint16_t> raw_vmap_table;
   // Core regs may have been inserted out of order - sort first
   std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
   for (size_t i = 0 ; i < core_vmap_table_.size(); i++) {
     // Copy, stripping out the phys register sort key
-    vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
+    raw_vmap_table.push_back(~(-1 << VREG_NUM_WIDTH) & core_vmap_table_[i]);
   }
   // If we have a frame, push a marker to take place of lr
   if (frame_size_ > 0) {
-    vmap_table.push_back(INVALID_VREG);
+    raw_vmap_table.push_back(INVALID_VREG);
   } else {
     DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
     DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
   }
   // Combine vmap tables - core regs, then fp regs. fp regs already sorted
   for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
-    vmap_table.push_back(fp_vmap_table_[i]);
+    raw_vmap_table.push_back(fp_vmap_table_[i]);
+  }
+  UnsignedLeb128EncodingVector vmap_encoder;
+  // Prefix the encoded data with its size.
+  vmap_encoder.PushBack(raw_vmap_table.size());
+  typedef std::vector<uint16_t>::const_iterator It;
+  for (It cur = raw_vmap_table.begin(), end = raw_vmap_table.end(); cur != end; ++cur) {
+    vmap_encoder.PushBack(*cur);
   }
   CompiledMethod* result =
       new CompiledMethod(cu_->instruction_set, code_buffer_,
                          frame_size_, core_spill_mask_, fp_spill_mask_,
-                         combined_mapping_table_, vmap_table, native_gc_map_);
+                         encoded_mapping_table_.GetData(), vmap_encoder.GetData(), native_gc_map_);
   return result;
 }
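The vmap table gets the same treatment: values are LEB128-encoded and prefixed with their count, so a consumer decodes the count first and then reads exactly that many entries. The matching decoder, again a sketch of the standard encoding rather than the verbatim ART helper:

    uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;                                     // consume one byte
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);                            // continuation bit still set
      return result;
    }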
 
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index ebe10bb..298d389 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -208,12 +208,12 @@
 void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                           RegLocation rl_src) {
   FlushAllRegs();  /* Everything to home location */
-  int func_offset;
+  ThreadOffset func_offset(-1);
   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                        type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
   } else {
-    func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+    func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
   }
   CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
   RegLocation rl_result = GetReturn(false);
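ThreadOffset's constructor is explicit and the class has no default constructor, so locals that get assigned on a later branch are seeded with a -1 sentinel, as above. The same pattern recurs in the hunks below and in gen_invoke.cc and fp_mips.cc; schematically (fast_path_ok stands in for the CanAccessTypeWithoutChecks call):

    ThreadOffset func_offset(-1);  // sentinel; overwritten on every path before use
    if (fast_path_ok) {
      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
    } else {
      func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
    }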
@@ -230,12 +230,12 @@
   int elems = info->num_arg_words;
   int type_idx = info->index;
   FlushAllRegs();  /* Everything to home location */
-  int func_offset;
+  ThreadOffset func_offset(-1);
   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                        type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
   } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
   }
   CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
   FreeTemp(TargetReg(kArg2));
@@ -408,9 +408,10 @@
     FreeTemp(rBase);
   } else {
     FlushAllRegs();  // Everything to home locations
-    int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
-        : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
+    ThreadOffset setter_offset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
+                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
     CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
   }
 }
@@ -483,9 +484,10 @@
     }
   } else {
     FlushAllRegs();  // Everything to home locations
-    int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
-        : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
+    ThreadOffset getterOffset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
+                          :(is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
+                                      : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
     CallRuntimeHelperImm(getterOffset, field_idx, true);
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -499,7 +501,7 @@
 
 void Mir2Lir::HandleSuspendLaunchPads() {
   int num_elems = suspend_launchpads_.Size();
-  int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+  ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
   for (int i = 0; i < num_elems; i++) {
     ResetRegPool();
     ResetDefTracking();
@@ -539,13 +541,13 @@
     LIR* lab = throw_launchpads_.Get(i);
     current_dalvik_offset_ = lab->operands[1];
     AppendLIR(lab);
-    int func_offset = 0;
+    ThreadOffset func_offset(-1);
     int v1 = lab->operands[2];
     int v2 = lab->operands[3];
     bool target_x86 = (cu_->instruction_set == kX86);
     switch (lab->operands[0]) {
       case kThrowNullPointer:
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
         break;
       case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
         // v1 holds the constant array index.  Mips/Arm uses v2 for length, x86 reloads.
@@ -557,7 +559,7 @@
         // Make sure the following LoadConstant doesn't mess with kArg1.
         LockTemp(TargetReg(kArg1));
         LoadConstant(TargetReg(kArg0), v2);
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
         break;
       case kThrowArrayBounds:
         // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
@@ -590,18 +592,18 @@
             OpRegCopy(TargetReg(kArg0), v1);
           }
         }
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
         break;
       case kThrowDivZero:
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
         break;
       case kThrowNoSuchMethod:
         OpRegCopy(TargetReg(kArg0), v1);
         func_offset =
-          QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+          QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
         break;
       case kThrowStackOverflow:
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
         // Restore stack alignment
         if (target_x86) {
           OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
@@ -664,9 +666,10 @@
       StoreValue(rl_dest, rl_result);
     }
   } else {
-    int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
-        : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
+    ThreadOffset getterOffset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
+                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
     CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -719,9 +722,10 @@
       }
     }
   } else {
-    int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
-        : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
+    ThreadOffset setter_offset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
+                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
     CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
   }
 }
@@ -735,7 +739,7 @@
                                                    type_idx)) {
     // Call out to helper which resolves type and verifies access.
     // Resolved type returned in kRet0.
-    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                             type_idx, rl_method.low_reg, true);
     RegLocation rl_result = GetReturn(false);
     StoreValue(rl_dest, rl_result);
@@ -764,7 +768,7 @@
       // TUNING: move slow path to end & remove unconditional branch
       LIR* target1 = NewLIR0(kPseudoTargetLabel);
       // Call out to helper, which will return resolved type in kArg0
-      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                               rl_method.low_reg, true);
       RegLocation rl_result = GetReturn(false);
       StoreValue(rl_dest, rl_result);
@@ -797,7 +801,7 @@
     LoadWordDisp(TargetReg(kArg2),
                  mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
     // Might call out to helper, which will return resolved string in kRet0
-    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode));
+    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
     LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
     LoadConstant(TargetReg(kArg1), string_idx);
     if (cu_->instruction_set == kThumb2) {
@@ -821,7 +825,7 @@
       branch->target = target;
     } else {
       DCHECK_EQ(cu_->instruction_set, kX86);
-      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
+      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), TargetReg(kArg2),
                               TargetReg(kArg1), true);
     }
     GenBarrier();
@@ -845,12 +849,12 @@
   FlushAllRegs();  /* Everything to home location */
   // alloc will always check for resolution, do we also need to verify
   // access because the verifier was unable to?
-  int func_offset;
+  ThreadOffset func_offset(-1);
   if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
       cu_->method_idx, *cu_->dex_file, type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
   } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
   }
   CallRuntimeHelperImmMethod(func_offset, type_idx, true);
   RegLocation rl_result = GetReturn(false);
@@ -929,7 +933,7 @@
   if (needs_access_check) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kArg0
-    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                          type_idx, true);
     OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
     LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
@@ -951,7 +955,7 @@
       LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
       // Not resolved
       // Call out to helper, which will return resolved type in kRet0
-      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
       OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
       LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
       // Rejoin code paths
@@ -986,7 +990,7 @@
     }
   } else {
     if (cu_->instruction_set == kThumb2) {
-      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
       if (!type_known_abstract) {
       /* Uses conditional nullification */
         OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
@@ -1003,13 +1007,13 @@
         branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
       }
       if (cu_->instruction_set != kX86) {
-        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
         OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
         OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
         FreeTemp(r_tgt);
       } else {
         OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
-        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
       }
     }
   }
@@ -1069,7 +1073,7 @@
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kRet0
     // InitializeTypeAndVerifyAccess(idx, method)
-    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                             type_idx, TargetReg(kArg1), true);
     OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
   } else if (use_declaring_class) {
@@ -1089,7 +1093,7 @@
       // Not resolved
       // Call out to helper, which will return resolved type in kArg0
       // InitializeTypeFromCode(idx, method)
-      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                               TargetReg(kArg1), true);
       OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
       // Rejoin code paths
@@ -1109,7 +1113,7 @@
   if (!type_known_abstract) {
     branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
   }
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), TargetReg(kArg1),
                           TargetReg(kArg2), true);
   /* branch target here */
   LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -1168,7 +1172,7 @@
 
 void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift) {
-  int func_offset = -1;  // Make gcc happy
+  ThreadOffset func_offset(-1);
 
   switch (opcode) {
     case Instruction::SHL_LONG:
@@ -1303,7 +1307,7 @@
       }
       rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
     } else {
-      int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
+      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
       FlushAllRegs();   /* Send everything to home location */
       LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
       int r_tgt = CallHelperSetup(func_offset);
@@ -1558,7 +1562,7 @@
         FlushAllRegs();   /* Everything to home location */
         LoadValueDirectFixed(rl_src, TargetReg(kArg0));
         Clobber(TargetReg(kArg0));
-        int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
+        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
         CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
         if (is_div)
           rl_result = GetReturn(false);
@@ -1589,7 +1593,7 @@
   OpKind second_op = kOpBkpt;
   bool call_out = false;
   bool check_zero = false;
-  int func_offset;
+  ThreadOffset func_offset(-1);
   int ret_reg = TargetReg(kRet0);
 
   switch (opcode) {
@@ -1709,7 +1713,7 @@
   }
 }
 
-void Mir2Lir::GenConversionCall(int func_offset,
+void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
                                 RegLocation rl_dest, RegLocation rl_src) {
   /*
    * Don't optimize the register usage since it calls out to support
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1b34e99..20d683a 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -37,12 +37,12 @@
  * has a memory call operation, part 1 is a NOP for x86.  For other targets,
  * load arguments between the two parts.
  */
-int Mir2Lir::CallHelperSetup(int helper_offset) {
+int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
   return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
 }
 
 /* NOTE: if r_tgt is a temp, it will be freed following use */
-LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) {
+LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
   LIR* call_inst;
   if (cu_->instruction_set == kX86) {
     call_inst = OpThreadMem(kOpBlx, helper_offset);
@@ -56,21 +56,22 @@
   return call_inst;
 }
 
-void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadConstant(TargetReg(kArg0), arg0);
   ClobberCalleeSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg0), arg0);
   ClobberCalleeSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
+                                           bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   if (arg0.wide == 0) {
     LoadValueDirectFixed(arg0, TargetReg(kArg0));
@@ -81,7 +82,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadConstant(TargetReg(kArg0), arg0);
@@ -90,7 +91,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                               RegLocation arg1, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   if (arg1.wide == 0) {
@@ -103,7 +104,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
                                               bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg0, TargetReg(kArg0));
@@ -112,7 +113,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg1), arg1);
@@ -121,8 +122,8 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
-                             bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg0), arg0);
   LoadConstant(TargetReg(kArg1), arg1);
@@ -130,7 +131,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadCurrMethodDirect(TargetReg(kArg1));
   LoadConstant(TargetReg(kArg0), arg0);
@@ -138,7 +139,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                       RegLocation arg1, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   if (arg0.wide == 0) {
@@ -168,7 +169,8 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
   OpRegCopy(TargetReg(kArg0), arg0);
@@ -177,7 +179,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                          int arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
@@ -188,7 +190,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
+void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
                                                     int arg0, RegLocation arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg2, TargetReg(kArg2));
@@ -198,7 +200,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
                                             int arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadCurrMethodDirect(TargetReg(kArg1));
@@ -208,7 +210,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                          int arg0, RegLocation arg1,
                                                          RegLocation arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
@@ -470,14 +472,14 @@
     // Disable sharpening
     direct_method = 0;
   }
-  int trampoline = (cu->instruction_set == kX86) ? 0
-      : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
 
   if (direct_method != 0) {
     switch (state) {
       case 0:  // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
+                           cg->TargetReg(kInvokeTgt));
         }
         // Get the interface Method* [sets kArg0]
         if (direct_method != static_cast<unsigned int>(-1)) {
@@ -506,7 +508,8 @@
         cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
         // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
+                           cg->TargetReg(kInvokeTgt));
         }
         break;
     case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
@@ -528,7 +531,7 @@
   return state + 1;
 }
 
-static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
+static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
                             int state, const MethodReference& target_method,
                             uint32_t method_idx) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
@@ -539,7 +542,7 @@
   if (state == 0) {
     if (cu->instruction_set != kX86) {
       // Load trampoline target
-      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
     }
     // Load kArg0 with method index
     CHECK_EQ(cu->dex_file, target_method.dex_file);
@@ -555,7 +558,7 @@
                                 uint32_t method_idx,
                                 uintptr_t unused, uintptr_t unused2,
                                 InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -563,7 +566,7 @@
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t unused,
                                 uintptr_t unused2, InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -571,7 +574,7 @@
                                const MethodReference& target_method,
                                uint32_t method_idx, uintptr_t unused,
                                uintptr_t unused2, InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -579,7 +582,7 @@
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t unused,
                            uintptr_t unused2, InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -589,7 +592,7 @@
                                                 uint32_t unused,
                                                 uintptr_t unused2, uintptr_t unused3,
                                                 InvokeType unused4) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -1108,9 +1111,9 @@
 bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  int offset = Thread::PeerOffset().Int32Value();
+  ThreadOffset offset = Thread::PeerOffset();
   if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
-    LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
+    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
   } else {
     CHECK(cu_->instruction_set == kX86);
     reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
@@ -1406,7 +1409,7 @@
       call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                         mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
     } else {
-      int trampoline = 0;
+      ThreadOffset trampoline(-1);
       switch (info->type) {
       case kInterface:
         trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 846c055..eaae0e1 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -247,7 +247,7 @@
   GenBarrier();
   NewLIR0(kMipsCurrPC);  // Really a jal to .+8
   // Now, fill the branch delay slot with the helper load
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData));
   GenBarrier();  // Scheduling barrier
 
   // Construct BaseLabel and set up table base register
@@ -272,7 +272,7 @@
   LockCallTemps();  // Prepare for explicit register usage
   GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
   // Go expensive route - artLockObjectFromCode(self, obj);
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode));
+  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObject));
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, r_tgt);
   MarkSafepointPC(call_inst);
@@ -287,7 +287,7 @@
   LockCallTemps();  // Prepare for explicit register usage
   GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
   // Go expensive route - UnlockObjectFromCode(obj);
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObject));
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, r_tgt);
   MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 802ff62..6100396 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -29,7 +29,7 @@
     // Required for target - codegen utilities.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg);
@@ -154,12 +154,12 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
     LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
                           int s_reg);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3203017..9e2fea9 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -176,7 +176,7 @@
 void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
   bool wide = true;
-  int offset = -1;  // Make gcc happy.
+  ThreadOffset offset(-1);
 
   switch (opcode) {
     case Instruction::CMPL_FLOAT:
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index bd044c6..4a48c87 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -254,7 +254,7 @@
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void MipsMir2Lir::OpTlsCmp(int offset, int val) {
+void MipsMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
@@ -579,7 +579,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 0a17fb1..7a9e91a 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -505,8 +505,8 @@
  * ensure that all branch instructions can be restarted if
  * there is a trap in the shadow.  Allocate a temp register.
  */
-int MipsMir2Lir::LoadHelper(int offset) {
-  LoadWordDisp(rMIPS_SELF, offset, r_T9);
+int MipsMir2Lir::LoadHelper(ThreadOffset offset) {
+  LoadWordDisp(rMIPS_SELF, offset.Int32Value(), r_T9);
   return r_T9;
 }
 
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 68b26f1..5d9ae33 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -632,7 +632,7 @@
   return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
 }
 
-LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
   return NULL;
 }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index a34e929..517fc66 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -25,6 +25,7 @@
 #include "dex/growable_array.h"
 #include "dex/arena_allocator.h"
 #include "driver/compiler_driver.h"
+#include "leb128_encoder.h"
 #include "safe_map.h"
 
 namespace art {
@@ -424,42 +425,42 @@
                           RegLocation rl_src, int lit);
     void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src1, RegLocation rl_src2);
-    void GenConversionCall(int func_offset, RegLocation rl_dest,
+    void GenConversionCall(ThreadOffset func_offset, RegLocation rl_dest,
                            RegLocation rl_src);
     void GenSuspendTest(int opt_flags);
     void GenSuspendTestAndBranch(int opt_flags, LIR* target);
 
     // Shared by all targets - implemented in gen_invoke.cc.
-    int CallHelperSetup(int helper_offset);
-    LIR* CallHelper(int r_tgt, int helper_offset, bool safepoint_pc);
-    void CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0,
-                                       bool safepoint_pc);
-    void CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+    int CallHelperSetup(ThreadOffset helper_offset);
+    LIR* CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc);
+    void CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
+                                      bool safepoint_pc);
+    void CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+    void CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                          RegLocation arg1, bool safepoint_pc);
-    void CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0,
+    void CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
                                          int arg1, bool safepoint_pc);
-    void CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperImmMethod(int helper_offset, int arg0,
+    void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0,
                                     bool safepoint_pc);
-    void CallRuntimeHelperRegLocationRegLocation(int helper_offset,
+    void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset,
                                                  RegLocation arg0, RegLocation arg1,
                                                  bool safepoint_pc);
-    void CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                     int arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmMethodRegLocation(int helper_offset, int arg0,
+    void CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                                RegLocation arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, int arg2,
+    void CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, int arg2,
                                        bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+    void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                     int arg0, RegLocation arg1, RegLocation arg2,
                                                     bool safepoint_pc);
     void GenInvoke(CallInfo* info);
@@ -526,7 +527,7 @@
     // Required for target - codegen helpers.
     virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                     RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
-    virtual int LoadHelper(int offset) = 0;
+    virtual int LoadHelper(ThreadOffset offset) = 0;
     virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
     virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg) = 0;
@@ -674,14 +675,14 @@
     virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
                              int r_src2) = 0;
     virtual LIR* OpTestSuspend(LIR* target) = 0;
-    virtual LIR* OpThreadMem(OpKind op, int thread_offset) = 0;
+    virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
     virtual LIR* OpVldm(int rBase, int count) = 0;
     virtual LIR* OpVstm(int rBase, int count) = 0;
     virtual void OpLea(int rBase, int reg1, int reg2, int scale,
                        int offset) = 0;
     virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                                int src_hi) = 0;
-    virtual void OpTlsCmp(int offset, int val) = 0;
+    virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
     virtual bool InexpensiveConstantInt(int32_t value) = 0;
     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
     virtual bool InexpensiveConstantLong(int64_t value) = 0;
@@ -760,7 +761,8 @@
      */
     int live_sreg_;
     CodeBuffer code_buffer_;
-    std::vector<uint32_t> combined_mapping_table_;
+    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
+    UnsignedLeb128EncodingVector encoded_mapping_table_;
     std::vector<uint32_t> core_vmap_table_;
     std::vector<uint32_t> fp_vmap_table_;
     std::vector<uint8_t> native_gc_map_;
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 1c395de..6e3e55f 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -148,7 +148,7 @@
   NewLIR1(kX86StartOfMethod, rX86_ARG2);
   NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
   NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
                           rX86_ARG1, true);
 }
 
@@ -165,7 +165,7 @@
   NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
   LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
   // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
-  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObject), rCX, true);
   branch->target = NewLIR0(kPseudoTargetLabel);
 }
 
@@ -185,7 +185,7 @@
   LIR* branch2 = NewLIR1(kX86Jmp8, 0);
   branch->target = NewLIR0(kPseudoTargetLabel);
   // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
-  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rAX, true);
   branch2->target = NewLIR0(kPseudoTargetLabel);
 }
 
@@ -243,7 +243,7 @@
   if (!skip_overflow_check) {
     // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
     LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
-    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset());
     OpCondBranch(kCondUlt, tgt);
     // Remember branch target - will process later
     throw_launchpads_.Insert(tgt);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index edb5ae5..21328d5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -29,7 +29,7 @@
     // Required for target - codegen helpers.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg);
@@ -154,14 +154,14 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
-    void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
+    void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset);
     void SpillCoreRegs();
     void UnSpillCoreRegs();
     static const X86EncodingMap EncodingMap[kX86Last];
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 0b4b4be..377d134 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -240,8 +240,8 @@
   NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
 }
 
-void X86Mir2Lir::OpTlsCmp(int offset, int val) {
-  NewLIR2(kX86Cmp16TI8, offset, val);
+void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
+  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
 }
 
 bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
@@ -285,7 +285,7 @@
 
 // Test suspend flag, return target of taken suspend branch
 LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
-  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
+  OpTlsCmp(Thread::ThreadFlagsOffset(), 0);
   return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
 }
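On x86 the current Thread is reachable through a segment register, so the suspend check above remains a single thread-local compare; schematically (illustrative only, assuming the fs segment addresses the Thread as in the surrounding x86 code):

    // cmp word ptr fs:[Thread::ThreadFlagsOffset()], 0   // kX86Cmp16TI8 via OpTlsCmp
    // jne suspend_launchpad                              // from OpCondBranch(kCondNe, ...)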
 
@@ -403,7 +403,7 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
   case kOpCmp: opcode = kX86Cmp32RT;  break;
@@ -412,7 +412,7 @@
     LOG(FATAL) << "Bad opcode: " << op;
     break;
   }
-  NewLIR2(opcode, r_dest, thread_offset);
+  NewLIR2(opcode, r_dest, thread_offset.Int32Value());
 }
 
 /*
@@ -532,7 +532,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 2c9b3c8..699f3ae 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -524,7 +524,7 @@
 }
 
 // Not used in x86
-int X86Mir2Lir::LoadHelper(int offset) {
+int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
   return INVALID_REG;
 }
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index e15995f..c519bfe 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -292,7 +292,7 @@
   return OpRegImm(op, r_dest, value);
 }
 
-LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
     case kOpBlx: opcode = kX86CallT;  break;
@@ -300,7 +300,7 @@
       LOG(FATAL) << "Bad opcode: " << op;
       break;
   }
-  return NewLIR1(opcode, thread_offset);
+  return NewLIR1(opcode, thread_offset.Int32Value());
 }
 
 LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
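Throughout the hunks above, the raw int thread offsets become a dedicated ThreadOffset type, so a compile error now catches an arbitrary integer being passed where a displacement from the Thread* register is expected; the raw value is recovered only at the emit site via Int32Value(). A minimal sketch of such a wrapper, assuming a simplified form of ART's actual class:

    #include <stdint.h>

    // Strongly-typed displacement from the thread register (simplified sketch;
    // the real ThreadOffset derives from ART's wider Offset machinery).
    class ThreadOffset {
     public:
      explicit ThreadOffset(int32_t value) : value_(value) {}
      int32_t Int32Value() const { return value_; }  // Unwrap only when encoding.
     private:
      int32_t value_;
    };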
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e7ba402..56b629c 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -41,9 +41,9 @@
 #include "mirror/throwable.h"
 #include "scoped_thread_state_change.h"
 #include "ScopedLocalRef.h"
-#include "stubs/stubs.h"
 #include "thread.h"
 #include "thread_pool.h"
+#include "trampolines/trampoline_compiler.h"
 #include "verifier/method_verifier.h"
 
 #if defined(ART_USE_PORTABLE_COMPILER)
@@ -433,64 +433,38 @@
   return res;
 }
 
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterBridge() const {
+  return CreateTrampoline(instruction_set_, kInterpreterAbi,
+                          INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToInterpreterBridge));
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToCompiledCodeBridge() const {
+  return CreateTrampoline(instruction_set_, kInterpreterAbi,
+                          INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToCompiledCodeBridge));
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateJniDlsymLookup() const {
+  return CreateTrampoline(instruction_set_, kJniAbi, JNI_ENTRYPOINT_OFFSET(pDlsymLookup));
+}
+
 const std::vector<uint8_t>* CompilerDriver::CreatePortableResolutionTrampoline() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreatePortableResolutionTrampoline();
-    case kMips:
-      return mips::CreatePortableResolutionTrampoline();
-    case kX86:
-      return x86::CreatePortableResolutionTrampoline();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
+  return CreateTrampoline(instruction_set_, kPortableAbi,
+                          PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampoline));
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreatePortableToInterpreterBridge() const {
+  return CreateTrampoline(instruction_set_, kPortableAbi,
+                          PORTABLE_ENTRYPOINT_OFFSET(pPortableToInterpreterBridge));
 }
 
 const std::vector<uint8_t>* CompilerDriver::CreateQuickResolutionTrampoline() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreateQuickResolutionTrampoline();
-    case kMips:
-      return mips::CreateQuickResolutionTrampoline();
-    case kX86:
-      return x86::CreateQuickResolutionTrampoline();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
+  return CreateTrampoline(instruction_set_, kQuickAbi,
+                          QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampoline));
 }
 
-const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterEntry() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreateInterpreterToInterpreterEntry();
-    case kMips:
-      return mips::CreateInterpreterToInterpreterEntry();
-    case kX86:
-      return x86::CreateInterpreterToInterpreterEntry();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
-}
-
-const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToQuickEntry() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreateInterpreterToQuickEntry();
-    case kMips:
-      return mips::CreateInterpreterToQuickEntry();
-    case kX86:
-      return x86::CreateInterpreterToQuickEntry();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
+const std::vector<uint8_t>* CompilerDriver::CreateQuickToInterpreterBridge() const {
+  return CreateTrampoline(instruction_set_, kQuickAbi,
+                          QUICK_ENTRYPOINT_OFFSET(pQuickToInterpreterBridge));
 }
 
 void CompilerDriver::CompileAll(jobject class_loader,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 18f852d..b5222c9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -48,6 +48,17 @@
   kNoBackend
 };
 
+enum EntryPointCallingConvention {
+  // ABI of invocations to a method's interpreter entry point.
+  kInterpreterAbi,
+  // ABI of calls to a method's native code, only used for native methods.
+  kJniAbi,
+  // ABI of calls to a method's portable code entry point.
+  kPortableAbi,
+  // ABI of calls to a method's quick code entry point.
+  kQuickAbi
+};
+
 enum DexToDexCompilationLevel {
   kDontDexToDexCompile,   // Only meaning wrt image time interpretation.
   kRequired,              // Dex-to-dex compilation required for correctness.
@@ -110,13 +121,19 @@
   CompilerTls* GetTls();
 
   // Generate the trampolines that are invoked by unresolved direct methods.
+  const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateJniDlsymLookup() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreatePortableResolutionTrampoline() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreatePortableToInterpreterBridge() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const std::vector<uint8_t>* CreateInterpreterToQuickEntry() const
+  const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   CompiledClass* GetCompiledClass(ClassReference ref) const
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 5bf0086..465139b 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -80,6 +80,10 @@
     return access_flags_;
   }
 
+  bool IsConstructor() const {
+    return ((access_flags_ & kAccConstructor) != 0);
+  }
+
   bool IsNative() const {
     return ((access_flags_ & kAccNative) != 0);
   }
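In the dex access flags both <init> and <clinit> carry kAccConstructor, and <clinit> additionally carries kAccStatic, so the new IsConstructor() combines with the existing IsStatic() accessor to tell the two apart. A hypothetical helper, not part of this change, would read:

    // Illustrative only: true for <init>, false for <clinit> and ordinary methods.
    bool IsInstanceConstructor(const DexCompilationUnit& unit) {
      return unit.IsConstructor() && !unit.IsStatic();
    }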
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index e73d021..3432c8c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -90,11 +90,23 @@
     return false;
   }
   class_linker->RegisterOatFile(*oat_file_);
-  interpreter_to_interpreter_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToInterpreterEntryOffset();
-  interpreter_to_quick_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToQuickEntryOffset();
-  portable_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
-  quick_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
 
+  interpreter_to_interpreter_bridge_offset_ =
+      oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
+  interpreter_to_compiled_code_bridge_offset_ =
+      oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset();
+
+  jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();
+
+  portable_resolution_trampoline_offset_ =
+      oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
+  portable_to_interpreter_bridge_offset_ =
+      oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
+
+  quick_resolution_trampoline_offset_ =
+      oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
+  quick_to_interpreter_bridge_offset_ =
+      oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset();
   {
     Thread::Current()->TransitionFromSuspendedToRunnable();
     PruneNonImageClasses();  // Remove junk
@@ -490,57 +502,62 @@
 void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) {
   FixupInstanceFields(orig, copy);
 
-  // OatWriter replaces the code_ with an offset value.
-  // Here we readjust to a pointer relative to oat_begin_
-  if (orig->IsAbstract()) {
-    // Code for abstract methods is set to the abstract method error stub when we load the image.
-    copy->SetEntryPointFromCompiledCode(NULL);
-    copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
-                                       (GetOatAddress(interpreter_to_interpreter_entry_offset_)));
-    return;
-  } else {
-    copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
-                                       (GetOatAddress(interpreter_to_quick_entry_offset_)));
-  }
+  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative
+  // to oat_begin_.
 
-  if (orig == Runtime::Current()->GetResolutionMethod()) {
+  // The resolution method has a special trampoline to call.
+  if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
     copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
 #else
     copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
 #endif
-    return;
-  }
-
-  // Use original code if it exists. Otherwise, set the code pointer to the resolution trampoline.
-  const byte* code = GetOatAddress(orig->GetOatCodeOffset());
-  if (code != NULL) {
-    copy->SetEntryPointFromCompiledCode(code);
   } else {
+    // We assume all methods have code. If they currently don't, we set them to use the
+    // resolution trampoline. Abstract methods never have code, so we need to make sure their
+    // use results in an AbstractMethodError. We use the interpreter to achieve this.
+    if (UNLIKELY(orig->IsAbstract())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+      copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_));
 #else
-    copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+      copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_));
 #endif
-  }
+      copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+          (GetOatAddress(interpreter_to_interpreter_bridge_offset_)));
+    } else {
+      copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+          (GetOatAddress(interpreter_to_compiled_code_bridge_offset_)));
+      // Use original code if it exists. Otherwise, set the code pointer to the resolution
+      // trampoline.
+      const byte* code = GetOatAddress(orig->GetOatCodeOffset());
+      if (code != NULL) {
+        copy->SetEntryPointFromCompiledCode(code);
+      } else {
+#if defined(ART_USE_PORTABLE_COMPILER)
+        copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+#else
+        copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+#endif
+      }
+      if (orig->IsNative()) {
+        // The native method's pointer is set to a stub that does the lookup via dlsym.
+        // Note this is not the code_ pointer, that is handled above.
+        copy->SetNativeMethod(GetOatAddress(jni_dlsym_lookup_offset_));
+      } else {
+        // Normal (non-abstract non-native) methods have various tables to relocate.
+        uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
+        const byte* mapping_table = GetOatAddress(mapping_table_off);
+        copy->SetMappingTable(mapping_table);
 
-  if (orig->IsNative()) {
-    // The native method's pointer is set to a stub to lookup via dlsym when we load the image.
-    // Note this is not the code_ pointer, that is handled above.
-    copy->SetNativeMethod(NULL);
-  } else {
-    // normal (non-abstract non-native) methods have mapping tables to relocate
-    uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
-    const byte* mapping_table = GetOatAddress(mapping_table_off);
-    copy->SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table));
+        uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
+        const byte* vmap_table = GetOatAddress(vmap_table_offset);
+        copy->SetVmapTable(vmap_table);
 
-    uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
-    const byte* vmap_table = GetOatAddress(vmap_table_offset);
-    copy->SetVmapTable(reinterpret_cast<const uint16_t*>(vmap_table));
-
-    uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
-    const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
-    copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
+        uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
+        const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
+        copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
+      }
+    }
   }
 }
 
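The restructured FixupMethod above reduces to picking one of three compiled-code entry points, with the interpreter bridges chosen alongside. A condensed sketch of the selection for the quick backend (a simplification; the ART_USE_PORTABLE_COMPILER build substitutes the portable offsets):

    #include <cstddef>
    #include <stdint.h>

    // Simplified mirror of the entry-point choice above (quick backend assumed).
    const uint8_t* SelectCompiledCodeEntryPoint(bool is_resolution_method, bool is_abstract,
                                                const uint8_t* oat_code,
                                                const uint8_t* quick_resolution_trampoline,
                                                const uint8_t* quick_to_interpreter_bridge) {
      if (is_resolution_method) {
        return quick_resolution_trampoline;   // Special trampoline for the resolution method.
      }
      if (is_abstract) {
        return quick_to_interpreter_bridge;   // Interpreter raises AbstractMethodError.
      }
      // Use original code if it exists; otherwise fall back to the resolution trampoline.
      return (oat_code != NULL) ? oat_code : quick_resolution_trampoline;
    }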
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index e43ec63..545534f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -39,8 +39,8 @@
  public:
   explicit ImageWriter(const CompilerDriver& compiler_driver)
       : compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL),
-        oat_data_begin_(NULL), interpreter_to_interpreter_entry_offset_(0),
-        interpreter_to_quick_entry_offset_(0), portable_resolution_trampoline_offset_(0),
+        oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
+        interpreter_to_compiled_code_bridge_offset_(0), portable_resolution_trampoline_offset_(0),
         quick_resolution_trampoline_offset_(0) {}
 
   ~ImageWriter() {}
@@ -195,10 +195,13 @@
   const byte* oat_data_begin_;
 
   // Offset from oat_data_begin_ to the stubs.
-  uint32_t interpreter_to_interpreter_entry_offset_;
-  uint32_t interpreter_to_quick_entry_offset_;
+  uint32_t interpreter_to_interpreter_bridge_offset_;
+  uint32_t interpreter_to_compiled_code_bridge_offset_;
+  uint32_t jni_dlsym_lookup_offset_;
   uint32_t portable_resolution_trampoline_offset_;
+  uint32_t portable_to_interpreter_bridge_offset_;
   uint32_t quick_resolution_trampoline_offset_;
+  uint32_t quick_to_interpreter_bridge_offset_;
 
   // DexCaches seen while scanning for fixing up CodeAndDirectMethods
   typedef std::set<mirror::DexCache*> Set;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b069fbd..9713fe9 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -172,8 +172,8 @@
   //    can occur. The result is the saved JNI local state that is restored by the exit call. We
   //    abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
   //    arguments.
-  uintptr_t jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
-                                        : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
+  ThreadOffset jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+                                           : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
   main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
   FrameOffset locked_object_sirt_offset(0);
   if (is_synchronized) {
@@ -301,7 +301,7 @@
   // 12. Call into JNI method end possibly passing a returned reference, the method and the current
   //     thread.
   end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
-  uintptr_t jni_end;
+  ThreadOffset jni_end(-1);
   if (reference_return) {
     // Pass result.
     jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
diff --git a/compiler/leb128_encoder.h b/compiler/leb128_encoder.h
new file mode 100644
index 0000000..e9a1c32
--- /dev/null
+++ b/compiler/leb128_encoder.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LEB128_ENCODER_H_
+#define ART_COMPILER_LEB128_ENCODER_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
+class UnsignedLeb128EncodingVector {
+ public:
+  UnsignedLeb128EncodingVector() {
+  }
+
+  void PushBack(uint32_t value) {
+    bool done = false;
+    do {
+      uint8_t out = value & 0x7f;
+      if (out != value) {
+        data_.push_back(out | 0x80);
+        value >>= 7;
+      } else {
+        data_.push_back(out);
+        done = true;
+      }
+    } while (!done);
+  }
+
+  template<typename It>
+  void InsertBack(It cur, It end) {
+    for (; cur != end; ++cur) {
+      PushBack(*cur);
+    }
+  }
+
+  const std::vector<uint8_t>& GetData() const {
+    return data_;
+  }
+
+ private:
+  std::vector<uint8_t> data_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnsignedLeb128EncodingVector);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_LEB128_ENCODER_H_
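A minimal usage sketch of the new encoder; the expected bytes follow directly from the loop in PushBack (7-bit groups, least significant first, bit 7 set on every byte except the last):

    #include <cassert>
    #include <vector>

    #include "leb128_encoder.h"

    // 0x12345 (74565) splits into the 7-bit groups 0x45, 0x46, 0x04, emitted
    // low-to-high with the continuation bit on the first two bytes.
    void Leb128Example() {
      art::UnsignedLeb128EncodingVector builder;
      builder.PushBack(0x12345);
      const std::vector<uint8_t>& data = builder.GetData();
      assert(data.size() == 3u);
      assert(data[0] == 0xC5);
      assert(data[1] == 0xC6);
      assert(data[2] == 0x04);
    }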
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5eb837b..ce88cf6 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -51,11 +51,14 @@
     size_oat_header_(0),
     size_oat_header_image_file_location_(0),
     size_dex_file_(0),
-    size_interpreter_to_interpreter_entry_(0),
-    size_interpreter_to_quick_entry_(0),
+    size_interpreter_to_interpreter_bridge_(0),
+    size_interpreter_to_compiled_code_bridge_(0),
+    size_jni_dlsym_lookup_(0),
     size_portable_resolution_trampoline_(0),
+    size_portable_to_interpreter_bridge_(0),
     size_quick_resolution_trampoline_(0),
-    size_stubs_alignment_(0),
+    size_quick_to_interpreter_bridge_(0),
+    size_trampoline_alignment_(0),
     size_code_size_(0),
     size_code_(0),
     size_code_alignment_(0),
@@ -176,30 +179,30 @@
   size_executable_offset_alignment_ = offset - old_offset;
   if (compiler_driver_->IsImage()) {
     InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
-    oat_header_->SetInterpreterToInterpreterEntryOffset(offset);
-    interpreter_to_interpreter_entry_.reset(
-        compiler_driver_->CreateInterpreterToInterpreterEntry());
-    offset += interpreter_to_interpreter_entry_->size();
 
-    offset = CompiledCode::AlignCode(offset, instruction_set);
-    oat_header_->SetInterpreterToQuickEntryOffset(offset);
-    interpreter_to_quick_entry_.reset(compiler_driver_->CreateInterpreterToQuickEntry());
-    offset += interpreter_to_quick_entry_->size();
+    #define DO_TRAMPOLINE(field, fn_name) \
+      offset = CompiledCode::AlignCode(offset, instruction_set); \
+      oat_header_->Set ## fn_name ## Offset(offset); \
+      field.reset(compiler_driver_->Create ## fn_name()); \
+      offset += field->size();
 
-    offset = CompiledCode::AlignCode(offset, instruction_set);
-    oat_header_->SetPortableResolutionTrampolineOffset(offset);
-    portable_resolution_trampoline_.reset(compiler_driver_->CreatePortableResolutionTrampoline());
-    offset += portable_resolution_trampoline_->size();
+    DO_TRAMPOLINE(interpreter_to_interpreter_bridge_, InterpreterToInterpreterBridge);
+    DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_, InterpreterToCompiledCodeBridge);
+    DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
+    DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline);
+    DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge);
+    DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
+    DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge);
 
-    offset = CompiledCode::AlignCode(offset, instruction_set);
-    oat_header_->SetQuickResolutionTrampolineOffset(offset);
-    quick_resolution_trampoline_.reset(compiler_driver_->CreateQuickResolutionTrampoline());
-    offset += quick_resolution_trampoline_->size();
+    #undef DO_TRAMPOLINE
   } else {
-    oat_header_->SetInterpreterToInterpreterEntryOffset(0);
-    oat_header_->SetInterpreterToQuickEntryOffset(0);
+    oat_header_->SetInterpreterToInterpreterBridgeOffset(0);
+    oat_header_->SetInterpreterToCompiledCodeBridgeOffset(0);
+    oat_header_->SetJniDlsymLookupOffset(0);
     oat_header_->SetPortableResolutionTrampolineOffset(0);
+    oat_header_->SetPortableToInterpreterBridgeOffset(0);
     oat_header_->SetQuickResolutionTrampolineOffset(0);
+    oat_header_->SetQuickToInterpreterBridgeOffset(0);
   }
   return offset;
 }
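Since DO_TRAMPOLINE builds its identifiers by token pasting, a single expansion makes the pattern concrete; DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup) expands to:

    offset = CompiledCode::AlignCode(offset, instruction_set);
    oat_header_->SetJniDlsymLookupOffset(offset);
    jni_dlsym_lookup_.reset(compiler_driver_->CreateJniDlsymLookup());
    offset += jni_dlsym_lookup_->size();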
@@ -319,12 +322,12 @@
     core_spill_mask = compiled_method->GetCoreSpillMask();
     fp_spill_mask = compiled_method->GetFpSpillMask();
 
-    const std::vector<uint32_t>& mapping_table = compiled_method->GetMappingTable();
+    const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
     size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
     mapping_table_offset = (mapping_table_size == 0) ? 0 : offset;
 
     // Deduplicate mapping tables
-    SafeMap<const std::vector<uint32_t>*, uint32_t>::iterator mapping_iter =
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator mapping_iter =
         mapping_table_offsets_.find(&mapping_table);
     if (mapping_iter != mapping_table_offsets_.end()) {
       mapping_table_offset = mapping_iter->second;
@@ -334,12 +337,12 @@
       oat_header_->UpdateChecksum(&mapping_table[0], mapping_table_size);
     }
 
-    const std::vector<uint16_t>& vmap_table = compiled_method->GetVmapTable();
+    const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
     size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
     vmap_table_offset = (vmap_table_size == 0) ? 0 : offset;
 
     // Deduplicate vmap tables
-    SafeMap<const std::vector<uint16_t>*, uint32_t>::iterator vmap_iter =
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator vmap_iter =
         vmap_table_offsets_.find(&vmap_table);
     if (vmap_iter != vmap_table_offsets_.end()) {
       vmap_table_offset = vmap_iter->second;
@@ -469,11 +472,14 @@
     DO_STAT(size_oat_header_);
     DO_STAT(size_oat_header_image_file_location_);
     DO_STAT(size_dex_file_);
-    DO_STAT(size_interpreter_to_interpreter_entry_);
-    DO_STAT(size_interpreter_to_quick_entry_);
+    DO_STAT(size_interpreter_to_interpreter_bridge_);
+    DO_STAT(size_interpreter_to_compiled_code_bridge_);
+    DO_STAT(size_jni_dlsym_lookup_);
     DO_STAT(size_portable_resolution_trampoline_);
+    DO_STAT(size_portable_to_interpreter_bridge_);
     DO_STAT(size_quick_resolution_trampoline_);
-    DO_STAT(size_stubs_alignment_);
+    DO_STAT(size_quick_to_interpreter_bridge_);
+    DO_STAT(size_trampoline_alignment_);
     DO_STAT(size_code_size_);
     DO_STAT(size_code_);
     DO_STAT(size_code_alignment_);
@@ -545,52 +551,30 @@
   DCHECK_OFFSET();
   if (compiler_driver_->IsImage()) {
     InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
-    if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0],
-                        interpreter_to_interpreter_entry_->size())) {
-      PLOG(ERROR) << "Failed to write interpreter to interpreter entry to " << out.GetLocation();
-      return false;
-    }
-    size_interpreter_to_interpreter_entry_ += interpreter_to_interpreter_entry_->size();
-    relative_offset += interpreter_to_interpreter_entry_->size();
-    DCHECK_OFFSET();
 
-    uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
-    uint32_t alignment_padding = aligned_offset - relative_offset;
-    out.Seek(alignment_padding, kSeekCurrent);
-    size_stubs_alignment_ += alignment_padding;
-    if (!out.WriteFully(&(*interpreter_to_quick_entry_)[0], interpreter_to_quick_entry_->size())) {
-      PLOG(ERROR) << "Failed to write interpreter to quick entry to " << out.GetLocation();
-      return false;
-    }
-    size_interpreter_to_quick_entry_ += interpreter_to_quick_entry_->size();
-    relative_offset += alignment_padding + interpreter_to_quick_entry_->size();
-    DCHECK_OFFSET();
+    #define DO_TRAMPOLINE(field) \
+      do { \
+        uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); \
+        uint32_t alignment_padding = aligned_offset - relative_offset; \
+        out.Seek(alignment_padding, kSeekCurrent); \
+        size_trampoline_alignment_ += alignment_padding; \
+        if (!out.WriteFully(&(*field)[0], field->size())) { \
+          PLOG(ERROR) << "Failed to write " # field " to " << out.GetLocation(); \
+          return false; \
+        } \
+        size_ ## field += field->size(); \
+        relative_offset += alignment_padding + field->size(); \
+        DCHECK_OFFSET(); \
+      } while (false)
 
-    aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
-    alignment_padding = aligned_offset - relative_offset;
-    out.Seek(alignment_padding, kSeekCurrent);
-    size_stubs_alignment_ += alignment_padding;
-    if (!out.WriteFully(&(*portable_resolution_trampoline_)[0],
-                        portable_resolution_trampoline_->size())) {
-      PLOG(ERROR) << "Failed to write portable resolution trampoline to " << out.GetLocation();
-      return false;
-    }
-    size_portable_resolution_trampoline_ += portable_resolution_trampoline_->size();
-    relative_offset += alignment_padding + portable_resolution_trampoline_->size();
-    DCHECK_OFFSET();
-
-    aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
-    alignment_padding = aligned_offset - relative_offset;
-    out.Seek(alignment_padding, kSeekCurrent);
-    size_stubs_alignment_ += alignment_padding;
-    if (!out.WriteFully(&(*quick_resolution_trampoline_)[0],
-                        quick_resolution_trampoline_->size())) {
-      PLOG(ERROR) << "Failed to write quick resolution trampoline to " << out.GetLocation();
-      return false;
-    }
-    size_quick_resolution_trampoline_ += quick_resolution_trampoline_->size();
-    relative_offset += alignment_padding + quick_resolution_trampoline_->size();
-    DCHECK_OFFSET();
+    DO_TRAMPOLINE(interpreter_to_interpreter_bridge_);
+    DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_);
+    DO_TRAMPOLINE(jni_dlsym_lookup_);
+    DO_TRAMPOLINE(portable_resolution_trampoline_);
+    DO_TRAMPOLINE(portable_to_interpreter_bridge_);
+    DO_TRAMPOLINE(quick_resolution_trampoline_);
+    DO_TRAMPOLINE(quick_to_interpreter_bridge_);
+    #undef DO_TRAMPOLINE
   }
   return relative_offset;
 }
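The write-side DO_TRAMPOLINE additionally uses the stringizing operator for the error message and pastes the field name onto size_ for the statistics counter; DO_TRAMPOLINE(jni_dlsym_lookup_) expands to:

    do {
      uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
      uint32_t alignment_padding = aligned_offset - relative_offset;
      out.Seek(alignment_padding, kSeekCurrent);
      size_trampoline_alignment_ += alignment_padding;
      if (!out.WriteFully(&(*jni_dlsym_lookup_)[0], jni_dlsym_lookup_->size())) {
        // # field stringizes to "jni_dlsym_lookup_", concatenated into the message.
        PLOG(ERROR) << "Failed to write jni_dlsym_lookup_ to " << out.GetLocation();
        return false;
      }
      size_jni_dlsym_lookup_ += jni_dlsym_lookup_->size();  // size_ ## field token paste.
      relative_offset += alignment_padding + jni_dlsym_lookup_->size();
      DCHECK_OFFSET();
    } while (false);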
@@ -733,11 +717,11 @@
     DCHECK_OFFSET();
 #endif
 
-    const std::vector<uint32_t>& mapping_table = compiled_method->GetMappingTable();
+    const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
     size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
 
     // Deduplicate mapping tables
-    SafeMap<const std::vector<uint32_t>*, uint32_t>::iterator mapping_iter =
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator mapping_iter =
         mapping_table_offsets_.find(&mapping_table);
     if (mapping_iter != mapping_table_offsets_.end() &&
         relative_offset != method_offsets.mapping_table_offset_) {
@@ -757,11 +741,11 @@
     }
     DCHECK_OFFSET();
 
-    const std::vector<uint16_t>& vmap_table = compiled_method->GetVmapTable();
+    const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
     size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
 
     // Deduplicate vmap tables
-    SafeMap<const std::vector<uint16_t>*, uint32_t>::iterator vmap_iter =
+    SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator vmap_iter =
         vmap_table_offsets_.find(&vmap_table);
     if (vmap_iter != vmap_table_offsets_.end() &&
         relative_offset != method_offsets.vmap_table_offset_) {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index f2c5626..f7801f5 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -181,10 +181,13 @@
   OatHeader* oat_header_;
   std::vector<OatDexFile*> oat_dex_files_;
   std::vector<OatClass*> oat_classes_;
-  UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_entry_;
-  UniquePtr<const std::vector<uint8_t> > interpreter_to_quick_entry_;
+  UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_bridge_;
+  UniquePtr<const std::vector<uint8_t> > interpreter_to_compiled_code_bridge_;
+  UniquePtr<const std::vector<uint8_t> > jni_dlsym_lookup_;
   UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
+  UniquePtr<const std::vector<uint8_t> > portable_to_interpreter_bridge_;
   UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
+  UniquePtr<const std::vector<uint8_t> > quick_to_interpreter_bridge_;
 
   // output stats
   uint32_t size_dex_file_alignment_;
@@ -192,11 +195,14 @@
   uint32_t size_oat_header_;
   uint32_t size_oat_header_image_file_location_;
   uint32_t size_dex_file_;
-  uint32_t size_interpreter_to_interpreter_entry_;
-  uint32_t size_interpreter_to_quick_entry_;
+  uint32_t size_interpreter_to_interpreter_bridge_;
+  uint32_t size_interpreter_to_compiled_code_bridge_;
+  uint32_t size_jni_dlsym_lookup_;
   uint32_t size_portable_resolution_trampoline_;
+  uint32_t size_portable_to_interpreter_bridge_;
   uint32_t size_quick_resolution_trampoline_;
-  uint32_t size_stubs_alignment_;
+  uint32_t size_quick_to_interpreter_bridge_;
+  uint32_t size_trampoline_alignment_;
   uint32_t size_code_size_;
   uint32_t size_code_;
   uint32_t size_code_alignment_;
@@ -220,8 +226,8 @@
 
   // code mappings for deduplication
   SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > code_offsets_;
-  SafeMap<const std::vector<uint16_t>*, uint32_t, MapCompare<std::vector<uint16_t> > > vmap_table_offsets_;
-  SafeMap<const std::vector<uint32_t>*, uint32_t, MapCompare<std::vector<uint32_t> > > mapping_table_offsets_;
+  SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > vmap_table_offsets_;
+  SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > mapping_table_offsets_;
   SafeMap<const std::vector<uint8_t>*, uint32_t, MapCompare<std::vector<uint8_t> > > gc_map_offsets_;
 
   DISALLOW_COPY_AND_ASSIGN(OatWriter);
diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc
deleted file mode 100644
index def43e2..0000000
--- a/compiler/stubs/portable/stubs.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "stubs/stubs.h"
-
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "jni_internal.h"
-#include "utils/arm/assembler_arm.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/x86/assembler_x86.h"
-#include "stack_indirect_reference_table.h"
-#include "sirt_ref.h"
-
-#define __ assembler->
-
-namespace art {
-
-namespace arm {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-  RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
-
-  __ PushList(save);
-  __ LoadFromOffset(kLoadWord, R12, TR,
-                    PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
-  __ mov(R3, ShifterOperand(TR));  // Pass Thread::Current() in R3
-  __ mov(R2, ShifterOperand(SP));  // Pass sp for Method** callee_addr
-  __ IncreaseFrameSize(12);         // 3 words of space for alignment
-  // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
-  __ blx(R12);
-  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
-  __ DecreaseFrameSize(12);
-  __ PopList(save);
-  __ cmp(R12, ShifterOperand(0));
-  __ bx(R12, NE);                   // If R12 != 0 tail call method's code
-  __ bx(LR);                        // Return to caller to handle exception
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-}  // namespace arm
-
-namespace mips {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-  // Build frame and save argument registers and RA.
-  __ AddConstant(SP, SP, -32);
-  __ StoreToOffset(kStoreWord, RA, SP, 28);
-  __ StoreToOffset(kStoreWord, A3, SP, 12);
-  __ StoreToOffset(kStoreWord, A2, SP, 8);
-  __ StoreToOffset(kStoreWord, A1, SP, 4);
-  __ StoreToOffset(kStoreWord, A0, SP, 0);
-
-  __ LoadFromOffset(kLoadWord, T9, S1,
-                    PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
-  __ Move(A3, S1);  // Pass Thread::Current() in A3
-  __ Move(A2, SP);  // Pass SP for Method** callee_addr
-  __ Jalr(T9);  // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
-
-  // Restore frame, argument registers, and RA.
-  __ LoadFromOffset(kLoadWord, A0, SP, 0);
-  __ LoadFromOffset(kLoadWord, A1, SP, 4);
-  __ LoadFromOffset(kLoadWord, A2, SP, 8);
-  __ LoadFromOffset(kLoadWord, A3, SP, 12);
-  __ LoadFromOffset(kLoadWord, RA, SP, 28);
-  __ AddConstant(SP, SP, 32);
-
-  Label resolve_fail;
-  __ EmitBranch(V0, ZERO, &resolve_fail, true);
-  __ Jr(V0);  // If V0 != 0 tail call method's code
-  __ Bind(&resolve_fail, false);
-  __ Jr(RA);  // Return to caller to handle exception
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-}  // namespace mips
-
-namespace x86 {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-
-  __ pushl(EBP);
-  __ movl(EBP, ESP);          // save ESP
-  __ subl(ESP, Immediate(8));  // Align stack
-  __ movl(EAX, Address(EBP, 8));  // Method* called
-  __ leal(EDX, Address(EBP, 8));  // Method** called_addr
-  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // pass thread
-  __ pushl(EDX);  // pass called_addr
-  __ pushl(ECX);  // pass receiver
-  __ pushl(EAX);  // pass called
-  // Call to resolve method.
-  __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
-          X86ManagedRegister::FromCpuRegister(ECX));
-  __ leave();
-
-  Label resolve_fail;  // forward declaration
-  __ cmpl(EAX, Immediate(0));
-  __ j(kEqual, &resolve_fail);
-  __ jmp(EAX);
-  // Tail call to intended method.
-  __ Bind(&resolve_fail);
-  __ ret();
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-}  // namespace x86
-
-}  // namespace art
diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc
deleted file mode 100644
index 912f1c0..0000000
--- a/compiler/stubs/quick/stubs.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "stubs/stubs.h"
-
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "jni_internal.h"
-#include "utils/arm/assembler_arm.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/x86/assembler_x86.h"
-#include "sirt_ref.h"
-#include "stack_indirect_reference_table.h"
-
-#define __ assembler->
-
-namespace art {
-
-namespace arm {
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-  // | Out args |
-  // | Method*  | <- SP on entry
-  // | LR       |    return address into caller
-  // | ...      |    callee saves
-  // | R3       |    possible argument
-  // | R2       |    possible argument
-  // | R1       |    possible argument
-  // | R0       |    junk on call to QuickResolutionTrampolineFromCode, holds result Method*
-  // | Method*  |    Callee save Method* set up by QuickResoltuionTrampolineFromCode
-  // Save callee saves and ready frame for exception delivery
-  RegList save = (1 << R1) | (1 << R2) | (1 << R3) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) |
-                 (1 << R10) | (1 << R11) | (1 << LR);
-  // TODO: enable when GetCalleeSaveMethod is available at stub generation time
-  // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
-  __ PushList(save);
-  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
-  __ mov(R3, ShifterOperand(TR));  // Pass Thread::Current() in R3
-  __ IncreaseFrameSize(8);         // 2 words of space for alignment
-  __ mov(R2, ShifterOperand(SP));  // Pass SP
-  // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
-  __ blx(R12);
-  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
-  // Restore registers which may have been modified by GC, "R0" will hold the Method*
-  __ DecreaseFrameSize(4);
-  __ PopList((1 << R0) | save);
-  __ bx(R12);  // Leaf call to method's code
-  __ bkpt(0);
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-
-  __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
-  __ bkpt(0);
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-
-  __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
-  __ bkpt(0);
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-}  // namespace arm
-
-namespace mips {
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-  // | Out args   |
-  // | Method*    | <- SP on entry
-  // | RA         |    return address into caller
-  // | ...        |    callee saves
-  // | A3         |    possible argument
-  // | A2         |    possible argument
-  // | A1         |    possible argument
-  // | A0/Method* |    Callee save Method* set up by UnresolvedDirectMethodTrampolineFromCode
-  // Save callee saves and ready frame for exception delivery
-  __ AddConstant(SP, SP, -64);
-  __ StoreToOffset(kStoreWord, RA, SP, 60);
-  __ StoreToOffset(kStoreWord, FP, SP, 56);
-  __ StoreToOffset(kStoreWord, GP, SP, 52);
-  __ StoreToOffset(kStoreWord, S7, SP, 48);
-  __ StoreToOffset(kStoreWord, S6, SP, 44);
-  __ StoreToOffset(kStoreWord, S5, SP, 40);
-  __ StoreToOffset(kStoreWord, S4, SP, 36);
-  __ StoreToOffset(kStoreWord, S3, SP, 32);
-  __ StoreToOffset(kStoreWord, S2, SP, 28);
-  __ StoreToOffset(kStoreWord, A3, SP, 12);
-  __ StoreToOffset(kStoreWord, A2, SP, 8);
-  __ StoreToOffset(kStoreWord, A1, SP, 4);
-
-  __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
-  __ Move(A3, S1);  // Pass Thread::Current() in A3
-  __ Move(A2, SP);  // Pass SP for Method** callee_addr
-  __ Jalr(T9);  // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
-
-  // Restore registers which may have been modified by GC
-  __ LoadFromOffset(kLoadWord, A0, SP, 0);
-  __ LoadFromOffset(kLoadWord, A1, SP, 4);
-  __ LoadFromOffset(kLoadWord, A2, SP, 8);
-  __ LoadFromOffset(kLoadWord, A3, SP, 12);
-  __ LoadFromOffset(kLoadWord, S2, SP, 28);
-  __ LoadFromOffset(kLoadWord, S3, SP, 32);
-  __ LoadFromOffset(kLoadWord, S4, SP, 36);
-  __ LoadFromOffset(kLoadWord, S5, SP, 40);
-  __ LoadFromOffset(kLoadWord, S6, SP, 44);
-  __ LoadFromOffset(kLoadWord, S7, SP, 48);
-  __ LoadFromOffset(kLoadWord, GP, SP, 52);
-  __ LoadFromOffset(kLoadWord, FP, SP, 56);
-  __ LoadFromOffset(kLoadWord, RA, SP, 60);
-  __ AddConstant(SP, SP, 64);
-
-  __ Move(T9, V0);  // Put method's code in T9
-  __ Jr(T9);  // Leaf call to method's code
-
-  __ Break();
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-
-  __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
-  __ Jr(T9);
-  __ Break();
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-
-  __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
-  __ Jr(T9);
-  __ Break();
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-}  // namespace mips
-
-namespace x86 {
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-  // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
-  // return address
-  __ pushl(EDI);
-  __ pushl(ESI);
-  __ pushl(EBP);
-  __ pushl(EBX);
-  __ pushl(EDX);
-  __ pushl(ECX);
-  __ pushl(EAX);  // <-- callee save Method* to go here
-  __ movl(EDX, ESP);          // save ESP
-  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // pass Thread*
-  __ pushl(EDX);              // pass ESP for Method*
-  __ pushl(ECX);              // pass receiver
-  __ pushl(EAX);              // pass Method*
-
-  // Call to resolve method.
-  __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
-          X86ManagedRegister::FromCpuRegister(ECX));
-
-  __ movl(EDI, EAX);  // save code pointer in EDI
-  __ addl(ESP, Immediate(16));  // Pop arguments
-  __ popl(EAX);  // Restore args.
-  __ popl(ECX);
-  __ popl(EDX);
-  __ popl(EBX);
-  __ popl(EBP);  // Restore callee saves.
-  __ popl(ESI);
-  // Swap EDI callee save with code pointer
-  __ xchgl(EDI, Address(ESP, 0));
-  // Tail call to intended method.
-  __ ret();
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-
-  __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-
-  __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-}  // namespace x86
-
-}  // namespace art
diff --git a/compiler/stubs/stubs.h b/compiler/stubs/stubs.h
deleted file mode 100644
index d85eae8..0000000
--- a/compiler/stubs/stubs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_STUBS_STUBS_H_
-#define ART_COMPILER_STUBS_STUBS_H_
-
-#include "runtime.h"
-
-namespace art {
-
-namespace arm {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-}
-
-namespace mips {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-}
-
-namespace x86 {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_STUBS_STUBS_H_
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
new file mode 100644
index 0000000..32ae558
--- /dev/null
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "trampoline_compiler.h"
+
+#include "jni_internal.h"
+#include "utils/arm/assembler_arm.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/x86/assembler_x86.h"
+
+#define __ assembler->
+
+namespace art {
+
+namespace arm {
+static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
+                                                    ThreadOffset offset) {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+
+  switch (abi) {
+    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
+      __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
+      break;
+    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
+      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
+      break;
+    case kPortableAbi:  // R9 holds Thread*.
+    case kQuickAbi:  // Fall-through.
+      __ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
+  }
+  __ bkpt(0);
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+}  // namespace arm
+
+namespace mips {
+static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
+                                                    ThreadOffset offset) {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+
+  switch (abi) {
+    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
+      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
+      break;
+    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
+      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
+      break;
+    case kPortableAbi:  // S1 holds Thread*.
+    case kQuickAbi:  // Fall-through.
+      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
+  }
+  __ Jr(T9);
+  __ Nop();
+  __ Break();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+}  // namespace mips
+
+namespace x86 {
+static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset offset) {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  // All x86 trampolines call via the Thread* held in fs.
+  __ fs()->jmp(Address::Absolute(offset));
+  __ int3();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+}  // namespace x86
+
+const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa, EntryPointCallingConvention abi,
+                                             ThreadOffset offset) {
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      return arm::CreateTrampoline(abi, offset);
+    case kMips:
+      return mips::CreateTrampoline(abi, offset);
+    case kX86:
+      return x86::CreateTrampoline(offset);
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << isa;
+      return NULL;
+  }
+}
+
+}  // namespace art
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
new file mode 100644
index 0000000..21245db
--- /dev/null
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_TRAMPOLINES_TRAMPOLINE_COMPILER_H_
+#define ART_COMPILER_TRAMPOLINES_TRAMPOLINE_COMPILER_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "locks.h"
+#include "driver/compiler_driver.h"
+
+namespace art {
+
+// Create code that will invoke the function held in thread local storage.
+const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa, EntryPointCallingConvention abi,
+                                             ThreadOffset entry_point_offset)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_TRAMPOLINES_TRAMPOLINE_COMPILER_H_
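With the CompilerDriver plumbing above, producing any image trampoline is one call into this interface. A usage sketch, reusing the kQuickAbi constant and QUICK_ENTRYPOINT_OFFSET macro that appear earlier in this patch:

    // For ARM/quick this emits a load of pQuickResolutionTrampoline from the
    // Thread* held in R9 followed by a jump, with a trailing bkpt as a
    // fall-through guard (see trampoline_compiler.cc above).
    UniquePtr<const std::vector<uint8_t> > code(
        CreateTrampoline(kArm, kQuickAbi,
                         QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampoline)));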
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index fa202c3..f0d11d8 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1246,10 +1246,10 @@
 // Implementation note: this method must emit at most one instruction when
 // Address::CanHoldLoadOffset.
 void ArmAssembler::LoadFromOffset(LoadOperandType type,
-                               Register reg,
-                               Register base,
-                               int32_t offset,
-                               Condition cond) {
+                                  Register reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
   if (!Address::CanHoldLoadOffset(type, offset)) {
     CHECK(base != IP);
     LoadImmediate(IP, offset, cond);
@@ -1884,7 +1884,7 @@
   // Don't care about preserving R0 as this call won't return
   __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
   // Set up call to Thread::Current()->pDeliverException
-  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
+  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value());
   __ blx(R12);
   // Call never returns
   __ bkpt(0);
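
[Editor's note] The recurring change in this patch is that QUICK_ENTRYPOINT_OFFSET(...)
now yields a ThreadOffset which must be unpacked explicitly with .Int32Value(), as in
the hunk above, instead of converting silently to an integer. A minimal sketch of why
such a wrapper catches mistakes at compile time; the real class lives in the runtime
headers, and this reduced version is an assumption.

    #include <stdint.h>

    class ThreadOffset {
     public:
      explicit ThreadOffset(int32_t value) : value_(value) {}
      int32_t Int32Value() const { return value_; }  // the only way back to int
     private:
      int32_t value_;  // no implicit conversion, so offsets can't be mixed up
    };

    // A helper taking int32_t now rejects a ThreadOffset argument unless the
    // call site spells out ...Int32Value(), and LoadHelper(ThreadOffset) in
    // turn rejects a bare integer.
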
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 931d7ab..2be3d56 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -813,14 +813,7 @@
 
 void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no arm implementation";
-#if 0
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  CHECK_EQ(size, 4u);
-  movl(scratch, Address(ESP, src_base));
-  movl(scratch, Address(scratch, src_offset));
-  movl(Address(ESP, dest), scratch);
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
@@ -834,24 +827,11 @@
 
 void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no arm implementation";
-#if 0
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  CHECK_EQ(size, 4u);
-  CHECK_EQ(dest.Int32Value(), src.Int32Value());
-  movl(scratch, Address(ESP, src));
-  pushl(Address(scratch, src_offset));
-  popl(Address(scratch, dest_offset));
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::MemoryBarrier(ManagedRegister) {
-  UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED";
-#if 0
-#if ANDROID_SMP != 0
-  mfence();
-#endif
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
@@ -953,10 +933,7 @@
 }
 
 void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) {
-  UNIMPLEMENTED(FATAL) << "no arm implementation";
-#if 0
-  fs()->call(Address::Absolute(offset));
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
@@ -988,7 +965,7 @@
   // Don't care about preserving A0 as this call won't return
   __ Move(A0, scratch_.AsCoreRegister());
   // Set up call to Thread::Current()->pDeliverException
-  __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
+  __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value());
   __ Jr(T9);
   // Call never returns
   __ Break();
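
[Editor's note] The MIPS assembler now fails fast with UNIMPLEMENTED(FATAL), with the
copy-pasted "arm" wording and dead #if 0 x86 code removed. The macro itself is defined
in ART's logging support; the stream-based form below is offered as an assumption about
its shape, not the exact definition.

    // Assumed shape of the logging macro used above:
    #define UNIMPLEMENTED(level) \
      LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "

    // So UNIMPLEMENTED(FATAL) << "no mips implementation"; logs the enclosing
    // function name and aborts, catching a missing port on first use rather
    // than executing stale wrong-architecture code.
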
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 0a34686..a717f19 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -35,6 +35,7 @@
 #include "gc/space/space-inl.h"
 #include "image.h"
 #include "indenter.h"
+#include "mapping_table.h"
 #include "mirror/abstract_method-inl.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
@@ -48,6 +49,7 @@
 #include "safe_map.h"
 #include "scoped_thread_state_change.h"
 #include "verifier/method_verifier.h"
+#include "vmap_table.h"
 
 namespace art {
 
@@ -390,40 +392,39 @@
   }
 
   void DumpVmap(std::ostream& os, const OatFile::OatMethod& oat_method) {
-    const uint16_t* raw_table = oat_method.GetVmapTable();
-    if (raw_table == NULL) {
-      return;
-    }
-    const VmapTable vmap_table(raw_table);
-    bool first = true;
-    bool processing_fp = false;
-    uint32_t spill_mask = oat_method.GetCoreSpillMask();
-    for (size_t i = 0; i < vmap_table.size(); i++) {
-      uint16_t dex_reg = vmap_table[i];
-      uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
-                                                    processing_fp ? kFloatVReg : kIntVReg);
-      os << (first ? "v" : ", v")  << dex_reg;
-      if (!processing_fp) {
-        os << "/r" << cpu_reg;
-      } else {
-        os << "/fr" << cpu_reg;
+    const uint8_t* raw_table = oat_method.GetVmapTable();
+    if (raw_table != NULL) {
+      const VmapTable vmap_table(raw_table);
+      bool first = true;
+      bool processing_fp = false;
+      uint32_t spill_mask = oat_method.GetCoreSpillMask();
+      for (size_t i = 0; i < vmap_table.Size(); i++) {
+        uint16_t dex_reg = vmap_table[i];
+        uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
+                                                      processing_fp ? kFloatVReg : kIntVReg);
+        os << (first ? "v" : ", v") << dex_reg;
+        if (!processing_fp) {
+          os << "/r" << cpu_reg;
+        } else {
+          os << "/fr" << cpu_reg;
+        }
+        first = false;
+        if (!processing_fp && dex_reg == 0xFFFF) {
+          processing_fp = true;
+          spill_mask = oat_method.GetFpSpillMask();
+        }
       }
-      first = false;
-      if (!processing_fp && dex_reg == 0xFFFF) {
-        processing_fp = true;
-        spill_mask = oat_method.GetFpSpillMask();
-      }
+      os << "\n";
     }
-    os << "\n";
   }
 
   void DescribeVReg(std::ostream& os, const OatFile::OatMethod& oat_method,
                     const DexFile::CodeItem* code_item, size_t reg, VRegKind kind) {
-    const uint16_t* raw_table = oat_method.GetVmapTable();
+    const uint8_t* raw_table = oat_method.GetVmapTable();
     if (raw_table != NULL) {
       const VmapTable vmap_table(raw_table);
       uint32_t vmap_offset;
-      if (vmap_table.IsInContext(reg, vmap_offset, kind)) {
+      if (vmap_table.IsInContext(reg, kind, &vmap_offset)) {
         bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
         uint32_t spill_mask = is_float ? oat_method.GetFpSpillMask()
                                        : oat_method.GetCoreSpillMask();
@@ -471,67 +472,50 @@
   }
 
   void DumpMappingTable(std::ostream& os, const OatFile::OatMethod& oat_method) {
-    const uint32_t* raw_table = oat_method.GetMappingTable();
     const void* code = oat_method.GetCode();
-    if (raw_table == NULL || code == NULL) {
+    if (code == NULL) {
       return;
     }
-
-    ++raw_table;
-    uint32_t length = *raw_table;
-    ++raw_table;
-    if (length == 0) {
-      return;
-    }
-    uint32_t pc_to_dex_entries = *raw_table;
-    ++raw_table;
-    if (pc_to_dex_entries != 0) {
-      os << "suspend point mappings {\n";
-    } else {
-      os << "catch entry mappings {\n";
-    }
-    Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
-    std::ostream indent_os(&indent_filter);
-    for (size_t i = 0; i < length; i += 2) {
-      const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(code) + raw_table[i];
-      uint32_t dex_pc = raw_table[i + 1];
-      indent_os << StringPrintf("%p -> 0x%04x\n", native_pc, dex_pc);
-      if (i + 2 == pc_to_dex_entries && pc_to_dex_entries != length) {
-        // Separate the pc -> dex from dex -> pc sections
-        indent_os << std::flush;
-        os << "}\ncatch entry mappings {\n";
+    MappingTable table(oat_method.GetMappingTable());
+    if (table.TotalSize() != 0) {
+      Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
+      std::ostream indent_os(&indent_filter);
+      if (table.PcToDexSize() != 0) {
+        typedef MappingTable::PcToDexIterator It;
+        os << "suspend point mappings {\n";
+        for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+          indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
+        }
+        os << "}\n";
+      }
+      if (table.DexToPcSize() != 0) {
+        typedef MappingTable::DexToPcIterator It;
+        os << "catch entry mappings {\n";
+        for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+          indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc());
+        }
+        os << "}\n";
       }
     }
-    os << "}\n";
   }
 
-  uint32_t DumpMappingAtOffset(std::ostream& os, const OatFile::OatMethod& oat_method, size_t offset,
-                               bool suspend_point_mapping) {
-    const uint32_t* raw_table = oat_method.GetMappingTable();
-    if (raw_table != NULL) {
-      ++raw_table;
-      uint32_t length = *raw_table;
-      ++raw_table;
-      uint32_t pc_to_dex_entries = *raw_table;
-      ++raw_table;
-      size_t start, end;
-      if (suspend_point_mapping) {
-        start = 0;
-        end = pc_to_dex_entries;
-      } else {
-        start = pc_to_dex_entries;
-        end = length;
+  uint32_t DumpMappingAtOffset(std::ostream& os, const OatFile::OatMethod& oat_method,
+                               size_t offset, bool suspend_point_mapping) {
+    MappingTable table(oat_method.GetMappingTable());
+    if (suspend_point_mapping && table.PcToDexSize() > 0) {
+      typedef MappingTable::PcToDexIterator It;
+      for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+        if (offset == cur.NativePcOffset()) {
+          os << "suspend point dex PC: 0x" << cur.DexPc() << "\n";
+          return cur.DexPc();
+        }
       }
-      for (size_t i = start; i < end; i += 2) {
-        if (offset == raw_table[i]) {
-          uint32_t dex_pc = raw_table[i + 1];
-          if (suspend_point_mapping) {
-            os << "suspend point dex PC: 0x";
-          } else {
-            os << "catch entry dex PC: 0x";
-          }
-          os << std::hex << dex_pc << std::dec << "\n";
-          return dex_pc;
+    } else if (!suspend_point_mapping && table.DexToPcSize() > 0) {
+      typedef MappingTable::DexToPcIterator It;
+      for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+        if (offset == cur.NativePcOffset()) {
+          os << "catch entry dex PC: 0x" << cur.DexPc() << "\n";
+          return cur.DexPc();
         }
       }
     }
@@ -1019,13 +1003,13 @@
         }
 
         size_t pc_mapping_table_bytes =
-            state->ComputeOatSize(method->GetMappingTableRaw(), &first_occurrence);
+            state->ComputeOatSize(method->GetMappingTable(), &first_occurrence);
         if (first_occurrence) {
           state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
         }
 
         size_t vmap_table_bytes =
-            state->ComputeOatSize(method->GetVmapTableRaw(), &first_occurrence);
+            state->ComputeOatSize(method->GetVmapTable(), &first_occurrence);
         if (first_occurrence) {
           state->stats_.vmap_table_bytes += vmap_table_bytes;
         }
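
[Editor's note] GetVmapTable() and GetMappingTable() now return const uint8_t* and are
consumed through accessor classes (Size(), iterators) rather than indexed as raw 16- or
32-bit word arrays, which points at a variable-length byte encoding. Below is a sketch
of ULEB128 decoding, the usual such encoding in the dex/ART toolchain; that these tables
use exactly this format is an assumption here, the authoritative definitions being
mapping_table.h and vmap_table.h.

    #include <stdint.h>

    // ULEB128: 7 payload bits per byte, high bit set means "more bytes follow".
    static uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
      const uint8_t* ptr = *data;
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *ptr++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      *data = ptr;  // advance the caller's cursor past the decoded value
      return result;
    }
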
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 51bb3eb..4f25c00 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -142,6 +142,7 @@
 	arch/x86/registers_x86.cc \
 	arch/mips/registers_mips.cc \
 	entrypoints/entrypoint_utils.cc \
+	entrypoints/interpreter/interpreter_entrypoints.cc \
 	entrypoints/jni/jni_entrypoints.cc \
 	entrypoints/math_entrypoints.cc \
 	entrypoints/portable/portable_alloc_entrypoints.cc \
@@ -163,15 +164,13 @@
 	entrypoints/quick/quick_field_entrypoints.cc \
 	entrypoints/quick/quick_fillarray_entrypoints.cc \
 	entrypoints/quick/quick_instrumentation_entrypoints.cc \
-	entrypoints/quick/quick_interpreter_entrypoints.cc \
 	entrypoints/quick/quick_invoke_entrypoints.cc \
 	entrypoints/quick/quick_jni_entrypoints.cc \
 	entrypoints/quick/quick_lock_entrypoints.cc \
 	entrypoints/quick/quick_math_entrypoints.cc \
-	entrypoints/quick/quick_proxy_entrypoints.cc \
-	entrypoints/quick/quick_stub_entrypoints.cc \
 	entrypoints/quick/quick_thread_entrypoints.cc \
-	entrypoints/quick/quick_throw_entrypoints.cc
+	entrypoints/quick/quick_throw_entrypoints.cc \
+	entrypoints/quick/quick_trampoline_entrypoints.cc
 
 LIBART_TARGET_SRC_FILES := \
 	$(LIBART_COMMON_SRC_FILES) \
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index ed655e9..559788f 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -35,4 +35,11 @@
     .size \name, .-\name
 .endm
 
+.macro UNIMPLEMENTED name
+    ENTRY \name
+    bkpt
+    bkpt
+    END \name
+.endm
+
 #endif  // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index b71a158..848bacc 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "entrypoints/interpreter/interpreter_entrypoints.h"
 #include "entrypoints/portable/portable_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
@@ -21,49 +22,61 @@
 
 namespace art {
 
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                 const DexFile::CodeItem* code_item,
+                                                 ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+                                           const DexFile::CodeItem* code_item,
+                                           ShadowFrame* shadow_frame, JValue* result);
+
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+
 // Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
 
 // Cast entrypoints.
 extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
                                             const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
+extern "C" void art_quick_can_put_array_element(void*, void*);
+extern "C" void art_quick_check_cast(void*, void*);
 
 // DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Exception entrypoints.
 extern "C" void* GetAndClearException(Thread*);
 
 // Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
 
 // FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
 
 // Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
 
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -93,26 +106,14 @@
 extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
 extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Intrinsic entrypoints.
 extern "C" int32_t __memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 
 // Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
-                                                       mirror::Object* receiver,
-                                                       mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -125,49 +126,61 @@
 extern "C" void art_quick_test_suspend();
 
 // Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+  // Interpreter
+  ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+  ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
+
+  // JNI
+  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+  // Portable
+  ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+  ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
   // Alloc
-  qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
-  qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
-  qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
-  qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
-  qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
-  qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+  qpoints->pAllocArray = art_quick_alloc_array;
+  qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+  qpoints->pAllocObject = art_quick_alloc_object;
+  qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+  qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+  qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
 
   // Cast
-  qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
-  qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
-  qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pCanPutArrayElement = art_quick_can_put_array_element;
+  qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
-  qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
-  qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
-  qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
-  qpoints->pSet32Instance = art_quick_set32_instance_from_code;
-  qpoints->pSet32Static = art_quick_set32_static_from_code;
-  qpoints->pSet64Instance = art_quick_set64_instance_from_code;
-  qpoints->pSet64Static = art_quick_set64_static_from_code;
-  qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
-  qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
-  qpoints->pGet32Instance = art_quick_get32_instance_from_code;
-  qpoints->pGet64Instance = art_quick_get64_instance_from_code;
-  qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
-  qpoints->pGet32Static = art_quick_get32_static_from_code;
-  qpoints->pGet64Static = art_quick_get64_static_from_code;
-  qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+  qpoints->pSet32Instance = art_quick_set32_instance;
+  qpoints->pSet32Static = art_quick_set32_static;
+  qpoints->pSet64Instance = art_quick_set64_instance;
+  qpoints->pSet64Static = art_quick_set64_static;
+  qpoints->pSetObjInstance = art_quick_set_obj_instance;
+  qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGet32Instance = art_quick_get32_instance;
+  qpoints->pGet64Instance = art_quick_get64_instance;
+  qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGet32Static = art_quick_get32_static;
+  qpoints->pGet64Static = art_quick_get64_static;
+  qpoints->pGetObjStatic = art_quick_get_obj_static;
 
   // FillArray
-  qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+  qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
 
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
@@ -178,8 +191,8 @@
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
 
   // Locks
-  qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
-  qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+  qpoints->pLockObject = art_quick_lock_object;
+  qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
   qpoints->pCmpgDouble = CmpgDouble;
@@ -203,10 +216,6 @@
   qpoints->pShrLong = art_quick_shr_long;
   qpoints->pUshrLong = art_quick_ushr_long;
 
-  // Interpreter
-  qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
-  qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
   qpoints->pMemcmp16 = __memcmp16;
@@ -214,7 +223,8 @@
   qpoints->pMemcpy = memcpy;
 
   // Invocation
-  qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
   qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
@@ -223,19 +233,16 @@
   qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
-  qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
-  qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+  qpoints->pCheckSuspend = CheckSuspendFromCode;
+  qpoints->pTestSuspend = art_quick_test_suspend;
 
   // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception_from_code;
-  qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
-  qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
-  qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
-  qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
-  qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-
-  // Portable
-  ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+  qpoints->pDeliverException = art_quick_deliver_exception;
+  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+  qpoints->pThrowDivZero = art_quick_throw_div_zero;
+  qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 };
 
 }  // namespace art
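
[Editor's note] After this change the entrypoints hang off Thread as four tables
(interpreter, JNI, portable, quick) populated by InitEntryPoints, and compiled code
reaches them by fixed displacement from the thread register. A sketch of how a
QUICK_ENTRYPOINT_OFFSET-style macro can be built from that layout; the struct names
and field order are illustrative assumptions.

    #include <cstddef>

    struct QuickEntryPointsSketch {  // stand-in for the real QuickEntryPoints
      void* pLockObject;
      void* pUnlockObject;
      // ... remaining quick entrypoints, in declaration order ...
    };

    struct ThreadSketch {            // stand-in for the real art::Thread
      QuickEntryPointsSketch quick_entrypoints_;
    };

    // Byte displacement of the pLockObject slot from the thread base, i.e.
    // the value a stub reads via LoadWordDisp(rARM_SELF, ..., rARM_LR):
    #define QUICK_ENTRYPOINT_OFFSET_SKETCH(field)    \
      (offsetof(ThreadSketch, quick_entrypoints_) +  \
       offsetof(QuickEntryPointsSketch, field))
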
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 0a0d06a..f51f121 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -16,6 +16,8 @@
 
 #include "asm_support_arm.S"
 
+    .cfi_sections   .debug_frame
+
     /*
      * Jni dlsym lookup stub.
      */
@@ -28,8 +30,7 @@
     sub    sp, #12                        @ pad stack pointer to align frame
     .pad #12
     .cfi_adjust_cfa_offset 12
-    mov    r0, r9                         @ pass Thread::Current
-    blx    artFindNativeMethod            @ (Thread*)
+    blx    artFindNativeMethod
     mov    r12, r0                        @ save result in r12
     add    sp, #12                        @ restore stack pointer
     .cfi_adjust_cfa_offset -12
@@ -44,7 +45,7 @@
      * Entry point of native methods when JNI bug compatibility is enabled.
      */
     .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
+ENTRY art_work_around_app_jni_bugs
     @ save registers that may contain arguments and LR that will be crushed by a call
     push {r0-r3, lr}
     .save {r0-r3, lr}
@@ -62,4 +63,4 @@
     pop {r0-r3, lr}  @ restore possibly modified argument registers
     .cfi_adjust_cfa_offset -16
     bx  r12          @ tail call into JNI routine
-END art_quick_work_around_app_jni_bugs
+END art_work_around_app_jni_bugs
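
[Editor's note] The dlsym stub above no longer marshals Thread::Current() into r0
before calling artFindNativeMethod, which implies the C side now fetches the thread
itself. Only the assembly call site appears in this patch, so the declaration below
is a reconstruction, not a quotation.

    struct Thread {
      static Thread* Current();  // e.g. read from TLS / the reserved register
    };

    // Assumed runtime-side counterpart: takes no arguments and returns the
    // native code address, which the stub saves in r12 and tail-calls.
    extern "C" const void* artFindNativeMethod();
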
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index 4cc6654..adfd22b 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -16,6 +16,8 @@
 
 #include "asm_support_arm.S"
 
+    .cfi_sections   .debug_frame
+
     /*
      * Portable invocation stub.
      * On entry:
@@ -94,3 +96,6 @@
     .cfi_adjust_cfa_offset -48
     bx      lr                     @ return
 END art_portable_proxy_invoke_handler
+
+UNIMPLEMENTED art_portable_resolution_trampoline
+UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 9b8d238..d9bb433 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -16,6 +16,8 @@
 
 #include "asm_support_arm.S"
 
+    .cfi_sections   .debug_frame
+
     /* Deliver the given exception */
     .extern artDeliverExceptionFromCode
     /* Deliver an exception pending on a thread */
@@ -157,33 +159,33 @@
      * Called by managed code, saves callee saves and then calls artThrowException
      * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
 
     /*
      * Called by managed code to create and deliver a NullPointerException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
 
     /*
      * Called by managed code to create and deliver an ArithmeticException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
 
     /*
      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
      * index, arg2 holds limit.
      */
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
 
     /*
      * Called by managed code to create and deliver a StackOverflowError.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
 
     /*
      * Called by managed code to create and deliver a NoSuchMethodError.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
 
     /*
      * All generated callsites for interface invokes and invocation slow paths will load arguments
@@ -294,7 +296,7 @@
      * failure.
      */
     .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data_from_code
+ENTRY art_quick_handle_fill_data
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
     mov    r2, r9                          @ pass Thread::Current
     mov    r3, sp                          @ pass SP
@@ -303,25 +305,25 @@
     cmp    r0, #0                          @ success?
     bxeq   lr                              @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_handle_fill_data_from_code
+END art_quick_handle_fill_data
 
     /*
      * Entry from managed code that calls artLockObjectFromCode, may block for GC.
      */
     .extern artLockObjectFromCode
-ENTRY art_quick_lock_object_from_code
+ENTRY art_quick_lock_object
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case we block
     mov    r1, r9                     @ pass Thread::Current
     mov    r2, sp                     @ pass SP
     bl     artLockObjectFromCode      @ (Object* obj, Thread*, SP)
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
-END art_quick_lock_object_from_code
+END art_quick_lock_object
 
     /*
      * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
      */
     .extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object_from_code
+ENTRY art_quick_unlock_object
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
     mov    r1, r9                   @ pass Thread::Current
     mov    r2, sp                   @ pass SP
@@ -330,13 +332,13 @@
     cmp    r0, #0                   @ success?
     bxeq   lr                       @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_unlock_object_from_code
+END art_quick_unlock_object
 
     /*
      * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
      */
     .extern artCheckCastFromCode
-ENTRY art_quick_check_cast_from_code
+ENTRY art_quick_check_cast
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
     mov    r2, r9                       @ pass Thread::Current
     mov    r3, sp                       @ pass SP
@@ -345,14 +347,14 @@
     cmp    r0, #0                       @ success?
     bxeq   lr                           @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_check_cast_from_code
+END art_quick_check_cast
 
     /*
      * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
      * failure.
      */
     .extern artCanPutArrayElementFromCode
-ENTRY art_quick_can_put_array_element_from_code
+ENTRY art_quick_can_put_array_element
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
     mov    r2, r9                         @ pass Thread::Current
     mov    r3, sp                         @ pass SP
@@ -361,7 +363,7 @@
     cmp    r0, #0                         @ success?
     bxeq   lr                             @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_can_put_array_element_from_code
+END art_quick_can_put_array_element
 
     /*
      * Entry from managed code when uninitialized static storage, this stub will run the class
@@ -369,7 +371,7 @@
      * returned.
      */
     .extern artInitializeStaticStorageFromCode
-ENTRY art_quick_initialize_static_storage_from_code
+ENTRY art_quick_initialize_static_storage
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
     mov    r2, r9                              @ pass Thread::Current
     mov    r3, sp                              @ pass SP
@@ -379,13 +381,13 @@
     cmp    r0, #0                              @ success if result is non-null
     bxne   lr                                  @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_initialize_static_storage_from_code
+END art_quick_initialize_static_storage
 
     /*
      * Entry from managed code when dex cache misses for a type_idx
      */
     .extern artInitializeTypeFromCode
-ENTRY art_quick_initialize_type_from_code
+ENTRY art_quick_initialize_type
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
     mov    r2, r9                              @ pass Thread::Current
     mov    r3, sp                              @ pass SP
@@ -395,14 +397,14 @@
     cmp    r0, #0                              @ success if result is non-null
     bxne   lr                                  @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_initialize_type_from_code
+END art_quick_initialize_type
 
     /*
      * Entry from managed code when type_idx needs to be checked for access and dex cache may also
      * miss.
      */
     .extern artInitializeTypeAndVerifyAccessFromCode
-ENTRY art_quick_initialize_type_and_verify_access_from_code
+ENTRY art_quick_initialize_type_and_verify_access
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
     mov    r2, r9                              @ pass Thread::Current
     mov    r3, sp                              @ pass SP
@@ -412,13 +414,13 @@
     cmp    r0, #0                              @ success if result is non-null
     bxne   lr                                  @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_initialize_type_and_verify_access_from_code
+END art_quick_initialize_type_and_verify_access
 
     /*
      * Called by managed code to resolve a static field and load a 32-bit primitive value.
      */
     .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static_from_code
+ENTRY art_quick_get32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r1, [sp, #32]                 @ pass referrer
     mov    r2, r9                        @ pass Thread::Current
@@ -429,13 +431,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get32_static_from_code
+END art_quick_get32_static
 
     /*
      * Called by managed code to resolve a static field and load a 64-bit primitive value.
      */
     .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static_from_code
+ENTRY art_quick_get64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r1, [sp, #32]                 @ pass referrer
     mov    r2, r9                        @ pass Thread::Current
@@ -446,13 +448,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq    lr                           @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get64_static_from_code
+END art_quick_get64_static
 
     /*
      * Called by managed code to resolve a static field and load an object reference.
      */
     .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static_from_code
+ENTRY art_quick_get_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r1, [sp, #32]                 @ pass referrer
     mov    r2, r9                        @ pass Thread::Current
@@ -463,13 +465,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get_obj_static_from_code
+END art_quick_get_obj_static
 
     /*
      * Called by managed code to resolve an instance field and load a 32-bit primitive value.
      */
     .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance_from_code
+ENTRY art_quick_get32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -482,13 +484,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get32_instance_from_code
+END art_quick_get32_instance
 
     /*
      * Called by managed code to resolve an instance field and load a 64-bit primitive value.
      */
     .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance_from_code
+ENTRY art_quick_get64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -504,13 +506,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq    lr                           @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get64_instance_from_code
+END art_quick_get64_instance
 
     /*
      * Called by managed code to resolve an instance field and load an object reference.
      */
     .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance_from_code
+ENTRY art_quick_get_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -526,13 +528,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get_obj_instance_from_code
+END art_quick_get_obj_instance
 
     /*
      * Called by managed code to resolve a static field and store a 32-bit primitive value.
      */
     .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static_from_code
+ENTRY art_quick_set32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -547,14 +549,14 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set32_static_from_code
+END art_quick_set32_static
 
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
      * On entry r0 holds field index, r1:r2 hold new_val
      */
     .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static_from_code
+ENTRY art_quick_set64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     mov    r3, r2                        @ pass one half of wide argument
     mov    r2, r1                        @ pass other half of wide argument
@@ -573,13 +575,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set64_static_from_code
+END art_quick_set64_static
 
     /*
      * Called by managed code to resolve a static field and store an object reference.
      */
     .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static_from_code
+ENTRY art_quick_set_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -594,13 +596,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set_obj_static_from_code
+END art_quick_set_obj_static
 
     /*
      * Called by managed code to resolve an instance field and store a 32-bit primitive value.
      */
     .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance_from_code
+ENTRY art_quick_set32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r3, [sp, #32]                 @ pass referrer
     mov    r12, sp                       @ save SP
@@ -619,13 +621,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set32_instance_from_code
+END art_quick_set32_instance
 
     /*
      * Called by managed code to resolve an instance field and store a 64-bit primitive value.
      */
     .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance_from_code
+ENTRY art_quick_set64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     mov    r12, sp                       @ save SP
     sub    sp, #8                        @ grow frame for alignment with stack args
@@ -642,13 +644,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set64_instance_from_code
+END art_quick_set64_instance
 
     /*
      * Called by managed code to resolve an instance field and store an object reference.
      */
     .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance_from_code
+ENTRY art_quick_set_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r3, [sp, #32]                 @ pass referrer
     mov    r12, sp                       @ save SP
@@ -666,7 +668,7 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set_obj_instance_from_code
+END art_quick_set_obj_instance
 
     /*
      * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -675,7 +677,7 @@
      * performed.
      */
     .extern artResolveStringFromCode
-ENTRY art_quick_resolve_string_from_code
+ENTRY art_quick_resolve_string
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r2, r9                     @ pass Thread::Current
     mov    r3, sp                     @ pass SP
@@ -685,13 +687,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_resolve_string_from_code
+END art_quick_resolve_string
 
     /*
      * Called by managed code to allocate an object
      */
     .extern artAllocObjectFromCode
-ENTRY art_quick_alloc_object_from_code
+ENTRY art_quick_alloc_object
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r2, r9                     @ pass Thread::Current
     mov    r3, sp                     @ pass SP
@@ -700,14 +702,14 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_from_code
+END art_quick_alloc_object
 
     /*
      * Called by managed code to allocate an object when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocObjectFromCodeWithAccessCheck
-ENTRY art_quick_alloc_object_from_code_with_access_check
+ENTRY art_quick_alloc_object_with_access_check
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r2, r9                     @ pass Thread::Current
     mov    r3, sp                     @ pass SP
@@ -716,13 +718,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_from_code_with_access_check
+END art_quick_alloc_object_with_access_check
 
     /*
      * Called by managed code to allocate an array.
      */
     .extern artAllocArrayFromCode
-ENTRY art_quick_alloc_array_from_code
+ENTRY art_quick_alloc_array
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -737,14 +739,14 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_from_code
+END art_quick_alloc_array
 
     /*
      * Called by managed code to allocate an array when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_alloc_array_from_code_with_access_check
+ENTRY art_quick_alloc_array_with_access_check
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -759,13 +761,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_from_code_with_access_check
+END art_quick_alloc_array_with_access_check
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCode
-ENTRY art_quick_check_and_alloc_array_from_code
+ENTRY art_quick_check_and_alloc_array
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -780,13 +782,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_from_code
+END art_quick_check_and_alloc_array
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_check_and_alloc_array_from_code_with_access_check
+ENTRY art_quick_check_and_alloc_array_with_access_check
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -801,7 +803,7 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_from_code_with_access_check
+END art_quick_check_and_alloc_array_with_access_check
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -840,13 +842,33 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-    .extern artInterpreterEntry
-ENTRY art_quick_interpreter_entry
+    .extern artQuickResolutionTrampoline
+ENTRY art_quick_resolution_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    str     r0, [sp, #0]           @ place proxy method at bottom of frame
+    mov     r2, r9                 @ pass Thread::Current
+    mov     r3, sp                 @ pass SP
+    blx     artQuickResolutionTrampoline  @ (Method* called, receiver, Thread*, SP)
+    cmp     r0, #0                 @ is code pointer null?
+    beq     1f                     @ goto exception
+    mov     r12, r0
+    ldr  r0, [sp, #0]              @ load resolved method in r0
+    ldr  r1, [sp, #8]              @ restore non-callee save r1
+    ldrd r2, [sp, #12]             @ restore non-callee saves r2-r3
+    ldr  lr, [sp, #44]             @ restore lr
+    add  sp, #48                   @ rewind sp
+    .cfi_adjust_cfa_offset -48
+    bx      r12                    @ tail-call into actual code
+1:
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    DELIVER_PENDING_EXCEPTION
+END art_quick_resolution_trampoline
+
+    .extern artQuickToInterpreterBridge
+ENTRY art_quick_to_interpreter_bridge
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     mov     r1, r9                 @ pass Thread::Current
     mov     r2, sp                 @ pass SP
-    blx     artInterpreterEntry    @ (Method* method, Thread*, SP)
+    blx     artQuickToInterpreterBridge    @ (Method* method, Thread*, SP)
     ldr     r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     ldr     lr,  [sp, #44]         @ restore lr
     add     sp,  #48               @ pop frame
@@ -854,14 +876,14 @@
     cmp     r12, #0                @ success if no exception is pending
     bxeq    lr                     @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_interpreter_entry
+END art_quick_to_interpreter_bridge
 
     /*
      * Routine that intercepts method calls and returns.
      */
     .extern artInstrumentationMethodEntryFromCode
     .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry_from_code
+ENTRY art_quick_instrumentation_entry
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     str   r0, [sp, #4]     @ preserve r0
     mov   r12, sp          @ remember sp
@@ -877,11 +899,11 @@
     mov   r12, r0        @ r12 holds reference to code
     ldr   r0, [sp, #4]   @ restore r0
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    blx   r12            @ call method with lr set to art_quick_instrumentation_exit_from_code
-END art_quick_instrumentation_entry_from_code
-    .type art_quick_instrumentation_exit_from_code, #function
-    .global art_quick_instrumentation_exit_from_code
-art_quick_instrumentation_exit_from_code:
+    blx   r12            @ call method with lr set to art_quick_instrumentation_exit
+END art_quick_instrumentation_entry
+    .type art_quick_instrumentation_exit, #function
+    .global art_quick_instrumentation_exit
+art_quick_instrumentation_exit:
     .cfi_startproc
     .fnstart
     mov   lr, #0         @ link register is to here, so clobber with 0 for later checks
@@ -910,7 +932,7 @@
     add sp, #32          @ remove callee save frame
     .cfi_adjust_cfa_offset -32
     bx    r2             @ return
-END art_quick_instrumentation_exit_from_code
+END art_quick_instrumentation_exit
 
     /*
      * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -925,25 +947,6 @@
 END art_quick_deoptimize
 
     /*
-     * Portable abstract method error stub. r0 contains method* on entry. SP unused in portable.
-     */
-    .extern artThrowAbstractMethodErrorFromCode
-ENTRY art_portable_abstract_method_error_stub
-    mov    r1, r9         @ pass Thread::Current
-    b      artThrowAbstractMethodErrorFromCode  @ (Method*, Thread*, SP)
-END art_portable_abstract_method_error_stub
-
-    /*
-     * Quick abstract method error stub. r0 contains method* on entry.
-     */
-ENTRY art_quick_abstract_method_error_stub
-    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
-    mov    r1, r9         @ pass Thread::Current
-    mov    r2, sp         @ pass SP
-    b      artThrowAbstractMethodErrorFromCode  @ (Method*, Thread*, SP)
-END art_quick_abstract_method_error_stub
-
-    /*
      * Signed 64-bit integer multiply.
      *
      * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
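
[Editor's note] For reference, the C-side contract that art_quick_resolution_trampoline
relies on, reconstructed from the asm comment "(Method* called, receiver, Thread*, SP)":
the runtime resolves the callee and returns its code pointer, or NULL with an exception
pending, in which case the stub runs DELIVER_PENDING_EXCEPTION. Note the argument order
differs from the extern declaration this patch removed, which put SP before Thread*.
Declaration-only sketch:

    namespace art {
    namespace mirror { class AbstractMethod; class Object; }
    class Thread;

    // Returns the code pointer for the stub to tail-call, or NULL with an
    // exception pending.
    extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
                                                        mirror::Object* receiver,
                                                        Thread* self,
                                                        mirror::AbstractMethod** sp);
    }  // namespace art
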
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 8a34b9d..fe932d2 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -38,4 +38,11 @@
     .cpload $t9
 .endm
 
+.macro UNIMPLEMENTED name
+    ENTRY \name
+    break
+    break
+    END \name
+.endm
+
 #endif  // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 0a62a40..a18079b 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -21,49 +21,61 @@
 
 namespace art {
 
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                 const DexFile::CodeItem* code_item,
+                                                 ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+                                           const DexFile::CodeItem* code_item,
+                                           ShadowFrame* shadow_frame, JValue* result);
+
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+
 // Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
 
 // Cast entrypoints.
 extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
                                             const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
+extern "C" void art_quick_can_put_array_element(void*, void*);
+extern "C" void art_quick_check_cast(void*, void*);
 
 // DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Exception entrypoints.
 extern "C" void* GetAndClearException(Thread*);
 
 // Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
 
 // FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
 
 // Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
 
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -95,26 +107,14 @@
 extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
 extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Intrinsic entrypoints.
 extern "C" int32_t __memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 
 // Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
-                                                       mirror::Object* receiver,
-                                                       mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -127,49 +127,61 @@
 extern "C" void art_quick_test_suspend();
 
 // Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+  // Interpreter
+  ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+  ipoints->pInterpreterToCompiledCodeBridge = artInterperterToCompiledCodeBridge;
+
+  // JNI
+  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+  // Portable
+  ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+  ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
   // Alloc
-  qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
-  qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
-  qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
-  qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
-  qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
-  qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+  qpoints->pAllocArray = art_quick_alloc_array;
+  qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+  qpoints->pAllocObject = art_quick_alloc_object;
+  qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+  qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+  qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
 
   // Cast
-  qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
-  qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
-  qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pCanPutArrayElement = art_quick_can_put_array_element;
+  qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
-  qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
-  qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
-  qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
-  qpoints->pSet32Instance = art_quick_set32_instance_from_code;
-  qpoints->pSet32Static = art_quick_set32_static_from_code;
-  qpoints->pSet64Instance = art_quick_set64_instance_from_code;
-  qpoints->pSet64Static = art_quick_set64_static_from_code;
-  qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
-  qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
-  qpoints->pGet32Instance = art_quick_get32_instance_from_code;
-  qpoints->pGet64Instance = art_quick_get64_instance_from_code;
-  qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
-  qpoints->pGet32Static = art_quick_get32_static_from_code;
-  qpoints->pGet64Static = art_quick_get64_static_from_code;
-  qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+  qpoints->pSet32Instance = art_quick_set32_instance;
+  qpoints->pSet32Static = art_quick_set32_static;
+  qpoints->pSet64Instance = art_quick_set64_instance;
+  qpoints->pSet64Static = art_quick_set64_static;
+  qpoints->pSetObjInstance = art_quick_set_obj_instance;
+  qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGet32Instance = art_quick_get32_instance;
+  qpoints->pGet64Instance = art_quick_get64_instance;
+  qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGet32Static = art_quick_get32_static;
+  qpoints->pGet64Static = art_quick_get64_static;
+  qpoints->pGetObjStatic = art_quick_get_obj_static;
 
   // FillArray
-  qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+  qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
 
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
@@ -180,8 +192,8 @@
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
 
   // Locks
-  qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
-  qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+  qpoints->pLockObject = art_quick_lock_object;
+  qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
   qpoints->pCmpgDouble = CmpgDouble;
@@ -204,10 +216,6 @@
   qpoints->pShrLong = art_quick_shr_long;
   qpoints->pUshrLong = art_quick_ushr_long;
 
-  // Interpreter
-  qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
-  qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
   qpoints->pMemcmp16 = __memcmp16;
@@ -215,7 +223,8 @@
   qpoints->pMemcpy = memcpy;
 
   // Invocation
-  qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
   qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
@@ -224,19 +233,16 @@
   qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
-  qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
-  qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+  qpoints->pCheckSuspend = CheckSuspendFromCode;
+  qpoints->pTestSuspend = art_quick_test_suspend;
 
   // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception_from_code;
-  qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
-  qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
-  qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
-  qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
-  qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-
-  // Portable
-  ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+  qpoints->pDeliverException = art_quick_deliver_exception;
+  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+  qpoints->pThrowDivZero = art_quick_throw_div_zero;
+  qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 };
 
 }  // namespace art
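
For orientation, the four tables this function now fills have roughly the following shape; the field sets are heavily abbreviated, the types are opaque stand-ins, and DexFile::CodeItem is reduced to const void* so the sketch stands alone (the real definitions live in the runtime's entrypoint headers):

    struct Thread; struct MethodHelper; struct ShadowFrame; struct JValue;

    struct InterpreterEntryPoints {
      void (*pInterpreterToInterpreterBridge)(Thread*, MethodHelper&, const void*,
                                              ShadowFrame*, JValue*);
      void (*pInterpreterToCompiledCodeBridge)(Thread*, MethodHelper&, const void*,
                                               ShadowFrame*, JValue*);
    };

    struct JniEntryPoints {
      void (*pDlsymLookup)();            // filled with art_jni_dlsym_lookup_stub
    };

    struct PortableEntryPoints {
      void (*pPortableResolutionTrampoline)();
      void (*pPortableToInterpreterBridge)();
    };

    struct QuickEntryPoints {            // dozens of slots in reality
      void (*pLockObject)(void*);
      void (*pUnlockObject)(void*);
      void (*pHandleFillArrayData)(void*, void*);
    };

The rest of the change in this file is mechanical: the _from_code / FromCode suffixes drop off both the assembly symbols and the table fields.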
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index fca6d77..ad7c021 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -59,7 +59,7 @@
      * Entry point of native methods when JNI bug compatibility is enabled.
      */
     .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
+ENTRY art_work_around_app_jni_bugs
     GENERATE_GLOBAL_POINTER
     # save registers that may contain arguments and LR that will be crushed by a call
     addiu    $sp, $sp, -32
@@ -86,4 +86,4 @@
     jr       $t9              # tail call into JNI routine
     addiu    $sp, $sp, 32
     .cfi_adjust_cfa_offset -32
-END art_quick_work_around_app_jni_bugs
+END art_work_around_app_jni_bugs
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index e7a9b0f..9208a8a 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -61,13 +61,5 @@
     .cfi_adjust_cfa_offset -64
 END art_portable_proxy_invoke_handler
 
-    /*
-     * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
-     */
-    .extern artThrowAbstractMethodErrorFromCode
-ENTRY art_portable_abstract_method_error_stub
-    GENERATE_GLOBAL_POINTER
-    la       $t9, artThrowAbstractMethodErrorFromCode
-    jr       $t9            # (Method*, Thread*, SP)
-    move     $a1, $s1       # pass Thread::Current
-END art_portable_abstract_method_error_stub
+UNIMPLEMENTED art_portable_resolution_trampoline
+UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index d32a2b4..004fda6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -143,7 +143,7 @@
     lw     $a1, 4($sp)            # restore non-callee save $a1
     lw     $a2, 8($sp)            # restore non-callee save $a2
     lw     $a3, 12($sp)           # restore non-callee save $a3
-    addiu  $sp, $sp, 64           # strip frame
+    addiu  $sp, $sp, 64           # pop frame
     .cfi_adjust_cfa_offset -64
 .endm
 
@@ -268,79 +268,79 @@
      * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
      * the bottom of the thread. On entry $a0 holds Throwable*
      */
-ENTRY art_quick_deliver_exception_from_code
+ENTRY art_quick_deliver_exception
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a1, rSELF                 # pass Thread::Current
     la   $t9, artDeliverExceptionFromCode
     jr   $t9                        # artDeliverExceptionFromCode(Throwable*, Thread*, $sp)
     move $a2, $sp                   # pass $sp
-END art_quick_deliver_exception_from_code
+END art_quick_deliver_exception
 
     /*
      * Called by managed code to create and deliver a NullPointerException
      */
     .extern artThrowNullPointerExceptionFromCode
-ENTRY art_quick_throw_null_pointer_exception_from_code
+ENTRY art_quick_throw_null_pointer_exception
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a0, rSELF                 # pass Thread::Current
     la   $t9, artThrowNullPointerExceptionFromCode
     jr   $t9                        # artThrowNullPointerExceptionFromCode(Thread*, $sp)
     move $a1, $sp                   # pass $sp
-END art_quick_throw_null_pointer_exception_from_code
+END art_quick_throw_null_pointer_exception
 
     /*
      * Called by managed code to create and deliver an ArithmeticException
      */
     .extern artThrowDivZeroFromCode
-ENTRY art_quick_throw_div_zero_from_code
+ENTRY art_quick_throw_div_zero
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a0, rSELF                 # pass Thread::Current
     la   $t9, artThrowDivZeroFromCode
     jr   $t9                        # artThrowDivZeroFromCode(Thread*, $sp)
     move $a1, $sp                   # pass $sp
-END art_quick_throw_div_zero_from_code
+END art_quick_throw_div_zero
 
     /*
      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
      */
     .extern artThrowArrayBoundsFromCode
-ENTRY art_quick_throw_array_bounds_from_code
+ENTRY art_quick_throw_array_bounds
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a2, rSELF                 # pass Thread::Current
     la   $t9, artThrowArrayBoundsFromCode
     jr   $t9                        # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp)
     move $a3, $sp                   # pass $sp
-END art_quick_throw_array_bounds_from_code
+END art_quick_throw_array_bounds
 
     /*
      * Called by managed code to create and deliver a StackOverflowError.
      */
     .extern artThrowStackOverflowFromCode
-ENTRY art_quick_throw_stack_overflow_from_code
+ENTRY art_quick_throw_stack_overflow
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a0, rSELF                 # pass Thread::Current
     la   $t9, artThrowStackOverflowFromCode
     jr   $t9                        # artThrowStackOverflowFromCode(Thread*, $sp)
     move $a1, $sp                   # pass $sp
-END art_quick_throw_stack_overflow_from_code
+END art_quick_throw_stack_overflow
 
     /*
      * Called by managed code to create and deliver a NoSuchMethodError.
      */
     .extern artThrowNoSuchMethodFromCode
-ENTRY art_quick_throw_no_such_method_from_code
+ENTRY art_quick_throw_no_such_method
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a1, rSELF                 # pass Thread::Current
     la   $t9, artThrowNoSuchMethodFromCode
     jr   $t9                        # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp)
     move $a2, $sp                   # pass $sp
-END art_quick_throw_no_such_method_from_code
+END art_quick_throw_no_such_method
 
     /*
      * All generated callsites for interface invokes and invocation slow paths will load arguments
@@ -466,67 +466,67 @@
      * failure.
      */
     .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data_from_code
+ENTRY art_quick_handle_fill_data
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
     move    $a2, rSELF                         # pass Thread::Current
     jal     artHandleFillArrayDataFromCode     # (Array*, const DexFile::Payload*, Thread*, $sp)
     move    $a3, $sp                           # pass $sp
     RETURN_IF_ZERO
-END art_quick_handle_fill_data_from_code
+END art_quick_handle_fill_data
 
     /*
      * Entry from managed code that calls artLockObjectFromCode, may block for GC.
      */
     .extern artLockObjectFromCode
-ENTRY art_quick_lock_object_from_code
+ENTRY art_quick_lock_object
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME      # save callee saves in case we block
     move    $a1, rSELF                    # pass Thread::Current
     jal     artLockObjectFromCode         # (Object* obj, Thread*, $sp)
     move    $a2, $sp                      # pass $sp
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
-END art_quick_lock_object_from_code
+END art_quick_lock_object
 
     /*
      * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
      */
     .extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object_from_code
+ENTRY art_quick_unlock_object
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
     move    $a1, rSELF                # pass Thread::Current
     jal     artUnlockObjectFromCode   # (Object* obj, Thread*, $sp)
     move    $a2, $sp                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_unlock_object_from_code
+END art_quick_unlock_object
 
     /*
      * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
      */
     .extern artCheckCastFromCode
-ENTRY art_quick_check_cast_from_code
+ENTRY art_quick_check_cast
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
     move    $a2, rSELF                # pass Thread::Current
     jal     artCheckCastFromCode      # (Class* a, Class* b, Thread*, $sp)
     move    $a3, $sp                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_check_cast_from_code
+END art_quick_check_cast
 
     /*
      * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
      * failure.
      */
     .extern artCanPutArrayElementFromCode
-ENTRY art_quick_can_put_array_element_from_code
+ENTRY art_quick_can_put_array_element
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case exception allocation triggers GC
     move    $a2, rSELF                     # pass Thread::Current
     jal     artCanPutArrayElementFromCode  # (Object* element, Class* array_class, Thread*, $sp)
     move    $a3, $sp                       # pass $sp
     RETURN_IF_ZERO
-END art_quick_can_put_array_element_from_code
+END art_quick_can_put_array_element
 
     /*
      * Entry from managed code when uninitialized static storage, this stub will run the class
@@ -534,7 +534,7 @@
      * returned.
      */
     .extern artInitializeStaticStorageFromCode
-ENTRY art_quick_initialize_static_storage_from_code
+ENTRY art_quick_initialize_static_storage
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME            # save callee saves in case of GC
     move    $a2, rSELF                          # pass Thread::Current
@@ -542,13 +542,13 @@
     jal     artInitializeStaticStorageFromCode
     move    $a3, $sp                            # pass $sp
     RETURN_IF_NONZERO
-END art_quick_initialize_static_storage_from_code
+END art_quick_initialize_static_storage
 
     /*
      * Entry from managed code when dex cache misses for a type_idx.
      */
     .extern artInitializeTypeFromCode
-ENTRY art_quick_initialize_type_from_code
+ENTRY art_quick_initialize_type
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           # save callee saves in case of GC
     move    $a2, rSELF                         # pass Thread::Current
@@ -556,14 +556,14 @@
     jal     artInitializeTypeFromCode
     move    $a3, $sp                           # pass $sp
     RETURN_IF_NONZERO
-END art_quick_initialize_type_from_code
+END art_quick_initialize_type
 
     /*
      * Entry from managed code when type_idx needs to be checked for access and dex cache may also
      * miss.
      */
     .extern artInitializeTypeAndVerifyAccessFromCode
-ENTRY art_quick_initialize_type_and_verify_access_from_code
+ENTRY art_quick_initialize_type_and_verify_access
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           # save callee saves in case of GC
     move    $a2, rSELF                         # pass Thread::Current
@@ -571,13 +571,13 @@
     jal     artInitializeTypeAndVerifyAccessFromCode
     move    $a3, $sp                           # pass $sp
     RETURN_IF_NONZERO
-END art_quick_initialize_type_and_verify_access_from_code
+END art_quick_initialize_type_and_verify_access
 
     /*
      * Called by managed code to resolve a static field and load a 32-bit primitive value.
      */
     .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static_from_code
+ENTRY art_quick_get32_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -585,13 +585,13 @@
     jal    artGet32StaticFromCode        # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
     move   $a3, $sp                      # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static_from_code
+END art_quick_get32_static
 
     /*
      * Called by managed code to resolve a static field and load a 64-bit primitive value.
      */
     .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static_from_code
+ENTRY art_quick_get64_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -599,13 +599,13 @@
     jal    artGet64StaticFromCode        # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
     move   $a3, $sp                      # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static_from_code
+END art_quick_get64_static
 
     /*
      * Called by managed code to resolve a static field and load an object reference.
      */
     .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static_from_code
+ENTRY art_quick_get_obj_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -613,13 +613,13 @@
     jal    artGetObjStaticFromCode       # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
     move   $a3, $sp                      # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static_from_code
+END art_quick_get_obj_static
 
     /*
      * Called by managed code to resolve an instance field and load a 32-bit primitive value.
      */
     .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance_from_code
+ENTRY art_quick_get32_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -627,13 +627,13 @@
     jal    artGet32InstanceFromCode      # (field_idx, Object*, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance_from_code
+END art_quick_get32_instance
 
     /*
      * Called by managed code to resolve an instance field and load a 64-bit primitive value.
      */
     .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance_from_code
+ENTRY art_quick_get64_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -641,13 +641,13 @@
     jal    artGet64InstanceFromCode      # (field_idx, Object*, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance_from_code
+END art_quick_get64_instance
 
     /*
      * Called by managed code to resolve an instance field and load an object reference.
      */
     .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance_from_code
+ENTRY art_quick_get_obj_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -655,13 +655,13 @@
     jal    artGetObjInstanceFromCode     # (field_idx, Object*, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance_from_code
+END art_quick_get_obj_instance
 
     /*
      * Called by managed code to resolve a static field and store a 32-bit primitive value.
      */
     .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static_from_code
+ENTRY art_quick_set32_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -669,13 +669,13 @@
     jal    artSet32StaticFromCode        # (field_idx, new_val, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set32_static_from_code
+END art_quick_set32_static
 
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
      */
     .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static_from_code
+ENTRY art_quick_set64_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -683,13 +683,13 @@
     jal    artSet64StaticFromCode        # (field_idx, referrer, new_val, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set64_static_from_code
+END art_quick_set64_static
 
     /*
      * Called by managed code to resolve a static field and store an object reference.
      */
     .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static_from_code
+ENTRY art_quick_set_obj_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -697,13 +697,13 @@
     jal    artSetObjStaticFromCode       # (field_idx, new_val, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set_obj_static_from_code
+END art_quick_set_obj_static
 
     /*
      * Called by managed code to resolve an instance field and store a 32-bit primitive value.
      */
     .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance_from_code
+ENTRY art_quick_set32_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a3, 64($sp)                  # pass referrer's Method*
@@ -711,26 +711,26 @@
     jal    artSet32InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set32_instance_from_code
+END art_quick_set32_instance
 
     /*
      * Called by managed code to resolve an instance field and store a 64-bit primitive value.
      */
     .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance_from_code
+ENTRY art_quick_set64_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     sw     rSELF, 16($sp)                # pass Thread::Current
     jal    artSet64InstanceFromCode      # (field_idx, Object*, new_val, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set64_instance_from_code
+END art_quick_set64_instance
 
     /*
      * Called by managed code to resolve an instance field and store an object reference.
      */
     .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance_from_code
+ENTRY art_quick_set_obj_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a3, 64($sp)                  # pass referrer's Method*
@@ -738,7 +738,7 @@
     jal    artSetObjInstanceFromCode     # (field_idx, Object*, new_val, referrer, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set_obj_instance_from_code
+END art_quick_set_obj_instance
 
     /*
      * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -747,7 +747,7 @@
      * performed.
      */
     .extern artResolveStringFromCode
-ENTRY art_quick_resolve_string_from_code
+ENTRY art_quick_resolve_string
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
@@ -755,40 +755,40 @@
     jal     artResolveStringFromCode
     move    $a3, $sp                  # pass $sp
     RETURN_IF_NONZERO
-END art_quick_resolve_string_from_code
+END art_quick_resolve_string
 
     /*
      * Called by managed code to allocate an object.
      */
     .extern artAllocObjectFromCode
-ENTRY art_quick_alloc_object_from_code
+ENTRY art_quick_alloc_object
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
     jal     artAllocObjectFromCode    # (uint32_t type_idx, Method* method, Thread*, $sp)
     move    $a3, $sp                  # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_object_from_code
+END art_quick_alloc_object
 
     /*
      * Called by managed code to allocate an object when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocObjectFromCodeWithAccessCheck
-ENTRY art_quick_alloc_object_from_code_with_access_check
+ENTRY art_quick_alloc_object_with_access_check
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
     jal     artAllocObjectFromCodeWithAccessCheck  # (uint32_t type_idx, Method* method, Thread*, $sp)
     move    $a3, $sp                  # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_object_from_code_with_access_check
+END art_quick_alloc_object_with_access_check
 
     /*
      * Called by managed code to allocate an array.
      */
     .extern artAllocArrayFromCode
-ENTRY art_quick_alloc_array_from_code
+ENTRY art_quick_alloc_array
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -796,14 +796,14 @@
     jal     artAllocArrayFromCode
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_array_from_code
+END art_quick_alloc_array
 
     /*
      * Called by managed code to allocate an array when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_alloc_array_from_code_with_access_check
+ENTRY art_quick_alloc_array_with_access_check
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -811,13 +811,13 @@
     jal     artAllocArrayFromCodeWithAccessCheck
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_array_from_code_with_access_check
+END art_quick_alloc_array_with_access_check
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCode
-ENTRY art_quick_check_and_alloc_array_from_code
+ENTRY art_quick_check_and_alloc_array
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -825,13 +825,13 @@
     jal     artCheckAndAllocArrayFromCode
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_from_code
+END art_quick_check_and_alloc_array
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_check_and_alloc_array_from_code_with_access_check
+ENTRY art_quick_check_and_alloc_array_with_access_check
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -839,7 +839,7 @@
     jal     artCheckAndAllocArrayFromCodeWithAccessCheck
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_from_code_with_access_check
+END art_quick_check_and_alloc_array_with_access_check
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -884,13 +884,33 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-    .extern artInterpreterEntry
-ENTRY art_quick_interpreter_entry
+    .extern artQuickResolutionTrampoline
+ENTRY art_quick_resolution_trampoline
     GENERATE_GLOBAL_POINTER
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    sw      $a0, 0($sp)            # place proxy method at bottom of frame
+    move    $a2, rSELF             # pass Thread::Current
+    jal     artQuickResolutionTrampoline  # (Method* called, receiver, Thread*, SP)
+    move    $a3, $sp               # pass $sp
+    lw      $gp, 52($sp)           # restore $gp
+    lw      $ra, 60($sp)           # restore $ra
+    beqz    $v0, 1f
+    lw      $a0, 0($sp)            # load resolved method to $a0
+    lw      $a1, 4($sp)            # restore non-callee save $a1
+    lw      $a2, 8($sp)            # restore non-callee save $a2
+    lw      $a3, 12($sp)           # restore non-callee save $a3
+    jr      $v0                    # tail call to method
+1:
+    addiu   $sp, $sp, 64           # pop frame
+    .cfi_adjust_cfa_offset -64
+    DELIVER_PENDING_EXCEPTION
+END art_quick_resolution_trampoline
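+
+Restated as C++ pseudocode, the new trampoline's control flow is the sketch below; ResolveAndPatch stands in for artQuickResolutionTrampoline, assumed here to return the resolved code pointer, or null if resolution threw:
+
+    using Code = void (*)();
+
+    Code ResolveAndPatch() { return nullptr; }  // stand-in body
+
+    void ResolutionTrampolineShape() {
+      // SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME: spill the argument registers
+      // so the resolver, and any GC it triggers, can see them.
+      Code code = ResolveAndPatch();  // jal; $v0 receives the code pointer
+      if (code != nullptr) {
+        // Restore $a0-$a3 ($a0 now holds the resolved Method*) and tail-call.
+        code();                       // jr $v0
+      } else {
+        // Pop the 64-byte frame and DELIVER_PENDING_EXCEPTION.
+      }
+    }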
+
+    .extern artQuickToInterpreterBridge
+ENTRY art_quick_to_interpreter_bridge
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     move    $a1, rSELF             # pass Thread::Current
-    jal     artInterpreterEntry    # (Method* method, Thread*, SP)
+    jal     artQuickToInterpreterBridge    # (Method* method, Thread*, SP)
     move    $a2, $sp               # pass $sp
     lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
     lw      $gp, 52($sp)           # restore $gp
@@ -902,14 +922,14 @@
     nop
 1:
     DELIVER_PENDING_EXCEPTION
-END art_quick_interpreter_entry
+END art_quick_to_interpreter_bridge
 
     /*
      * Routine that intercepts method calls and returns.
      */
     .extern artInstrumentationMethodEntryFromCode
     .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry_from_code
+ENTRY art_quick_instrumentation_entry
     GENERATE_GLOBAL_POINTER
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     move     $t0, $sp       # remember bottom of caller's frame
@@ -927,10 +947,10 @@
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
     jalr     $t9            # call method
     nop
-END art_quick_instrumentation_entry_from_code
+END art_quick_instrumentation_entry
     /* intentional fallthrough */
-    .global art_quick_instrumentation_exit_from_code
-art_quick_instrumentation_exit_from_code:
+    .global art_quick_instrumentation_exit
+art_quick_instrumentation_exit:
     .cfi_startproc
     addiu    $t9, $ra, 4    # put current address into $t9 to rebuild $gp
     GENERATE_GLOBAL_POINTER
@@ -960,7 +980,7 @@
     jr       $t0            # return
     addiu    $sp, $sp, 112  # 48 bytes of args + 64 bytes of callee save frame
     .cfi_adjust_cfa_offset -112
-END art_quick_instrumentation_exit_from_code
+END art_quick_instrumentation_exit
 
     /*
      * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -978,18 +998,6 @@
 END art_quick_deoptimize
 
     /*
-     * Quick abstract method error stub. $a0 contains method* on entry.
-     */
-ENTRY art_quick_abstract_method_error_stub
-    GENERATE_GLOBAL_POINTER
-    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
-    move     $a1, $s1       # pass Thread::Current
-    la       $t9, artThrowAbstractMethodErrorFromCode
-    jr       $t9            # (Method*, Thread*, SP)
-    move     $a2, $sp       # pass SP
-END art_quick_abstract_method_error_stub
-
-    /*
      * Long integer shift.  This is different from the generic 32/64-bit
      * binary operations because vAA/vBB are 64-bit but vCC (the shift
      * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
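
The hunk cuts this comment short; the rule it is spelling out is that Dalvik's long shifts use only the low six bits of the 32-bit distance. As a one-line C++ statement of the same semantics:

    #include <cstdint>

    // vCC is 32-bit, but only its low 6 bits participate in a 64-bit shift.
    uint64_t shl_long(uint64_t value, uint32_t distance) {
      return value << (distance & 63);
    }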
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 7e6dce9..7a3fdfa 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -88,4 +88,16 @@
   .cfi_restore REG_VAR(reg,0)
 END_MACRO
 
+MACRO1(UNIMPLEMENTED,name)
+    .type VAR(name, 0), @function
+    .globl VAR(name, 0)
+    ALIGN_FUNCTION_ENTRY
+VAR(name, 0):
+    .cfi_startproc
+    int3
+    int3
+    .cfi_endproc
+    .size VAR(name, 0), .-VAR(name, 0)
+END_MACRO
+
 #endif  // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index d47dfef..9152674 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -20,70 +20,74 @@
 
 namespace art {
 
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-
-// Cast entrypoints.
-extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass,
-                                                const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
-
-// FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
-
-// Math entrypoints.
-extern "C" double art_quick_fmod_from_code(double, double);
-extern "C" float art_quick_fmodf_from_code(float, float);
-extern "C" double art_quick_l2d_from_code(int64_t);
-extern "C" float art_quick_l2f_from_code(int64_t);
-extern "C" int64_t art_quick_d2l_from_code(double);
-extern "C" int64_t art_quick_f2l_from_code(float);
-extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t);
-extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t);
-extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t);
-
 // Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                  const DexFile::CodeItem* code_item,
+                                                  ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
                                            const DexFile::CodeItem* code_item,
                                            ShadowFrame* shadow_frame, JValue* result);
 
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
+                                                const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element(void*, void*);
+extern "C" void art_quick_check_cast(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
+
+// Math entrypoints.
+extern "C" double art_quick_fmod(double, double);
+extern "C" float art_quick_fmodf(float, float);
+extern "C" double art_quick_l2d(int64_t);
+extern "C" float art_quick_l2f(int64_t);
+extern "C" int64_t art_quick_d2l(double);
+extern "C" int64_t art_quick_f2l(float);
+extern "C" int32_t art_quick_idivmod(int32_t, int32_t);
+extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
+extern "C" int64_t art_quick_ldivmod(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
+
 // Intrinsic entrypoints.
 extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
@@ -91,12 +95,8 @@
 extern "C" void* art_quick_memcpy(void*, const void*, size_t);
 
 // Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
-                                                       mirror::Object* receiver,
-                                                       mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -109,49 +109,61 @@
 extern "C" void art_quick_test_suspend();
 
 // Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+  // Interpreter
+  ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+  ipoints->pInterpreterToCompiledCodeBridge = artInterperterToCompiledCodeBridge;
+
+  // JNI
+  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+  // Portable
+  ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+  ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
   // Alloc
-  qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
-  qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
-  qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
-  qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
-  qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
-  qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+  qpoints->pAllocArray = art_quick_alloc_array;
+  qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+  qpoints->pAllocObject = art_quick_alloc_object;
+  qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+  qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+  qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
 
   // Cast
-  qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
-  qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
-  qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+  qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
+  qpoints->pCanPutArrayElement = art_quick_can_put_array_element;
+  qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
-  qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
-  qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
-  qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
-  qpoints->pSet32Instance = art_quick_set32_instance_from_code;
-  qpoints->pSet32Static = art_quick_set32_static_from_code;
-  qpoints->pSet64Instance = art_quick_set64_instance_from_code;
-  qpoints->pSet64Static = art_quick_set64_static_from_code;
-  qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
-  qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
-  qpoints->pGet32Instance = art_quick_get32_instance_from_code;
-  qpoints->pGet64Instance = art_quick_get64_instance_from_code;
-  qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
-  qpoints->pGet32Static = art_quick_get32_static_from_code;
-  qpoints->pGet64Static = art_quick_get64_static_from_code;
-  qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+  qpoints->pSet32Instance = art_quick_set32_instance;
+  qpoints->pSet32Static = art_quick_set32_static;
+  qpoints->pSet64Instance = art_quick_set64_instance;
+  qpoints->pSet64Static = art_quick_set64_static;
+  qpoints->pSetObjInstance = art_quick_set_obj_instance;
+  qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGet32Instance = art_quick_get32_instance;
+  qpoints->pGet64Instance = art_quick_get64_instance;
+  qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGet32Static = art_quick_get32_static;
+  qpoints->pGet64Static = art_quick_get64_static;
+  qpoints->pGetObjStatic = art_quick_get_obj_static;
 
   // FillArray
-  qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+  qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
 
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
@@ -162,33 +174,29 @@
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
 
   // Locks
-  qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
-  qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+  qpoints->pLockObject = art_quick_lock_object;
+  qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
   // points->pCmpgDouble = NULL;  // Not needed on x86.
   // points->pCmpgFloat = NULL;  // Not needed on x86.
   // points->pCmplDouble = NULL;  // Not needed on x86.
   // points->pCmplFloat = NULL;  // Not needed on x86.
-  qpoints->pFmod = art_quick_fmod_from_code;
-  qpoints->pL2d = art_quick_l2d_from_code;
-  qpoints->pFmodf = art_quick_fmodf_from_code;
-  qpoints->pL2f = art_quick_l2f_from_code;
+  qpoints->pFmod = art_quick_fmod;
+  qpoints->pL2d = art_quick_l2d;
+  qpoints->pFmodf = art_quick_fmodf;
+  qpoints->pL2f = art_quick_l2f;
   // qpoints->pD2iz = NULL;  // Not needed on x86.
   // qpoints->pF2iz = NULL;  // Not needed on x86.
-  qpoints->pIdivmod = art_quick_idivmod_from_code;
-  qpoints->pD2l = art_quick_d2l_from_code;
-  qpoints->pF2l = art_quick_f2l_from_code;
-  qpoints->pLdiv = art_quick_ldiv_from_code;
-  qpoints->pLdivmod = art_quick_ldivmod_from_code;
-  qpoints->pLmul = art_quick_lmul_from_code;
-  qpoints->pShlLong = art_quick_lshl_from_code;
-  qpoints->pShrLong = art_quick_lshr_from_code;
-  qpoints->pUshrLong = art_quick_lushr_from_code;
-
-  // Interpreter
-  qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
-  qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+  qpoints->pIdivmod = art_quick_idivmod;
+  qpoints->pD2l = art_quick_d2l;
+  qpoints->pF2l = art_quick_f2l;
+  qpoints->pLdiv = art_quick_ldiv;
+  qpoints->pLdivmod = art_quick_ldivmod;
+  qpoints->pLmul = art_quick_lmul;
+  qpoints->pShlLong = art_quick_lshl;
+  qpoints->pShrLong = art_quick_lshr;
+  qpoints->pUshrLong = art_quick_lushr;
 
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
@@ -197,7 +205,8 @@
   qpoints->pMemcpy = art_quick_memcpy;
 
   // Invocation
-  qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
   qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
@@ -206,19 +215,16 @@
   qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
-  qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
-  qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+  qpoints->pCheckSuspend = CheckSuspendFromCode;
+  qpoints->pTestSuspend = art_quick_test_suspend;
 
   // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception_from_code;
-  qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
-  qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
-  qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
-  qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
-  qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-
-  // Portable
-  ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+  qpoints->pDeliverException = art_quick_deliver_exception;
+  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+  qpoints->pThrowDivZero = art_quick_throw_div_zero;
+  qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 };
 
 }  // namespace art
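
For context on the renames above: managed code never reaches these routines by
symbol name. The compiler emits a load from a fixed offset inside the Thread
object followed by an indirect call, so the dropped _from_code suffix only has
to stay consistent between this init function and the .S files. A minimal
sketch of the dispatch pattern, with hypothetical field and function names:

    // Reduced model, not ART code: a per-thread table of raw function
    // pointers, filled in once by the init function above at startup.
    struct QuickEntryPoints {
      void (*pLockObject)(void* obj);  // assumed signature, for illustration
      void (*pUnlockObject)(void* obj);
    };

    struct Thread {
      QuickEntryPoints quick_entrypoints_;
    };

    // Compiled code effectively performs
    //   call [thread_reg + offsetof(Thread, quick_entrypoints_.pLockObject)]
    // which in C++ terms is just:
    void CallLockObject(Thread* self, void* obj) {
      self->quick_entrypoints_.pLockObject(obj);
    }
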
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index a0fca6c..0313d4b 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -90,20 +90,5 @@
     ret
 END_FUNCTION art_portable_proxy_invoke_handler
 
-    /*
-     * Portable abstract method error stub. method* is at %esp + 4 on entry.
-     */
-DEFINE_FUNCTION art_portable_abstract_method_error_stub
-    PUSH ebp
-    movl %esp, %ebp               // Remember SP.
-    .cfi_def_cfa_register ebp
-    subl LITERAL(12), %esp        // Align stack.
-    PUSH esp                      // Pass sp (not used).
-    pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
-    pushl 8(%ebp)                 // Pass Method*.
-    call SYMBOL(artThrowAbstractMethodErrorFromCode)  // (Method*, Thread*, SP)
-    leave                         // Restore the stack and %ebp.
-    .cfi_def_cfa esp, 4
-    .cfi_restore ebp
-    ret                           // Return to caller to handle pending exception.
-END_FUNCTION art_portable_abstract_method_error_stub
+UNIMPLEMENTED art_portable_resolution_trampoline
+UNIMPLEMENTED art_portable_to_interpreter_bridge
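
These two UNIMPLEMENTED uses rely on the macro whose old local definition is
deleted from quick_entrypoints_x86.S further down (presumably it now comes
from a shared assembly header): it emits a global symbol whose entire body is
a single int3. A conceptual C++ stand-in, for illustration only:

    // Not the real mechanism, just the effect: a placeholder entry point
    // that traps immediately if anything ever calls it.
    extern "C" void art_portable_resolution_trampoline() {
      __builtin_trap();  // plays the role of the macro's int3
    }
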
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 89ea71a..dbf552f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -135,34 +135,34 @@
     /*
      * Called by managed code to create and deliver a NullPointerException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
 
     /*
      * Called by managed code to create and deliver an ArithmeticException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
 
     /*
      * Called by managed code to create and deliver a StackOverflowError.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
 
     /*
      * Called by managed code, saves callee saves and then calls artThrowException
      * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
 
     /*
      * Called by managed code to create and deliver a NoSuchMethodError.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
 
     /*
      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
      * index, arg2 holds limit.
      */
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
 
     /*
      * All generated callsites for interface invokes and invocation slow paths will load arguments
@@ -382,24 +382,24 @@
     DELIVER_PENDING_EXCEPTION
 END_MACRO
 
-TWO_ARG_DOWNCALL art_quick_alloc_object_from_code, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_alloc_object_from_code_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_from_code, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_from_code_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
 
-TWO_ARG_DOWNCALL art_quick_resolve_string_from_code, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO
 
-ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret
-ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO
+ONE_ARG_DOWNCALL art_quick_lock_object, artLockObjectFromCode, ret
+ONE_ARG_DOWNCALL art_quick_unlock_object, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO
 
-TWO_ARG_DOWNCALL art_quick_handle_fill_data_from_code, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
 
-DEFINE_FUNCTION art_quick_is_assignable_from_code
+DEFINE_FUNCTION art_quick_is_assignable
     PUSH eax                     // alignment padding
     PUSH ecx                    // pass arg2
     PUSH eax                     // pass arg1
@@ -407,7 +407,7 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_is_assignable_from_code
+END_FUNCTION art_quick_is_assignable
 
 DEFINE_FUNCTION art_quick_memcpy
     PUSH edx                      // pass arg3
@@ -419,12 +419,12 @@
     ret
 END_FUNCTION art_quick_memcpy
 
-TWO_ARG_DOWNCALL art_quick_check_cast_from_code, artCheckCastFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_DOWNCALL art_quick_can_put_array_element_from_code, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_check_cast, artCheckCastFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_can_put_array_element, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO
 
 NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
 
-DEFINE_FUNCTION art_quick_fmod_from_code
+DEFINE_FUNCTION art_quick_fmod
     subl LITERAL(12), %esp        // alignment padding
     .cfi_adjust_cfa_offset 12
     PUSH ebx                      // pass arg4 b.hi
@@ -437,9 +437,9 @@
     addl LITERAL(28), %esp        // pop arguments
     .cfi_adjust_cfa_offset -28
     ret
-END_FUNCTION art_quick_fmod_from_code
+END_FUNCTION art_quick_fmod
 
-DEFINE_FUNCTION art_quick_fmodf_from_code
+DEFINE_FUNCTION art_quick_fmodf
     PUSH eax                      // alignment padding
     PUSH ecx                      // pass arg2 b
     PUSH eax                      // pass arg1 a
@@ -449,9 +449,9 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_fmodf_from_code
+END_FUNCTION art_quick_fmodf
 
-DEFINE_FUNCTION art_quick_l2d_from_code
+DEFINE_FUNCTION art_quick_l2d
     PUSH ecx                      // push arg2 a.hi
     PUSH eax                      // push arg1 a.lo
     fildll (%esp)                 // load as integer and push into st0
@@ -460,9 +460,9 @@
     addl LITERAL(8), %esp         // pop arguments
     .cfi_adjust_cfa_offset -8
     ret
-END_FUNCTION art_quick_l2d_from_code
+END_FUNCTION art_quick_l2d
 
-DEFINE_FUNCTION art_quick_l2f_from_code
+DEFINE_FUNCTION art_quick_l2f
     PUSH ecx                      // push arg2 a.hi
     PUSH eax                      // push arg1 a.lo
     fildll (%esp)                 // load as integer and push into st0
@@ -471,9 +471,9 @@
     addl LITERAL(8), %esp         // pop argument
     .cfi_adjust_cfa_offset -8
     ret
-END_FUNCTION art_quick_l2f_from_code
+END_FUNCTION art_quick_l2f
 
-DEFINE_FUNCTION art_quick_d2l_from_code
+DEFINE_FUNCTION art_quick_d2l
     PUSH eax                      // alignment padding
     PUSH ecx                      // pass arg2 a.hi
     PUSH eax                      // pass arg1 a.lo
@@ -481,9 +481,9 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_d2l_from_code
+END_FUNCTION art_quick_d2l
 
-DEFINE_FUNCTION art_quick_f2l_from_code
+DEFINE_FUNCTION art_quick_f2l
     subl LITERAL(8), %esp         // alignment padding
     .cfi_adjust_cfa_offset 8
     PUSH eax                      // pass arg1 a
@@ -491,9 +491,9 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_f2l_from_code
+END_FUNCTION art_quick_f2l
 
-DEFINE_FUNCTION art_quick_idivmod_from_code
+DEFINE_FUNCTION art_quick_idivmod
     cmpl LITERAL(0x80000000), %eax
     je check_arg2  // special case
 args_ok:
@@ -505,9 +505,9 @@
     jne args_ok
     xorl %edx, %edx
     ret         // eax already holds min int
-END_FUNCTION art_quick_idivmod_from_code
+END_FUNCTION art_quick_idivmod
 
-DEFINE_FUNCTION art_quick_ldiv_from_code
+DEFINE_FUNCTION art_quick_ldiv
     subl LITERAL(12), %esp        // alignment padding
     .cfi_adjust_cfa_offset 12
     PUSH ebx                     // pass arg4 b.hi
@@ -518,9 +518,9 @@
     addl LITERAL(28), %esp        // pop arguments
     .cfi_adjust_cfa_offset -28
     ret
-END_FUNCTION art_quick_ldiv_from_code
+END_FUNCTION art_quick_ldiv
 
-DEFINE_FUNCTION art_quick_ldivmod_from_code
+DEFINE_FUNCTION art_quick_ldivmod
     subl LITERAL(12), %esp        // alignment padding
     .cfi_adjust_cfa_offset 12
     PUSH ebx                     // pass arg4 b.hi
@@ -531,18 +531,18 @@
     addl LITERAL(28), %esp        // pop arguments
     .cfi_adjust_cfa_offset -28
     ret
-END_FUNCTION art_quick_ldivmod_from_code
+END_FUNCTION art_quick_ldivmod
 
-DEFINE_FUNCTION art_quick_lmul_from_code
+DEFINE_FUNCTION art_quick_lmul
     imul %eax, %ebx              // ebx = a.lo(eax) * b.hi(ebx)
     imul %edx, %ecx              // ecx = b.lo(edx) * a.hi(ecx)
     mul  %edx                    // edx:eax = a.lo(eax) * b.lo(edx)
     add  %ebx, %ecx
     add  %ecx, %edx              // edx += (a.lo * b.hi) + (b.lo * a.hi)
     ret
-END_FUNCTION art_quick_lmul_from_code
+END_FUNCTION art_quick_lmul
 
-DEFINE_FUNCTION art_quick_lshl_from_code
+DEFINE_FUNCTION art_quick_lshl
     // ecx:eax << edx
     xchg %edx, %ecx
     shld %cl,%eax,%edx
@@ -553,9 +553,9 @@
     xor %eax, %eax
 1:
     ret
-END_FUNCTION art_quick_lshl_from_code
+END_FUNCTION art_quick_lshl
 
-DEFINE_FUNCTION art_quick_lshr_from_code
+DEFINE_FUNCTION art_quick_lshr
     // ecx:eax >> edx
     xchg %edx, %ecx
     shrd %cl,%edx,%eax
@@ -566,9 +566,9 @@
     sar LITERAL(31), %edx
 1:
     ret
-END_FUNCTION art_quick_lshr_from_code
+END_FUNCTION art_quick_lshr
 
-DEFINE_FUNCTION art_quick_lushr_from_code
+DEFINE_FUNCTION art_quick_lushr
     // ecx:eax >>> edx
     xchg %edx, %ecx
     shrd %cl,%edx,%eax
@@ -579,9 +579,9 @@
     xor %edx, %edx
 1:
     ret
-END_FUNCTION art_quick_lushr_from_code
+END_FUNCTION art_quick_lushr
 
-DEFINE_FUNCTION art_quick_set32_instance_from_code
+DEFINE_FUNCTION art_quick_set32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     subl LITERAL(8), %esp         // alignment padding
@@ -599,9 +599,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set32_instance_from_code
+END_FUNCTION art_quick_set32_instance
 
-DEFINE_FUNCTION art_quick_set64_instance_from_code
+DEFINE_FUNCTION art_quick_set64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     subl LITERAL(8), %esp         // alignment padding
     .cfi_adjust_cfa_offset 8
@@ -618,9 +618,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set64_instance_from_code
+END_FUNCTION art_quick_set64_instance
 
-DEFINE_FUNCTION art_quick_set_obj_instance_from_code
+DEFINE_FUNCTION art_quick_set_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     subl LITERAL(8), %esp         // alignment padding
@@ -638,9 +638,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set_obj_instance_from_code
+END_FUNCTION art_quick_set_obj_instance
 
-DEFINE_FUNCTION art_quick_get32_instance_from_code
+DEFINE_FUNCTION art_quick_get32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -657,9 +657,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get32_instance_from_code
+END_FUNCTION art_quick_get32_instance
 
-DEFINE_FUNCTION art_quick_get64_instance_from_code
+DEFINE_FUNCTION art_quick_get64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -676,9 +676,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get64_instance_from_code
+END_FUNCTION art_quick_get64_instance
 
-DEFINE_FUNCTION art_quick_get_obj_instance_from_code
+DEFINE_FUNCTION art_quick_get_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -695,9 +695,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get_obj_instance_from_code
+END_FUNCTION art_quick_get_obj_instance
 
-DEFINE_FUNCTION art_quick_set32_static_from_code
+DEFINE_FUNCTION art_quick_set32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -714,9 +714,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set32_static_from_code
+END_FUNCTION art_quick_set32_static
 
-DEFINE_FUNCTION art_quick_set64_static_from_code
+DEFINE_FUNCTION art_quick_set64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     subl LITERAL(8), %esp         // alignment padding
@@ -734,9 +734,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set64_static_from_code
+END_FUNCTION art_quick_set64_static
 
-DEFINE_FUNCTION art_quick_set_obj_static_from_code
+DEFINE_FUNCTION art_quick_set_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -752,9 +752,9 @@
     addl LITERAL(32), %esp        // pop arguments
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set_obj_static_from_code
+END_FUNCTION art_quick_set_obj_static
 
-DEFINE_FUNCTION art_quick_get32_static_from_code
+DEFINE_FUNCTION art_quick_get32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %edx                // remember SP
     mov 32(%esp), %ecx            // get referrer
@@ -768,9 +768,9 @@
     .cfi_adjust_cfa_offset -16
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get32_static_from_code
+END_FUNCTION art_quick_get32_static
 
-DEFINE_FUNCTION art_quick_get64_static_from_code
+DEFINE_FUNCTION art_quick_get64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
     mov 32(%esp), %ecx            // get referrer
@@ -784,9 +784,9 @@
     .cfi_adjust_cfa_offset -16
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get64_static_from_code
+END_FUNCTION art_quick_get64_static
 
-DEFINE_FUNCTION art_quick_get_obj_static_from_code
+DEFINE_FUNCTION art_quick_get_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %edx                // remember SP
     mov 32(%esp), %ecx            // get referrer
@@ -800,7 +800,7 @@
     .cfi_adjust_cfa_offset -16
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get_obj_static_from_code
+END_FUNCTION art_quick_get_obj_static
 
 DEFINE_FUNCTION art_quick_proxy_invoke_handler
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // save frame and Method*
@@ -818,7 +818,32 @@
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
 END_FUNCTION art_quick_proxy_invoke_handler
 
-DEFINE_FUNCTION art_quick_interpreter_entry
+DEFINE_FUNCTION art_quick_resolution_trampoline
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    PUSH esp                      // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    .cfi_adjust_cfa_offset 4
+    PUSH ecx                      // pass receiver
+    PUSH eax                      // pass method
+    call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
+    movl %eax, %edi               // remember code pointer in EDI
+    addl LITERAL(16), %esp        // pop arguments
+    test %eax, %eax               // if code pointer is NULL goto deliver pending exception
+    jz 1f
+    POP eax                       // called method
+    POP ecx                       // restore args
+    POP edx
+    POP ebx
+    POP ebp                       // restore callee saves except EDI
+    POP esi
+    xchgl 0(%esp), %edi           // restore EDI and place code pointer as only value on stack
+    ret                           // tail call into method
+1:
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_resolution_trampoline
+
+DEFINE_FUNCTION art_quick_to_interpreter_bridge
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // save frame
     mov %esp, %edx                // remember SP
     PUSH eax                      // alignment padding
@@ -826,19 +851,19 @@
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     .cfi_adjust_cfa_offset 4
     PUSH eax                      // pass method
-    call SYMBOL(artInterpreterEntry)  // (method, Thread*, SP)
+    call SYMBOL(artQuickToInterpreterBridge)  // (method, Thread*, SP)
     movd %eax, %xmm0              // place return value also into floating point return value
     movd %edx, %xmm1
     punpckldq %xmm1, %xmm0
     addl LITERAL(44), %esp        // pop arguments
     .cfi_adjust_cfa_offset -44
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_interpreter_entry
+END_FUNCTION art_quick_to_interpreter_bridge
 
     /*
      * Routine that intercepts method calls and returns.
      */
-DEFINE_FUNCTION art_quick_instrumentation_entry_from_code
+DEFINE_FUNCTION art_quick_instrumentation_entry
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     movl  %esp, %edx              // Save SP.
     PUSH eax                      // Save eax which will be clobbered by the callee-save method.
@@ -855,7 +880,7 @@
     addl  LITERAL(28), %esp       // Pop arguments up to saved Method*.
     movl 28(%esp), %edi           // Restore edi.
     movl %eax, 28(%esp)           // Place code* over edi, just under return pc.
-    movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp)
+    movl LITERAL(SYMBOL(art_quick_instrumentation_exit)), 32(%esp)
                                   // Place instrumentation exit as return pc.
     movl (%esp), %eax             // Restore eax.
     movl 8(%esp), %ecx            // Restore ecx.
@@ -865,9 +890,9 @@
     movl 24(%esp), %esi           // Restore esi.
     addl LITERAL(28), %esp        // Wind stack back up to code*.
     ret                           // Call method (and pop).
-END_FUNCTION art_quick_instrumentation_entry_from_code
+END_FUNCTION art_quick_instrumentation_entry
 
-DEFINE_FUNCTION art_quick_instrumentation_exit_from_code
+DEFINE_FUNCTION art_quick_instrumentation_exit
     pushl LITERAL(0)              // Push a fake return PC as there will be none on the stack.
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME
     mov  %esp, %ecx               // Remember SP
@@ -900,7 +925,7 @@
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
     addl LITERAL(4), %esp         // Remove fake return pc.
     jmp   *%ecx                   // Return.
-END_FUNCTION art_quick_instrumentation_exit_from_code
+END_FUNCTION art_quick_instrumentation_exit
 
     /*
      * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -920,21 +945,6 @@
 END_FUNCTION art_quick_deoptimize
 
     /*
-     * Quick abstract method error stub. %eax contains method* on entry.
-     */
-DEFINE_FUNCTION art_quick_abstract_method_error_stub
-    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
-    movl %esp, %ecx               // Remember SP.
-    PUSH eax                      // Align frame.
-    PUSH ecx                      // Pass SP for Method*.
-    pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
-    .cfi_adjust_cfa_offset 4
-    PUSH eax                      // Pass Method*.
-    call SYMBOL(artThrowAbstractMethodErrorFromCode)  // (Method*, Thread*, SP)
-    int3                          // Unreachable.
-END_FUNCTION art_quick_abstract_method_error_stub
-
-    /*
      * String's indexOf.
      *
      * On entry:
@@ -1030,12 +1040,5 @@
     ret
 END_FUNCTION art_quick_string_compareto
 
-MACRO1(UNIMPLEMENTED,name)
-    .globl VAR(name, 0)
-    ALIGN_FUNCTION_ENTRY
-VAR(name, 0):
-    int3
-END_MACRO
-
     // TODO: implement these!
 UNIMPLEMENTED art_quick_memcmp16
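
One routine above is worth decoding since only its name changed:
art_quick_lmul builds a 64x64->64 multiply out of 32-bit halves, dropping the
a.hi*b.hi term because it only feeds bits 64 and up. A C++ reference model of
the same arithmetic (illustrative, not ART code):

    #include <cstdint>

    uint64_t Lmul(uint64_t a, uint64_t b) {
      uint32_t a_lo = static_cast<uint32_t>(a);
      uint32_t a_hi = static_cast<uint32_t>(a >> 32);
      uint32_t b_lo = static_cast<uint32_t>(b);
      uint32_t b_hi = static_cast<uint32_t>(b >> 32);
      uint64_t lo = static_cast<uint64_t>(a_lo) * b_lo;  // mul: edx:eax
      uint32_t cross = a_lo * b_hi + b_lo * a_hi;        // the two imuls + add
      return lo + (static_cast<uint64_t>(cross) << 32);  // final add into edx
    }
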
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 83ecca8..7d54baf 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -156,8 +156,6 @@
   if (data_->severity == FATAL) {
     Runtime::Abort();
   }
-
-  delete data_;
 }
 
 HexDump::HexDump(const void* address, size_t byte_count, bool show_actual_addresses)
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index d641ae4..eafa050 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -24,6 +24,7 @@
 #include <signal.h>
 #include "base/macros.h"
 #include "log_severity.h"
+#include "UniquePtr.h"
 
 #define CHECK(x) \
   if (UNLIKELY(!(x))) \
@@ -194,7 +195,7 @@
  private:
   static void LogLine(const LogMessageData& data, const char*);
 
-  LogMessageData* const data_;
+  const UniquePtr<LogMessageData> data_;
 
   friend void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context);
   friend class Mutex;
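
This hunk pairs with the logging.cc change above: the destructor's manual
delete of data_ disappears because ownership moves into a scoped pointer, so
every exit path releases it without an explicit statement. Roughly, assuming
ART's UniquePtr behaves like a pre-C++11 scoped owning pointer:

    struct LogMessageData { /* severity, file, line, buffer, ... */ };

    class LogMessage {
     public:
      explicit LogMessage(LogMessageData* data) : data_(data) {}
      ~LogMessage() {
        // data_ is destroyed here automatically; no delete needed even on
        // early returns or when severity handling branches.
      }
     private:
      const UniquePtr<LogMessageData> data_;
    };
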
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 879c10c..6531858 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -142,6 +142,8 @@
 #define HOT_ATTR __attribute__ ((hot))
 #endif
 
+#define PURE __attribute__ ((__pure__))
+
 // bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
 #ifndef TEMP_FAILURE_RETRY
 #define TEMP_FAILURE_RETRY(exp) ({ \
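
PURE is new here; GCC's pure attribute declares that a function has no side
effects and depends only on its arguments plus readable global memory, which
licenses the compiler to merge repeated calls. A hypothetical use:

    #include <cstddef>
    #include <cstdint>

    size_t CountBits(uint32_t x) PURE;  // two calls with equal x may be folded

    size_t CountBits(uint32_t x) {
      size_t n = 0;
      for (; x != 0; x &= x - 1) {  // clear the lowest set bit each pass
        ++n;
      }
      return n;
    }
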
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index b924798..21ba0d2 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -238,7 +238,7 @@
 
   // Assert the current thread has exclusive access to the ReaderWriterMutex.
   void AssertExclusiveHeld(const Thread* self) {
-    if (kDebugLocking & (gAborting == 0)) {
+    if (kDebugLocking && (gAborting == 0)) {
       CHECK(IsExclusiveHeld(self)) << *this;
     }
   }
@@ -246,7 +246,7 @@
 
   // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
   void AssertNotExclusiveHeld(const Thread* self) {
-    if (kDebugLocking & (gAborting == 0)) {
+    if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsExclusiveHeld(self)) << *this;
     }
   }
@@ -257,7 +257,7 @@
 
   // Assert the current thread has shared access to the ReaderWriterMutex.
   void AssertSharedHeld(const Thread* self) {
-    if (kDebugLocking  & (gAborting == 0)) {
+    if (kDebugLocking && (gAborting == 0)) {
       // TODO: we can only assert this well when self != NULL.
       CHECK(IsSharedHeld(self) || self == NULL) << *this;
     }
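
All three hunks above fix the same slip: kDebugLocking & (gAborting == 0)
applies bitwise AND where logical AND was meant. The truth value happens to
come out the same for these operands, but && states the intent and
short-circuits, so the global is not even read when the debug flag is off.
In miniature:

    static const bool kDebugLocking = false;  // stand-in for ART's constant
    extern int gAborting;

    bool ShouldAssert() {
      // With '&&' the right side is skipped when kDebugLocking is false;
      // with '&' both operands are always evaluated before combining.
      return kDebugLocking && (gAborting == 0);
    }
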
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6052993..71959c6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -71,7 +71,7 @@
 
 namespace art {
 
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
                                            const DexFile::CodeItem* code_item,
                                            ShadowFrame* shadow_frame, JValue* result);
 
@@ -944,6 +944,43 @@
   return oat_file;
 }
 
+static void InitFromImageCallbackCommon(mirror::Object* obj, ClassLinker* class_linker,
+                                        bool interpret_only_mode)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(obj != NULL);
+  DCHECK(class_linker != NULL);
+
+  if (obj->GetClass()->IsStringClass()) {
+    class_linker->GetInternTable()->RegisterStrong(obj->AsString());
+  } else if (obj->IsClass()) {
+    // Restore class to ClassLinker::classes_ table.
+    mirror::Class* klass = obj->AsClass();
+    ClassHelper kh(klass, class_linker);
+    mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true);
+    DCHECK(existing == NULL) << kh.GetDescriptor();
+  } else if (interpret_only_mode && obj->IsMethod()) {
+    mirror::AbstractMethod* method = obj->AsMethod();
+    if (!method->IsNative()) {
+      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
+      if (method != Runtime::Current()->GetResolutionMethod()) {
+        method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
+      }
+    }
+  }
+}
+
+static void InitFromImageCallback(mirror::Object* obj, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
+  InitFromImageCallbackCommon(obj, class_linker, false);
+}
+
+static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
+  InitFromImageCallbackCommon(obj, class_linker, true);
+}
+
 void ClassLinker::InitFromImage() {
   VLOG(startup) << "ClassLinker::InitFromImage entering";
   CHECK(!init_done_);
@@ -997,7 +1034,11 @@
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     heap->FlushAllocStack();
-    heap->GetLiveBitmap()->Walk(InitFromImageCallback, this);
+    if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+      heap->GetLiveBitmap()->Walk(InitFromImageInterpretOnlyCallback, this);
+    } else {
+      heap->GetLiveBitmap()->Walk(InitFromImageCallback, this);
+    }
   }
 
   // reinit class_roots_
@@ -1025,40 +1066,6 @@
   VLOG(startup) << "ClassLinker::InitFromImage exiting";
 }
 
-void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) {
-  DCHECK(obj != NULL);
-  DCHECK(arg != NULL);
-  ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
-
-  if (obj->GetClass()->IsStringClass()) {
-    class_linker->intern_table_->RegisterStrong(obj->AsString());
-    return;
-  }
-  if (obj->IsClass()) {
-    // restore class to ClassLinker::classes_ table
-    mirror::Class* klass = obj->AsClass();
-    ClassHelper kh(klass, class_linker);
-    mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true);
-    DCHECK(existing == NULL) << kh.GetDescriptor();
-    return;
-  }
-
-  if (obj->IsMethod()) {
-    mirror::AbstractMethod* method = obj->AsMethod();
-    // Set entry points to interpreter for methods in interpreter only mode.
-    if (Runtime::Current()->GetInstrumentation()->InterpretOnly() && !method->IsNative()) {
-      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
-      if (method != Runtime::Current()->GetResolutionMethod()) {
-        method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
-      }
-    }
-    // Populate native method pointer with jni lookup stub.
-    if (method->IsNative()) {
-      method->UnregisterNative(Thread::Current());
-    }
-  }
-}
-
 // Keep in sync with InitCallback. Anything we visit, we need to
 // reinit references to when reinitializing a ClassLinker from a
 // mapped image.
@@ -1558,7 +1565,7 @@
   const void* result = GetOatMethodFor(method).GetCode();
   if (result == NULL) {
     // No code? You must mean to go into the interpreter.
-    result = GetInterpreterEntryPoint();
+    result = GetCompiledCodeToInterpreterBridge();
   }
   return result;
 }
@@ -1619,7 +1626,7 @@
     const bool enter_interpreter = NeedsInterpreter(method, code);
     if (enter_interpreter) {
       // Use interpreter entry point.
-      code = GetInterpreterEntryPoint();
+      code = GetCompiledCodeToInterpreterBridge();
     }
     runtime->GetInstrumentation()->UpdateMethodsCode(method, code);
   }
@@ -1640,13 +1647,13 @@
   Runtime* runtime = Runtime::Current();
   bool enter_interpreter = NeedsInterpreter(method.get(), method->GetEntryPointFromCompiledCode());
   if (enter_interpreter) {
-    method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
+    method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
   } else {
-    method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry);
+    method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
   }
 
   if (method->IsAbstract()) {
-    method->SetEntryPointFromCompiledCode(GetAbstractMethodErrorStub());
+    method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
     return;
   }
 
@@ -1657,7 +1664,7 @@
     method->SetEntryPointFromCompiledCode(GetResolutionTrampoline(runtime->GetClassLinker()));
   } else if (enter_interpreter) {
     // Set entry point from compiled code if there's no code or in interpreter only mode.
-    method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
+    method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
   }
 
   if (method->IsNative()) {
@@ -2625,12 +2632,8 @@
   method->SetCoreSpillMask(refs_and_args->GetCoreSpillMask());
   method->SetFpSpillMask(refs_and_args->GetFpSpillMask());
   method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes());
-#if !defined(ART_USE_PORTABLE_COMPILER)
-  method->SetEntryPointFromCompiledCode(reinterpret_cast<void*>(art_quick_proxy_invoke_handler));
-#else
-  method->SetEntryPointFromCompiledCode(reinterpret_cast<void*>(art_portable_proxy_invoke_handler));
-#endif
-  method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry);
+  method->SetEntryPointFromCompiledCode(GetProxyInvokeHandler());
+  method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
 
   return method;
 }
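
The InitFromImage refactor above is the classic C-callback adapter pattern:
HeapBitmap::Walk takes only a function pointer plus one void*, so the
interpret-only flag cannot be passed directly; the shared logic moves into one
helper and two thin wrappers bake the flag in. The pattern in isolation, with
stand-in types:

    typedef void Callback(void* obj, void* arg);

    // Stand-in for HeapBitmap::Walk: visits objects, forwarding one void*.
    static void Walk(Callback* visitor, void* arg) {
      void* obj = 0;  // elided: loop over live heap objects
      visitor(obj, arg);
    }

    static void VisitCommon(void* obj, void* linker, bool interpret_only) {
      // shared per-object work; branch on interpret_only where needed
    }

    static void VisitDefault(void* obj, void* arg) {
      VisitCommon(obj, arg, false);
    }

    static void VisitInterpretOnly(void* obj, void* arg) {
      VisitCommon(obj, arg, true);
    }

    void Init(void* linker, bool interpret_only) {
      Walk(interpret_only ? VisitInterpretOnly : VisitDefault, linker);
    }
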
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index fdf75c2..060c26c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -347,6 +347,17 @@
     return quick_resolution_trampoline_;
   }
 
+  InternTable* GetInternTable() const {
+    return intern_table_;
+  }
+
+  // Attempts to insert a class into a class table.  Returns NULL if
+  // the class was inserted, otherwise returns an existing class with
+  // the same descriptor and ClassLoader.
+  mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class)
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   explicit ClassLinker(InternTable*);
 
@@ -362,8 +373,6 @@
   OatFile& GetImageOatFile(gc::space::ImageSpace* space)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void InitFromImageCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -423,13 +432,6 @@
   const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Attempts to insert a class into a class table.  Returns NULL if
-  // the class was inserted, otherwise returns an existing class with
-  // the same descriptor and ClassLoader.
-  mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache)
       EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
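
Since InsertClass moves to the public section (the new image callbacks call it
from file scope), its inverted return convention deserves emphasis: NULL
signals success. A usage sketch with the declarations reduced to the minimum;
DefineOrAdopt is a hypothetical caller, not ART code:

    #include <cstddef>

    namespace mirror { class Class; }
    class StringPiece;
    class ClassLinker {
     public:
      mirror::Class* InsertClass(const StringPiece& descriptor,
                                 mirror::Class* klass, bool image_class);
    };

    // NULL means our klass was registered; non-NULL means another thread won
    // the race with an equivalent definition, so the caller adopts that one.
    mirror::Class* DefineOrAdopt(ClassLinker* linker,
                                 const StringPiece& descriptor,
                                 mirror::Class* klass) {
      mirror::Class* existing = linker->InsertClass(descriptor, klass, false);
      return (existing != NULL) ? existing : klass;
    }
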
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 7ee6fe2..7110e11 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -168,8 +168,8 @@
                                      const size_t frame_size_in_bytes,
                                      const uint32_t core_spill_mask,
                                      const uint32_t fp_spill_mask,
-                                     const uint32_t* mapping_table,
-                                     const uint16_t* vmap_table,
+                                     const uint8_t* mapping_table,
+                                     const uint8_t* vmap_table,
                                      const uint8_t* gc_map) {
       return OatFile::OatMethod(NULL,
                                 reinterpret_cast<uint32_t>(code),
@@ -192,10 +192,7 @@
       compiled_method =
           compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
                                                               method->GetDexMethodIndex()));
-
-#ifndef ART_LIGHT_MODE
       CHECK(compiled_method != NULL) << PrettyMethod(method);
-#endif
     }
     if (compiled_method != NULL) {
       const std::vector<uint8_t>& code = compiled_method->GetCode();
@@ -213,12 +210,8 @@
       oat_method.LinkMethod(method);
     } else {
       const void* method_code;
-      if (method->IsAbstract()) {
-        method_code = GetAbstractMethodErrorStub();
-      } else {
-        // No code? You must mean to go into the interpreter.
-        method_code = GetInterpreterEntryPoint();
-      }
+      // No code? You must mean to go into the interpreter.
+      method_code = GetCompiledCodeToInterpreterBridge();
       LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
       OatFile::OatMethod oat_method = CreateOatMethod(method_code,
                                                       kStackAlignment,
diff --git a/runtime/compiled_method.cc b/runtime/compiled_method.cc
index c64c71e..4631cb5 100644
--- a/runtime/compiled_method.cc
+++ b/runtime/compiled_method.cc
@@ -112,35 +112,13 @@
                                const size_t frame_size_in_bytes,
                                const uint32_t core_spill_mask,
                                const uint32_t fp_spill_mask,
-                               const std::vector<uint32_t>& mapping_table,
-                               const std::vector<uint16_t>& vmap_table,
+                               const std::vector<uint8_t>& mapping_table,
+                               const std::vector<uint8_t>& vmap_table,
                                const std::vector<uint8_t>& native_gc_map)
     : CompiledCode(instruction_set, code), frame_size_in_bytes_(frame_size_in_bytes),
       core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+      mapping_table_(mapping_table), vmap_table_(vmap_table),
       gc_map_(native_gc_map) {
-  DCHECK_EQ(vmap_table.size(),
-            static_cast<uint32_t>(__builtin_popcount(core_spill_mask)
-                                  + __builtin_popcount(fp_spill_mask)));
-  CHECK_LE(vmap_table.size(), (1U << 16) - 1);  // length must fit in 2^16-1
-
-  std::vector<uint32_t> length_prefixed_mapping_table;
-  length_prefixed_mapping_table.push_back(mapping_table.size());
-  length_prefixed_mapping_table.insert(length_prefixed_mapping_table.end(),
-                                       mapping_table.begin(),
-                                       mapping_table.end());
-  DCHECK_EQ(mapping_table.size() + 1, length_prefixed_mapping_table.size());
-
-  std::vector<uint16_t> length_prefixed_vmap_table;
-  length_prefixed_vmap_table.push_back(vmap_table.size());
-  length_prefixed_vmap_table.insert(length_prefixed_vmap_table.end(),
-                                    vmap_table.begin(),
-                                    vmap_table.end());
-  DCHECK_EQ(vmap_table.size() + 1, length_prefixed_vmap_table.size());
-  DCHECK_EQ(vmap_table.size(), length_prefixed_vmap_table[0]);
-
-  mapping_table_ = length_prefixed_mapping_table;
-  vmap_table_ = length_prefixed_vmap_table;
-  DCHECK_EQ(vmap_table_[0], static_cast<uint32_t>(__builtin_popcount(core_spill_mask) + __builtin_popcount(fp_spill_mask)));
 }
 
 CompiledMethod::CompiledMethod(InstructionSet instruction_set,
diff --git a/runtime/compiled_method.h b/runtime/compiled_method.h
index 800dde2..b3bb20f 100644
--- a/runtime/compiled_method.h
+++ b/runtime/compiled_method.h
@@ -103,8 +103,8 @@
                  const size_t frame_size_in_bytes,
                  const uint32_t core_spill_mask,
                  const uint32_t fp_spill_mask,
-                 const std::vector<uint32_t>& mapping_table,
-                 const std::vector<uint16_t>& vmap_table,
+                 const std::vector<uint8_t>& mapping_table,
+                 const std::vector<uint8_t>& vmap_table,
                  const std::vector<uint8_t>& native_gc_map);
 
   // Constructs a CompiledMethod for the JniCompiler.
@@ -147,11 +147,11 @@
     return fp_spill_mask_;
   }
 
-  const std::vector<uint32_t>& GetMappingTable() const {
+  const std::vector<uint8_t>& GetMappingTable() const {
     return mapping_table_;
   }
 
-  const std::vector<uint16_t>& GetVmapTable() const {
+  const std::vector<uint8_t>& GetVmapTable() const {
     return vmap_table_;
   }
 
@@ -166,10 +166,11 @@
   const uint32_t core_spill_mask_;
   // For quick code, a bit mask describing spilled FPR callee-save registers.
   const uint32_t fp_spill_mask_;
-  // For quick code, a map from native PC offset to dex PC.
-  std::vector<uint32_t> mapping_table_;
-  // For quick code, a map from GPR/FPR register to dex register.
-  std::vector<uint16_t> vmap_table_;
+  // For quick code, a uleb128 encoded map from native PC offset to dex PC as well as dex PC to
+  // native PC offset. Size prefixed.
+  std::vector<uint8_t> mapping_table_;
+  // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
+  std::vector<uint8_t> vmap_table_;
   // For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
   // are live. For portable code, the key is a dalvik PC.
   std::vector<uint8_t> gc_map_;
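
Both tables switch from fixed-width entries to byte vectors because the
payload is now ULEB128, the dex format's little-endian base-128 varint: 7
value bits per byte, high bit set on every byte but the last, so small PC
deltas and register numbers cost a single byte. A self-contained sketch of
that encoding:

    #include <cstdint>
    #include <vector>

    void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
      do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value != 0) {
          byte |= 0x80;  // more bytes follow
        }
        out->push_back(byte);
      } while (value != 0);
    }

    uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }
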
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index cbdc430..aaff0fc 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -313,7 +313,6 @@
   method_ids_ = reinterpret_cast<const MethodId*>(b + h->method_ids_off_);
   proto_ids_ = reinterpret_cast<const ProtoId*>(b + h->proto_ids_off_);
   class_defs_ = reinterpret_cast<const ClassDef*>(b + h->class_defs_off_);
-  DCHECK_EQ(size_, header_->file_size_) << GetLocation();
 }
 
 bool DexFile::CheckMagicAndVersion() const {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 3f28b5e..b6781c0 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -30,24 +30,13 @@
 #include "object_utils.h"
 #include "thread.h"
 
-extern "C" void art_interpreter_invoke_handler();
-extern "C" void art_jni_dlsym_lookup_stub();
-extern "C" void art_portable_abstract_method_error_stub();
-extern "C" void art_portable_proxy_invoke_handler();
-extern "C" void art_quick_abstract_method_error_stub();
-extern "C" void art_quick_deoptimize();
-extern "C" void art_quick_instrumentation_entry_from_code(void*);
-extern "C" void art_quick_instrumentation_exit_from_code();
-extern "C" void art_quick_interpreter_entry(void*);
-extern "C" void art_quick_proxy_invoke_handler();
-extern "C" void art_work_around_app_jni_bugs();
-
 namespace art {
+
 namespace mirror {
-class Class;
-class Field;
-class Object;
-}
+  class Class;
+  class Field;
+  class Object;
+}  // namespace mirror
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
 // cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -350,25 +339,43 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Entry point for deoptimization.
-static inline uintptr_t GetDeoptimizationEntryPoint() {
+extern "C" void art_quick_deoptimize();
+static inline uintptr_t GetQuickDeoptimizationEntryPoint() {
   return reinterpret_cast<uintptr_t>(art_quick_deoptimize);
 }
 
 // Return address of instrumentation stub.
-static inline void* GetInstrumentationEntryPoint() {
-  return reinterpret_cast<void*>(art_quick_instrumentation_entry_from_code);
+extern "C" void art_quick_instrumentation_entry(void*);
+static inline void* GetQuickInstrumentationEntryPoint() {
+  return reinterpret_cast<void*>(art_quick_instrumentation_entry);
 }
 
 // The return_pc of instrumentation exit stub.
-static inline uintptr_t GetInstrumentationExitPc() {
-  return reinterpret_cast<uintptr_t>(art_quick_instrumentation_exit_from_code);
+extern "C" void art_quick_instrumentation_exit();
+static inline uintptr_t GetQuickInstrumentationExitPc() {
+  return reinterpret_cast<uintptr_t>(art_quick_instrumentation_exit);
+}
+
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+static inline const void* GetPortableToInterpreterBridge() {
+  return reinterpret_cast<void*>(art_portable_to_interpreter_bridge);
+}
+
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
+static inline const void* GetQuickToInterpreterBridge() {
+  return reinterpret_cast<void*>(art_quick_to_interpreter_bridge);
 }
 
 // Return address of interpreter stub.
-static inline void* GetInterpreterEntryPoint() {
-  return reinterpret_cast<void*>(art_quick_interpreter_entry);
+static inline const void* GetCompiledCodeToInterpreterBridge() {
+#if defined(ART_USE_PORTABLE_COMPILER)
+  return GetPortableToInterpreterBridge();
+#else
+  return GetQuickToInterpreterBridge();
+#endif
 }
 
+
 static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) {
   return class_linker->GetPortableResolutionTrampoline();
 }
@@ -386,23 +393,25 @@
 #endif
 }
 
-static inline void* GetPortableAbstractMethodErrorStub() {
-  return reinterpret_cast<void*>(art_portable_abstract_method_error_stub);
+extern "C" void art_portable_proxy_invoke_handler();
+static inline const void* GetPortableProxyInvokeHandler() {
+  return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
 }
 
-static inline void* GetQuickAbstractMethodErrorStub() {
-  return reinterpret_cast<void*>(art_quick_abstract_method_error_stub);
+extern "C" void art_quick_proxy_invoke_handler();
+static inline const void* GetQuickProxyInvokeHandler() {
+  return reinterpret_cast<void*>(art_quick_proxy_invoke_handler);
 }
 
-// Return address of abstract method error stub for defined compiler.
-static inline void* GetAbstractMethodErrorStub() {
+static inline const void* GetProxyInvokeHandler() {
 #if defined(ART_USE_PORTABLE_COMPILER)
-  return GetPortableAbstractMethodErrorStub();
+  return GetPortableProxyInvokeHandler();
 #else
-  return GetQuickAbstractMethodErrorStub();
+  return GetQuickProxyInvokeHandler();
 #endif
 }
 
+extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject);
 static inline void* GetJniDlsymLookupStub() {
   return reinterpret_cast<void*>(art_jni_dlsym_lookup_stub);
 }
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
new file mode 100644
index 0000000..d99c43e
--- /dev/null
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "interpreter/interpreter.h"
+#include "invoke_arg_array_builder.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "runtime.h"
+#include "stack.h"
+
+namespace art {
+
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+                                                   const DexFile::CodeItem* code_item,
+                                                   ShadowFrame* shadow_frame, JValue* result)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::AbstractMethod* method = shadow_frame->GetMethod();
+  // Ensure static methods are initialized.
+  if (method->IsStatic()) {
+    Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true);
+  }
+  uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+  ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+  arg_array.BuildArgArray(shadow_frame, arg_offset);
+  method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
+}
+
+}  // namespace art
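
A note on the last line of the new bridge: mh.GetShorty()[0] is the
return-type character, which tells Invoke how to store into the JValue
out-parameter. A reduced model of that union and a typical check (member
names mirror ART's JValue, trimmed for illustration):

    #include <cstdint>

    union JValue {
      uint8_t z;   // boolean
      int8_t b;    // byte
      uint16_t c;  // char
      int16_t s;   // short
      int32_t i;   // int
      int64_t j;   // long
      float f;
      double d;
      void* l;     // reference
    };

    bool IsWideReturn(const char* shorty) {
      return shorty[0] == 'J' || shorty[0] == 'D';  // long/double: 64 bits
    }
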
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.h b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
new file mode 100644
index 0000000..c7df4e6
--- /dev/null
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+#define INTERPRETER_ENTRYPOINT_OFFSET(x) \
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, interpreter_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(InterpreterEntryPoints, x)))
+
+namespace art {
+
+union JValue;
+class MethodHelper;
+class ShadowFrame;
+class Thread;
+
+// Pointers to functions that are called by interpreter trampolines via thread-local storage.
+struct PACKED(4) InterpreterEntryPoints {
+  void (*pInterpreterToInterpreterBridge)(Thread* self, MethodHelper& mh,
+                                          const DexFile::CodeItem* code_item,
+                                          ShadowFrame* shadow_frame, JValue* result);
+  void (*pInterpreterToCompiledCodeBridge)(Thread* self, MethodHelper& mh,
+                                           const DexFile::CodeItem* code_item,
+                                           ShadowFrame* shadow_frame, JValue* result);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
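
The offset macro above composes two member offsets so the code generators can
address a single slot relative to the thread register. In plain C++ terms,
with a stand-in layout:

    #include <cstddef>

    struct InterpreterEntryPoints {
      void (*pInterpreterToInterpreterBridge)();
      void (*pInterpreterToCompiledCodeBridge)();
    };

    struct Thread {
      char earlier_fields_[128];  // stand-in for whatever precedes the table
      InterpreterEntryPoints interpreter_entrypoints_;
    };

    // Same arithmetic as INTERPRETER_ENTRYPOINT_OFFSET, minus the
    // ThreadOffset wrapper: offset of the table within Thread plus offset of
    // the slot within the table (128 + sizeof(void*) with this layout).
    const size_t kBridgeSlotOffset =
        offsetof(Thread, interpreter_entrypoints_) +
        offsetof(InterpreterEntryPoints, pInterpreterToCompiledCodeBridge);
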
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 98f7b12..88b4936 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -15,23 +15,26 @@
  */
 
 #include "base/logging.h"
-#include "mirror/abstract_method.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
 #include "scoped_thread_state_change.h"
 #include "thread.h"
 
 namespace art {
 
 // Used by the JNI dlsym stub to find the native method to invoke if none is registered.
-extern "C" void* artFindNativeMethod(Thread* self) {
+extern "C" void* artFindNativeMethod() {
+  Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertNotHeld(self);  // We come here as Native.
-  DCHECK(Thread::Current() == self);
   ScopedObjectAccess soa(self);
 
   mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
   DCHECK(method != NULL);
 
-  // Lookup symbol address for method, on failure we'll return NULL with an
-  // exception set, otherwise we return the address of the method we found.
+  // Lookup symbol address for method, on failure we'll return NULL with an exception set,
+  // otherwise we return the address of the method we found.
   void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
   if (native_code == NULL) {
     DCHECK(self->IsExceptionPending());
@@ -43,4 +46,78 @@
   }
 }
 
+static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) {
+  intptr_t value = *arg_ptr;
+  mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
+  mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
+  CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep))
+      << value_as_work_around_rep;
+  *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
+}
+
+extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(Thread::Current() == self);
+  // TODO: this code is specific to ARM
+  // On entry the stack pointed by sp is:
+  // | arg3   | <- Calling JNI method's frame (and extra bit for out args)
+  // | LR     |
+  // | R3     |    arg2
+  // | R2     |    arg1
+  // | R1     |    jclass/jobject
+  // | R0     |    JNIEnv
+  // | unused |
+  // | unused |
+  // | unused | <- sp
+  mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL);
+  DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
+  intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
+  // Fix up this/jclass argument
+  WorkAroundJniBugsForJobject(arg_ptr);
+  arg_ptr++;
+  // Fix up jobject arguments
+  MethodHelper mh(jni_method);
+  int reg_num = 2;  // Current register being processed, -1 for stack arguments.
+  for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
+    char shorty_char = mh.GetShorty()[i];
+    if (shorty_char == 'L') {
+      WorkAroundJniBugsForJobject(arg_ptr);
+    }
+    if (shorty_char == 'J' || shorty_char == 'D') {
+      if (reg_num == 2) {
+        arg_ptr = sp + 8;  // skip to out arguments
+        reg_num = -1;
+      } else if (reg_num == 3) {
+        arg_ptr = sp + 10;  // skip to out arguments plus 2 slots as long must be aligned
+        reg_num = -1;
+      } else {
+        DCHECK_EQ(reg_num, -1);
+        if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
+          arg_ptr += 3;  // unaligned, pad and move through stack arguments
+        } else {
+          arg_ptr += 2;  // aligned, move through stack arguments
+        }
+      }
+    } else {
+      if (reg_num == 2) {
+        arg_ptr++;  // move through register arguments
+        reg_num++;
+      } else if (reg_num == 3) {
+        arg_ptr = sp + 8;  // skip to outgoing stack arguments
+        reg_num = -1;
+      } else {
+        DCHECK_EQ(reg_num, -1);
+        arg_ptr++;  // move through stack arguments
+      }
+    }
+  }
+  // Load expected destination, see Method::RegisterNative
+  const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
+  if (UNLIKELY(code == NULL)) {
+    code = GetJniDlsymLookupStub();
+    jni_method->RegisterNative(self, code);
+  }
+  return code;
+}
+
 }  // namespace art
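
artFindNativeMethod now fetches the current thread itself, and the ARM-specific artWorkAroundAppJniBugs walks the caller's arguments by shorty character, tracking whether the next argument sits in r2/r3 or in the out-args area and realigning 64-bit values. A standalone sketch of that classification, simplified to track word-slot parity where the real routine computes absolute sp-relative addresses, so its padding is an approximation:

#include <cstdio>
#include <cstring>

// Classifies JNI call arguments the way artWorkAroundAppJniBugs walks them:
// after JNIEnv (r0) and this/jclass (r1), the shorty's first arguments land
// in r2/r3 and the rest spill to the out-args area, with 'J'/'D' taking two
// word slots.
static void ClassifyArgs(const char* shorty) {
  int reg_num = 2;     // next free core register; -1 once spilled to stack
  int stack_slot = 0;  // word index into the out-args area
  for (std::size_t i = 1; i < std::strlen(shorty); ++i) {  // [0] is the return type
    char c = shorty[i];
    bool wide = (c == 'J' || c == 'D');
    if (reg_num == 2 && !wide) {
      std::printf("arg %zu (%c): r2\n", i, c);
      reg_num = 3;
    } else if (reg_num == 3 && !wide) {
      std::printf("arg %zu (%c): r3\n", i, c);
      reg_num = -1;
    } else {
      if (wide && (stack_slot & 1) != 0) {
        ++stack_slot;  // pad so the 64-bit value is doubleword aligned
      }
      std::printf("arg %zu (%c): stack slot %d%s\n", i, c, stack_slot,
                  wide ? " (2 slots)" : "");
      stack_slot += wide ? 2 : 1;
      reg_num = -1;  // once an argument spills, the rest stay on the stack
    }
  }
}

int main() {
  ClassifyArgs("VLJI");  // void f(Object, long, int), after this/jclass
  return 0;
}
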
diff --git a/runtime/entrypoints/jni/jni_entrypoints.h b/runtime/entrypoints/jni/jni_entrypoints.h
new file mode 100644
index 0000000..0a53447
--- /dev/null
+++ b/runtime/entrypoints/jni/jni_entrypoints.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
+
+#include "base/macros.h"
+#include "offsets.h"
+
+#define JNI_ENTRYPOINT_OFFSET(x) \
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, jni_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(JniEntryPoints, x)))
+
+namespace art {
+
+// Pointers to functions that are called by JNI trampolines via thread-local storage.
+struct PACKED(4) JniEntryPoints {
+  // Called when the JNI method isn't registered.
+  void* (*pDlsymLookup)(JNIEnv* env, jobject);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
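
pDlsymLookup is what runs when a native method has no registered implementation: conceptually it mangles the method into a JNI symbol name and searches the loaded libraries. A hedged sketch of that idea; JniShortName here is illustrative (the real mangling also escapes underscores and non-ASCII characters), and ART searches its own JavaVMExt library tables rather than the whole process as this does:

#include <dlfcn.h>
#include <cstdio>
#include <string>

// Builds the JNI "short" symbol name, e.g. class "java/lang/String" and
// method "intern" -> "Java_java_lang_String_intern".
static std::string JniShortName(const std::string& class_descriptor,
                                const std::string& method_name) {
  std::string s = "Java_";
  for (char c : class_descriptor) {
    s += (c == '/') ? '_' : c;
  }
  s += '_';
  s += method_name;
  return s;
}

int main() {
  std::string sym = JniShortName("java/lang/String", "intern");
  // dlopen(nullptr, ...) yields a handle covering the program's global
  // symbol scope (link with -ldl on glibc).
  void* handle = dlopen(nullptr, RTLD_NOW);
  void* fn = (handle != nullptr) ? dlsym(handle, sym.c_str()) : nullptr;
  std::printf("%s -> %p\n", sym.c_str(), fn);
  return 0;
}
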
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
index a229c76..ec9e4f8 100644
--- a/runtime/entrypoints/portable/portable_entrypoints.h
+++ b/runtime/entrypoints/portable/portable_entrypoints.h
@@ -28,15 +28,15 @@
 class Thread;
 
 #define PORTABLE_ENTRYPOINT_OFFSET(x) \
-    (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
-        static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
 
 // Pointers to functions that are called by code generated by compilers adhering to the portable
 // compiler ABI.
 struct PACKED(4) PortableEntryPoints {
   // Invocation
-  const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
-                                                       mirror::AbstractMethod**, Thread*);
+  void (*pPortableResolutionTrampoline)(mirror::AbstractMethod*);
+  void (*pPortableToInterpreterBridge)(mirror::AbstractMethod*);
 };
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h
deleted file mode 100644
index 35fa972..0000000
--- a/runtime/entrypoints/quick/quick_argument_visitor.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
-#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
-
-#include "object_utils.h"
-
-namespace art {
-
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
-class QuickArgumentVisitor {
- public:
-// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
-// Size of Runtime::kRefAndArgs callee save frame.
-// Size of Method* and register parameters in out stack arguments.
-#if defined(__arm__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
-#define QUICK_STACK_ARG_SKIP 16
-#elif defined(__mips__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
-#define QUICK_STACK_ARG_SKIP 16
-#elif defined(__i386__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
-#define QUICK_STACK_ARG_SKIP 16
-#else
-#error "Unsupported architecture"
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define QUICK_STACK_ARG_SKIP 0
-#endif
-
-  QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
-    caller_mh_(caller_mh),
-    args_in_regs_(ComputeArgsInRegs(caller_mh)),
-    num_params_(caller_mh.NumArgs()),
-    reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
-    stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
-                + QUICK_STACK_ARG_SKIP),
-    cur_args_(reg_args_),
-    cur_arg_index_(0),
-    param_index_(0),
-    is_split_long_or_double_(false) {
-  }
-
-  virtual ~QuickArgumentVisitor() {}
-
-  virtual void Visit() = 0;
-
-  bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return caller_mh_.IsParamAReference(param_index_);
-  }
-
-  bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return caller_mh_.IsParamALongOrDouble(param_index_);
-  }
-
-  Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return caller_mh_.GetParamPrimitiveType(param_index_);
-  }
-
-  byte* GetParamAddress() const {
-    return cur_args_ + (cur_arg_index_ * kPointerSize);
-  }
-
-  bool IsSplitLongOrDouble() const {
-    return is_split_long_or_double_;
-  }
-
-  uint64_t ReadSplitLongParam() const {
-    DCHECK(IsSplitLongOrDouble());
-    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
-    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
-    return (low_half & 0xffffffffULL) | (high_half << 32);
-  }
-
-  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    for (cur_arg_index_ = 0;  cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
-      is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
-      Visit();
-      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
-      param_index_++;
-    }
-    cur_args_ = stack_args_;
-    cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
-    is_split_long_or_double_ = false;
-    while (param_index_ < num_params_) {
-      Visit();
-      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
-      param_index_++;
-    }
-  }
-
- private:
-  static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    size_t args_in_regs = 0;
-    size_t num_params = mh.NumArgs();
-    for (size_t i = 0; i < num_params; i++) {
-      args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
-      if (args_in_regs > 3) {
-        args_in_regs = 3;
-        break;
-      }
-    }
-    return args_in_regs;
-  }
-  MethodHelper& caller_mh_;
-  const size_t args_in_regs_;
-  const size_t num_params_;
-  byte* const reg_args_;
-  byte* const stack_args_;
-  byte* cur_args_;
-  size_t cur_arg_index_;
-  size_t param_index_;
-  // Does a 64bit parameter straddle the register and stack arguments?
-  bool is_split_long_or_double_;
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
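
The header's one subtle helper, ReadSplitLongParam, reappears unchanged in the new quick_trampoline_entrypoints.cc below; it handles the case where a 64-bit argument straddles the last register slot and the first stack slot. A minimal sketch of the recombination, with hypothetical register/stack halves as inputs:

#include <cstdint>
#include <cstdio>

// Recombines a 64-bit value whose low word was passed in the last argument
// register and whose high word spilled to the first stack slot, exactly as
// ReadSplitLongParam does: mask the low half, then OR in the shifted high half.
static uint64_t ReadSplit(uint32_t low_half_from_regs, uint32_t high_half_from_stack) {
  return (static_cast<uint64_t>(low_half_from_regs) & 0xffffffffULL) |
         (static_cast<uint64_t>(high_half_from_stack) << 32);
}

int main() {
  uint64_t v = ReadSplit(0xdeadbeefu, 0x00000001u);
  std::printf("0x%016llx\n", static_cast<unsigned long long>(v));  // 0x00000001deadbeef
  return 0;
}
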
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 74b8cfd..e76679b 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -17,44 +17,45 @@
 #ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
 #define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
 
-#include "dex_file-inl.h"
-#include "runtime.h"
+#include <jni.h>
+
+#include "base/macros.h"
+#include "offsets.h"
 
 #define QUICK_ENTRYPOINT_OFFSET(x) \
-    (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \
-        static_cast<uintptr_t>(OFFSETOF_MEMBER(QuickEntryPoints, x)))
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(QuickEntryPoints, x)))
 
 namespace art {
+
 namespace mirror {
   class AbstractMethod;
   class Class;
   class Object;
 }  // namespace mirror
-class DvmDex;
-class MethodHelper;
-class ShadowFrame;
+
 class Thread;
 
 // Pointers to functions that are called by quick compiler generated code via thread-local storage.
 struct PACKED(4) QuickEntryPoints {
   // Alloc
-  void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
-  void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
-  void* (*pAllocObjectFromCode)(uint32_t, void*);
-  void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*);
-  void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
-  void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
+  void* (*pAllocArray)(uint32_t, void*, int32_t);
+  void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
+  void* (*pAllocObject)(uint32_t, void*);
+  void* (*pAllocObjectWithAccessCheck)(uint32_t, void*);
+  void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t);
+  void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
 
   // Cast
-  uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, const mirror::Class*);
-  void (*pCanPutArrayElementFromCode)(void*, void*);
-  void (*pCheckCastFromCode)(void*, void*);
+  uint32_t (*pInstanceofNonTrivial)(const mirror::Class*, const mirror::Class*);
+  void (*pCanPutArrayElement)(void*, void*);
+  void (*pCheckCast)(void*, void*);
 
   // DexCache
   void* (*pInitializeStaticStorage)(uint32_t, void*);
-  void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*);
-  void* (*pInitializeTypeFromCode)(uint32_t, void*);
-  void* (*pResolveStringFromCode)(void*, uint32_t);
+  void* (*pInitializeTypeAndVerifyAccess)(uint32_t, void*);
+  void* (*pInitializeType)(uint32_t, void*);
+  void* (*pResolveString)(void*, uint32_t);
 
   // Field
   int (*pSet32Instance)(uint32_t, void*, int32_t);  // field_idx, obj, src
@@ -71,7 +72,7 @@
   void* (*pGetObjStatic)(uint32_t);
 
   // FillArray
-  void (*pHandleFillArrayDataFromCode)(void*, void*);
+  void (*pHandleFillArrayData)(void*, void*);
 
   // JNI
   uint32_t (*pJniMethodStart)(Thread*);
@@ -83,8 +84,8 @@
                                                     jobject locked, Thread* self);
 
   // Locks
-  void (*pLockObjectFromCode)(void*);
-  void (*pUnlockObjectFromCode)(void*);
+  void (*pLockObject)(void*);
+  void (*pUnlockObject)(void*);
 
   // Math
   int32_t (*pCmpgDouble)(double, double);
@@ -108,14 +109,6 @@
   uint64_t (*pShrLong)(uint64_t, uint32_t);
   uint64_t (*pUshrLong)(uint64_t, uint32_t);
 
-  // Interpreter
-  void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh,
-                                         const DexFile::CodeItem* code_item,
-                                         ShadowFrame* shadow_frame, JValue* result);
-  void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh,
-                                   const DexFile::CodeItem* code_item,
-                                   ShadowFrame* shadow_frame, JValue* result);
-
   // Intrinsics
   int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
   int32_t (*pMemcmp16)(void*, void*, int32_t);
@@ -123,8 +116,8 @@
   void* (*pMemcpy)(void*, const void*, size_t);
 
   // Invocation
-  const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
-                                                    mirror::AbstractMethod**, Thread*);
+  void (*pQuickResolutionTrampoline)(mirror::AbstractMethod*);
+  void (*pQuickToInterpreterBridge)(mirror::AbstractMethod*);
   void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
   void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
   void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
@@ -133,22 +126,21 @@
   void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
 
   // Thread
-  void (*pCheckSuspendFromCode)(Thread*);  // Stub that is called when the suspend count is non-zero
-  void (*pTestSuspendFromCode)();  // Stub that is periodically called to test the suspend count
+  void (*pCheckSuspend)(Thread*);  // Stub that is called when the suspend count is non-zero
+  void (*pTestSuspend)();  // Stub that is periodically called to test the suspend count
 
   // Throws
   void (*pDeliverException)(void*);
-  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
-  void (*pThrowDivZeroFromCode)();
-  void (*pThrowNoSuchMethodFromCode)(int32_t);
-  void (*pThrowNullPointerFromCode)();
-  void (*pThrowStackOverflowFromCode)(void*);
+  void (*pThrowArrayBounds)(int32_t, int32_t);
+  void (*pThrowDivZero)();
+  void (*pThrowNoSuchMethod)(int32_t);
+  void (*pThrowNullPointer)();
+  void (*pThrowStackOverflow)(void*);
 };
 
 
 // JNI entrypoints.
-extern uint32_t JniMethodStart(Thread* self)
-    UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
 extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self)
     UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
 extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self)
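
Beyond the renames dropping the FromCode suffix, QUICK_ENTRYPOINT_OFFSET now yields a ThreadOffset instead of a bare uintptr_t, which forces call sites to unwrap the displacement explicitly via Int32Value(). A sketch of the benefit under simplified assumed types (ThreadOffsetSketch, and a hypothetical LoadWordDisp standing in for the codegen helper):

#include <cstdint>

// A strongly typed offset: construction is explicit and extraction is
// explicit, so an unrelated integer can no longer be passed where a
// thread-relative offset is expected.
class ThreadOffsetSketch {
 public:
  explicit ThreadOffsetSketch(uintptr_t value) : value_(value) {}
  int32_t Int32Value() const { return static_cast<int32_t>(value_); }
 private:
  uintptr_t value_;
};

// Hypothetical stand-in for a codegen helper taking a raw displacement.
static void LoadWordDisp(int /*base_reg*/, int32_t /*displacement*/, int /*dest_reg*/) {}

static void EmitEntrypointCall(int self_reg, int lr_reg, ThreadOffsetSketch entry) {
  // LoadWordDisp(self_reg, entry, lr_reg);  // would not compile: no
  //                                         // implicit conversion exists
  LoadWordDisp(self_reg, entry.Int32Value(), lr_reg);  // intent made explicit
}

int main() {
  EmitEntrypointCall(/*self_reg=*/9, /*lr_reg=*/14, ThreadOffsetSketch(0x1a0));
  return 0;
}
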
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 7ecd296..0e61942 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -32,7 +32,7 @@
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   const void* result = instrumentation->GetQuickCodeFor(method);
-  bool interpreter_entry = (result == GetInterpreterEntryPoint());
+  bool interpreter_entry = (result == GetQuickToInterpreterBridge());
   instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object,
                                                  method, lr, interpreter_entry);
   CHECK(result != NULL) << PrettyMethod(method);
diff --git a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
deleted file mode 100644
index 656df8d..0000000
--- a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "quick_argument_visitor.h"
-#include "callee_save_frame.h"
-#include "dex_file-inl.h"
-#include "interpreter/interpreter.h"
-#include "invoke_arg_array_builder.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "object_utils.h"
-
-namespace art {
-
-// Visits arguments on the stack placing them into the shadow frame.
-class BuildShadowFrameVisitor : public QuickArgumentVisitor {
- public:
-  BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
-                          ShadowFrame& sf, size_t first_arg_reg) :
-    QuickArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {}
-
-  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    Primitive::Type type = GetParamPrimitiveType();
-    switch (type) {
-      case Primitive::kPrimLong:  // Fall-through.
-      case Primitive::kPrimDouble:
-        if (IsSplitLongOrDouble()) {
-          sf_.SetVRegLong(cur_reg_, ReadSplitLongParam());
-        } else {
-          sf_.SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
-        }
-        ++cur_reg_;
-        break;
-      case Primitive::kPrimNot:
-        sf_.SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
-        break;
-      case Primitive::kPrimBoolean:  // Fall-through.
-      case Primitive::kPrimByte:     // Fall-through.
-      case Primitive::kPrimChar:     // Fall-through.
-      case Primitive::kPrimShort:    // Fall-through.
-      case Primitive::kPrimInt:      // Fall-through.
-      case Primitive::kPrimFloat:
-        sf_.SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
-        break;
-      case Primitive::kPrimVoid:
-        LOG(FATAL) << "UNREACHABLE";
-        break;
-    }
-    ++cur_reg_;
-  }
-
- private:
-  ShadowFrame& sf_;
-  size_t cur_reg_;
-
-  DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor);
-};
-
-extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self,
-                                        mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
-  // frame.
-  const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
-
-  MethodHelper mh(method);
-  const DexFile::CodeItem* code_item = mh.GetCodeItem();
-  uint16_t num_regs = code_item->registers_size_;
-  void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
-  ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
-                                                method, 0, memory));
-  size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
-  BuildShadowFrameVisitor shadow_frame_builder(mh, sp, *shadow_frame, first_arg_reg);
-  shadow_frame_builder.VisitArguments();
-  // Push a transition back into managed code onto the linked list in thread.
-  ManagedStack fragment;
-  self->PushManagedStackFragment(&fragment);
-  self->PushShadowFrame(shadow_frame);
-  self->EndAssertNoThreadSuspension(old_cause);
-
-  if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
-    // Ensure static method's class is initialized.
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
-                                                                 true, true)) {
-      DCHECK(Thread::Current()->IsExceptionPending());
-      self->PopManagedStackFragment(fragment);
-      return 0;
-    }
-  }
-
-  JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
-  // Pop transition.
-  self->PopManagedStackFragment(fragment);
-  return result.GetJ();
-}
-
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::AbstractMethod* method = shadow_frame->GetMethod();
-  // Ensure static methods are initialized.
-  if (method->IsStatic()) {
-    Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true);
-  }
-  uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
-  ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
-  arg_array.BuildArgArray(shadow_frame, arg_offset);
-  method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
-}
-
-}  // namespace art
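
This file's shadow-frame construction moves into quick_trampoline_entrypoints.cc below as artQuickToInterpreterBridge. Its BuildShadowFrameVisitor assigns each argument a virtual register starting at registers_size - ins_size, consuming a vreg pair for longs and doubles. A small sketch of that vreg accounting:

#include <cstdio>
#include <cstring>

// Prints the shadow-frame vreg assigned to each incoming argument. Wide
// types ('J'/'D') occupy a vreg pair, matching the extra ++cur_reg_ that
// BuildShadowFrameVisitor performs for kPrimLong/kPrimDouble. Shorty
// index 0 is the return type.
static void LayoutArgs(const char* shorty, bool is_static, std::size_t first_arg_reg) {
  std::size_t cur_reg = first_arg_reg;
  if (!is_static) {
    std::printf("this      -> v%zu\n", cur_reg++);
  }
  for (std::size_t i = 1; i < std::strlen(shorty); ++i) {
    char c = shorty[i];
    if (c == 'J' || c == 'D') {
      std::printf("arg %zu (%c) -> v%zu..v%zu\n", i, c, cur_reg, cur_reg + 1);
      cur_reg += 2;
    } else {
      std::printf("arg %zu (%c) -> v%zu\n", i, c, cur_reg);
      cur_reg += 1;
    }
  }
}

int main() {
  // A virtual method long f(int, long) with registers_size 8 and ins_size 4
  // (this + int + long pair) places its first argument at v4.
  LayoutArgs("JIJ", /*is_static=*/false, /*first_arg_reg=*/4);
  return 0;
}
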
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 23a28f9..9907c04 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -94,78 +94,4 @@
   return o;
 }
 
-static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) {
-  intptr_t value = *arg_ptr;
-  mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
-  mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
-  CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep))
-      << value_as_work_around_rep;
-  *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
-}
-
-extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(Thread::Current() == self);
-  // TODO: this code is specific to ARM
-  // On entry the stack pointed by sp is:
-  // | arg3   | <- Calling JNI method's frame (and extra bit for out args)
-  // | LR     |
-  // | R3     |    arg2
-  // | R2     |    arg1
-  // | R1     |    jclass/jobject
-  // | R0     |    JNIEnv
-  // | unused |
-  // | unused |
-  // | unused | <- sp
-  mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL);
-  DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
-  intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
-  // Fix up this/jclass argument
-  WorkAroundJniBugsForJobject(arg_ptr);
-  arg_ptr++;
-  // Fix up jobject arguments
-  MethodHelper mh(jni_method);
-  int reg_num = 2;  // Current register being processed, -1 for stack arguments.
-  for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
-    char shorty_char = mh.GetShorty()[i];
-    if (shorty_char == 'L') {
-      WorkAroundJniBugsForJobject(arg_ptr);
-    }
-    if (shorty_char == 'J' || shorty_char == 'D') {
-      if (reg_num == 2) {
-        arg_ptr = sp + 8;  // skip to out arguments
-        reg_num = -1;
-      } else if (reg_num == 3) {
-        arg_ptr = sp + 10;  // skip to out arguments plus 2 slots as long must be aligned
-        reg_num = -1;
-      } else {
-        DCHECK_EQ(reg_num, -1);
-        if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
-          arg_ptr += 3;  // unaligned, pad and move through stack arguments
-        } else {
-          arg_ptr += 2;  // aligned, move through stack arguments
-        }
-      }
-    } else {
-      if (reg_num == 2) {
-        arg_ptr++;  // move through register arguments
-        reg_num++;
-      } else if (reg_num == 3) {
-        arg_ptr = sp + 8;  // skip to outgoing stack arguments
-        reg_num = -1;
-      } else {
-        DCHECK_EQ(reg_num, -1);
-        arg_ptr++;  // move through stack arguments
-      }
-    }
-  }
-  // Load expected destination, see Method::RegisterNative
-  const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
-  if (UNLIKELY(code == NULL)) {
-    code = GetJniDlsymLookupStub();
-    jni_method->RegisterNative(self, code);
-  }
-  return code;
-}
-
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
deleted file mode 100644
index 4e3d749..0000000
--- a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "quick_argument_visitor.h"
-#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "object_utils.h"
-#include "reflection.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "well_known_classes.h"
-
-#include "ScopedLocalRef.h"
-
-namespace art {
-
-// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
-// to jobjects.
-class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
- public:
-  BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
-                            ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
-    QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
-
-  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    jvalue val;
-    Primitive::Type type = GetParamPrimitiveType();
-    switch (type) {
-      case Primitive::kPrimNot: {
-        mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
-        val.l = soa_.AddLocalReference<jobject>(obj);
-        break;
-      }
-      case Primitive::kPrimLong:  // Fall-through.
-      case Primitive::kPrimDouble:
-        if (IsSplitLongOrDouble()) {
-          val.j = ReadSplitLongParam();
-        } else {
-          val.j = *reinterpret_cast<jlong*>(GetParamAddress());
-        }
-        break;
-      case Primitive::kPrimBoolean:  // Fall-through.
-      case Primitive::kPrimByte:     // Fall-through.
-      case Primitive::kPrimChar:     // Fall-through.
-      case Primitive::kPrimShort:    // Fall-through.
-      case Primitive::kPrimInt:      // Fall-through.
-      case Primitive::kPrimFloat:
-        val.i =  *reinterpret_cast<jint*>(GetParamAddress());
-        break;
-      case Primitive::kPrimVoid:
-        LOG(FATAL) << "UNREACHABLE";
-        val.j = 0;
-        break;
-    }
-    args_.push_back(val);
-  }
-
- private:
-  ScopedObjectAccessUnchecked& soa_;
-  std::vector<jvalue>& args_;
-
-  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
-};
-
-// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
-// which is responsible for recording callee save registers. We explicitly place into jobjects the
-// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
-// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
-                                          mirror::Object* receiver,
-                                          Thread* self, mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
-  const char* old_cause =
-      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
-  // Register the top of the managed stack, making stack crawlable.
-  DCHECK_EQ(*sp, proxy_method);
-  self->SetTopOfStack(sp, 0);
-  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
-            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  self->VerifyStack();
-  // Start new JNI local reference state.
-  JNIEnvExt* env = self->GetJniEnv();
-  ScopedObjectAccessUnchecked soa(env);
-  ScopedJniEnvLocalRefState env_state(env);
-  // Create local ref. copies of proxy method and the receiver.
-  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
-  // Placing arguments into args vector and remove the receiver.
-  MethodHelper proxy_mh(proxy_method);
-  std::vector<jvalue> args;
-  BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
-  local_ref_visitor.VisitArguments();
-  args.erase(args.begin());
-
-  // Convert proxy method into expected interface method.
-  mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
-  DCHECK(interface_method != NULL);
-  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
-  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
-  // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
-  // that performs allocations.
-  self->EndAssertNoThreadSuspension(old_cause);
-  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
-                                               rcvr_jobj, interface_method_jobj, args);
-  return result.GetJ();
-}
-
-}  // namespace art
diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc
deleted file mode 100644
index d78bbf3..0000000
--- a/runtime/entrypoints/quick/quick_stub_entrypoints.cc
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "callee_save_frame.h"
-#include "class_linker-inl.h"
-#include "dex_file-inl.h"
-#include "dex_instruction-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "object_utils.h"
-#include "scoped_thread_state_change.h"
-
-// Architecture specific assembler helper to deliver exception.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-
-namespace art {
-
-// Lazily resolve a method for quick. Called by stub code.
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__arm__)
-  // On entry the stack pointed by sp is:
-  // | argN       |  |
-  // | ...        |  |
-  // | arg4       |  |
-  // | arg3 spill |  |  Caller's frame
-  // | arg2 spill |  |
-  // | arg1 spill |  |
-  // | Method*    | ---
-  // | LR         |
-  // | ...        |    callee saves
-  // | R3         |    arg3
-  // | R2         |    arg2
-  // | R1         |    arg1
-  // | R0         |
-  // | Method*    |  <- sp
-  DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 48);
-  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
-  uint32_t pc_offset = 10;
-  uintptr_t caller_pc = regs[pc_offset];
-#elif defined(__i386__)
-  // On entry the stack pointed by sp is:
-  // | argN        |  |
-  // | ...         |  |
-  // | arg4        |  |
-  // | arg3 spill  |  |  Caller's frame
-  // | arg2 spill  |  |
-  // | arg1 spill  |  |
-  // | Method*     | ---
-  // | Return      |
-  // | EBP,ESI,EDI |    callee saves
-  // | EBX         |    arg3
-  // | EDX         |    arg2
-  // | ECX         |    arg1
-  // | EAX/Method* |  <- sp
-  DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 32);
-  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
-  uintptr_t caller_pc = regs[7];
-#elif defined(__mips__)
-  // On entry the stack pointed by sp is:
-  // | argN       |  |
-  // | ...        |  |
-  // | arg4       |  |
-  // | arg3 spill |  |  Caller's frame
-  // | arg2 spill |  |
-  // | arg1 spill |  |
-  // | Method*    | ---
-  // | RA         |
-  // | ...        |    callee saves
-  // | A3         |    arg3
-  // | A2         |    arg2
-  // | A1         |    arg1
-  // | A0/Method* |  <- sp
-  DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 64);
-  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
-  uint32_t pc_offset = 15;
-  uintptr_t caller_pc = regs[pc_offset];
-#else
-  UNIMPLEMENTED(FATAL);
-  mirror::AbstractMethod** caller_sp = NULL;
-  uintptr_t* regs = NULL;
-  uintptr_t caller_pc = 0;
-#endif
-  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
-  // Start new JNI local reference state
-  JNIEnvExt* env = thread->GetJniEnv();
-  ScopedObjectAccessUnchecked soa(env);
-  ScopedJniEnvLocalRefState env_state(env);
-
-  // Compute details about the called method (avoid GCs)
-  ClassLinker* linker = Runtime::Current()->GetClassLinker();
-  mirror::AbstractMethod* caller = *caller_sp;
-  InvokeType invoke_type;
-  uint32_t dex_method_idx;
-#if !defined(__i386__)
-  const char* shorty;
-  uint32_t shorty_len;
-#endif
-  if (called->IsRuntimeMethod()) {
-    uint32_t dex_pc = caller->ToDexPc(caller_pc);
-    const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
-    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
-    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
-    Instruction::Code instr_code = instr->Opcode();
-    bool is_range;
-    switch (instr_code) {
-      case Instruction::INVOKE_DIRECT:
-        invoke_type = kDirect;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_DIRECT_RANGE:
-        invoke_type = kDirect;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_STATIC:
-        invoke_type = kStatic;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_STATIC_RANGE:
-        invoke_type = kStatic;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_SUPER:
-        invoke_type = kSuper;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_SUPER_RANGE:
-        invoke_type = kSuper;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_VIRTUAL:
-        invoke_type = kVirtual;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_VIRTUAL_RANGE:
-        invoke_type = kVirtual;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_INTERFACE:
-        invoke_type = kInterface;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_INTERFACE_RANGE:
-        invoke_type = kInterface;
-        is_range = true;
-        break;
-      default:
-        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
-        // Avoid used uninitialized warnings.
-        invoke_type = kDirect;
-        is_range = false;
-    }
-    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
-#if !defined(__i386__)
-    shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len);
-#endif
-  } else {
-    invoke_type = kStatic;
-    dex_method_idx = called->GetDexMethodIndex();
-#if !defined(__i386__)
-    MethodHelper mh(called);
-    shorty = mh.GetShorty();
-    shorty_len = mh.GetShortyLength();
-#endif
-  }
-#if !defined(__i386__)
-  // Discover shorty (avoid GCs)
-  size_t args_in_regs = 0;
-  for (size_t i = 1; i < shorty_len; i++) {
-    char c = shorty[i];
-    args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 2 : 1);
-    if (args_in_regs > 3) {
-      args_in_regs = 3;
-      break;
-    }
-  }
-  // Place into local references incoming arguments from the caller's register arguments
-  size_t cur_arg = 1;   // skip method_idx in R0, first arg is in R1
-  if (invoke_type != kStatic) {
-    mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
-    cur_arg++;
-    if (args_in_regs < 3) {
-      // If we thought we had fewer than 3 arguments in registers, account for the receiver
-      args_in_regs++;
-    }
-    soa.AddLocalReference<jobject>(obj);
-  }
-  size_t shorty_index = 1;  // skip return value
-  // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip
-  // R0)
-  while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) {
-    char c = shorty[shorty_index];
-    shorty_index++;
-    if (c == 'L') {
-      mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
-      soa.AddLocalReference<jobject>(obj);
-    }
-    cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
-  }
-  // Place into local references incoming arguments from the caller's stack arguments
-  cur_arg += pc_offset + 1;  // skip LR/RA, Method* and spills for R1-R3/A1-A3 and callee saves
-  while (shorty_index < shorty_len) {
-    char c = shorty[shorty_index];
-    shorty_index++;
-    if (c == 'L') {
-      mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
-      soa.AddLocalReference<jobject>(obj);
-    }
-    cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
-  }
-#endif
-  // Resolve method filling in dex cache
-  if (called->IsRuntimeMethod()) {
-    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
-  }
-  const void* code = NULL;
-  if (LIKELY(!thread->IsExceptionPending())) {
-    // Incompatible class change should have been handled in resolve method.
-    CHECK(!called->CheckIncompatibleClassChange(invoke_type));
-    // Refine called method based on receiver.
-    if (invoke_type == kVirtual) {
-      called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
-    } else if (invoke_type == kInterface) {
-      called = receiver->GetClass()->FindVirtualMethodForInterface(called);
-    }
-    // Ensure that the called method's class is initialized.
-    mirror::Class* called_class = called->GetDeclaringClass();
-    linker->EnsureInitialized(called_class, true, true);
-    if (LIKELY(called_class->IsInitialized())) {
-      code = called->GetEntryPointFromCompiledCode();
-    } else if (called_class->IsInitializing()) {
-      if (invoke_type == kStatic) {
-        // Class is still initializing, go to oat and grab code (trampoline must be left in place
-        // until class is initialized to stop races between threads).
-        code = linker->GetOatCodeFor(called);
-      } else {
-        // No trampoline for non-static methods.
-        code = called->GetEntryPointFromCompiledCode();
-      }
-    } else {
-      DCHECK(called_class->IsErroneous());
-    }
-  }
-  if (UNLIKELY(code == NULL)) {
-    // Something went wrong in ResolveMethod or EnsureInitialized,
-    // go into deliver exception with the pending exception in r0
-    CHECK(thread->IsExceptionPending());
-    code = reinterpret_cast<void*>(art_quick_deliver_exception_from_code);
-    regs[0] = reinterpret_cast<uintptr_t>(thread->GetException(NULL));
-    thread->ClearException();
-  } else {
-    // Expect class to at least be initializing.
-    DCHECK(called->GetDeclaringClass()->IsInitializing());
-    // Don't want infinite recursion.
-    DCHECK(code != GetResolutionTrampoline(linker));
-    // Set up entry into main method
-    regs[0] = reinterpret_cast<uintptr_t>(called);
-  }
-  return code;
-}
-
-// Called by the abstract method error stub.
-extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self,
-                                                    mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if !defined(ART_USE_PORTABLE_COMPILER)
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
-#else
-  UNUSED(sp);
-#endif
-  ThrowAbstractMethodError(method);
-  self->QuickDeliverException();
-}
-
-}  // namespace art
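
The resolution trampoline's ten-case switch over the INVOKE_* opcodes (kept, in essence, by its replacement below) pairs each invoke kind with a range flag. A compact, table-driven sketch of the same decode; the opcode enumerators here are stand-ins, not dex's real opcode values:

#include <cstdio>

// Stand-in opcode and invoke-type enums; dex's real values live in
// dex_instruction.h and are not contiguous like this.
enum class Op { kInvokeDirect, kInvokeDirectRange, kInvokeStatic,
                kInvokeStaticRange, kInvokeSuper, kInvokeSuperRange,
                kInvokeVirtual, kInvokeVirtualRange, kInvokeInterface,
                kInvokeInterfaceRange };
enum class InvokeType { kDirect, kStatic, kSuper, kVirtual, kInterface };

struct Decoded { InvokeType type; bool is_range; };

// Each opcode maps to a (kind, range) pair, turning ten near-identical
// switch cases into a lookup.
static Decoded Decode(Op op) {
  static const Decoded kTable[] = {
      {InvokeType::kDirect, false},    {InvokeType::kDirect, true},
      {InvokeType::kStatic, false},    {InvokeType::kStatic, true},
      {InvokeType::kSuper, false},     {InvokeType::kSuper, true},
      {InvokeType::kVirtual, false},   {InvokeType::kVirtual, true},
      {InvokeType::kInterface, false}, {InvokeType::kInterface, true},
  };
  return kTable[static_cast<int>(op)];
}

int main() {
  Decoded d = Decode(Op::kInvokeVirtualRange);
  std::printf("type=%d is_range=%d\n", static_cast<int>(d.type), d.is_range);
  return 0;
}
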
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
new file mode 100644
index 0000000..9bf02e8
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -0,0 +1,558 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "interpreter/interpreter.h"
+#include "invoke_arg_array_builder.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "runtime.h"
+
+namespace art {
+
+// Visits the arguments as saved to the stack by a Runtime::kRefsAndArgs callee save frame.
+class QuickArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefsAndArgs callee save frame.
+// Size of Runtime::kRefsAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+  // The callee save frame is pointed to by SP.
+  // | argN       |  |
+  // | ...        |  |
+  // | arg4       |  |
+  // | arg3 spill |  |  Caller's frame
+  // | arg2 spill |  |
+  // | arg1 spill |  |
+  // | Method*    | ---
+  // | LR         |
+  // | ...        |    callee saves
+  // | R3         |    arg3
+  // | R2         |    arg2
+  // | R1         |    arg1
+  // | R0         |
+  // | Method*    |  <- sp
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 44
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__mips__)
+  // The callee save frame is pointed to by SP.
+  // | argN       |  |
+  // | ...        |  |
+  // | arg4       |  |
+  // | arg3 spill |  |  Caller's frame
+  // | arg2 spill |  |
+  // | arg1 spill |  |
+  // | Method*    | ---
+  // | RA         |
+  // | ...        |    callee saves
+  // | A3         |    arg3
+  // | A2         |    arg2
+  // | A1         |    arg1
+  // | A0/Method* |  <- sp
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 60
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+  // The callee save frame is pointed to by SP.
+  // | argN        |  |
+  // | ...         |  |
+  // | arg4        |  |
+  // | arg3 spill  |  |  Caller's frame
+  // | arg2 spill  |  |
+  // | arg1 spill  |  |
+  // | Method*     | ---
+  // | Return      |
+  // | EBP,ESI,EDI |    callee saves
+  // | EBX         |    arg3
+  // | EDX         |    arg2
+  // | ECX         |    arg1
+  // | EAX/Method* |  <- sp
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 28
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define QUICK_STACK_ARG_SKIP 16
+#else
+#error "Unsupported architecture"
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 0
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define QUICK_STACK_ARG_SKIP 0
+#endif
+
+  static mirror::AbstractMethod* GetCallingMethod(mirror::AbstractMethod** sp) {
+    byte* previous_sp = reinterpret_cast<byte*>(sp) +
+        QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE;
+    return *reinterpret_cast<mirror::AbstractMethod**>(previous_sp);
+  }
+
+  static uintptr_t GetCallingPc(mirror::AbstractMethod** sp) {
+    byte* lr = reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET;
+    return *reinterpret_cast<uintptr_t*>(lr);
+  }
+
+  QuickArgumentVisitor(mirror::AbstractMethod** sp, bool is_static,
+                       const char* shorty, uint32_t shorty_len)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+    is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
+    args_in_regs_(ComputeArgsInRegs(is_static, shorty, shorty_len)),
+    num_params_((is_static ? 0 : 1) + shorty_len - 1),  // +1 for this, -1 for return type
+    reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+    stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+                + QUICK_STACK_ARG_SKIP),
+    cur_args_(reg_args_),
+    cur_arg_index_(0),
+    param_index_(0),
+    is_split_long_or_double_(false) {
+    DCHECK_EQ(static_cast<size_t>(QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE),
+              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+  }
+
+  virtual ~QuickArgumentVisitor() {}
+
+  virtual void Visit() = 0;
+
+  Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    size_t index = param_index_;
+    if (is_static_) {
+      index++;  // 0th argument must skip return value at start of the shorty
+    } else if (index == 0) {
+      return Primitive::kPrimNot;
+    }
+    CHECK_LT(index, shorty_len_);
+    return Primitive::GetType(shorty_[index]);
+  }
+
+  byte* GetParamAddress() const {
+    return cur_args_ + (cur_arg_index_ * kPointerSize);
+  }
+
+  bool IsSplitLongOrDouble() const {
+    return is_split_long_or_double_;
+  }
+
+  bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetParamPrimitiveType() == Primitive::kPrimNot;
+  }
+
+  bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    Primitive::Type type = GetParamPrimitiveType();
+    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
+  }
+
+  uint64_t ReadSplitLongParam() const {
+    DCHECK(IsSplitLongOrDouble());
+    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
+    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
+    return (low_half & 0xffffffffULL) | (high_half << 32);
+  }
+
+  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    for (cur_arg_index_ = 0;  cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+      is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
+      Visit();
+      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+      param_index_++;
+    }
+    cur_args_ = stack_args_;
+    cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
+    is_split_long_or_double_ = false;
+    while (param_index_ < num_params_) {
+      Visit();
+      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+      param_index_++;
+    }
+  }
+
+ private:
+  static size_t ComputeArgsInRegs(bool is_static, const char* shorty, uint32_t shorty_len)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    size_t args_in_regs = (is_static ? 0 : 1);
+    for (size_t i = 0; i < shorty_len; i++) {
+      char s = shorty[i];
+      if (s == 'J' || s == 'D') {
+        args_in_regs += 2;
+      } else {
+        args_in_regs++;
+      }
+      if (args_in_regs > 3) {
+        args_in_regs = 3;
+        break;
+      }
+    }
+    return args_in_regs;
+  }
+
+  const bool is_static_;
+  const char* const shorty_;
+  const uint32_t shorty_len_;
+  const size_t args_in_regs_;
+  const size_t num_params_;
+  byte* const reg_args_;
+  byte* const stack_args_;
+  byte* cur_args_;
+  size_t cur_arg_index_;
+  size_t param_index_;
+  // Does a 64bit parameter straddle the register and stack arguments?
+  bool is_split_long_or_double_;
+};
+
+// Visits arguments on the stack, placing them into the shadow frame.
+class BuildShadowFrameVisitor : public QuickArgumentVisitor {
+ public:
+  BuildShadowFrameVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty,
+                          uint32_t shorty_len, ShadowFrame& sf, size_t first_arg_reg) :
+    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
+
+  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    Primitive::Type type = GetParamPrimitiveType();
+    switch (type) {
+      case Primitive::kPrimLong:  // Fall-through.
+      case Primitive::kPrimDouble:
+        if (IsSplitLongOrDouble()) {
+          sf_.SetVRegLong(cur_reg_, ReadSplitLongParam());
+        } else {
+          sf_.SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
+        }
+        ++cur_reg_;
+        break;
+      case Primitive::kPrimNot:
+        sf_.SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
+        break;
+      case Primitive::kPrimBoolean:  // Fall-through.
+      case Primitive::kPrimByte:     // Fall-through.
+      case Primitive::kPrimChar:     // Fall-through.
+      case Primitive::kPrimShort:    // Fall-through.
+      case Primitive::kPrimInt:      // Fall-through.
+      case Primitive::kPrimFloat:
+        sf_.SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
+        break;
+      case Primitive::kPrimVoid:
+        LOG(FATAL) << "UNREACHABLE";
+        break;
+    }
+    ++cur_reg_;
+  }
+
+ private:
+  ShadowFrame& sf_;
+  size_t cur_reg_;
+
+  DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor);
+};
+
+extern "C" uint64_t artQuickToInterpreterBridge(mirror::AbstractMethod* method, Thread* self,
+                                                mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
+  // frame.
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+
+  if (method->IsAbstract()) {
+    ThrowAbstractMethodError(method);
+    return 0;
+  } else {
+    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
+    MethodHelper mh(method);
+    const DexFile::CodeItem* code_item = mh.GetCodeItem();
+    uint16_t num_regs = code_item->registers_size_;
+    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
+    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
+                                                  method, 0, memory));
+    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
+    BuildShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
+                                                 mh.GetShortyLength(),
+                                                 *shadow_frame, first_arg_reg);
+    shadow_frame_builder.VisitArguments();
+    // Push a transition back into managed code onto the linked list in thread.
+    ManagedStack fragment;
+    self->PushManagedStackFragment(&fragment);
+    self->PushShadowFrame(shadow_frame);
+    self->EndAssertNoThreadSuspension(old_cause);
+
+    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
+      // Ensure static method's class is initialized.
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
+                                                                   true, true)) {
+        DCHECK(Thread::Current()->IsExceptionPending());
+        self->PopManagedStackFragment(fragment);
+        return 0;
+      }
+    }
+
+    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
+    // Pop transition.
+    self->PopManagedStackFragment(fragment);
+    return result.GetJ();
+  }
+}
+
+// Visits arguments on the stack, placing them into the args vector; Object* arguments are
+// converted to jobjects.
+class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
+ public:
+  BuildQuickArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty,
+                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
+                            std::vector<jvalue>* args) :
+    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
+
+  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    jvalue val;
+    Primitive::Type type = GetParamPrimitiveType();
+    switch (type) {
+      case Primitive::kPrimNot: {
+        mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+        val.l = soa_->AddLocalReference<jobject>(obj);
+        break;
+      }
+      case Primitive::kPrimLong:  // Fall-through.
+      case Primitive::kPrimDouble:
+        if (IsSplitLongOrDouble()) {
+          val.j = ReadSplitLongParam();
+        } else {
+          val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+        }
+        break;
+      case Primitive::kPrimBoolean:  // Fall-through.
+      case Primitive::kPrimByte:     // Fall-through.
+      case Primitive::kPrimChar:     // Fall-through.
+      case Primitive::kPrimShort:    // Fall-through.
+      case Primitive::kPrimInt:      // Fall-through.
+      case Primitive::kPrimFloat:
+        val.i =  *reinterpret_cast<jint*>(GetParamAddress());
+        break;
+      case Primitive::kPrimVoid:
+        LOG(FATAL) << "UNREACHABLE";
+        val.j = 0;
+        break;
+    }
+    args_->push_back(val);
+  }
+
+ private:
+  ScopedObjectAccessUnchecked* soa_;
+  std::vector<jvalue>* args_;
+
+  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
+};
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object
+// method, which is responsible for recording callee save registers. We explicitly place the
+// incoming reference arguments into jobjects (so they survive GC), then invoke the invocation
+// handler, a field within the proxy object, which boxes the primitive arguments and deals with
+// error cases.
+extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+                                               mirror::Object* receiver,
+                                               Thread* self, mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+  const char* old_cause =
+      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+  // Register the top of the managed stack, making the stack crawlable.
+  DCHECK_EQ(*sp, proxy_method);
+  self->SetTopOfStack(sp, 0);
+  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
+            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+  self->VerifyStack();
+  // Start new JNI local reference state.
+  JNIEnvExt* env = self->GetJniEnv();
+  ScopedObjectAccessUnchecked soa(env);
+  ScopedJniEnvLocalRefState env_state(env);
+  // Create a local reference copy of the receiver.
+  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+  // Place the arguments into the args vector and remove the receiver.
+  MethodHelper proxy_mh(proxy_method);
+  std::vector<jvalue> args;
+  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
+                                              proxy_mh.GetShortyLength(), &soa, &args);
+  local_ref_visitor.VisitArguments();
+  args.erase(args.begin());
+
+  // Convert proxy method into expected interface method.
+  mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+  DCHECK(interface_method != NULL);
+  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
+  // that performs allocations.
+  self->EndAssertNoThreadSuspension(old_cause);
+  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+                                               rcvr_jobj, interface_method_jobj, args);
+  return result.GetJ();
+}
+
+// Reads object references held in arguments from quick frames and places them into JNI local
+// references so they don't get garbage collected.
+class RememberForGcArgumentVisitor : public QuickArgumentVisitor {
+ public:
+  RememberForGcArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty,
+                              uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
+    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
+
+  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (IsParamAReference()) {
+      soa_->AddLocalReference<jobject>(*reinterpret_cast<mirror::Object**>(GetParamAddress()));
+    }
+  }
+
+ private:
+  ScopedObjectAccessUnchecked* soa_;
+
+  DISALLOW_COPY_AND_ASSIGN(RememberForGcArgumentVisitor);
+};
+
+// Lazily resolve a method for quick. Called by stub code.
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+                                                    mirror::Object* receiver,
+                                                    Thread* thread, mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
+  // Start new JNI local reference state
+  JNIEnvExt* env = thread->GetJniEnv();
+  ScopedObjectAccessUnchecked soa(env);
+  ScopedJniEnvLocalRefState env_state(env);
+  const char* old_cause = thread->StartAssertNoThreadSuspension("Quick method resolution set up");
+
+  // Compute details about the called method (avoid GCs)
+  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  mirror::AbstractMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
+  InvokeType invoke_type;
+  const DexFile* dex_file;
+  uint32_t dex_method_idx;
+  if (called->IsRuntimeMethod()) {
+    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
+    const DexFile::CodeItem* code;
+    {
+      MethodHelper mh(caller);
+      dex_file = &mh.GetDexFile();
+      code = mh.GetCodeItem();
+    }
+    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+    Instruction::Code instr_code = instr->Opcode();
+    bool is_range;
+    switch (instr_code) {
+      case Instruction::INVOKE_DIRECT:
+        invoke_type = kDirect;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_DIRECT_RANGE:
+        invoke_type = kDirect;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_STATIC:
+        invoke_type = kStatic;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_STATIC_RANGE:
+        invoke_type = kStatic;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_SUPER:
+        invoke_type = kSuper;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_SUPER_RANGE:
+        invoke_type = kSuper;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_VIRTUAL:
+        invoke_type = kVirtual;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_VIRTUAL_RANGE:
+        invoke_type = kVirtual;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_INTERFACE:
+        invoke_type = kInterface;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_INTERFACE_RANGE:
+        invoke_type = kInterface;
+        is_range = true;
+        break;
+      default:
+        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
+        // Avoid "used uninitialized" warnings.
+        invoke_type = kDirect;
+        is_range = false;
+    }
+    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
+
+  } else {
+    invoke_type = kStatic;
+    dex_file = &MethodHelper(called).GetDexFile();
+    dex_method_idx = called->GetDexMethodIndex();
+  }
+  uint32_t shorty_len;
+  const char* shorty =
+      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
+  RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
+  visitor.VisitArguments();
+  thread->EndAssertNoThreadSuspension(old_cause);
+  // Resolve method filling in dex cache.
+  if (called->IsRuntimeMethod()) {
+    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+  }
+  const void* code = NULL;
+  if (LIKELY(!thread->IsExceptionPending())) {
+    // Incompatible class change should have been handled in resolve method.
+    CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+    // Refine called method based on receiver.
+    if (invoke_type == kVirtual) {
+      called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+    } else if (invoke_type == kInterface) {
+      called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+    }
+    // Ensure that the called method's class is initialized.
+    mirror::Class* called_class = called->GetDeclaringClass();
+    linker->EnsureInitialized(called_class, true, true);
+    if (LIKELY(called_class->IsInitialized())) {
+      code = called->GetEntryPointFromCompiledCode();
+    } else if (called_class->IsInitializing()) {
+      if (invoke_type == kStatic) {
+        // Class is still initializing, go to oat and grab code (trampoline must be left in place
+        // until class is initialized to stop races between threads).
+        code = linker->GetOatCodeFor(called);
+      } else {
+        // No trampoline for non-static methods.
+        code = called->GetEntryPointFromCompiledCode();
+      }
+    } else {
+      DCHECK(called_class->IsErroneous());
+    }
+  }
+  CHECK_EQ(code == NULL, thread->IsExceptionPending());
+#ifdef MOVING_GARBAGE_COLLECTOR
+  // TODO: locally saved objects may have moved during a GC during resolution. Need to update the
+  //       registers so that the stale objects aren't passed to the method we've resolved.
+  UNIMPLEMENTED(WARNING);
+#endif
+  // Place the called method in the callee-save frame so it is passed as the first argument to
+  // the quick method.
+  *sp = called;
+  return code;
+}
+
+}  // namespace art
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index a7a6d46..933b74a 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -18,6 +18,7 @@
 #include "common_test.h"
 #include "dex_file.h"
 #include "gtest/gtest.h"
+#include "leb128_encoder.h"
 #include "mirror/class-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
@@ -53,17 +54,17 @@
       fake_code_.push_back(0x70 | i);
     }
 
-    fake_mapping_data_.push_back(4);  // first element is count
-    fake_mapping_data_.push_back(4);  // total (non-length) elements
-    fake_mapping_data_.push_back(2);  // count of pc to dex elements
+    fake_mapping_data_.PushBack(4);  // first element is count
+    fake_mapping_data_.PushBack(4);  // total (non-length) elements
+    fake_mapping_data_.PushBack(2);  // count of pc to dex elements
                                       // ---  pc to dex table
-    fake_mapping_data_.push_back(3);  // offset 3
-    fake_mapping_data_.push_back(3);  // maps to dex offset 3
+    fake_mapping_data_.PushBack(3);  // offset 3
+    fake_mapping_data_.PushBack(3);  // maps to dex offset 3
                                       // ---  dex to pc table
-    fake_mapping_data_.push_back(3);  // offset 3
-    fake_mapping_data_.push_back(3);  // maps to dex offset 3
+    fake_mapping_data_.PushBack(3);  // offset 3
+    fake_mapping_data_.PushBack(3);  // maps to dex offset 3
 
-    fake_vmap_table_data_.push_back(0);
+    fake_vmap_table_data_.PushBack(0);
 
     fake_gc_map_.push_back(0);  // 0 bytes to encode references and native pc offsets.
     fake_gc_map_.push_back(0);
@@ -74,24 +75,24 @@
     ASSERT_TRUE(method_f_ != NULL);
     method_f_->SetFrameSizeInBytes(kStackAlignment);
     method_f_->SetEntryPointFromCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2));
-    method_f_->SetMappingTable(&fake_mapping_data_[0]);
-    method_f_->SetVmapTable(&fake_vmap_table_data_[0]);
+    method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
+    method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
     method_f_->SetNativeGcMap(&fake_gc_map_[0]);
 
     method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
     ASSERT_TRUE(method_g_ != NULL);
     method_g_->SetFrameSizeInBytes(kStackAlignment);
     method_g_->SetEntryPointFromCompiledCode(CompiledMethod::CodePointer(&fake_code_[sizeof(code_size)], kThumb2));
-    method_g_->SetMappingTable(&fake_mapping_data_[0]);
-    method_g_->SetVmapTable(&fake_vmap_table_data_[0]);
+    method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
+    method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
     method_g_->SetNativeGcMap(&fake_gc_map_[0]);
   }
 
   const DexFile* dex_;
 
   std::vector<uint8_t> fake_code_;
-  std::vector<uint32_t> fake_mapping_data_;
-  std::vector<uint16_t> fake_vmap_table_data_;
+  UnsignedLeb128EncodingVector fake_mapping_data_;
+  UnsignedLeb128EncodingVector fake_vmap_table_data_;
   std::vector<uint8_t> fake_gc_map_;
 
   mirror::AbstractMethod* method_f_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 47e9b75..a9e5b08 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -85,6 +85,14 @@
       max_allowed_footprint_(initial_size),
       native_footprint_gc_watermark_(initial_size),
       native_footprint_limit_(2 * initial_size),
+      activity_thread_class_(NULL),
+      application_thread_class_(NULL),
+      activity_thread_(NULL),
+      application_thread_(NULL),
+      last_process_state_id_(NULL),
+      // Initially care about pauses in case we never get notified of process states, or if the
+      // JNI code breaks.
+      care_about_pause_times_(true),
       concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
                                             :  std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
@@ -92,7 +100,6 @@
       large_object_threshold_(3 * kPageSize),
       num_bytes_allocated_(0),
       native_bytes_allocated_(0),
-      process_state_(PROCESS_STATE_TOP),
       gc_memory_overhead_(0),
       verify_missing_card_marks_(false),
       verify_system_weaks_(false),
@@ -246,8 +253,122 @@
   }
 };
 
-void Heap::UpdateProcessState(ProcessState process_state) {
-  process_state_ = process_state;
+static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
+  CHECK(out_value != NULL);
+  jfieldID field = env->GetStaticFieldID(clz, name, "I");
+  if (field == NULL) {
+    env->ExceptionClear();
+    return false;
+  }
+  *out_value = env->GetStaticIntField(clz, field);
+  return true;
+}
+
+void Heap::ListenForProcessStateChange() {
+  VLOG(gc) << "Heap notified of process state change";
+
+  Thread* self = Thread::Current();
+  JNIEnvExt* env = self->GetJniEnv();
+
+  if (!have_zygote_space_) {
+    return;
+  }
+
+  if (activity_thread_class_ == NULL) {
+    jclass clz = env->FindClass("android/app/ActivityThread");
+    if (clz == NULL) {
+      env->ExceptionClear();
+      LOG(WARNING) << "Could not find activity thread class in process state change";
+      return;
+    }
+    activity_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
+  }
+
+  if (activity_thread_class_ != NULL && activity_thread_ == NULL) {
+    jmethodID current_activity_method = env->GetStaticMethodID(activity_thread_class_,
+                                                               "currentActivityThread",
+                                                               "()Landroid/app/ActivityThread;");
+    if (current_activity_method == NULL) {
+      env->ExceptionClear();
+      LOG(WARNING) << "Could not get method for currentActivityThread";
+      return;
+    }
+
+    jobject obj = env->CallStaticObjectMethod(activity_thread_class_, current_activity_method);
+    if (obj == NULL) {
+      env->ExceptionClear();
+      LOG(WARNING) << "Could not get current activity";
+      return;
+    }
+    activity_thread_ = env->NewGlobalRef(obj);
+  }
+
+  if (process_state_cares_about_pause_time_.empty()) {
+    // Just attempt to do this the first time.
+    jclass clz = env->FindClass("android/app/ActivityManager");
+    if (clz == NULL) {
+      LOG(WARNING) << "Activity manager class is null";
+      return;
+    }
+    ScopedLocalRef<jclass> activity_manager(env, clz);
+    std::vector<const char*> care_about_pauses;
+    care_about_pauses.push_back("PROCESS_STATE_TOP");
+    care_about_pauses.push_back("PROCESS_STATE_IMPORTANT_BACKGROUND");
+    // Attempt to read the constants and record which process states care about pause times.
+    for (size_t i = 0; i < care_about_pauses.size(); ++i) {
+      int process_state = 0;
+      if (ReadStaticInt(env, activity_manager.get(), care_about_pauses[i], &process_state)) {
+        process_state_cares_about_pause_time_.insert(process_state);
+        VLOG(gc) << "Adding process state " << process_state
+                 << " to set of states which care about pause time";
+      }
+    }
+  }
+
+  if (application_thread_class_ == NULL) {
+    jclass clz = env->FindClass("android/app/ActivityThread$ApplicationThread");
+    if (clz == NULL) {
+      env->ExceptionClear();
+      LOG(WARNING) << "Could not get application thread class";
+      return;
+    }
+    application_thread_class_ = reinterpret_cast<jclass>(env->NewGlobalRef(clz));
+    last_process_state_id_ = env->GetFieldID(application_thread_class_, "mLastProcessState", "I");
+    if (last_process_state_id_ == NULL) {
+      env->ExceptionClear();
+      LOG(WARNING) << "Could not get last process state member";
+      return;
+    }
+  }
+
+  if (application_thread_class_ != NULL && application_thread_ == NULL) {
+    jmethodID get_application_thread =
+        env->GetMethodID(activity_thread_class_, "getApplicationThread",
+                         "()Landroid/app/ActivityThread$ApplicationThread;");
+    if (get_application_thread == NULL) {
+      LOG(WARNING) << "Could not get method ID for get application thread";
+      return;
+    }
+
+    jobject obj = env->CallObjectMethod(activity_thread_, get_application_thread);
+    if (obj == NULL) {
+      LOG(WARNING) << "Could not get application thread";
+      return;
+    }
+
+    application_thread_ = env->NewGlobalRef(obj);
+  }
+
+  if (application_thread_ != NULL && last_process_state_id_ != NULL) {
+    int process_state = env->GetIntField(application_thread_, last_process_state_id_);
+    env->ExceptionClear();
+
+    care_about_pause_times_ = process_state_cares_about_pause_time_.find(process_state) !=
+        process_state_cares_about_pause_time_.end();
+
+    VLOG(gc) << "New process state " << process_state
+             << ", care about pauses: " << care_about_pause_times_;
+  }
 }
 
 void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
@@ -1874,20 +1995,18 @@
     }
   }
 
-  SchedPolicy policy;
-  get_sched_policy(self->GetTid(), &policy);
-  if (policy == SP_FOREGROUND || policy == SP_AUDIO_APP) {
-    // Don't trim the heap if we are a foreground or audio app.
-    return;
-  }
-
   last_trim_time_ms_ = ms_time;
-  JNIEnv* env = self->GetJniEnv();
-  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
-  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
-  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
-                            WellKnownClasses::java_lang_Daemons_requestHeapTrim);
-  CHECK(!env->ExceptionCheck());
+  ListenForProcessStateChange();
+
+  // Trim only if we do not currently care about pause times.
+  if (!care_about_pause_times_) {
+    JNIEnv* env = self->GetJniEnv();
+    DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
+    DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
+    env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
+                              WellKnownClasses::java_lang_Daemons_requestHeapTrim);
+    CHECK(!env->ExceptionCheck());
+  }
 }
 
 size_t Heap::Trim() {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3f91553..c1cff43 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -28,6 +28,7 @@
 #include "gc/collector/gc_type.h"
 #include "globals.h"
 #include "gtest/gtest.h"
+#include "jni.h"
 #include "locks.h"
 #include "offsets.h"
 #include "safe_map.h"
@@ -100,24 +101,6 @@
 };
 const HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
 
-// This comes from ActivityManager and needs to be kept in sync.
-enum ProcessState {
-  PROCESS_STATE_PERSISTENT = 0,
-  PROCESS_STATE_PERSISTENT_UI = 1,
-  PROCESS_STATE_TOP = 2,
-  PROCESS_STATE_IMPORTANT_FOREGROUND = 3,
-  PROCESS_STATE_IMPORTANT_BACKGROUND = 4,
-  PROCESS_STATE_BACKUP = 5,
-  PROCESS_STATE_HEAVY_WEIGHT = 6,
-  PROCESS_STATE_SERVICE = 7,
-  PROCESS_STATE_RECEIVER = 8,
-  PROCESS_STATE_HOME = 9,
-  PROCESS_STATE_LAST_ACTIVITY = 10,
-  PROCESS_STATE_CACHED_ACTIVITY = 11,
-  PROCESS_STATE_CACHED_ACTIVITY_CLIENT = 12,
-  PROCESS_STATE_CACHED_EMPTY = 13,
-};
-
 class Heap {
  public:
   static const size_t kDefaultInitialSize = 2 * MB;
@@ -387,8 +370,8 @@
                              collector::GcType gc_type)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  // Update process state to let the heap know which type of GC to do.
-  void UpdateProcessState(ProcessState process_state);
+  // Called when ActivityThread notifies us that the process state has changed.
+  void ListenForProcessStateChange();
 
   // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
   // Assumes there is only one image space.
@@ -542,6 +525,19 @@
   // The watermark at which a GC is performed inside of registerNativeAllocation.
   size_t native_footprint_limit_;
 
+  // Activity manager members.
+  jclass activity_thread_class_;
+  jclass application_thread_class_;
+  jobject activity_thread_;
+  jobject application_thread_;
+  jfieldID last_process_state_id_;
+
+  // Process states which care about pause times.
+  std::set<int> process_state_cares_about_pause_time_;
+
+  // Whether or not we currently care about pause times.
+  bool care_about_pause_times_;
+
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
   size_t concurrent_start_bytes_;
@@ -561,9 +557,6 @@
   // Bytes which are allocated and managed by native code but still need to be accounted for.
   AtomicInteger native_bytes_allocated_;
 
-  // Current process state, updated by activity manager.
-  ProcessState process_state_;
-
   // Data structure GC overhead.
   AtomicInteger gc_memory_overhead_;
 
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index c0b85f4..c3b66b3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -60,7 +60,7 @@
       const void* new_code;
       if (uninstall) {
         if (forced_interpret_only_ && !method->IsNative() && !method->IsProxyMethod()) {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         } else if (is_initialized || !method->IsStatic() || method->IsConstructor()) {
           new_code = class_linker->GetOatCodeFor(method);
         } else {
@@ -68,9 +68,9 @@
         }
       } else {  // !uninstall
         if (!interpreter_stubs_installed_ || method->IsNative()) {
-          new_code = GetInstrumentationEntryPoint();
+          new_code = GetQuickInstrumentationEntryPoint();
         } else {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         }
       }
       method->SetEntryPointFromCompiledCode(new_code);
@@ -82,15 +82,15 @@
       const void* new_code;
       if (uninstall) {
         if (forced_interpret_only_ && !method->IsNative() && !method->IsProxyMethod()) {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         } else {
           new_code = class_linker->GetOatCodeFor(method);
         }
       } else {  // !uninstall
         if (!interpreter_stubs_installed_ || method->IsNative()) {
-          new_code = GetInstrumentationEntryPoint();
+          new_code = GetQuickInstrumentationEntryPoint();
         } else {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         }
       }
       method->SetEntryPointFromCompiledCode(new_code);
@@ -159,7 +159,7 @@
     LOG(INFO) << "Installing exit stubs in " << thread_name;
   }
   UniquePtr<Context> context(Context::Create());
-  uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc();
+  uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc();
   InstallStackVisitor visitor(thread, context.get(), instrumentation_exit_pc);
   visitor.WalkStack(true);
 
@@ -251,7 +251,7 @@
   std::deque<instrumentation::InstrumentationStackFrame>* stack = thread->GetInstrumentationStack();
   if (stack->size() > 0) {
     Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
-    uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc();
+    uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc();
     RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation);
     visitor.WalkStack(true);
     CHECK_EQ(visitor.frames_removed_, stack->size());
@@ -384,9 +384,9 @@
     method->SetEntryPointFromCompiledCode(code);
   } else {
     if (!interpreter_stubs_installed_ || method->IsNative()) {
-      method->SetEntryPointFromCompiledCode(GetInstrumentationEntryPoint());
+      method->SetEntryPointFromCompiledCode(GetQuickInstrumentationEntryPoint());
     } else {
-      method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
+      method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
     }
   }
 }
@@ -396,8 +396,8 @@
   if (LIKELY(!instrumentation_stubs_installed_)) {
     const void* code = method->GetEntryPointFromCompiledCode();
     DCHECK(code != NULL);
-    if (LIKELY(code != GetResolutionTrampoline(runtime->GetClassLinker()) &&
-               code != GetInterpreterEntryPoint())) {
+    if (LIKELY(code != GetQuickResolutionTrampoline(runtime->GetClassLinker()) &&
+               code != GetQuickToInterpreterBridge())) {
       return code;
     }
   }
@@ -548,7 +548,7 @@
           << " result is " << std::hex << return_value.GetJ();
     }
     self->SetDeoptimizationReturnValue(return_value);
-    return static_cast<uint64_t>(GetDeoptimizationEntryPoint()) |
+    return static_cast<uint64_t>(GetQuickDeoptimizationEntryPoint()) |
         (static_cast<uint64_t>(*return_pc) << 32);
   } else {
     if (kVerboseInstrumentation) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index ef4b95c..6e35d93 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -148,7 +148,7 @@
     }
   } else {
     // Not special, continue with regular interpreter execution.
-    artInterpreterToInterpreterEntry(self, mh, code_item, shadow_frame, result);
+    artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
   }
 }
 
@@ -3039,6 +3039,10 @@
 
 static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
                              ShadowFrame& shadow_frame, JValue result_register) {
+  DCHECK(shadow_frame.GetMethod() == mh.GetMethod() ||
+         shadow_frame.GetMethod()->GetDeclaringClass()->IsProxyClass());
+  DCHECK(!shadow_frame.GetMethod()->IsAbstract());
+  DCHECK(!shadow_frame.GetMethod()->IsNative());
   if (shadow_frame.GetMethod()->IsPreverified()) {
     // Enter the "without access check" interpreter.
     return ExecuteImpl<false>(self, mh, code_item, shadow_frame, result_register);
@@ -3150,8 +3154,7 @@
 }
 
 JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
-                                ShadowFrame& shadow_frame)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+                                ShadowFrame& shadow_frame) {
   DCHECK_EQ(self, Thread::Current());
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
     ThrowStackOverflowError(self);
@@ -3161,10 +3164,9 @@
   return Execute(self, mh, code_item, shadow_frame, JValue());
 }
 
-void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                      const DexFile::CodeItem* code_item,
-                                      ShadowFrame* shadow_frame, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                  const DexFile::CodeItem* code_item,
+                                                  ShadowFrame* shadow_frame, JValue* result) {
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
     ThrowStackOverflowError(self);
     return;
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 17884b9..af4a147 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -47,9 +47,9 @@
                                        ShadowFrame& shadow_frame)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                const DexFile::CodeItem* code_item,
-                                                ShadowFrame* shadow_frame, JValue* result)
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                  const DexFile::CodeItem* code_item,
+                                                  ShadowFrame* shadow_frame, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 }  // namespace interpreter
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6681d56..d1de6e6 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2853,10 +2853,11 @@
 
   VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
 
-  bool result = true;
+  bool was_successful = false;
   void* sym = dlsym(handle, "JNI_OnLoad");
   if (sym == NULL) {
     VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
+    was_successful = true;
   } else {
     // Call JNI_OnLoad.  We have to override the current class
     // loader, which will always be "null" since the stuff at the
@@ -2876,7 +2877,9 @@
 
     self->SetClassLoaderOverride(old_class_loader);
 
-    if (IsBadJniVersion(version)) {
+    if (version == JNI_ERR) {
+      StringAppendF(&detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
+    } else if (IsBadJniVersion(version)) {
       StringAppendF(&detail, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
                     path.c_str(), version);
       // It's unwise to call dlclose() here, but we can mark it
@@ -2885,14 +2888,15 @@
       // be some partially-initialized stuff accessible through
       // newly-registered native method calls.  We could try to
       // unregister them, but that doesn't seem worthwhile.
-      result = false;
+    } else {
+      was_successful = true;
     }
-    VLOG(jni) << "[Returned " << (result ? "successfully" : "failure")
+    VLOG(jni) << "[Returned " << (was_successful ? "success" : "failure")
               << " from JNI_OnLoad in \"" << path << "\"]";
   }
 
-  library->SetResult(result);
-  return result;
+  library->SetResult(was_successful);
+  return was_successful;
 }
 
 void* JavaVMExt::FindCodeForNativeMethod(AbstractMethod* m) {
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index ad66ada..fcac481 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -144,6 +144,10 @@
     return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
   }
 
+  static Offset SelfOffset() {
+    return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
+  }
+
   Thread* const self;
   JavaVMExt* vm;
 
diff --git a/runtime/leb128.h b/runtime/leb128.h
index ca955b0..6041f8c 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -24,8 +24,8 @@
 // Reads an unsigned LEB128 value, updating the given pointer to point
 // just past the end of the read value. This function tolerates
 // non-zero high-order bits in the fifth encoded byte.
-static inline uint32_t DecodeUnsignedLeb128(const byte** data) {
-  const byte* ptr = *data;
+static inline uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
+  const uint8_t* ptr = *data;
   int result = *(ptr++);
   if (result > 0x7f) {
     int cur = *(ptr++);
@@ -53,15 +53,15 @@
 // just past the end of the read value. This function tolerates
 // non-zero high-order bits in the fifth encoded byte.
 // It is possible for this function to return -1.
-static inline int32_t DecodeUnsignedLeb128P1(const byte** data) {
+static inline int32_t DecodeUnsignedLeb128P1(const uint8_t** data) {
   return DecodeUnsignedLeb128(data) - 1;
 }
 
 // Reads a signed LEB128 value, updating the given pointer to point
 // just past the end of the read value. This function tolerates
 // non-zero high-order bits in the fifth encoded byte.
-static inline int32_t DecodeSignedLeb128(const byte** data) {
-  const byte* ptr = *data;
+static inline int32_t DecodeSignedLeb128(const uint8_t** data) {
+  const uint8_t* ptr = *data;
   int32_t result = *(ptr++);
   if (result <= 0x7f) {
     result = (result << 25) >> 25;
@@ -103,22 +103,6 @@
   return count;
 }
 
-// Writes a 32-bit value in unsigned ULEB128 format.
-// Returns the updated pointer.
-static inline uint8_t* WriteUnsignedLeb128(uint8_t* ptr, uint32_t data) {
-  while (true) {
-    uint8_t out = data & 0x7f;
-    if (out != data) {
-      *ptr++ = out | 0x80;
-      data >>= 7;
-    } else {
-      *ptr++ = out;
-      break;
-    }
-  }
-  return ptr;
-}
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_LEB128_H_
diff --git a/runtime/mapping_table.h b/runtime/mapping_table.h
new file mode 100644
index 0000000..2162008
--- /dev/null
+++ b/runtime/mapping_table.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MAPPING_TABLE_H_
+#define ART_RUNTIME_MAPPING_TABLE_H_
+
+#include "base/logging.h"
+#include "leb128.h"
+
+namespace art {
+
+// A utility for processing the raw uleb128 encoded mapping table created by the quick compiler.
+class MappingTable {
+ public:
+  explicit MappingTable(const uint8_t* encoded_map) : encoded_table_(encoded_map) {
+  }
+
+  uint32_t TotalSize() const PURE {
+    const uint8_t* table = encoded_table_;
+    if (table == NULL) {
+      return 0;
+    } else {
+      return DecodeUnsignedLeb128(&table);
+    }
+  }
+
+  uint32_t DexToPcSize() const PURE {
+    const uint8_t* table = encoded_table_;
+    if (table == NULL) {
+      return 0;
+    } else {
+      uint32_t total_size = DecodeUnsignedLeb128(&table);
+      uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+      return total_size - pc_to_dex_size;
+    }
+  }
+
+  const uint8_t* FirstDexToPcPtr() const {
+    const uint8_t* table = encoded_table_;
+    if (table != NULL) {
+      DecodeUnsignedLeb128(&table);  // Total_size, unused.
+      uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+      for (uint32_t i = 0; i < pc_to_dex_size; ++i) {
+        DecodeUnsignedLeb128(&table);  // Move ptr past native PC.
+        DecodeUnsignedLeb128(&table);  // Move ptr past dex PC.
+      }
+    }
+    return table;
+  }
+
+  class DexToPcIterator {
+   public:
+    DexToPcIterator(const MappingTable* table, uint32_t element) :
+        table_(table), element_(element), end_(table_->DexToPcSize()), encoded_table_ptr_(NULL),
+        native_pc_offset_(0), dex_pc_(0) {
+      if (element == 0) {
+        encoded_table_ptr_ = table_->FirstDexToPcPtr();
+        native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+        dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+      } else {
+        DCHECK_EQ(table_->DexToPcSize(), element);
+      }
+    }
+    uint32_t NativePcOffset() const {
+      return native_pc_offset_;
+    }
+    uint32_t DexPc() const {
+      return dex_pc_;
+    }
+    void operator++() {
+      ++element_;
+      if (element_ != end_) {  // Avoid reading beyond the end of the table.
+        native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+        dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+      }
+    }
+    bool operator==(const DexToPcIterator& rhs) const {
+      CHECK(table_ == rhs.table_);
+      return element_ == rhs.element_;
+    }
+    bool operator!=(const DexToPcIterator& rhs) const {
+      CHECK(table_ == rhs.table_);
+      return element_ != rhs.element_;
+    }
+
+   private:
+    const MappingTable* const table_;  // The original table.
+    uint32_t element_;  // A value in the range 0 to end_.
+    const uint32_t end_;  // Equal to table_->DexToPcSize().
+    const uint8_t* encoded_table_ptr_;  // Either NULL or points to encoded data after this entry.
+    uint32_t native_pc_offset_;  // The current value of native pc offset.
+    uint32_t dex_pc_;  // The current value of dex pc.
+  };
+
+  DexToPcIterator DexToPcBegin() const {
+    return DexToPcIterator(this, 0);
+  }
+
+  DexToPcIterator DexToPcEnd() const {
+    uint32_t size = DexToPcSize();
+    return DexToPcIterator(this, size);
+  }
+
+  uint32_t PcToDexSize() const PURE {
+    const uint8_t* table = encoded_table_;
+    if (table == NULL) {
+      return 0;
+    } else {
+      DecodeUnsignedLeb128(&table);  // Total_size, unused.
+      uint32_t pc_to_dex_size = DecodeUnsignedLeb128(&table);
+      return pc_to_dex_size;
+    }
+  }
+
+  const uint8_t* FirstPcToDexPtr() const {
+    const uint8_t* table = encoded_table_;
+    if (table != NULL) {
+      DecodeUnsignedLeb128(&table);  // Total_size, unused.
+      DecodeUnsignedLeb128(&table);  // PC to Dex size, unused.
+    }
+    return table;
+  }
+
+  class PcToDexIterator {
+   public:
+    PcToDexIterator(const MappingTable* table, uint32_t element) :
+        table_(table), element_(element), end_(table_->PcToDexSize()), encoded_table_ptr_(NULL),
+        native_pc_offset_(0), dex_pc_(0) {
+      if (element == 0) {
+        encoded_table_ptr_ = table_->FirstPcToDexPtr();
+        native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+        dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+      } else {
+        DCHECK_EQ(table_->PcToDexSize(), element);
+      }
+    }
+    uint32_t NativePcOffset() const {
+      return native_pc_offset_;
+    }
+    uint32_t DexPc() const {
+      return dex_pc_;
+    }
+    void operator++() {
+      ++element_;
+      if (element_ != end_) {  // Avoid reading beyond the end of the table.
+        native_pc_offset_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+        dex_pc_ = DecodeUnsignedLeb128(&encoded_table_ptr_);
+      }
+    }
+    bool operator==(const PcToDexIterator& rhs) const {
+      CHECK(table_ == rhs.table_);
+      return element_ == rhs.element_;
+    }
+    bool operator!=(const PcToDexIterator& rhs) const {
+      CHECK(table_ == rhs.table_);
+      return element_ != rhs.element_;
+    }
+
+   private:
+    const MappingTable* const table_;  // The original table.
+    uint32_t element_;  // A value in the range 0 to PcToDexSize.
+    const uint32_t end_;  // Equal to table_->PcToDexSize().
+    const uint8_t* encoded_table_ptr_;  // Either NULL or points to encoded data after this entry.
+    uint32_t native_pc_offset_;  // The current value of native pc offset.
+    uint32_t dex_pc_;  // The current value of dex pc.
+  };
+
+  PcToDexIterator PcToDexBegin() const {
+    return PcToDexIterator(this, 0);
+  }
+
+  PcToDexIterator PcToDexEnd() const {
+    uint32_t size = PcToDexSize();
+    return PcToDexIterator(this, size);
+  }
+
+ private:
+  const uint8_t* const encoded_table_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_MAPPING_TABLE_H_
diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h
index d235e3e..d47b3eb 100644
--- a/runtime/mirror/abstract_method-inl.h
+++ b/runtime/mirror/abstract_method-inl.h
@@ -114,11 +114,11 @@
   if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
     return;
   }
-  if (pc == GetInstrumentationExitPc()) {
+  if (pc == GetQuickInstrumentationExitPc()) {
     return;
   }
   const void* code = GetEntryPointFromCompiledCode();
-  if (code == GetInterpreterEntryPoint() || code == GetInstrumentationEntryPoint()) {
+  if (code == GetCompiledCodeToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) {
     return;
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -144,22 +144,22 @@
 
 inline uint32_t AbstractMethod::GetOatMappingTableOffset() const {
   DCHECK(!Runtime::Current()->IsStarted());
-  return reinterpret_cast<uint32_t>(GetMappingTableRaw());
+  return reinterpret_cast<uint32_t>(GetMappingTable());
 }
 
 inline void AbstractMethod::SetOatMappingTableOffset(uint32_t mapping_table_offset) {
   DCHECK(!Runtime::Current()->IsStarted());
-  SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table_offset));
+  SetMappingTable(reinterpret_cast<const uint8_t*>(mapping_table_offset));
 }
 
 inline uint32_t AbstractMethod::GetOatVmapTableOffset() const {
   DCHECK(!Runtime::Current()->IsStarted());
-  return reinterpret_cast<uint32_t>(GetVmapTableRaw());
+  return reinterpret_cast<uint32_t>(GetVmapTable());
 }
 
 inline void AbstractMethod::SetOatVmapTableOffset(uint32_t vmap_table_offset) {
   DCHECK(!Runtime::Current()->IsStarted());
-  SetVmapTable(reinterpret_cast<uint16_t*>(vmap_table_offset));
+  SetVmapTable(reinterpret_cast<uint8_t*>(vmap_table_offset));
 }
 
 inline void AbstractMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) {
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 4d7f99e..b3db5c2 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -24,6 +24,7 @@
 #include "gc/accounting/card_table-inl.h"
 #include "interpreter/interpreter.h"
 #include "jni_internal.h"
+#include "mapping_table.h"
 #include "object-inl.h"
 #include "object_array.h"
 #include "object_array-inl.h"
@@ -157,43 +158,27 @@
   return pc - reinterpret_cast<uintptr_t>(code);
 }
 
-// Find the lowest-address native safepoint pc for a given dex pc
-uintptr_t AbstractMethod::ToFirstNativeSafepointPc(const uint32_t dex_pc) const {
-#if !defined(ART_USE_PORTABLE_COMPILER)
-  const uint32_t* mapping_table = GetPcToDexMappingTable();
-  if (mapping_table == NULL) {
-    DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
-    return DexFile::kDexNoIndex;   // Special no mapping case
-  }
-  size_t mapping_table_length = GetPcToDexMappingTableLength();
-  for (size_t i = 0; i < mapping_table_length; i += 2) {
-    if (mapping_table[i + 1] == dex_pc) {
-      const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
-      return mapping_table[i] + reinterpret_cast<uintptr_t>(code);
-    }
-  }
-  LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
-             << " in " << PrettyMethod(this);
-  return 0;
-#else
-  // Compiler LLVM doesn't use the machine pc, we just use dex pc instead.
-  return static_cast<uint32_t>(dex_pc);
-#endif
-}
-
 uint32_t AbstractMethod::ToDexPc(const uintptr_t pc) const {
 #if !defined(ART_USE_PORTABLE_COMPILER)
-  const uint32_t* mapping_table = GetPcToDexMappingTable();
-  if (mapping_table == NULL) {
+  MappingTable table(GetMappingTable());
+  if (table.TotalSize() == 0) {
     DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
     return DexFile::kDexNoIndex;   // Special no mapping case
   }
-  size_t mapping_table_length = GetPcToDexMappingTableLength();
   const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(code);
-  for (size_t i = 0; i < mapping_table_length; i += 2) {
-    if (mapping_table[i] == sought_offset) {
-      return mapping_table[i + 1];
+  // Assume the caller wants a pc-to-dex mapping so check here first.
+  typedef MappingTable::PcToDexIterator It;
+  for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+    if (cur.NativePcOffset() == sought_offset) {
+      return cur.DexPc();
+    }
+  }
+  // Now check dex-to-pc mappings.
+  typedef MappingTable::DexToPcIterator It2;
+  for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+    if (cur.NativePcOffset() == sought_offset) {
+      return cur.DexPc();
     }
   }
   LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
@@ -207,21 +192,28 @@
 }
 
 uintptr_t AbstractMethod::ToNativePc(const uint32_t dex_pc) const {
-  const uint32_t* mapping_table = GetDexToPcMappingTable();
-  if (mapping_table == NULL) {
+  MappingTable table(GetMappingTable());
+  if (table.TotalSize() == 0) {
     DCHECK_EQ(dex_pc, 0U);
     return 0;   // Special no mapping/pc == 0 case
   }
-  size_t mapping_table_length = GetDexToPcMappingTableLength();
-  for (size_t i = 0; i < mapping_table_length; i += 2) {
-    uint32_t map_offset = mapping_table[i];
-    uint32_t map_dex_offset = mapping_table[i + 1];
-    if (map_dex_offset == dex_pc) {
+  // Assume the caller wants a dex-to-pc mapping so check here first.
+  typedef MappingTable::DexToPcIterator It;
+  for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+    if (cur.DexPc() == dex_pc) {
       const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
-      return reinterpret_cast<uintptr_t>(code) + map_offset;
+      return reinterpret_cast<uintptr_t>(code) + cur.NativePcOffset();
     }
   }
-  LOG(FATAL) << "Looking up Dex PC not contained in method, 0x" << std::hex << dex_pc
+  // Now check pc-to-dex mappings.
+  typedef MappingTable::PcToDexIterator It2;
+  for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+    if (cur.DexPc() == dex_pc) {
+      const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
+      return reinterpret_cast<uintptr_t>(code) + cur.NativePcOffset();
+    }
+  }
+  LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
              << " in " << PrettyMethod(this);
   return 0;
 }
@@ -321,6 +313,7 @@
   return native_method != jni_stub;
 }
 
+extern "C" void art_work_around_app_jni_bugs(JNIEnv*, jobject);
 void AbstractMethod::RegisterNative(Thread* self, const void* native_method) {
   DCHECK(Thread::Current() == self);
   CHECK(IsNative()) << PrettyMethod(this);
@@ -332,10 +325,10 @@
     // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct
     // the native method to runtime support and store the target somewhere runtime support will
     // find it.
-#if defined(__arm__) && !defined(ART_USE_PORTABLE_COMPILER)
-    SetNativeMethod(native_method);
-#else
+#if defined(__i386__)
     UNIMPLEMENTED(FATAL);
+#else
+    SetNativeMethod(reinterpret_cast<void*>(art_work_around_app_jni_bugs));
 #endif
     SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, gc_map_),
         reinterpret_cast<const uint8_t*>(native_method), false);
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 2e6e262..5b8c61c 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -246,54 +246,13 @@
     return OFFSET_OF_OBJECT_MEMBER(AbstractMethod, entry_point_from_compiled_code_);
   }
 
-  const uint32_t* GetMappingTable() const {
-    const uint32_t* map = GetMappingTableRaw();
-    if (map == NULL) {
-      return map;
-    }
-    return map + 1;
+  // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
+  const uint8_t* GetMappingTable() const {
+    return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), false);
   }
 
-  uint32_t GetPcToDexMappingTableLength() const {
-    const uint32_t* map = GetMappingTableRaw();
-    if (map == NULL) {
-      return 0;
-    }
-    return map[2];
-  }
-
-  const uint32_t* GetPcToDexMappingTable() const {
-    const uint32_t* map = GetMappingTableRaw();
-    if (map == NULL) {
-      return map;
-    }
-    return map + 3;
-  }
-
-
-  uint32_t GetDexToPcMappingTableLength() const {
-    const uint32_t* map = GetMappingTableRaw();
-    if (map == NULL) {
-      return 0;
-    }
-    return map[1] - map[2];
-  }
-
-  const uint32_t* GetDexToPcMappingTable() const {
-    const uint32_t* map = GetMappingTableRaw();
-    if (map == NULL) {
-      return map;
-    }
-    return map + 3 + map[2];
-  }
-
-
-  const uint32_t* GetMappingTableRaw() const {
-    return GetFieldPtr<const uint32_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_), false);
-  }
-
-  void SetMappingTable(const uint32_t* mapping_table) {
-    SetFieldPtr<const uint32_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_),
+  void SetMappingTable(const uint8_t* mapping_table) {
+    SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, mapping_table_),
                                  mapping_table, false);
   }
 
@@ -301,13 +260,13 @@
 
   void SetOatMappingTableOffset(uint32_t mapping_table_offset);
 
-  // Callers should wrap the uint16_t* in a VmapTable instance for convenient access.
-  const uint16_t* GetVmapTableRaw() const {
-    return GetFieldPtr<const uint16_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), false);
+  // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
+  const uint8_t* GetVmapTable() const {
+    return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), false);
   }
 
-  void SetVmapTable(const uint16_t* vmap_table) {
-    SetFieldPtr<const uint16_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), vmap_table, false);
+  void SetVmapTable(const uint8_t* vmap_table) {
+    SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, vmap_table_), vmap_table, false);
   }
 
   uint32_t GetOatVmapTableOffset() const;
@@ -403,10 +362,6 @@
   // Converts a dex PC to a native PC.
   uintptr_t ToNativePc(const uint32_t dex_pc) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Converts a dex PC to the first corresponding safepoint PC.
-  uintptr_t ToFirstNativeSafepointPc(const uint32_t dex_pc)
-      const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Find the catch block for the given exception type and dex_pc. When a catch block is found,
   // indicates whether the found catch block is responsible for clearing the exception or whether
   // a move-exception instruction is present.
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 97126cb..c64caa8 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -101,7 +101,8 @@
 uint16_t String::CharAt(int32_t index) const {
   // TODO: do we need this? Equals is the only caller, and could
   // bounds check itself.
-  if (index < 0 || index >= count_) {
+  DCHECK_GE(count_, 0);  // Ensures the unsigned comparison below is safe.
+  if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(count_))) {
     Thread* self = Thread::Current();
     ThrowLocation throw_location = self->GetCurrentLocationForThrow();
     self->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;",
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index b352d08..5fb53df 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -131,11 +131,9 @@
   return env->NewStringUTF(kIsDebugBuild ? "libartd.so" : "libart.so");
 }
 
-#if !defined(ART_USE_PORTABLE_COMPILER)
 static void DisableCheckJniCallback(Thread* t, void*) {
   t->GetJniEnv()->SetCheckJniEnabled(false);
 }
-#endif
 
 static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVersion) {
   // This is the target SDK version of the app we're about to run.
@@ -144,8 +142,6 @@
   if (targetSdkVersion > 0 && targetSdkVersion <= 13 /* honeycomb-mr2 */) {
     Runtime* runtime = Runtime::Current();
     JavaVMExt* vm = runtime->GetJavaVM();
-
-#if !defined(ART_USE_PORTABLE_COMPILER)
     if (vm->check_jni) {
       LOG(WARNING) << "Turning off CheckJNI so we can turn on JNI app bug workarounds...";
       Thread* self = static_cast<JNIEnvExt*>(env)->self;
@@ -158,11 +154,6 @@
               << targetSdkVersion << "...";
 
     vm->work_around_app_jni_bugs = true;
-#else
-    UNUSED(env);
-    LOG(WARNING) << "LLVM does not work-around app jni bugs.";
-    vm->work_around_app_jni_bugs = false;
-#endif
   }
 }
 
@@ -216,10 +207,6 @@
   Runtime::Current()->GetHeap()->ConcurrentGC(self);
 }
 
-static void VMRuntime_updateProcessState(JNIEnv* env, jobject, jint processState) {
-  Runtime::Current()->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(processState));
-}
-
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
   NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -239,7 +226,6 @@
   NATIVE_METHOD(VMRuntime, trimHeap, "()V"),
   NATIVE_METHOD(VMRuntime, vmVersion, "()Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, vmLibrary, "()Ljava/lang/String;"),
-  NATIVE_METHOD(VMRuntime, updateProcessState, "(I)V"),
 };
 
 void register_dalvik_system_VMRuntime(JNIEnv* env) {
diff --git a/runtime/oat.cc b/runtime/oat.cc
index e606953..c01f77c 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '0', '6', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '0', '7', '\0' };
 
 OatHeader::OatHeader() {
   memset(this, 0, sizeof(*this));
@@ -57,10 +57,13 @@
   UpdateChecksum(image_file_location.data(), image_file_location_size_);
 
   executable_offset_ = 0;
-  interpreter_to_interpreter_entry_offset_ = 0;
-  interpreter_to_quick_entry_offset_ = 0;
+  interpreter_to_interpreter_bridge_offset_ = 0;
+  interpreter_to_compiled_code_bridge_offset_ = 0;
+  jni_dlsym_lookup_offset_ = 0;
   portable_resolution_trampoline_offset_ = 0;
+  portable_to_interpreter_bridge_offset_ = 0;
   quick_resolution_trampoline_offset_ = 0;
+  quick_to_interpreter_bridge_offset_ = 0;
 }
 
 bool OatHeader::IsValid() const {
@@ -111,42 +114,61 @@
   UpdateChecksum(&executable_offset_, sizeof(executable_offset));
 }
 
-const void* OatHeader::GetInterpreterToInterpreterEntry() const {
-  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToInterpreterEntryOffset();
+const void* OatHeader::GetInterpreterToInterpreterBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToInterpreterBridgeOffset();
 }
 
-uint32_t OatHeader::GetInterpreterToInterpreterEntryOffset() const {
+uint32_t OatHeader::GetInterpreterToInterpreterBridgeOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(interpreter_to_interpreter_entry_offset_, executable_offset_);
-  return interpreter_to_interpreter_entry_offset_;
+  CHECK_GE(interpreter_to_interpreter_bridge_offset_, executable_offset_);
+  return interpreter_to_interpreter_bridge_offset_;
 }
 
-void OatHeader::SetInterpreterToInterpreterEntryOffset(uint32_t offset) {
+void OatHeader::SetInterpreterToInterpreterBridgeOffset(uint32_t offset) {
   CHECK(offset == 0 || offset >= executable_offset_);
   DCHECK(IsValid());
-  DCHECK_EQ(interpreter_to_interpreter_entry_offset_, 0U) << offset;
+  DCHECK_EQ(interpreter_to_interpreter_bridge_offset_, 0U) << offset;
 
-  interpreter_to_interpreter_entry_offset_ = offset;
-  UpdateChecksum(&interpreter_to_interpreter_entry_offset_, sizeof(offset));
+  interpreter_to_interpreter_bridge_offset_ = offset;
+  UpdateChecksum(&interpreter_to_interpreter_bridge_offset_, sizeof(offset));
 }
 
-const void* OatHeader::GetInterpreterToQuickEntry() const {
-  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToQuickEntryOffset();
+const void* OatHeader::GetInterpreterToCompiledCodeBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToCompiledCodeBridgeOffset();
 }
 
-uint32_t OatHeader::GetInterpreterToQuickEntryOffset() const {
+uint32_t OatHeader::GetInterpreterToCompiledCodeBridgeOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(interpreter_to_quick_entry_offset_, interpreter_to_interpreter_entry_offset_);
-  return interpreter_to_quick_entry_offset_;
+  CHECK_GE(interpreter_to_compiled_code_bridge_offset_, interpreter_to_interpreter_bridge_offset_);
+  return interpreter_to_compiled_code_bridge_offset_;
 }
 
-void OatHeader::SetInterpreterToQuickEntryOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= interpreter_to_interpreter_entry_offset_);
+void OatHeader::SetInterpreterToCompiledCodeBridgeOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= interpreter_to_interpreter_bridge_offset_);
   DCHECK(IsValid());
-  DCHECK_EQ(interpreter_to_quick_entry_offset_, 0U) << offset;
+  DCHECK_EQ(interpreter_to_compiled_code_bridge_offset_, 0U) << offset;
 
-  interpreter_to_quick_entry_offset_ = offset;
-  UpdateChecksum(&interpreter_to_quick_entry_offset_, sizeof(offset));
+  interpreter_to_compiled_code_bridge_offset_ = offset;
+  UpdateChecksum(&interpreter_to_compiled_code_bridge_offset_, sizeof(offset));
+}
+
+const void* OatHeader::GetJniDlsymLookup() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetJniDlsymLookupOffset();
+}
+
+uint32_t OatHeader::GetJniDlsymLookupOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(jni_dlsym_lookup_offset_, interpreter_to_compiled_code_bridge_offset_);
+  return jni_dlsym_lookup_offset_;
+}
+
+void OatHeader::SetJniDlsymLookupOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= interpreter_to_compiled_code_bridge_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(jni_dlsym_lookup_offset_, 0U) << offset;
+
+  jni_dlsym_lookup_offset_ = offset;
+  UpdateChecksum(&jni_dlsym_lookup_offset_, sizeof(offset));
 }
 
 const void* OatHeader::GetPortableResolutionTrampoline() const {
@@ -155,12 +177,12 @@
 
 uint32_t OatHeader::GetPortableResolutionTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(portable_resolution_trampoline_offset_, interpreter_to_quick_entry_offset_);
+  CHECK_GE(portable_resolution_trampoline_offset_, jni_dlsym_lookup_offset_);
   return portable_resolution_trampoline_offset_;
 }
 
 void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= interpreter_to_quick_entry_offset_);
+  CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(portable_resolution_trampoline_offset_, 0U) << offset;
 
@@ -168,18 +190,37 @@
   UpdateChecksum(&portable_resolution_trampoline_offset_, sizeof(offset));
 }
 
+const void* OatHeader::GetPortableToInterpreterBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetPortableToInterpreterBridgeOffset();
+}
+
+uint32_t OatHeader::GetPortableToInterpreterBridgeOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(portable_to_interpreter_bridge_offset_, portable_resolution_trampoline_offset_);
+  return portable_to_interpreter_bridge_offset_;
+}
+
+void OatHeader::SetPortableToInterpreterBridgeOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(portable_to_interpreter_bridge_offset_, 0U) << offset;
+
+  portable_to_interpreter_bridge_offset_ = offset;
+  UpdateChecksum(&portable_to_interpreter_bridge_offset_, sizeof(offset));
+}
+
 const void* OatHeader::GetQuickResolutionTrampoline() const {
   return reinterpret_cast<const uint8_t*>(this) + GetQuickResolutionTrampolineOffset();
 }
 
 uint32_t OatHeader::GetQuickResolutionTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(quick_resolution_trampoline_offset_, portable_resolution_trampoline_offset_);
+  CHECK_GE(quick_resolution_trampoline_offset_, portable_to_interpreter_bridge_offset_);
   return quick_resolution_trampoline_offset_;
 }
 
 void OatHeader::SetQuickResolutionTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_);
+  CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(quick_resolution_trampoline_offset_, 0U) << offset;
 
@@ -187,6 +228,25 @@
   UpdateChecksum(&quick_resolution_trampoline_offset_, sizeof(offset));
 }
 
+const void* OatHeader::GetQuickToInterpreterBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetQuickToInterpreterBridgeOffset();
+}
+
+uint32_t OatHeader::GetQuickToInterpreterBridgeOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(quick_to_interpreter_bridge_offset_, quick_resolution_trampoline_offset_);
+  return quick_to_interpreter_bridge_offset_;
+}
+
+void OatHeader::SetQuickToInterpreterBridgeOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= quick_resolution_trampoline_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(quick_to_interpreter_bridge_offset_, 0U) << offset;
+
+  quick_to_interpreter_bridge_offset_ = offset;
+  UpdateChecksum(&quick_to_interpreter_bridge_offset_, sizeof(offset));
+}
+
 uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
   CHECK(IsValid());
   return image_file_location_oat_checksum_;
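
The getters and setters above encode a fixed on-disk order: executable code, then the interpreter-to-interpreter and interpreter-to-compiled-code bridges, the JNI dlsym lookup stub, and the portable and quick resolution trampolines each followed by their interpreter bridge. A minimal sketch of a writer honoring that contract (the oat_header pointer and the *_size variables are hypothetical, not part of this change):

  uint32_t offset = executable_offset;
  oat_header->SetExecutableOffset(offset);
  oat_header->SetInterpreterToInterpreterBridgeOffset(offset);
  offset += interpreter_to_interpreter_bridge_size;
  oat_header->SetInterpreterToCompiledCodeBridgeOffset(offset);
  offset += interpreter_to_compiled_code_bridge_size;
  oat_header->SetJniDlsymLookupOffset(offset);
  offset += jni_dlsym_lookup_size;
  oat_header->SetPortableResolutionTrampolineOffset(offset);
  offset += portable_resolution_trampoline_size;
  oat_header->SetPortableToInterpreterBridgeOffset(offset);
  offset += portable_to_interpreter_bridge_size;
  oat_header->SetQuickResolutionTrampolineOffset(offset);
  offset += quick_resolution_trampoline_size;
  oat_header->SetQuickToInterpreterBridgeOffset(offset);

Assigning any of these out of order trips the CHECK in the corresponding setter, and the CHECK_GE in its getter.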
diff --git a/runtime/oat.h b/runtime/oat.h
index 4bd1871..a5c6bed 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -44,18 +44,32 @@
   }
   uint32_t GetExecutableOffset() const;
   void SetExecutableOffset(uint32_t executable_offset);
-  const void* GetInterpreterToInterpreterEntry() const;
-  uint32_t GetInterpreterToInterpreterEntryOffset() const;
-  void SetInterpreterToInterpreterEntryOffset(uint32_t offset);
-  const void* GetInterpreterToQuickEntry() const;
-  uint32_t GetInterpreterToQuickEntryOffset() const;
-  void SetInterpreterToQuickEntryOffset(uint32_t offset);
+
+  const void* GetInterpreterToInterpreterBridge() const;
+  uint32_t GetInterpreterToInterpreterBridgeOffset() const;
+  void SetInterpreterToInterpreterBridgeOffset(uint32_t offset);
+  const void* GetInterpreterToCompiledCodeBridge() const;
+  uint32_t GetInterpreterToCompiledCodeBridgeOffset() const;
+  void SetInterpreterToCompiledCodeBridgeOffset(uint32_t offset);
+
+  const void* GetJniDlsymLookup() const;
+  uint32_t GetJniDlsymLookupOffset() const;
+  void SetJniDlsymLookupOffset(uint32_t offset);
+
   const void* GetPortableResolutionTrampoline() const;
   uint32_t GetPortableResolutionTrampolineOffset() const;
   void SetPortableResolutionTrampolineOffset(uint32_t offset);
+  const void* GetPortableToInterpreterBridge() const;
+  uint32_t GetPortableToInterpreterBridgeOffset() const;
+  void SetPortableToInterpreterBridgeOffset(uint32_t offset);
+
   const void* GetQuickResolutionTrampoline() const;
   uint32_t GetQuickResolutionTrampolineOffset() const;
   void SetQuickResolutionTrampolineOffset(uint32_t offset);
+  const void* GetQuickToInterpreterBridge() const;
+  uint32_t GetQuickToInterpreterBridgeOffset() const;
+  void SetQuickToInterpreterBridgeOffset(uint32_t offset);
+
   InstructionSet GetInstructionSet() const;
   uint32_t GetImageFileLocationOatChecksum() const;
   uint32_t GetImageFileLocationOatDataBegin() const;
@@ -74,10 +88,13 @@
   InstructionSet instruction_set_;
   uint32_t dex_file_count_;
   uint32_t executable_offset_;
-  uint32_t interpreter_to_interpreter_entry_offset_;
-  uint32_t interpreter_to_quick_entry_offset_;
+  uint32_t interpreter_to_interpreter_bridge_offset_;
+  uint32_t interpreter_to_compiled_code_bridge_offset_;
+  uint32_t jni_dlsym_lookup_offset_;
   uint32_t portable_resolution_trampoline_offset_;
+  uint32_t portable_to_interpreter_bridge_offset_;
   uint32_t quick_resolution_trampoline_offset_;
+  uint32_t quick_to_interpreter_bridge_offset_;
 
   uint32_t image_file_location_oat_checksum_;
   uint32_t image_file_location_oat_data_begin_;
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 7bffc8c..93e98ad 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -28,6 +28,7 @@
 #include "mirror/object-inl.h"
 #include "os.h"
 #include "utils.h"
+#include "vmap_table.h"
 
 namespace art {
 
@@ -416,9 +417,10 @@
       DCHECK_EQ(0U, static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
                                           __builtin_popcount(fp_spill_mask_)));
     } else {
-      const uint16_t* vmap_table_ = reinterpret_cast<const uint16_t*>(begin_ + vmap_table_offset_);
-      DCHECK_EQ(vmap_table_[0], static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
-                                                      __builtin_popcount(fp_spill_mask_)));
+      VmapTable vmap_table(reinterpret_cast<const uint8_t*>(begin_ + vmap_table_offset_));
+
+      DCHECK_EQ(vmap_table.Size(), static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
+                                                         __builtin_popcount(fp_spill_mask_)));
     }
   } else {
     DCHECK_EQ(vmap_table_offset_, 0U);
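
The raw uint16_t read is replaced because the vmap table encoding changed: instead of a uint16_t array whose first element is the entry count, the table is now a stream of ULEB128 bytes, size first, then one variable-length entry per promoted register (see the new runtime/vmap_table.h below). As a worked encoding example, the three entries {1, 3, 0xffff} become the six bytes 03 01 03 ff ff 03, since 0xffff needs three ULEB128 bytes (0xff 0xff 0x03).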
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index fff6c8a..6503014 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -97,11 +97,11 @@
     const void* GetCode() const;
     uint32_t GetCodeSize() const;
 
-    const uint32_t* GetMappingTable() const {
-      return GetOatPointer<const uint32_t*>(mapping_table_offset_);
+    const uint8_t* GetMappingTable() const {
+      return GetOatPointer<const uint8_t*>(mapping_table_offset_);
     }
-    const uint16_t* GetVmapTable() const {
-      return GetOatPointer<const uint16_t*>(vmap_table_offset_);
+    const uint8_t* GetVmapTable() const {
+      return GetOatPointer<const uint8_t*>(vmap_table_offset_);
     }
     const uint8_t* GetNativeGcMap() const {
       return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc
index 5d0dca9..68595c8 100644
--- a/runtime/oat_test.cc
+++ b/runtime/oat_test.cc
@@ -141,7 +141,7 @@
 TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(52U, sizeof(OatHeader));
+  EXPECT_EQ(64U, sizeof(OatHeader));
   EXPECT_EQ(28U, sizeof(OatMethodOffsets));
 }
 
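The expected size grows by exactly the three uint32_t offsets added to OatHeader above (jni_dlsym_lookup_offset_, portable_to_interpreter_bridge_offset_ and quick_to_interpreter_bridge_offset_): 52 + 3 * 4 = 64 bytes. A compile-time restatement of that arithmetic (sketch only, assuming the tree's COMPILE_ASSERT macro is available here):

  COMPILE_ASSERT(52 + 3 * sizeof(uint32_t) == 64, oat_header_grew_by_three_offset_fields);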
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index fa7763e..3639a80 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -411,6 +411,10 @@
     shorty_ = NULL;
   }
 
+  const mirror::AbstractMethod* GetMethod() const {
+    return method_;
+  }
+
   const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     const DexFile& dex_file = GetDexFile();
     uint32_t dex_method_idx = method_->GetDexMethodIndex();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index aeb15f0..e1a752a 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -24,6 +24,7 @@
 #include "object_utils.h"
 #include "thread_list.h"
 #include "throw_location.h"
+#include "vmap_table.h"
 
 namespace art {
 
@@ -135,10 +136,10 @@
   if (cur_quick_frame_ != NULL) {
     DCHECK(context_ != NULL);  // You can't reliably read registers without a context.
     DCHECK(m == GetMethod());
-    const VmapTable vmap_table(m->GetVmapTableRaw());
+    const VmapTable vmap_table(m->GetVmapTable());
     uint32_t vmap_offset;
     // TODO: IsInContext stops before spotting floating point registers.
-    if (vmap_table.IsInContext(vreg, vmap_offset, kind)) {
+    if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
       bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
       uint32_t spill_mask = is_float ? m->GetFpSpillMask()
                                      : m->GetCoreSpillMask();
@@ -160,10 +161,10 @@
   if (cur_quick_frame_ != NULL) {
     DCHECK(context_ != NULL);  // You can't reliably write registers without a context.
     DCHECK(m == GetMethod());
-    const VmapTable vmap_table(m->GetVmapTableRaw());
+    const VmapTable vmap_table(m->GetVmapTable());
     uint32_t vmap_offset;
     // TODO: IsInContext stops before spotting floating point registers.
-    if (vmap_table.IsInContext(vreg, vmap_offset, kind)) {
+    if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
       bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
       uint32_t spill_mask = is_float ? m->GetFpSpillMask() : m->GetCoreSpillMask();
       const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kReferenceVReg);
@@ -266,7 +267,11 @@
     // Frame sanity.
     size_t frame_size = method->GetFrameSizeInBytes();
     CHECK_NE(frame_size, 0u);
-    CHECK_LT(frame_size, 1024u);
+    // A rough guess at an upper bound on the frame size we expect to see. The
+    // 256 is a dex register limit. The 16 covers callee save spills and
+    // outgoing argument setup.
+    const size_t kMaxExpectedFrameSize = 256 * sizeof(word) + 16;
+    CHECK_LE(frame_size, kMaxExpectedFrameSize);
     size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
     CHECK_LT(return_pc_offset, frame_size);
   }
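
On a 32-bit target, where sizeof(word) is 4, the new bound evaluates to 256 * 4 + 16 = 1040 bytes, so frames that just exceeded the old hard-coded 1024-byte limit now pass; note the check also relaxes from CHECK_LT to CHECK_LE, admitting a frame of exactly the bound.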
@@ -304,7 +309,7 @@
         if (UNLIKELY(exit_stubs_installed)) {
           // While profiling, the return pc is restored from the side stack, except when walking
           // the stack for an exception where the side stack will be unwound in VisitFrame.
-          if (GetInstrumentationExitPc() == return_pc) {
+          if (GetQuickInstrumentationExitPc() == return_pc) {
             instrumentation::InstrumentationStackFrame instrumentation_frame =
                 GetInstrumentationStackFrame(instrumentation_stack_depth);
             instrumentation_stack_depth++;
diff --git a/runtime/stack.h b/runtime/stack.h
index de93846..388e401 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -571,78 +571,6 @@
   Context* const context_;
 };
 
-class VmapTable {
- public:
-  explicit VmapTable(const uint16_t* table) : table_(table) {
-  }
-
-  uint16_t operator[](size_t i) const {
-    return table_[i + 1];
-  }
-
-  size_t size() const {
-    return table_[0];
-  }
-
-  // Is the dex register 'vreg' in the context or on the stack? Should not be called when the
-  // 'kind' is unknown or constant.
-  bool IsInContext(size_t vreg, uint32_t& vmap_offset, VRegKind kind) const {
-    DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
-           kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
-           kind == kDoubleHiVReg || kind == kImpreciseConstant);
-    vmap_offset = 0xEBAD0FF5;
-    // TODO: take advantage of the registers being ordered
-    // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
-    //       are never promoted to floating point registers.
-    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
-    bool in_floats = false;
-    for (size_t i = 0; i < size(); ++i) {
-      // Stop if we find what we are are looking for.
-      if ((table_[i + 1] == vreg) && (in_floats == is_float)) {
-        vmap_offset = i;
-        return true;
-      }
-      // 0xffff is the marker for LR (return PC on x86), following it are spilled float registers.
-      if (table_[i + 1] == 0xffff) {
-        in_floats = true;
-      }
-    }
-    return false;
-  }
-
-  // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed
-  // by IsInContext above). If the kind is floating point then the result will be a floating point
-  // register number, otherwise it will be an integer register number.
-  uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const {
-    // Compute the register we need to load from the context.
-    DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
-           kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
-           kind == kDoubleHiVReg || kind == kImpreciseConstant);
-    // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
-    //       are never promoted to floating point registers.
-    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
-    uint32_t matches = 0;
-    if (is_float) {
-      while (table_[matches] != 0xffff) {
-        matches++;
-      }
-    }
-    CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
-    uint32_t spill_shifts = 0;
-    while (matches != (vmap_offset + 1)) {
-      DCHECK_NE(spill_mask, 0u);
-      matches += spill_mask & 1;  // Add 1 if the low bit is set
-      spill_mask >>= 1;
-      spill_shifts++;
-    }
-    spill_shifts--;  // wind back one as we want the last match
-    return spill_shifts;
-  }
-
- private:
-  const uint16_t* table_;
-};
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_STACK_H_
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 97a1410..07a003d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -66,6 +66,7 @@
 #include "utils.h"
 #include "verifier/dex_gc_map.h"
 #include "verifier/method_verifier.h"
+#include "vmap_table.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -86,23 +87,25 @@
 }
 #endif
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints);
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);
 
-void Thread::InitFunctionPointers() {
+void Thread::InitTlsEntryPoints() {
 #if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
   // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
-  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&quick_entrypoints_);
+  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
   uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
   for (uintptr_t* it = begin; it != end; ++it) {
     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
   }
-  begin = reinterpret_cast<uintptr_t*>(&portable_entrypoints_);
+  begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
   end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
   for (uintptr_t* it = begin; it != end; ++it) {
     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
   }
 #endif
-  InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_);
+  InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_,
+                  &quick_entrypoints_);
 }
 
 void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
@@ -292,7 +295,7 @@
   CHECK(Thread::Current() == NULL);
   SetUpAlternateSignalStack();
   InitCpu();
-  InitFunctionPointers();
+  InitTlsEntryPoints();
   InitCardTable();
   InitTid();
   // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
@@ -1589,22 +1592,29 @@
   uint32_t offset;
   const char* name;
 };
-#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x }
-#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x }
+#define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
+#define JNI_ENTRY_POINT_INFO(x)         { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
+#define PORTABLE_ENTRY_POINT_INFO(x)    { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
+#define QUICK_ENTRY_POINT_INFO(x)       { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
 static const EntryPointInfo gThreadEntryPointInfo[] = {
-  QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode),
-  QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode),
-  QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
-  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
-  QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
-  QUICK_ENTRY_POINT_INFO(pCheckCastFromCode),
+  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
+  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
+  JNI_ENTRY_POINT_INFO(pDlsymLookup),
+  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
+  PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
+  QUICK_ENTRY_POINT_INFO(pAllocArray),
+  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
+  QUICK_ENTRY_POINT_INFO(pAllocObject),
+  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
+  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
+  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
+  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
+  QUICK_ENTRY_POINT_INFO(pCanPutArrayElement),
+  QUICK_ENTRY_POINT_INFO(pCheckCast),
   QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
-  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
-  QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode),
-  QUICK_ENTRY_POINT_INFO(pResolveStringFromCode),
+  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
+  QUICK_ENTRY_POINT_INFO(pInitializeType),
+  QUICK_ENTRY_POINT_INFO(pResolveString),
   QUICK_ENTRY_POINT_INFO(pSet32Instance),
   QUICK_ENTRY_POINT_INFO(pSet32Static),
   QUICK_ENTRY_POINT_INFO(pSet64Instance),
@@ -1617,15 +1627,15 @@
   QUICK_ENTRY_POINT_INFO(pGet64Static),
   QUICK_ENTRY_POINT_INFO(pGetObjInstance),
   QUICK_ENTRY_POINT_INFO(pGetObjStatic),
-  QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
+  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
   QUICK_ENTRY_POINT_INFO(pJniMethodStart),
   QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
   QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
   QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
   QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
   QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
-  QUICK_ENTRY_POINT_INFO(pLockObjectFromCode),
-  QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode),
+  QUICK_ENTRY_POINT_INFO(pLockObject),
+  QUICK_ENTRY_POINT_INFO(pUnlockObject),
   QUICK_ENTRY_POINT_INFO(pCmpgDouble),
   QUICK_ENTRY_POINT_INFO(pCmpgFloat),
   QUICK_ENTRY_POINT_INFO(pCmplDouble),
@@ -1646,28 +1656,26 @@
   QUICK_ENTRY_POINT_INFO(pShlLong),
   QUICK_ENTRY_POINT_INFO(pShrLong),
   QUICK_ENTRY_POINT_INFO(pUshrLong),
-  QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
-  QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry),
   QUICK_ENTRY_POINT_INFO(pIndexOf),
   QUICK_ENTRY_POINT_INFO(pMemcmp16),
   QUICK_ENTRY_POINT_INFO(pStringCompareTo),
   QUICK_ENTRY_POINT_INFO(pMemcpy),
-  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
+  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
+  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
   QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode),
-  QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode),
+  QUICK_ENTRY_POINT_INFO(pCheckSuspend),
+  QUICK_ENTRY_POINT_INFO(pTestSuspend),
   QUICK_ENTRY_POINT_INFO(pDeliverException),
-  QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
-  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
+  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds),
+  QUICK_ENTRY_POINT_INFO(pThrowDivZero),
+  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
+  QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
+  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
 };
 #undef QUICK_ENTRY_POINT_INFO
 
@@ -1695,8 +1703,9 @@
 
   size_t entry_point_count = arraysize(gThreadEntryPointInfo);
   CHECK_EQ(entry_point_count * size_of_pointers,
-           sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints));
-  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_);
+           sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) +
+           sizeof(QuickEntryPoints));
+  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_);
   for (size_t i = 0; i < entry_point_count; ++i) {
     CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
     expected_offset += size_of_pointers;
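
The offset walk above only holds if the four entrypoint structs sit contiguously in Thread, declared in the order interpreter, jni, portable, quick (see the thread.h hunk below). A sketch of that assumption, reusing the same OFFSETOF_MEMBER macro (illustrative, not part of the change):

  CHECK_EQ(OFFSETOF_MEMBER(Thread, jni_entrypoints_),
           OFFSETOF_MEMBER(Thread, interpreter_entrypoints_) + sizeof(InterpreterEntryPoints));
  CHECK_EQ(OFFSETOF_MEMBER(Thread, portable_entrypoints_),
           OFFSETOF_MEMBER(Thread, jni_entrypoints_) + sizeof(JniEntryPoints));
  CHECK_EQ(OFFSETOF_MEMBER(Thread, quick_entrypoints_),
           OFFSETOF_MEMBER(Thread, portable_entrypoints_) + sizeof(PortableEntryPoints));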
@@ -1739,7 +1748,7 @@
       return false;  // End stack walk.
     } else {
       if (UNLIKELY(method_tracing_active_ &&
-                   GetInstrumentationExitPc() == GetReturnPc())) {
+                   GetQuickInstrumentationExitPc() == GetReturnPc())) {
         // Keep count of the number of unwinds during instrumentation.
         instrumentation_frames_to_pop_++;
       }
@@ -2035,7 +2044,7 @@
         if (num_regs > 0) {
           const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
           DCHECK(reg_bitmap != NULL);
-          const VmapTable vmap_table(m->GetVmapTableRaw());
+          const VmapTable vmap_table(m->GetVmapTable());
           uint32_t core_spills = m->GetCoreSpillMask();
           uint32_t fp_spills = m->GetFpSpillMask();
           size_t frame_size = m->GetFrameSizeInBytes();
@@ -2047,7 +2056,7 @@
             if (TestBitmap(reg, reg_bitmap)) {
               uint32_t vmap_offset;
               mirror::Object* ref;
-              if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) {
+              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
                 uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
                                                                   kReferenceVReg));
                 ref = reinterpret_cast<mirror::Object*>(val);
diff --git a/runtime/thread.h b/runtime/thread.h
index ff0fe22..8b6771e 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -26,6 +26,8 @@
 #include <string>
 
 #include "base/macros.h"
+#include "entrypoints/interpreter/interpreter_entrypoints.h"
+#include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/portable/portable_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "globals.h"
@@ -43,17 +45,17 @@
 namespace art {
 
 namespace mirror {
-class AbstractMethod;
-class Array;
-class Class;
-class ClassLoader;
-class Object;
-template<class T> class ObjectArray;
-template<class T> class PrimitiveArray;
-typedef PrimitiveArray<int32_t> IntArray;
-class StackTraceElement;
-class StaticStorageBase;
-class Throwable;
+  class AbstractMethod;
+  class Array;
+  class Class;
+  class ClassLoader;
+  class Object;
+  template<class T> class ObjectArray;
+  template<class T> class PrimitiveArray;
+  typedef PrimitiveArray<int32_t> IntArray;
+  class StackTraceElement;
+  class StaticStorageBase;
+  class Throwable;
 }  // namespace mirror
 class BaseMutex;
 class ClassLinker;
@@ -614,7 +616,7 @@
   void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
   void InitCardTable();
   void InitCpu();
-  void InitFunctionPointers();
+  void InitTlsEntryPoints();
   void InitTid();
   void InitPthreadKeySelf();
   void InitStackHwm();
@@ -776,8 +778,10 @@
  public:
   // Entrypoint function pointers
   // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
-  QuickEntryPoints quick_entrypoints_;
+  InterpreterEntryPoints interpreter_entrypoints_;
+  JniEntryPoints jni_entrypoints_;
   PortableEntryPoints portable_entrypoints_;
+  QuickEntryPoints quick_entrypoints_;
 
  private:
   // How many times has our pthread key's destructor been called?
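
Compiled code reaches these structs through fixed Thread-relative offsets (the *_ENTRYPOINT_OFFSET macros used in thread.cc above), so placing the interpreter and JNI tables ahead of quick_entrypoints_ shifts every quick entry point; that is why the TODO warns that changing this region forces oat files to be recompiled. Roughly, as a sketch of the offset math rather than the literal macro:

  // QUICK_ENTRYPOINT_OFFSET(pMemcpy) must now resolve to:
  //   OFFSETOF_MEMBER(Thread, quick_entrypoints_) + OFFSETOF_MEMBER(QuickEntryPoints, pMemcpy)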
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f1de565..eb6e3c3 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2507,8 +2507,8 @@
 
     // Special instructions.
     case Instruction::RETURN_VOID_BARRIER:
-      DCHECK(Runtime::Current()->IsStarted());
-      if (!IsConstructor()) {
+      DCHECK(Runtime::Current()->IsStarted()) << PrettyMethod(dex_method_idx_, *dex_file_);
+      if (!IsConstructor() || IsStatic()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-barrier not expected";
       }
       break;
@@ -2819,8 +2819,10 @@
     dex_cache_->SetResolvedType(class_idx, result.GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
-  // check at runtime if access is allowed and so pass here.
-  if (!result.IsUnresolvedTypes() && !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
+  // check at runtime if access is allowed and so pass here. If result is
+  // primitive, skip the access check.
+  if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
+      !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
     Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
                                     << referrer << "' -> '" << result << "'";
   }
@@ -3297,6 +3299,43 @@
   }
 }
 
+void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+                                        const uint32_t vregA) {
+  // Primitive assignability rules are weaker than regular assignability rules.
+  bool instruction_compatible;
+  bool value_compatible;
+  const RegType& value_type = work_line_->GetRegisterType(vregA);
+  if (target_type.IsIntegralTypes()) {
+    instruction_compatible = target_type.Equals(insn_type);
+    value_compatible = value_type.IsIntegralTypes();
+  } else if (target_type.IsFloat()) {
+    instruction_compatible = insn_type.IsInteger();  // no put-float, so expect put-int
+    value_compatible = value_type.IsFloatTypes();
+  } else if (target_type.IsLong()) {
+    instruction_compatible = insn_type.IsLong();
+    value_compatible = value_type.IsLongTypes();
+  } else if (target_type.IsDouble()) {
+    instruction_compatible = insn_type.IsLong();  // no put-double, so expect put-long
+    value_compatible = value_type.IsDoubleTypes();
+  } else {
+    instruction_compatible = false;  // reference with primitive store
+    value_compatible = false;  // unused
+  }
+  if (!instruction_compatible) {
+    // This is a global failure rather than a class change failure as the instructions and
+    // the descriptors for the type should have been consistent within the same file at
+    // compile time.
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "put insn has type '" << insn_type
+        << "' but expected type '" << target_type << "'";
+    return;
+  }
+  if (!value_compatible) {
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
+        << " of type " << value_type << " but expected " << target_type << " for put";
+    return;
+  }
+}
+
 void MethodVerifier::VerifyAPut(const Instruction* inst,
                              const RegType& insn_type, bool is_primitive) {
   const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
@@ -3310,25 +3349,20 @@
     } else if (!array_type.IsArrayTypes()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
     } else {
-      /* verify the class */
       const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
-      if (!component_type.IsReferenceTypes() && !is_primitive) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
-            << " source for aput-object";
-      } else if (component_type.IsNonZeroReferenceTypes() && is_primitive) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "reference array type " << array_type
-            << " source for category 1 aput";
-      } else if (is_primitive && !insn_type.Equals(component_type) &&
-                 !((insn_type.IsInteger() && component_type.IsFloat()) ||
-                   (insn_type.IsLong() && component_type.IsDouble()))) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array type " << array_type
-            << " incompatible with aput of type " << insn_type;
+      const uint32_t vregA = inst->VRegA_23x();
+      if (is_primitive) {
+        VerifyPrimitivePut(component_type, insn_type, vregA);
       } else {
-        // The instruction agrees with the type of array, confirm the value to be stored does too
-        // Note: we use the instruction type (rather than the component type) for aput-object as
-        // incompatible classes will be caught at runtime as an array store exception
-        work_line_->VerifyRegisterType(inst->VRegA_23x(),
-                                       is_primitive ? component_type : insn_type);
+        if (!component_type.IsReferenceTypes()) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
+              << " source for aput-object";
+        } else {
+          // The instruction agrees with the type of array; confirm the value to be stored does too.
+          // Note: we use the instruction type (rather than the component type) for aput-object as
+          // incompatible classes will be caught at runtime as an array store exception
+          work_line_->VerifyRegisterType(vregA, insn_type);
+        }
       }
     }
   }
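
A worked case for the primitive path above (hypothetical dex, for illustration): dex has no aput-float, so a 32-bit float store uses plain aput, which the verifier models with an Integer insn_type.

  // const v0, 0x3f800000   # v0 := bit pattern of 1.0f
  // aput v0, v1, v2        # v1: float[], v2: index
  // component_type is Float and insn_type is Integer, so VerifyPrimitivePut
  // takes the IsFloat() branch: instruction_compatible = insn_type.IsInteger()
  // -> true, and value_compatible requires the value in v0 to satisfy
  // IsFloatTypes().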
@@ -3458,8 +3492,8 @@
   const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
   if (is_primitive) {
     if (field_type.Equals(insn_type) ||
-        (field_type.IsFloat() && insn_type.IsIntegralTypes()) ||
-        (field_type.IsDouble() && insn_type.IsLongTypes())) {
+        (field_type.IsFloat() && insn_type.IsInteger()) ||
+        (field_type.IsDouble() && insn_type.IsLong())) {
       // expected that read is of the correct primitive type or that int reads are reading
       // floats or long reads are reading doubles
     } else {
@@ -3518,43 +3552,7 @@
   }
   const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
   if (is_primitive) {
-    // Primitive field assignability rules are weaker than regular assignability rules
-    bool instruction_compatible;
-    bool value_compatible;
-    const RegType& value_type = work_line_->GetRegisterType(vregA);
-    if (field_type.IsIntegralTypes()) {
-      instruction_compatible = insn_type.IsIntegralTypes();
-      value_compatible = value_type.IsIntegralTypes();
-    } else if (field_type.IsFloat()) {
-      instruction_compatible = insn_type.IsInteger();  // no [is]put-float, so expect [is]put-int
-      value_compatible = value_type.IsFloatTypes();
-    } else if (field_type.IsLong()) {
-      instruction_compatible = insn_type.IsLong();
-      value_compatible = value_type.IsLongTypes();
-    } else if (field_type.IsDouble()) {
-      instruction_compatible = insn_type.IsLong();  // no [is]put-double, so expect [is]put-long
-      value_compatible = value_type.IsDoubleTypes();
-    } else {
-      instruction_compatible = false;  // reference field with primitive store
-      value_compatible = false;  // unused
-    }
-    if (!instruction_compatible) {
-      // This is a global failure rather than a class change failure as the instructions and
-      // the descriptors for the type should have been consistent within the same file at
-      // compile time
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
-                                        << " to be of type '" << insn_type
-                                        << "' but found type '" << field_type
-                                        << "' in put";
-      return;
-    }
-    if (!value_compatible) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
-          << " of type " << value_type
-          << " but expected " << field_type
-          << " for store to " << PrettyField(field) << " in put";
-      return;
-    }
+    VerifyPrimitivePut(field_type, insn_type, vregA);
   } else {
     if (!insn_type.IsAssignableFrom(field_type)) {
       Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
@@ -3756,6 +3754,10 @@
     if (!insn_flags_[next_insn].IsReturn()) {
       target_line->CopyFromLine(merge_line);
     } else {
+      // Verify that the monitor stack is empty on return.
+      if (!merge_line->VerifyMonitorStackEmpty()) {
+        return false;
+      }
       // For returns we only care about the operand to the return, all other registers are dead.
       // Initialize them as conflicts so they don't add to GC and deoptimization information.
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
@@ -4061,20 +4063,19 @@
 
 void  MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) {
   DCHECK(Runtime::Current()->IsCompiler());
-  MutexLock mu(Thread::Current(), *safecast_map_lock_);
+  WriterMutexLock mu(Thread::Current(), *safecast_map_lock_);
   SafeCastMap::iterator it = safecast_map_->find(ref);
   if (it != safecast_map_->end()) {
     delete it->second;
     safecast_map_->erase(it);
   }
-
   safecast_map_->Put(ref, cast_set);
   DCHECK(safecast_map_->find(ref) != safecast_map_->end());
 }
 
 bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) {
   DCHECK(Runtime::Current()->IsCompiler());
-  MutexLock mu(Thread::Current(), *safecast_map_lock_);
+  ReaderMutexLock mu(Thread::Current(), *safecast_map_lock_);
   SafeCastMap::const_iterator it = safecast_map_->find(ref);
   if (it == safecast_map_->end()) {
     return false;
@@ -4186,7 +4187,7 @@
 ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL;
 MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL;
 
-Mutex* MethodVerifier::safecast_map_lock_ = NULL;
+ReaderWriterMutex* MethodVerifier::safecast_map_lock_ = NULL;
 MethodVerifier::SafeCastMap* MethodVerifier::safecast_map_ = NULL;
 
 ReaderWriterMutex* MethodVerifier::devirt_maps_lock_ = NULL;
@@ -4204,9 +4205,9 @@
       dex_gc_maps_ = new MethodVerifier::DexGcMapTable;
     }
 
-    safecast_map_lock_ = new Mutex("verifier Cast Elision lock");
+    safecast_map_lock_ = new ReaderWriterMutex("verifier Cast Elision lock");
     {
-      MutexLock mu(self, *safecast_map_lock_);
+      WriterMutexLock mu(self, *safecast_map_lock_);
       safecast_map_ = new MethodVerifier::SafeCastMap();
     }
 
@@ -4239,7 +4240,7 @@
     dex_gc_maps_lock_ = NULL;
 
     {
-      MutexLock mu(self, *safecast_map_lock_);
+      WriterMutexLock mu(self, *safecast_map_lock_);
       STLDeleteValues(safecast_map_);
       delete safecast_map_;
       safecast_map_ = NULL;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 3f98a00..e01f2c0 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,6 +230,10 @@
                  uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
           SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  ~MethodVerifier() {
+    STLDeleteElements(&failure_messages_);
+  }
+
   // Run verification on the method. Returns true if verification completes and false if the input
   // has an irrecoverable corruption.
   bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -476,6 +480,10 @@
   void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Helper to perform verification on puts of primitive type.
+  void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+                          const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Perform verification of an aget instruction. The destination register's type will be set to
   // be that of component type of the array unless the array type is unknown, in which case a
   // bottom type inferred from the type of instruction is used. is_primitive is false for an
@@ -640,7 +648,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* mscs)
       LOCKS_EXCLUDED(safecast_map_lock_);
-  static Mutex* safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  static ReaderWriterMutex* safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   static SafeCastMap* safecast_map_ GUARDED_BY(safecast_map_lock_);
 
   // Devirtualization map.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 7965c06..24a626b 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -38,7 +38,7 @@
 bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
   DCHECK_LT(vdst, num_regs_);
   if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Expected category1 register type not '"
+    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
         << new_type << "'";
     return false;
   } else if (new_type.IsConflict()) {  // should only be set during a merge
@@ -448,7 +448,7 @@
   }
 }
 
-bool RegisterLine::VerifyMonitorStackEmpty() {
+bool RegisterLine::VerifyMonitorStackEmpty() const {
   if (MonitorStackDepth() != 0) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
     return false;
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index f380877..f19dcca 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -268,7 +268,7 @@
 
   // We expect no monitors to be held at certain points, such as when a method returns. Verify
   // the stack is empty, failing and returning false if not.
-  bool VerifyMonitorStackEmpty();
+  bool VerifyMonitorStackEmpty() const;
 
   bool MergeRegisters(const RegisterLine* incoming_line)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/vmap_table.h b/runtime/vmap_table.h
new file mode 100644
index 0000000..abc50b9
--- /dev/null
+++ b/runtime/vmap_table.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VMAP_TABLE_H_
+#define ART_RUNTIME_VMAP_TABLE_H_
+
+#include "base/logging.h"
+#include "leb128.h"
+#include "stack.h"
+
+namespace art {
+
+class VmapTable {
+ public:
+  explicit VmapTable(const uint8_t* table) : table_(table) {
+  }
+
+  // Look up the nth entry; not called from performance-critical code.
+  uint16_t operator[](size_t n) const {
+    const uint8_t* table = table_;
+    size_t size = DecodeUnsignedLeb128(&table);
+    CHECK_LT(n, size);
+    uint16_t entry = DecodeUnsignedLeb128(&table);
+    for (size_t i = 0; i < n; ++i) {
+      entry = DecodeUnsignedLeb128(&table);
+    }
+    return entry;
+  }
+
+  size_t Size() const {
+    const uint8_t* table = table_;
+    return DecodeUnsignedLeb128(&table);
+  }
+
+  // Is the dex register 'vreg' in the context or on the stack? Should not be called when the
+  // 'kind' is unknown or constant.
+  bool IsInContext(size_t vreg, VRegKind kind, uint32_t* vmap_offset) const {
+    DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
+           kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
+           kind == kDoubleHiVReg || kind == kImpreciseConstant);
+    *vmap_offset = 0xEBAD0FF5;
+    // TODO: take advantage of the registers being ordered
+    // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
+    //       are never promoted to floating point registers.
+    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+    bool in_floats = false;
+    const uint8_t* table = table_;
+    size_t end = DecodeUnsignedLeb128(&table);
+    for (size_t i = 0; i < end; ++i) {
+      // Stop if we find what we are looking for.
+      uint16_t entry = DecodeUnsignedLeb128(&table);
+      if ((entry == vreg) && (in_floats == is_float)) {
+        *vmap_offset = i;
+        return true;
+      }
+      // 0xffff is the marker for LR (the return PC on x86); entries after it are spilled float registers.
+      if (entry == 0xffff) {
+        in_floats = true;
+      }
+    }
+    return false;
+  }
+
+  // Compute the register number that corresponds to the entry in the vmap (vmap_offset, computed
+  // by IsInContext above). If the kind is floating point then the result will be a floating point
+  // register number, otherwise it will be an integer register number.
+  uint32_t ComputeRegister(uint32_t spill_mask, uint32_t vmap_offset, VRegKind kind) const {
+    // Compute the register we need to load from the context.
+    DCHECK(kind == kReferenceVReg || kind == kIntVReg || kind == kFloatVReg ||
+           kind == kLongLoVReg || kind == kLongHiVReg || kind == kDoubleLoVReg ||
+           kind == kDoubleHiVReg || kind == kImpreciseConstant);
+    // TODO: we treat kImpreciseConstant as an integer below, need to ensure that such values
+    //       are never promoted to floating point registers.
+    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
+    uint32_t matches = 0;
+    if (UNLIKELY(is_float)) {
+      const uint8_t* table = table_;
+      DecodeUnsignedLeb128(&table);  // Skip size.
+      while (DecodeUnsignedLeb128(&table) != 0xffff) {
+        matches++;
+      }
+      matches++;
+    }
+    CHECK_LT(vmap_offset - matches, static_cast<uint32_t>(__builtin_popcount(spill_mask)));
+    uint32_t spill_shifts = 0;
+    while (matches != (vmap_offset + 1)) {
+      DCHECK_NE(spill_mask, 0u);
+      matches += spill_mask & 1;  // Add 1 if the low bit is set
+      spill_mask >>= 1;
+      spill_shifts++;
+    }
+    spill_shifts--;  // wind back one as we want the last match
+    return spill_shifts;
+  }
+
+ private:
+  const uint8_t* const table_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_VMAP_TABLE_H_
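
A worked use of the new class (hypothetical data, mirroring the callers in stack.cc and thread.cc): suppose a method spills r5, r6 and r14 (LR), with dex registers v1 and v3 promoted to r5 and r6.

  // ULEB128 table: size=3, entries {1, 3, 0xffff}; 0xffff marks LR.
  static const uint8_t kTable[] = { 0x03, 0x01, 0x03, 0xff, 0xff, 0x03 };
  VmapTable vmap_table(kTable);
  uint32_t vmap_offset;
  CHECK(vmap_table.IsInContext(3, kIntVReg, &vmap_offset));  // finds v3, vmap_offset == 1
  uint32_t spill_mask = (1u << 5) | (1u << 6) | (1u << 14);  // r5, r6, r14
  // vmap_offset 1 selects the second set bit of spill_mask: register 6 (r6).
  CHECK_EQ(6u, vmap_table.ComputeRegister(spill_mask, vmap_offset, kIntVReg));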
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 3b5d80d..84f5f2e 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -73,31 +73,31 @@
     // we know the Dex registers with live reference values. Assert that what we
     // find is what is expected.
     if (m_name.compare("f") == 0) {
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x03U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x03U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8);  // v8: this
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x06U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x06U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 1);  // v8: this, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x08U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x08U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x0cU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0cU)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x0eU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0eU)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x10U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x10U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x13U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x13U)));
       CHECK(ref_bitmap);
       // v2 is added because of the instruction at DexPC 0024. Merging Object with 0 yields Object. See:
       //   0024: move-object v3, v2
@@ -107,45 +107,45 @@
       // We eliminate the non-live registers at a return, so only v3 is live:
       CHECK_REGS_CONTAIN_REFS(3);  // v3: y
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x18U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x18U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x1aU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1aU)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x1dU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1dU)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x1fU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1fU)));
       CHECK(ref_bitmap);
       // v5 is removed from the root set because there is a "merge" operation.
       // See 0015: if-nez v2, 001f.
       CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x21U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x21U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x27U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x27U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x29U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x29U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x2cU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2cU)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x2fU)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2fU)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 4, 3, 2, 1);  // v8: this, v4: ex, v3: y, v2: y, v1: x
 
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x32U)));
+      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x32U)));
       CHECK(ref_bitmap);
       CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1, 0);  // v8: this, v3: y, v2: y, v1: x, v0: ex
     }