Merge "Slow path should break def tracking"
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 3897401..ee72706 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -44,15 +44,6 @@
 # Failing valgrind tests.
 # Note: *all* 64b tests involving the runtime do not work currently. b/15170219.
 
-# Optimizing compiler codegen is not destructed and can leak non-arena-ed structures.
-ART_TEST_KNOWN_BROKEN += \
-  valgrind-test-art-host-gtest-codegen_test32 \
-  valgrind-test-art-host-gtest-find_loops_test32 \
-  valgrind-test-art-host-gtest-linearize_test32 \
-  valgrind-test-art-host-gtest-live_ranges_test32 \
-  valgrind-test-art-host-gtest-liveness_test32 \
-  valgrind-test-art-host-gtest-register_allocator_test32
-
# List of known failing tests whose execution won't prevent the test run from finishing.
 # The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
 ART_TEST_KNOWN_FAILING :=
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 700bcf0..d93d6dc 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -79,6 +79,7 @@
   runtime/base/histogram_test.cc \
   runtime/base/mutex_test.cc \
   runtime/base/scoped_flock_test.cc \
+  runtime/base/stringprintf_test.cc \
   runtime/base/timing_logger_test.cc \
   runtime/base/unix_file/fd_file_test.cc \
   runtime/base/unix_file/mapped_file_test.cc \
@@ -139,9 +140,13 @@
   compiler/jni/jni_compiler_test.cc \
   compiler/oat_test.cc \
   compiler/optimizing/codegen_test.cc \
+  compiler/optimizing/dead_code_elimination_test.cc \
+  compiler/optimizing/constant_propagation_test.cc \
   compiler/optimizing/dominator_test.cc \
   compiler/optimizing/find_loops_test.cc \
+  compiler/optimizing/graph_checker_test.cc \
   compiler/optimizing/graph_test.cc \
+  compiler/optimizing/gvn_test.cc \
   compiler/optimizing/linearize_test.cc \
   compiler/optimizing/liveness_test.cc \
   compiler/optimizing/live_interval_test.cc \
@@ -151,6 +156,7 @@
   compiler/optimizing/register_allocator_test.cc \
   compiler/optimizing/ssa_test.cc \
   compiler/optimizing/stack_map_test.cc \
+  compiler/optimizing/suspend_check_test.cc \
   compiler/output_stream_test.cc \
   compiler/utils/arena_allocator_test.cc \
   compiler/utils/dedupe_set_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 6e48bdf..7ac1c6b 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -90,7 +90,11 @@
 	optimizing/code_generator_arm.cc \
 	optimizing/code_generator_x86.cc \
 	optimizing/code_generator_x86_64.cc \
+	optimizing/constant_propagation.cc \
+	optimizing/dead_code_elimination.cc \
+	optimizing/graph_checker.cc \
 	optimizing/graph_visualizer.cc \
+	optimizing/gvn.cc \
 	optimizing/locations.cc \
 	optimizing/nodes.cc \
 	optimizing/optimizing_compiler.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index db9dcd4..fbaed9f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -367,7 +367,7 @@
   MakeExecutable(method);
 }
 
-void CommonCompilerTest::CompileDirectMethod(ConstHandle<mirror::ClassLoader> class_loader,
+void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_loader,
                                              const char* class_name, const char* method_name,
                                              const char* signature) {
   std::string class_descriptor(DotToDescriptor(class_name));
@@ -380,7 +380,7 @@
   CompileMethod(method);
 }
 
-void CommonCompilerTest::CompileVirtualMethod(ConstHandle<mirror::ClassLoader> class_loader,
+void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader,
                                               const char* class_name, const char* method_name,
                                               const char* signature) {
   std::string class_descriptor(DotToDescriptor(class_name));
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 4e74f0a..df06b71 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -63,11 +63,11 @@
 
   void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CompileDirectMethod(ConstHandle<mirror::ClassLoader> class_loader, const char* class_name,
+  void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                            const char* method_name, const char* signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CompileVirtualMethod(ConstHandle<mirror::ClassLoader> class_loader, const char* class_name,
+  void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                             const char* method_name, const char* signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 3e34144..cc46b92 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -154,7 +154,7 @@
       // get rid of the highest values
       size_t i = size() - 1;
       for (; i > 0 ; i--) {
-        if ((*this)[i].from_ >= highest_pc) {
+        if ((*this)[i].from_ < highest_pc) {
           break;
         }
       }
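Note on the hunk above: the flipped comparison is the entire fix. The loop walks the sorted SrcMap from the top and must stop at the first entry whose from_ offset is already below highest_pc, so that only the out-of-range tail gets dropped. A minimal standalone sketch of that trimming, with a hypothetical SrcMapElem reduced to its from_ field:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for the real SrcMapElem; only from_ matters here.
    struct SrcMapElem { uint32_t from_; };

    // Drop trailing entries whose from_ is at or above highest_pc.
    // Assumes a non-empty map sorted by from_, as the surrounding code does.
    void TrimAbove(std::vector<SrcMapElem>* map, uint32_t highest_pc) {
      size_t i = map->size() - 1;
      for (; i > 0; i--) {
        if ((*map)[i].from_ < highest_pc) {
          break;  // First in-range entry; keep everything up to and including it.
        }
      }
      map->resize(i + 1);  // Like the original, entry 0 is always kept.
    }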
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index b267841..07f3033 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -82,11 +82,22 @@
                                      jobject class_loader, const DexFile& dex_file,
                                      void* llvm_compilation_unit) {
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
-  if (code_item->insns_size_in_code_units_ >= 0x10000) {
-    LOG(INFO) << "Method size exceeds compiler limits: " << code_item->insns_size_in_code_units_
+  /*
+   * Skip compilation for pathologically large methods, whether by instruction count or by
+   * number of virtual registers. Dalvik uses 16-bit uints for instruction and register counts;
+   * we limit each to a quarter of that, which also guarantees we cannot overflow our 16-bit
+   * internal SSA name space.
+   */
+  if (code_item->insns_size_in_code_units_ >= UINT16_MAX / 4) {
+    LOG(INFO) << "Method exceeds compiler instruction limit: "
+              << code_item->insns_size_in_code_units_
               << " in " << PrettyMethod(method_idx, dex_file);
     return NULL;
   }
+  if (code_item->registers_size_ >= UINT16_MAX / 4) {
+    LOG(INFO) << "Method exceeds compiler virtual register limit: "
+              << code_item->registers_size_ << " in " << PrettyMethod(method_idx, dex_file);
+    return NULL;
+  }
 
   if (!driver.GetCompilerOptions().IsCompilationEnabled()) {
     return nullptr;
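For scale, UINT16_MAX / 4 is 16383, so both new checks reject methods with 16383 or more code units or virtual registers. A hedged restatement of the limits as a standalone predicate (the constant and function names are invented for illustration):

    #include <cstdint>

    constexpr uint32_t kMaxCodeUnits = UINT16_MAX / 4;  // 16383
    constexpr uint32_t kMaxVRegs = UINT16_MAX / 4;      // 16383

    // Compile only when both counts stay under a quarter of the 16-bit range,
    // which keeps the 16-bit internal SSA name space from overflowing.
    bool WithinCompilerLimits(uint32_t insns_size_in_code_units, uint32_t registers_size) {
      return insns_size_in_code_units < kMaxCodeUnits && registers_size < kMaxVRegs;
    }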
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index b265ee7..9fa5fac 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1094,6 +1094,7 @@
       default_cutoff = compiler_options.GetSmallMethodThreshold();
       break;
     case CompilerOptions::kSpeed:
+    case CompilerOptions::kTime:
       small_cutoff = compiler_options.GetHugeMethodThreshold();
       default_cutoff = compiler_options.GetHugeMethodThreshold();
       break;
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 62a8f26..ce56255 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -2420,9 +2420,9 @@
     case kMirOpDivZeroCheck:
       return Instruction::kContinue | Instruction::kThrow;
     case kMirOpCheck:
-      return 0;
+      return Instruction::kContinue | Instruction::kThrow;
     case kMirOpCheckPart2:
-      return 0;
+      return Instruction::kContinue;
     case kMirOpSelect:
       return Instruction::kContinue;
     case kMirOpConstVector:
@@ -2457,6 +2457,12 @@
       return Instruction::kContinue;
     case kMirOpReturnVectorRegisters:
       return Instruction::kContinue;
+    case kMirOpMemBarrier:
+      return Instruction::kContinue;
+    case kMirOpPackedArrayGet:
+      return Instruction::kContinue | Instruction::kThrow;
+    case kMirOpPackedArrayPut:
+      return Instruction::kContinue | Instruction::kThrow;
     default:
       LOG(WARNING) << "ExtendedFlagsOf: Unhandled case: " << static_cast<int> (opcode);
       return 0;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 7ac878f..fdabc3e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1360,11 +1360,20 @@
     if (!method_info.FastPath()) {
       continue;
     }
+
     InvokeType sharp_type = method_info.GetSharpType();
-    if ((sharp_type != kDirect) &&
-        (sharp_type != kStatic || method_info.NeedsClassInitialization())) {
+    if ((sharp_type != kDirect) && (sharp_type != kStatic)) {
       continue;
     }
+
+    if (sharp_type == kStatic) {
+      bool needs_clinit = method_info.NeedsClassInitialization() &&
+          ((mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0);
+      if (needs_clinit) {
+        continue;
+      }
+    }
+
     DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
     MethodReference target = method_info.GetTargetMethod();
     if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
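The restructured check above separates the two reasons to skip a fast-path invoke: only direct and static calls are candidates at all, and a static call additionally drops out when its class still needs initialization and no earlier pass marked the clinit check as ignorable. A sketch of that gate in isolation (the flag's bit value here is illustrative, not the real one from mir_graph.h):

    #include <cstdint>

    constexpr uint32_t kMirIgnoreClinitCheck = 1u << 6;  // Illustrative bit only.

    // A static invoke still blocks inlining if the class needs initialization
    // and no earlier pass proved the clinit check unnecessary for this MIR.
    bool BlockedByClinit(bool needs_class_initialization, uint32_t optimization_flags) {
      return needs_class_initialization &&
             (optimization_flags & kMirIgnoreClinitCheck) == 0;
    }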
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index b9a17cc..0de2a44 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1039,6 +1039,7 @@
   jmp_to_ret->target = return_point;
 
   AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
 
   return true;
 }
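This one-line addition, repeated in several intrinsic generators below, is the change named in the merge subject: once an intrinsic has a slow path that calls into the runtime and branches back to the return point, the backend's def tracking must stop trusting caller-save registers across that point. A toy model of the bookkeeping, assuming defs are tracked as a plain bitmask (the real Mir2Lir state is richer):

    #include <bitset>

    struct RegTracker {
      std::bitset<32> live_defs;  // Registers believed to still hold tracked values.

      // A slow-path runtime call may overwrite any caller-save register,
      // so every def cached in one of them is invalidated on return.
      void ClobberCallerSave(const std::bitset<32>& caller_save_mask) {
        live_defs &= ~caller_save_mask;
      }
    };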
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index 5d63dd0..a39d151 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -426,11 +426,12 @@
   RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
   rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  RegStorage r_imm_point5 = (is_double) ? AllocTempDouble() : AllocTempSingle();
   RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
   // 0.5f and 0.5d are encoded in the same way.
-  NewLIR2(kA64Fmov2fI | wide, r_tmp.GetReg(), encoded_imm);
-  NewLIR3(kA64Fadd3fff | wide, rl_src.reg.GetReg(), rl_src.reg.GetReg(), r_tmp.GetReg());
-  NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  NewLIR2(kA64Fmov2fI | wide, r_imm_point5.GetReg(), encoded_imm);
+  NewLIR3(kA64Fadd3fff | wide, r_tmp.GetReg(), rl_src.reg.GetReg(), r_imm_point5.GetReg());
+  NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), r_tmp.GetReg());
   (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
   return true;
 }
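The bug fixed above is a clobber, not the arithmetic: the old sequence accumulated x + 0.5 back into rl_src.reg, destroying a source value the tracker may still consider live, so the constant and the sum now go through fresh temporaries. The emitted math is unchanged and behaves roughly like this plain C++ sketch (fcvtms converts to integer rounding toward minus infinity):

    #include <cmath>
    #include <cstdint>

    // Behavioral sketch of the inlined round(): fcvtms(x + 0.5).
    int64_t RoundLikeArm64(double x) {
      return static_cast<int64_t>(std::floor(x + 0.5));
    }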
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 1777e98..094db4c 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -918,6 +918,7 @@
   loop_finished->target = return_point;
 
   AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
 
   return true;
 }
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 8ce696c..960f217 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1193,7 +1193,7 @@
 
   LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
   AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
-
+  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
   return true;
 }
 
@@ -1492,6 +1492,7 @@
     LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
     info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
     AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
+    ClobberCallerSave();  // We must clobber everything because the slow path will return here.
   } else {
     DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
     DCHECK(high_code_point_branch == nullptr);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index dd4d661..6020e70 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -497,6 +497,8 @@
   void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
                           uint32_t m3, uint32_t m4);
   void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
+  virtual void LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src, OpSize opsize,
+                                  int op_mov);
 
   static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
 
@@ -914,7 +916,7 @@
    * @param bb Basic block containing instruction.
    * @param mir Instruction to analyze.
    */
-  void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+  virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
 
   /*
    * @brief Analyze one use of a double operand.
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index aadb41a..675d6ac 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1206,6 +1206,7 @@
   if (dst_bad_len != nullptr)
     dst_bad_len->target = check_failed;
   AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+  ClobberCallerSave();  // We must clobber everything because the slow path will return here.
   return true;
 }
 
@@ -1384,6 +1385,7 @@
   if (slowpath_branch != nullptr) {
     LIR *return_point = NewLIR0(kPseudoTargetLabel);
     AddIntrinsicSlowPath(info, slowpath_branch, return_point);
+    ClobberCallerSave();  // We must clobber everything because the slow path will return here.
   }
 
   StoreValue(rl_dest, rl_return);
@@ -2268,6 +2270,20 @@
   }
 }
 
+void X86Mir2Lir::LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src,
+                                    OpSize opsize, int op_mov) {
+  if (!cu_->target64 && opsize == k64) {
+    // Logic assumes that longs are loaded in GP register pairs.
+    NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), rs_src.GetLowReg());
+    RegStorage r_tmp = AllocTempDouble();
+    NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), rs_src.GetHighReg());
+    NewLIR2(kX86PunpckldqRR, rs_dest.GetReg(), r_tmp.GetReg());
+    FreeTemp(r_tmp);
+  } else {
+    NewLIR2(op_mov, rs_dest.GetReg(), rs_src.GetReg());
+  }
+}
+
 void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
   DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
   OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
@@ -2321,16 +2337,7 @@
   RegStorage reg_to_shuffle = rl_src.reg;
 
   // Load the value into the XMM register.
-  if (!cu_->target64 && opsize == k64) {
-    // Logic assumes that longs are loaded in GP register pairs.
-    NewLIR2(kX86MovdxrRR, rs_dest.GetReg(), reg_to_shuffle.GetLowReg());
-    RegStorage r_tmp = AllocTempDouble();
-    NewLIR2(kX86MovdxrRR, r_tmp.GetReg(), reg_to_shuffle.GetHighReg());
-    NewLIR2(kX86PunpckldqRR, rs_dest.GetReg(), r_tmp.GetReg());
-    FreeTemp(r_tmp);
-  } else {
-    NewLIR2(op_mov, rs_dest.GetReg(), reg_to_shuffle.GetReg());
-  }
+  LoadVectorRegister(rs_dest, reg_to_shuffle, opsize, op_mov);
 
   if (opsize == kSignedByte || opsize == kUnsignedByte) {
     // In the byte case, first duplicate it to be a word
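The target_x86.cc change is a pure extraction: the pair-load logic moves verbatim out of GenSetVector into the new virtual LoadVectorRegister so subclasses can override it. On 32-bit x86 a Java long lives in a GP register pair, so the XMM value is assembled from two movd transfers joined by punpckldq; a host-intrinsics sketch of the same dance (not the JIT's LIR):

    #include <emmintrin.h>
    #include <cstdint>

    // Build a 64-bit value in an XMM register from its 32-bit halves,
    // mirroring the kX86MovdxrRR + kX86PunpckldqRR sequence above.
    __m128i LoadLongPair(uint32_t lo, uint32_t hi) {
      __m128i xmm_lo = _mm_cvtsi32_si128(static_cast<int32_t>(lo));  // movd low half
      __m128i xmm_hi = _mm_cvtsi32_si128(static_cast<int32_t>(hi));  // movd high half
      return _mm_unpacklo_epi32(xmm_lo, xmm_hi);                     // punpckldq
    }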
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 01c8f80..9f0a696 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -38,7 +38,7 @@
 #include "verifier/dex_gc_map.h"
 #include "verifier/method_verifier.h"
 #include "verifier/method_verifier-inl.h"
-#include "verifier/register_line.h"
+#include "verifier/reg_type-inl.h"
 #include "verifier/register_line-inl.h"
 
 namespace art {
@@ -127,7 +127,7 @@
         dex_gc_map_.push_back((i >> 8) & 0xFF);
       }
       verifier::RegisterLine* line = method_verifier->GetRegLine(i);
-      line->WriteReferenceBitMap(dex_gc_map_, ref_bitmap_bytes);
+      line->WriteReferenceBitMap(method_verifier, &dex_gc_map_, ref_bitmap_bytes);
     }
   }
   DCHECK_EQ(dex_gc_map_.size(), table_size);
@@ -151,7 +151,7 @@
       map_index++;
       verifier::RegisterLine* line = method_verifier->GetRegLine(i);
       for (size_t j = 0; j < code_item->registers_size_; j++) {
-        if (line->GetRegisterType(j).IsNonZeroReferenceTypes()) {
+        if (line->GetRegisterType(method_verifier, j).IsNonZeroReferenceTypes()) {
           DCHECK_LT(j / 8, map.RegWidth());
           DCHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 1);
         } else if ((j / 8) < map.RegWidth()) {
@@ -178,7 +178,7 @@
       local_gc_points++;
       max_insn = i;
       verifier::RegisterLine* line = method_verifier->GetRegLine(i);
-      max_ref_reg = line->GetMaxNonZeroReferenceReg(max_ref_reg);
+      max_ref_reg = line->GetMaxNonZeroReferenceReg(method_verifier, max_ref_reg);
     }
   }
   *gc_points = local_gc_points;
@@ -217,7 +217,8 @@
     bool is_range = (inst->Opcode() ==  Instruction::INVOKE_VIRTUAL_RANGE) ||
         (inst->Opcode() ==  Instruction::INVOKE_INTERFACE_RANGE);
     const verifier::RegType&
-        reg_type(line->GetRegisterType(is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
+        reg_type(line->GetRegisterType(method_verifier,
+                                       is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
 
     if (!reg_type.HasClass()) {
       // We will compute devirtualization information only when we know the Class of the reg type.
@@ -284,17 +285,20 @@
       const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
       bool is_safe_cast = false;
       if (code == Instruction::CHECK_CAST) {
-        const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c()));
+        const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
+                                                                inst->VRegA_21c()));
         const verifier::RegType& cast_type =
             method_verifier->ResolveCheckedClass(inst->VRegB_21c());
         is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
       } else {
-        const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x()));
+        const verifier::RegType& array_type(line->GetRegisterType(method_verifier,
+                                                                  inst->VRegB_23x()));
        // We only know it's safe to assign to an array if the array type is precise. For example,
         // an Object[] can have any type of object stored in it, but it may also be assigned a
         // String[] in which case the stores need to be of Strings.
         if (array_type.IsPreciseReference()) {
-          const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x()));
+          const verifier::RegType& value_type(line->GetRegisterType(method_verifier,
+                                                                    inst->VRegA_23x()));
           const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
               ->GetComponentType(array_type, method_verifier->GetClassLoader());
           is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index db6a01e..d743f90 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -350,10 +350,10 @@
       dump_stats_(dump_stats),
       dump_passes_(dump_passes),
       timings_logger_(timer),
-      compiler_library_(NULL),
-      compiler_context_(NULL),
-      compiler_enable_auto_elf_loading_(NULL),
-      compiler_get_method_code_addr_(NULL),
+      compiler_library_(nullptr),
+      compiler_context_(nullptr),
+      compiler_enable_auto_elf_loading_(nullptr),
+      compiler_get_method_code_addr_(nullptr),
       support_boot_image_fixup_(instruction_set != kMips),
       dedupe_code_("dedupe code"),
       dedupe_src_mapping_table_("dedupe source mapping table"),
@@ -365,7 +365,7 @@
   DCHECK(verification_results_ != nullptr);
   DCHECK(method_inliner_map_ != nullptr);
 
-  CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, NULL), "compiler tls key");
+  CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, nullptr), "compiler tls key");
 
   dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX);
 
@@ -445,7 +445,7 @@
 CompilerTls* CompilerDriver::GetTls() {
   // Lazily create thread-local storage
   CompilerTls* res = static_cast<CompilerTls*>(pthread_getspecific(tls_key_));
-  if (res == NULL) {
+  if (res == nullptr) {
     res = compiler_->CreateNewCompilerTls();
     CHECK_PTHREAD_CALL(pthread_setspecific, (tls_key_, res), "compiler tls");
   }
@@ -520,20 +520,18 @@
   const char* descriptor = dex_file.GetClassDescriptor(class_def);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader);
-  if (klass == NULL) {
+  if (klass == nullptr) {
     CHECK(self->IsExceptionPending());
     self->ClearException();
     return kDontDexToDexCompile;
   }
-  // The verifier can only run on "quick" instructions at runtime (see usage of
-  // FindAccessedFieldAtDexPc and FindInvokedMethodAtDexPc in ThrowNullPointerExceptionFromDexPC
-  // function). Since image classes can be verified again while compiling an application,
-  // we must prevent the DEX-to-DEX compiler from introducing them.
-  // TODO: find a way to enable "quick" instructions for image classes and remove this check.
-  bool compiling_image_classes = class_loader.Get() == nullptr;
-  if (compiling_image_classes) {
-    return kRequired;
-  } else if (klass->IsVerified()) {
+  // DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic
+  // references with actual offsets. We cannot re-verify such instructions.
+  //
+  // We store the verification information in the class status in the oat file, which the linker
+  // can validate (checksums) and use to skip load-time verification. It is thus safe to
+  // optimize when a class has been fully verified before.
+  if (klass->IsVerified()) {
     // Class is verified so we can enable DEX-to-DEX compilation for performance.
     return kOptimize;
   } else if (klass->IsCompileTimeVerified()) {
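The deleted branch forced kRequired for boot-image classes to keep quickened opcodes out of re-verifiable code; the new policy keys purely off verification status, since the class status recorded in the oat file lets the runtime skip load-time verification safely. A hedged restatement of the decision (the kRequired branch is cut off above; in context it covers classes that only soft-failed compile-time verification):

    enum DexToDexCompilationLevel { kDontDexToDexCompile, kRequired, kOptimize };

    // Sketch of the policy; the real code queries mirror::Class status flags.
    DexToDexCompilationLevel LevelFor(bool verified, bool compile_time_verified) {
      if (verified) {
        return kOptimize;  // Fully verified: quickening is safe.
      }
      if (compile_time_verified) {
        return kRequired;  // Soft failure: dex-to-dex without quickening.
      }
      return kDontDexToDexCompile;
    }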
@@ -606,13 +604,14 @@
                                 ThreadPool* thread_pool, TimingLogger* timings) {
   LoadImageClasses(timings);
 
+  Resolve(class_loader, dex_files, thread_pool, timings);
+
   if (!compiler_options_->IsVerificationEnabled()) {
-    VLOG(compiler) << "Verify none mode specified, skipping pre-compilation";
+    LOG(INFO) << "Verify none mode specified, skipping verification.";
+    SetVerified(class_loader, dex_files, thread_pool, timings);
     return;
   }
 
-  Resolve(class_loader, dex_files, thread_pool, timings);
-
   Verify(class_loader, dex_files, thread_pool, timings);
 
   InitializeClasses(class_loader, dex_files, thread_pool, timings);
@@ -628,11 +627,11 @@
   }
 }
 
-static void ResolveExceptionsForMethod(MethodHelper* mh,
+static void ResolveExceptionsForMethod(MutableMethodHelper* mh,
     std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   const DexFile::CodeItem* code_item = mh->GetMethod()->GetCodeItem();
-  if (code_item == NULL) {
+  if (code_item == nullptr) {
     return;  // native or abstract method
   }
   if (code_item->tries_size_ == 0) {
@@ -671,7 +670,7 @@
   std::set<std::pair<uint16_t, const DexFile*>>* exceptions_to_resolve =
       reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg);
   StackHandleScope<1> hs(Thread::Current());
-  MethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+  MutableMethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
   for (size_t i = 0; i < c->NumVirtualMethods(); ++i) {
     mh.ChangeMethod(c->GetVirtualMethod(i));
     ResolveExceptionsForMethod(&mh, *exceptions_to_resolve);
@@ -710,7 +709,7 @@
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> klass(
         hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
-    if (klass.Get() == NULL) {
+    if (klass.Get() == nullptr) {
       VLOG(compiler) << "Failed to find class " << descriptor;
       image_classes_->erase(it++);
       self->ClearException();
@@ -738,7 +737,7 @@
       Handle<mirror::Class> klass(hs.NewHandle(
           class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
                                     NullHandle<mirror::ClassLoader>())));
-      if (klass.Get() == NULL) {
+      if (klass.Get() == nullptr) {
         const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
         const char* descriptor = dex_file->GetTypeDescriptor(type_id);
         LOG(FATAL) << "Failed to resolve class " << descriptor;
@@ -762,7 +761,7 @@
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   // Make a copy of the handle so that we don't clobber it doing Assign.
-  Handle<mirror::Class> klass(hs.NewHandle(c.Get()));
+  MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
   std::string temp;
   while (!klass->IsObjectClass()) {
     const char* descriptor = klass->GetDescriptor(&temp);
@@ -785,8 +784,8 @@
 }
 
 void CompilerDriver::FindClinitImageClassesCallback(mirror::Object* object, void* arg) {
-  DCHECK(object != NULL);
-  DCHECK(arg != NULL);
+  DCHECK(object != nullptr);
+  DCHECK(arg != nullptr);
   CompilerDriver* compiler_driver = reinterpret_cast<CompilerDriver*>(arg);
   StackHandleScope<1> hs(Thread::Current());
   MaybeAddToImageClasses(hs.NewHandle(object->GetClass()), compiler_driver->image_classes_.get());
@@ -854,29 +853,29 @@
                                                 uint32_t type_idx,
                                                 bool* type_known_final, bool* type_known_abstract,
                                                 bool* equals_referrers_class) {
-  if (type_known_final != NULL) {
+  if (type_known_final != nullptr) {
     *type_known_final = false;
   }
-  if (type_known_abstract != NULL) {
+  if (type_known_abstract != nullptr) {
     *type_known_abstract = false;
   }
-  if (equals_referrers_class != NULL) {
+  if (equals_referrers_class != nullptr) {
     *equals_referrers_class = false;
   }
   ScopedObjectAccess soa(Thread::Current());
   mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
   // Get type from dex cache assuming it was populated by the verifier
   mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
-  if (resolved_class == NULL) {
+  if (resolved_class == nullptr) {
     stats_->TypeNeedsAccessCheck();
     return false;  // Unknown class needs access checks.
   }
   const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
-  if (equals_referrers_class != NULL) {
+  if (equals_referrers_class != nullptr) {
     *equals_referrers_class = (method_id.class_idx_ == type_idx);
   }
   mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
-  if (referrer_class == NULL) {
+  if (referrer_class == nullptr) {
     stats_->TypeNeedsAccessCheck();
     return false;  // Incomplete referrer knowledge needs access check.
   }
@@ -885,10 +884,10 @@
   bool result = referrer_class->CanAccess(resolved_class);
   if (result) {
     stats_->TypeDoesntNeedAccessCheck();
-    if (type_known_final != NULL) {
+    if (type_known_final != nullptr) {
       *type_known_final = resolved_class->IsFinal() && !resolved_class->IsArrayClass();
     }
-    if (type_known_abstract != NULL) {
+    if (type_known_abstract != nullptr) {
       *type_known_abstract = resolved_class->IsAbstract() && !resolved_class->IsArrayClass();
     }
   } else {
@@ -904,13 +903,13 @@
   mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
   // Get type from dex cache assuming it was populated by the verifier.
   mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
-  if (resolved_class == NULL) {
+  if (resolved_class == nullptr) {
     stats_->TypeNeedsAccessCheck();
     return false;  // Unknown class needs access checks.
   }
   const DexFile::MethodId& method_id = dex_file.GetMethodId(referrer_idx);
   mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_);
-  if (referrer_class == NULL) {
+  if (referrer_class == nullptr) {
     stats_->TypeNeedsAccessCheck();
     return false;  // Incomplete referrer knowledge needs access check.
   }
@@ -1310,6 +1309,10 @@
 }
 
 bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) {
+  if (!compiler_options_->IsVerificationEnabled()) {
+    // If we didn't verify, every cast has to be treated as non-safe.
+    return false;
+  }
   DCHECK(mUnit->GetVerifiedMethod() != nullptr);
   bool result = mUnit->GetVerifiedMethod()->IsSafeCast(dex_pc);
   if (result) {
@@ -1410,7 +1413,7 @@
       thread_pool_(thread_pool) {}
 
   ClassLinker* GetClassLinker() const {
-    CHECK(class_linker_ != NULL);
+    CHECK(class_linker_ != nullptr);
     return class_linker_;
   }
 
@@ -1419,12 +1422,12 @@
   }
 
   CompilerDriver* GetCompiler() const {
-    CHECK(compiler_ != NULL);
+    CHECK(compiler_ != nullptr);
     return compiler_;
   }
 
   const DexFile* GetDexFile() const {
-    CHECK(dex_file_ != NULL);
+    CHECK(dex_file_ != nullptr);
     return dex_file_;
   }
 
@@ -1499,10 +1502,10 @@
 // that avoids the expensive FindInClassPath search.
 static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(klass != NULL);
+  DCHECK(klass != nullptr);
   const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
   if (&dex_file != &original_dex_file) {
-    if (class_loader == NULL) {
+    if (class_loader == nullptr) {
       LOG(WARNING) << "Skipping class " << PrettyDescriptor(klass) << " from "
                    << dex_file.GetLocation() << " previously found in "
                    << original_dex_file.GetLocation();
@@ -1587,7 +1590,7 @@
   // static fields, instance fields, direct methods, and virtual
   // methods.
   const byte* class_data = dex_file.GetClassData(class_def);
-  if (class_data == NULL) {
+  if (class_data == nullptr) {
     // Empty class such as a marker interface.
     requires_constructor_barrier = false;
   } else {
@@ -1596,7 +1599,7 @@
       if (resolve_fields_and_methods) {
         mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
                                                              dex_cache, class_loader, true);
-        if (field == NULL) {
+        if (field == nullptr) {
           CheckAndClearResolveException(soa.Self());
         }
       }
@@ -1605,13 +1608,13 @@
     // We require a constructor barrier if there are final instance fields.
     requires_constructor_barrier = false;
     while (it.HasNextInstanceField()) {
-      if ((it.GetMemberAccessFlags() & kAccFinal) != 0) {
+      if (it.MemberIsFinal()) {
         requires_constructor_barrier = true;
       }
       if (resolve_fields_and_methods) {
         mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
                                                              dex_cache, class_loader, false);
-        if (field == NULL) {
+        if (field == nullptr) {
           CheckAndClearResolveException(soa.Self());
         }
       }
@@ -1623,7 +1626,7 @@
                                                                 dex_cache, class_loader,
                                                                 NullHandle<mirror::ArtMethod>(),
                                                                 it.GetMethodInvokeType(class_def));
-        if (method == NULL) {
+        if (method == nullptr) {
           CheckAndClearResolveException(soa.Self());
         }
         it.Next();
@@ -1633,7 +1636,7 @@
                                                                 dex_cache, class_loader,
                                                                 NullHandle<mirror::ArtMethod>(),
                                                                 it.GetMethodInvokeType(class_def));
-        if (method == NULL) {
+        if (method == nullptr) {
           CheckAndClearResolveException(soa.Self());
         }
         it.Next();
@@ -1658,9 +1661,9 @@
       hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader())));
   mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
 
-  if (klass == NULL) {
+  if (klass == nullptr) {
     CHECK(soa.Self()->IsExceptionPending());
-    mirror::Throwable* exception = soa.Self()->GetException(NULL);
+    mirror::Throwable* exception = soa.Self()->GetException(nullptr);
     VLOG(compiler) << "Exception during type resolution: " << exception->Dump();
     if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) {
       // There's little point continuing compilation if the heap is exhausted.
@@ -1691,11 +1694,20 @@
   context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
 }
 
+void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                                 ThreadPool* thread_pool, TimingLogger* timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != nullptr);
+    SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
+  }
+}
+
 void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                             ThreadPool* thread_pool, TimingLogger* timings) {
   for (size_t i = 0; i != dex_files.size(); ++i) {
     const DexFile* dex_file = dex_files[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
   }
 }
@@ -1725,15 +1737,15 @@
      */
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
     std::string error_msg;
-    if (verifier::MethodVerifier::VerifyClass(&dex_file, dex_cache, class_loader, &class_def, true,
-                                              &error_msg) ==
+    if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader,
+                                              &class_def, true, &error_msg) ==
                                                   verifier::MethodVerifier::kHardFailure) {
       LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
                  << " because: " << error_msg;
     }
   } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
     CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
-    class_linker->VerifyClass(klass);
+    class_linker->VerifyClass(soa.Self(), klass);
 
     if (klass->IsErroneous()) {
       // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
@@ -1757,6 +1769,50 @@
   context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
 }
 
+static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index)
+    LOCKS_EXCLUDED(Locks::mutator_lock_) {
+  ATRACE_CALL();
+  ScopedObjectAccess soa(Thread::Current());
+  const DexFile& dex_file = *manager->GetDexFile();
+  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+  const char* descriptor = dex_file.GetClassDescriptor(class_def);
+  ClassLinker* class_linker = manager->GetClassLinker();
+  jobject jclass_loader = manager->GetClassLoader();
+  StackHandleScope<3> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+  Handle<mirror::Class> klass(
+      hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+  // The class might have failed resolution; if so, don't mark it verified.
+  if (klass.Get() != nullptr) {
+    // Only do this if the class is resolved. If even resolution fails, quickening will go very,
+    // very wrong.
+    if (klass->IsResolved()) {
+      if (klass->GetStatus() < mirror::Class::kStatusVerified) {
+        ObjectLock<mirror::Class> lock(soa.Self(), klass);
+        klass->SetStatus(mirror::Class::kStatusVerified, soa.Self());
+      }
+      // Record the final class status if necessary.
+      ClassReference ref(manager->GetDexFile(), class_def_index);
+      manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+    }
+  } else {
+    Thread* self = soa.Self();
+    DCHECK(self->IsExceptionPending());
+    self->ClearException();
+  }
+}
+
+void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
+                                        const std::vector<const DexFile*>& dex_files,
+                                        ThreadPool* thread_pool, TimingLogger* timings) {
+  TimingLogger::ScopedTiming t("Verify Dex File", timings);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
+                                     thread_pool);
+  context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_);
+}
+
 static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index)
     LOCKS_EXCLUDED(Locks::mutator_lock_) {
   ATRACE_CALL();
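Together with the PreCompile reordering earlier in this file, verify-none mode now resolves everything and then runs this pass to stamp each resolved class kStatusVerified and record the status, rather than skipping straight to compilation. A stubbed sketch of the resulting flow (the helpers stand in for the CompilerDriver phases of the same names):

    void LoadImageClasses() {}
    void Resolve() {}
    void SetVerified() {}
    void Verify() {}
    void InitializeClasses() {}

    void PreCompile(bool verification_enabled) {
      LoadImageClasses();
      Resolve();  // Now unconditional, even in verify-none mode.
      if (!verification_enabled) {
        SetVerified();  // Mark resolved classes verified and record the status.
        return;
      }
      Verify();
      InitializeClasses();
    }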
@@ -1778,7 +1834,7 @@
     if (klass->IsVerified()) {
       // Attempt to initialize the class but bail if we either need to initialize the super-class
       // or static fields.
-      manager->GetClassLinker()->EnsureInitialized(klass, false, false);
+      manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
       if (!klass->IsInitialized()) {
         // We don't want non-trivial class initialization occurring on multiple threads due to
         // deadlock problems. For example, a parent class is initialized (holding its lock) that
@@ -1792,7 +1848,7 @@
         ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
         // Attempt to initialize allowing initialization of parent classes but still not static
         // fields.
-        manager->GetClassLinker()->EnsureInitialized(klass, false, true);
+        manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
         if (!klass->IsInitialized()) {
           // We need to initialize static fields, we only do this for image classes that aren't
           // marked with the $NoPreloadHolder (which implies this should not be initialized early).
@@ -1811,7 +1867,8 @@
             // Run the class initializer in transaction mode.
             runtime->EnterTransactionMode(&transaction);
             const mirror::Class::Status old_status = klass->GetStatus();
-            bool success = manager->GetClassLinker()->EnsureInitialized(klass, true, true);
+            bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
+                                                                        true);
             // TODO we detach transaction from runtime to indicate we quit the transactional
             // mode which prevents the GC from visiting objects modified during the transaction.
             // Ensure GC is not run so don't access freed objects when aborting transaction.
@@ -1864,7 +1921,7 @@
                                        ThreadPool* thread_pool, TimingLogger* timings) {
   for (size_t i = 0; i != dex_files.size(); ++i) {
     const DexFile* dex_file = dex_files[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     InitializeClasses(class_loader, *dex_file, dex_files, thread_pool, timings);
   }
   if (IsImage()) {
@@ -1877,7 +1934,7 @@
                              ThreadPool* thread_pool, TimingLogger* timings) {
   for (size_t i = 0; i != dex_files.size(); ++i) {
     const DexFile* dex_file = dex_files[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     CompileDexFile(class_loader, *dex_file, dex_files, thread_pool, timings);
   }
 }
@@ -1910,7 +1967,7 @@
     return;
   }
   const byte* class_data = dex_file.GetClassData(class_def);
-  if (class_data == NULL) {
+  if (class_data == nullptr) {
     // empty class, probably a marker interface
     return;
   }
@@ -1945,7 +2002,7 @@
       continue;
     }
     previous_direct_method_idx = method_idx;
-    driver->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+    driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
                           it.GetMethodInvokeType(class_def), class_def_index,
                           method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
     it.Next();
@@ -1961,7 +2018,7 @@
       continue;
     }
     previous_virtual_method_idx = method_idx;
-    driver->CompileMethod(it.GetMethodCodeItem(), it.GetMemberAccessFlags(),
+    driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
                           it.GetMethodInvokeType(class_def), class_def_index,
                           method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level);
     it.Next();
@@ -1983,7 +2040,7 @@
                                    uint32_t method_idx, jobject class_loader,
                                    const DexFile& dex_file,
                                    DexToDexCompilationLevel dex_to_dex_compilation_level) {
-  CompiledMethod* compiled_method = NULL;
+  CompiledMethod* compiled_method = nullptr;
   uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
 
   if ((access_flags & kAccNative) != 0) {
@@ -1993,14 +2050,14 @@
       // Leaving this empty will trigger the generic JNI version
     } else {
       compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
-      CHECK(compiled_method != NULL);
+      CHECK(compiled_method != nullptr);
     }
   } else if ((access_flags & kAccAbstract) != 0) {
   } else {
     MethodReference method_ref(&dex_file, method_idx);
     bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
     if (compile) {
-      // NOTE: if compiler declines to compile this method, it will return NULL.
+      // NOTE: if the compiler declines to compile this method, it will return nullptr.
       compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
                                            method_idx, class_loader, dex_file);
     }
@@ -2021,20 +2078,20 @@
   }
 
   Thread* self = Thread::Current();
-  if (compiled_method != NULL) {
+  if (compiled_method != nullptr) {
     MethodReference ref(&dex_file, method_idx);
-    DCHECK(GetCompiledMethod(ref) == NULL) << PrettyMethod(method_idx, dex_file);
+    DCHECK(GetCompiledMethod(ref) == nullptr) << PrettyMethod(method_idx, dex_file);
     {
       MutexLock mu(self, compiled_methods_lock_);
       compiled_methods_.Put(ref, compiled_method);
     }
-    DCHECK(GetCompiledMethod(ref) != NULL) << PrettyMethod(method_idx, dex_file);
+    DCHECK(GetCompiledMethod(ref) != nullptr) << PrettyMethod(method_idx, dex_file);
   }
 
   if (self->IsExceptionPending()) {
     ScopedObjectAccess soa(self);
     LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n"
-        << self->GetException(NULL)->Dump();
+        << self->GetException(nullptr)->Dump();
   }
 }
 
@@ -2042,9 +2099,9 @@
   MutexLock mu(Thread::Current(), compiled_classes_lock_);
   ClassTable::const_iterator it = compiled_classes_.find(ref);
   if (it == compiled_classes_.end()) {
-    return NULL;
+    return nullptr;
   }
-  CHECK(it->second != NULL);
+  CHECK(it->second != nullptr);
   return it->second;
 }
 
@@ -2078,9 +2135,9 @@
   MutexLock mu(Thread::Current(), compiled_methods_lock_);
   MethodTable::const_iterator it = compiled_methods_.find(ref);
   if (it == compiled_methods_.end()) {
-    return NULL;
+    return nullptr;
   }
-  CHECK(it->second != NULL);
+  CHECK(it->second != nullptr);
   return it->second;
 }
 
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 624947d..e7bd357 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -81,7 +81,7 @@
   // Create a compiler targeting the requested "instruction_set".
   // "image" should be true if image specific optimizations should be
   // enabled.  "image_classes" lets the compiler know what classes it
-  // can assume will be in the image, with NULL implying all available
+  // can assume will be in the image, with nullptr implying all available
   // classes.
   explicit CompilerDriver(const CompilerOptions* compiler_options,
                           VerificationResults* verification_results,
@@ -183,9 +183,9 @@
 
   // Are runtime access checks necessary in the compiled code?
   bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file,
-                                  uint32_t type_idx, bool* type_known_final = NULL,
-                                  bool* type_known_abstract = NULL,
-                                  bool* equals_referrers_class = NULL)
+                                  uint32_t type_idx, bool* type_known_final = nullptr,
+                                  bool* type_known_abstract = nullptr,
+                                  bool* equals_referrers_class = nullptr)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
   // Are runtime access and instantiable checks necessary in the code?
@@ -260,7 +260,7 @@
       uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Get declaration location of a resolved field.
+  // Get the index in the vtable of the method.
   uint16_t GetResolvedMethodVTableIndex(
       mirror::ArtMethod* resolved_method, InvokeType type)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -436,7 +436,7 @@
         referrer_class_def_idx_(referrer_class_def_idx),
         referrer_method_idx_(referrer_method_idx),
         literal_offset_(literal_offset) {
-      CHECK(dex_file_ != NULL);
+      CHECK(dex_file_ != nullptr);
     }
     virtual ~PatchInformation() {}
 
@@ -655,6 +655,13 @@
                      ThreadPool* thread_pool, TimingLogger* timings)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
+  void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files,
+                   ThreadPool* thread_pool, TimingLogger* timings);
+  void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file,
+                          const std::vector<const DexFile*>& dex_files,
+                          ThreadPool* thread_pool, TimingLogger* timings)
+      LOCKS_EXCLUDED(Locks::mutator_lock_);
+
   void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                          ThreadPool* thread_pool, TimingLogger* timings)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -712,7 +719,7 @@
   const bool image_;
 
   // If image_ is true, specifies the classes that will be included in
-  // the image. Note if image_classes_ is NULL, all classes are
+  // the image. Note if image_classes_ is nullptr, all classes are
   // included in the image.
   std::unique_ptr<std::set<std::string>> image_classes_;
 
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index c0f91d1..eb3de97 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -27,7 +27,8 @@
     kSpace,               // Maximize space savings.
     kBalanced,            // Try to get the best performance return on compilation investment.
     kSpeed,               // Maximize runtime performance.
-    kEverything           // Force compilation (Note: excludes compilaton of class initializers).
+    kEverything,          // Force compilation (Note: excludes compilation of class initializers).
+    kTime                 // Compile methods, but minimize compilation time.
   };
 
   // Guide heuristics to determine whether to compile method if profile data not available.
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index bbfbc6e..0d34879 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -89,17 +89,18 @@
 
 bool ElfFixup::FixupSectionHeaders(ElfFile& elf_file, uintptr_t base_address) {
   for (Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
-    Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
+    Elf32_Shdr* sh = elf_file.GetSectionHeader(i);
+    CHECK(sh != nullptr);
     // 0 implies that the section will not exist in the memory of the process
-    if (sh.sh_addr == 0) {
+    if (sh->sh_addr == 0) {
       continue;
     }
     if (DEBUG_FIXUP) {
       LOG(INFO) << StringPrintf("In %s moving Elf32_Shdr[%d] from 0x%08x to 0x%08" PRIxPTR,
                                 elf_file.GetFile().GetPath().c_str(), i,
-                                sh.sh_addr, sh.sh_addr + base_address);
+                                sh->sh_addr, sh->sh_addr + base_address);
     }
-    sh.sh_addr += base_address;
+    sh->sh_addr += base_address;
   }
   return true;
 }
@@ -107,18 +108,19 @@
 bool ElfFixup::FixupProgramHeaders(ElfFile& elf_file, uintptr_t base_address) {
  // TODO: ELFObjectFile doesn't give access to Elf32_Phdr, so we do that ourselves for now.
   for (Elf32_Word i = 0; i < elf_file.GetProgramHeaderNum(); i++) {
-    Elf32_Phdr& ph = elf_file.GetProgramHeader(i);
-    CHECK_EQ(ph.p_vaddr, ph.p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
-    CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+    Elf32_Phdr* ph = elf_file.GetProgramHeader(i);
+    CHECK(ph != nullptr);
+    CHECK_EQ(ph->p_vaddr, ph->p_paddr) << elf_file.GetFile().GetPath() << " i=" << i;
+    CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
             << elf_file.GetFile().GetPath() << " i=" << i;
     if (DEBUG_FIXUP) {
       LOG(INFO) << StringPrintf("In %s moving Elf32_Phdr[%d] from 0x%08x to 0x%08" PRIxPTR,
                                 elf_file.GetFile().GetPath().c_str(), i,
-                                ph.p_vaddr, ph.p_vaddr + base_address);
+                                ph->p_vaddr, ph->p_vaddr + base_address);
     }
-    ph.p_vaddr += base_address;
-    ph.p_paddr += base_address;
-    CHECK((ph.p_align == 0) || (0 == ((ph.p_vaddr - ph.p_offset) & (ph.p_align - 1))))
+    ph->p_vaddr += base_address;
+    ph->p_paddr += base_address;
+    CHECK((ph->p_align == 0) || (0 == ((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1))))
             << elf_file.GetFile().GetPath() << " i=" << i;
   }
   return true;
@@ -128,20 +130,21 @@
   Elf32_Word section_type = dynamic ? SHT_DYNSYM : SHT_SYMTAB;
  // TODO: Unfortunately, ELFObjectFile has protected symbol access, so use ElfFile.
   Elf32_Shdr* symbol_section = elf_file.FindSectionByType(section_type);
-  if (symbol_section == NULL) {
+  if (symbol_section == nullptr) {
     // file is missing optional .symtab
     CHECK(!dynamic) << elf_file.GetFile().GetPath();
     return true;
   }
   for (uint32_t i = 0; i < elf_file.GetSymbolNum(*symbol_section); i++) {
-    Elf32_Sym& symbol = elf_file.GetSymbol(section_type, i);
-    if (symbol.st_value != 0) {
+    Elf32_Sym* symbol = elf_file.GetSymbol(section_type, i);
+    CHECK(symbol != nullptr);
+    if (symbol->st_value != 0) {
       if (DEBUG_FIXUP) {
         LOG(INFO) << StringPrintf("In %s moving Elf32_Sym[%d] from 0x%08x to 0x%08" PRIxPTR,
                                   elf_file.GetFile().GetPath().c_str(), i,
-                                  symbol.st_value, symbol.st_value + base_address);
+                                  symbol->st_value, symbol->st_value + base_address);
       }
-      symbol.st_value += base_address;
+      symbol->st_value += base_address;
     }
   }
   return true;
@@ -149,10 +152,11 @@
 
 bool ElfFixup::FixupRelocations(ElfFile& elf_file, uintptr_t base_address) {
   for (Elf32_Word i = 0; i < elf_file.GetSectionHeaderNum(); i++) {
-    Elf32_Shdr& sh = elf_file.GetSectionHeader(i);
-    if (sh.sh_type == SHT_REL) {
-      for (uint32_t i = 0; i < elf_file.GetRelNum(sh); i++) {
-        Elf32_Rel& rel = elf_file.GetRel(sh, i);
+    Elf32_Shdr* sh = elf_file.GetSectionHeader(i);
+    CHECK(sh != nullptr);
+    if (sh->sh_type == SHT_REL) {
+      for (uint32_t i = 0; i < elf_file.GetRelNum(*sh); i++) {
+        Elf32_Rel& rel = elf_file.GetRel(*sh, i);
         if (DEBUG_FIXUP) {
           LOG(INFO) << StringPrintf("In %s moving Elf32_Rel[%d] from 0x%08x to 0x%08" PRIxPTR,
                                     elf_file.GetFile().GetPath().c_str(), i,
@@ -160,9 +164,9 @@
         }
         rel.r_offset += base_address;
       }
-    } else if (sh.sh_type == SHT_RELA) {
-      for (uint32_t i = 0; i < elf_file.GetRelaNum(sh); i++) {
-        Elf32_Rela& rela = elf_file.GetRela(sh, i);
+    } else if (sh->sh_type == SHT_RELA) {
+      for (uint32_t i = 0; i < elf_file.GetRelaNum(*sh); i++) {
+        Elf32_Rela& rela = elf_file.GetRela(*sh, i);
         if (DEBUG_FIXUP) {
           LOG(INFO) << StringPrintf("In %s moving Elf32_Rela[%d] from 0x%08x to 0x%08" PRIxPTR,
                                     elf_file.GetFile().GetPath().c_str(), i,
diff --git a/compiler/elf_patcher.cc b/compiler/elf_patcher.cc
index f192227..92eb4d8 100644
--- a/compiler/elf_patcher.cc
+++ b/compiler/elf_patcher.cc
@@ -276,7 +276,7 @@
         << "We got more patches than anticipated";
     CHECK_LE(reinterpret_cast<uintptr_t>(elf_file_->Begin()) + shdr->sh_offset + shdr->sh_size,
               reinterpret_cast<uintptr_t>(elf_file_->End())) << "section is too large";
-    CHECK(shdr == &elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
+    CHECK(shdr == elf_file_->GetSectionHeader(elf_file_->GetSectionHeaderNum() - 1) ||
           shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset)
         << "Section overlaps onto next section";
     // It's mmap'd so we can just memcpy.
diff --git a/compiler/elf_stripper.cc b/compiler/elf_stripper.cc
index 0b86ad0..457d8a0 100644
--- a/compiler/elf_stripper.cc
+++ b/compiler/elf_stripper.cc
@@ -72,13 +72,15 @@
   section_headers.reserve(elf_file->GetSectionHeaderNum());
 
 
-  Elf32_Shdr& string_section = elf_file->GetSectionNameStringSection();
+  Elf32_Shdr* string_section = elf_file->GetSectionNameStringSection();
+  CHECK(string_section != nullptr);
   for (Elf32_Word i = 0; i < elf_file->GetSectionHeaderNum(); i++) {
-    Elf32_Shdr& sh = elf_file->GetSectionHeader(i);
-    const char* name = elf_file->GetString(string_section, sh.sh_name);
-    if (name == NULL) {
+    Elf32_Shdr* sh = elf_file->GetSectionHeader(i);
+    CHECK(sh != nullptr);
+    const char* name = elf_file->GetString(*string_section, sh->sh_name);
+    if (name == nullptr) {
       CHECK_EQ(0U, i);
-      section_headers.push_back(sh);
+      section_headers.push_back(*sh);
       section_headers_original_indexes.push_back(0);
       continue;
     }
@@ -87,32 +89,34 @@
         || (strcmp(name, ".symtab") == 0)) {
       continue;
     }
-    section_headers.push_back(sh);
+    section_headers.push_back(*sh);
     section_headers_original_indexes.push_back(i);
   }
   CHECK_NE(0U, section_headers.size());
   CHECK_EQ(section_headers.size(), section_headers_original_indexes.size());
 
  // Section 0 is the NULL section; the remaining sections start at the offset of the first section.
-  Elf32_Off offset = elf_file->GetSectionHeader(1).sh_offset;
+  CHECK(elf_file->GetSectionHeader(1) != nullptr);
+  Elf32_Off offset = elf_file->GetSectionHeader(1)->sh_offset;
   for (size_t i = 1; i < section_headers.size(); i++) {
     Elf32_Shdr& new_sh = section_headers[i];
-    Elf32_Shdr& old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
-    CHECK_EQ(new_sh.sh_name, old_sh.sh_name);
-    if (old_sh.sh_addralign > 1) {
-      offset = RoundUp(offset, old_sh.sh_addralign);
+    Elf32_Shdr* old_sh = elf_file->GetSectionHeader(section_headers_original_indexes[i]);
+    CHECK(old_sh != nullptr);
+    CHECK_EQ(new_sh.sh_name, old_sh->sh_name);
+    if (old_sh->sh_addralign > 1) {
+      offset = RoundUp(offset, old_sh->sh_addralign);
     }
-    if (old_sh.sh_offset == offset) {
+    if (old_sh->sh_offset == offset) {
       // already in place
-      offset += old_sh.sh_size;
+      offset += old_sh->sh_size;
       continue;
     }
     // shift section earlier
     memmove(elf_file->Begin() + offset,
-            elf_file->Begin() + old_sh.sh_offset,
-            old_sh.sh_size);
+            elf_file->Begin() + old_sh->sh_offset,
+            old_sh->sh_size);
     new_sh.sh_offset = offset;
-    offset += old_sh.sh_size;
+    offset += old_sh->sh_size;
   }
 
   Elf32_Off shoff = offset;
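
The loop above packs surviving sections toward the start of the file. A standalone sketch of the same idea, not part of the patch, with a simplified Section struct and a power-of-two RoundUp standing in for the Elf32_Shdr fields actually used:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Section { uint32_t offset; uint32_t size; uint32_t addralign; };

    // ELF sh_addralign values are powers of two, so bit masking suffices here.
    static uint32_t RoundUp(uint32_t x, uint32_t n) { return (x + n - 1) & ~(n - 1); }

    static void Compact(uint8_t* base, std::vector<Section>& sections, uint32_t offset) {
      for (Section& s : sections) {
        if (s.addralign > 1) {
          offset = RoundUp(offset, s.addralign);  // keep the required alignment
        }
        if (s.offset != offset) {
          // Slide the section's bytes earlier; ranges can overlap, hence memmove.
          std::memmove(base + offset, base + s.offset, s.size);
          s.offset = offset;
        }
        offset += s.size;
      }
    }

    int main() {
      std::vector<uint8_t> file(64, 0xAB);
      std::vector<Section> sections = {{32, 8, 4}, {48, 8, 1}};
      Compact(file.data(), sections, 8);  // sections now start at offsets 8 and 16
      return 0;
    }
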
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index c5d1478..e74d6de 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -437,7 +437,7 @@
         std::vector<uint8_t> const * gc_map = compiled_method->GetGcMap();
         if (gc_map != nullptr) {
           size_t gc_map_size = gc_map->size() * sizeof(gc_map[0]);
-          bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
+          bool is_native = it.MemberIsNative();
           CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
               << gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
               << (status < mirror::Class::kStatusVerified) << " " << status << " "
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index ecd6802..33b00d2 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -138,13 +138,15 @@
 
 template<typename T>
 void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
+  int32_t target_offset = instruction.GetTargetOffset();
+  PotentiallyAddSuspendCheck(target_offset, dex_offset);
   HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
   HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
   T* comparison = new (arena_) T(first, second);
   current_block_->AddInstruction(comparison);
   HInstruction* ifinst = new (arena_) HIf(comparison);
   current_block_->AddInstruction(ifinst);
-  HBasicBlock* target = FindBlockStartingAt(dex_offset + instruction.GetTargetOffset());
+  HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
   DCHECK(target != nullptr);
   current_block_->AddSuccessor(target);
   target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
@@ -155,12 +157,14 @@
 
 template<typename T>
 void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset) {
+  int32_t target_offset = instruction.GetTargetOffset();
+  PotentiallyAddSuspendCheck(target_offset, dex_offset);
   HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
   T* comparison = new (arena_) T(value, GetIntConstant(0));
   current_block_->AddInstruction(comparison);
   HInstruction* ifinst = new (arena_) HIf(comparison);
   current_block_->AddInstruction(ifinst);
-  HBasicBlock* target = FindBlockStartingAt(dex_offset + instruction.GetTargetOffset());
+  HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
   DCHECK(target != nullptr);
   current_block_->AddSuccessor(target);
   target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
@@ -209,6 +213,8 @@
   // Add the exit block at the end to give it the highest id.
   graph_->AddBlock(exit_block_);
   exit_block_->AddInstruction(new (arena_) HExit());
+  // Add the suspend check to the entry block.
+  entry_block_->AddInstruction(new (arena_) HSuspendCheck(0));
   entry_block_->AddInstruction(new (arena_) HGoto());
   return graph_;
 }
@@ -325,18 +331,61 @@
                                 bool is_range,
                                 uint32_t* args,
                                 uint32_t register_index) {
+  Instruction::Code opcode = instruction.Opcode();
+  InvokeType invoke_type;
+  switch (opcode) {
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
+      invoke_type = kStatic;
+      break;
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+      invoke_type = kDirect;
+      break;
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      invoke_type = kVirtual;
+      break;
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE:
+      invoke_type = kInterface;
+      break;
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_SUPER:
+      invoke_type = kSuper;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected invoke op: " << opcode;
+      return false;
+  }
+
   const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
   const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(method_id.proto_idx_);
   const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
   Primitive::Type return_type = Primitive::GetType(descriptor[0]);
-  bool is_instance_call =
-      instruction.Opcode() != Instruction::INVOKE_STATIC
-      && instruction.Opcode() != Instruction::INVOKE_STATIC_RANGE;
+  bool is_instance_call = invoke_type != kStatic;
   const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
 
-  // Treat invoke-direct like static calls for now.
-  HInvoke* invoke = new (arena_) HInvokeStatic(
-      arena_, number_of_arguments, return_type, dex_offset, method_idx);
+  HInvoke* invoke = nullptr;
+  if (invoke_type == kVirtual) {
+    MethodReference target_method(dex_file_, method_idx);
+    uintptr_t direct_code;
+    uintptr_t direct_method;
+    int vtable_index;
+    // TODO: Add devirtualization support.
+    compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+                                        &invoke_type, &target_method, &vtable_index,
+                                        &direct_code, &direct_method);
+    if (vtable_index == -1) {
+      return false;
+    }
+    invoke = new (arena_) HInvokeVirtual(
+        arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+  } else {
+    // Treat invoke-direct like static calls for now.
+    invoke = new (arena_) HInvokeStatic(
+        arena_, number_of_arguments, return_type, dex_offset, method_idx);
+  }
 
   size_t start_index = 0;
   Temporaries temps(graph_, is_instance_call ? 1 : 0);
@@ -462,7 +511,15 @@
   }
 }
 
-bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset) {
+void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
+  if (target_offset <= 0) {
+    // Unconditionally add a suspend check to backward branches. We can remove
+    // them after we recognize loops in the graph.
+    current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_offset));
+  }
+}
+
+bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset) {
   if (current_block_ == nullptr) {
     return true;  // Dead code
   }
@@ -580,7 +637,9 @@
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32: {
-      HBasicBlock* target = FindBlockStartingAt(instruction.GetTargetOffset() + dex_offset);
+      int32_t offset = instruction.GetTargetOffset();
+      PotentiallyAddSuspendCheck(offset, dex_offset);
+      HBasicBlock* target = FindBlockStartingAt(offset + dex_offset);
       DCHECK(target != nullptr);
       current_block_->AddInstruction(new (arena_) HGoto());
       current_block_->AddSuccessor(target);
@@ -604,7 +663,8 @@
     }
 
     case Instruction::INVOKE_STATIC:
-    case Instruction::INVOKE_DIRECT: {
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_VIRTUAL: {
       uint32_t method_idx = instruction.VRegB_35c();
       uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
@@ -616,7 +676,8 @@
     }
 
     case Instruction::INVOKE_STATIC_RANGE:
-    case Instruction::INVOKE_DIRECT_RANGE: {
+    case Instruction::INVOKE_DIRECT_RANGE:
+    case Instruction::INVOKE_VIRTUAL_RANGE: {
       uint32_t method_idx = instruction.VRegB_3rc();
       uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       uint32_t register_index = instruction.VRegC();
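
PotentiallyAddSuspendCheck keys entirely off the sign of the relative branch offset: zero or negative offsets may close a loop, so they conservatively get a suspend check. A tiny illustration of that placement rule, not from the patch:

    #include <cassert>
    #include <cstdint>

    static bool NeedsSuspendCheck(int32_t target_offset) {
      return target_offset <= 0;  // backward (or self-targeting) branch
    }

    int main() {
      assert(NeedsSuspendCheck(-5));   // e.g. "goto +(-5)" jumping back to a loop head
      assert(!NeedsSuspendCheck(4));   // forward branch: no loop, no check
      return 0;
    }
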
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 170c427..e143786 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -54,7 +54,7 @@
   // Analyzes the dex instruction and adds HInstruction to the graph
   // to execute that instruction. Returns whether the instruction can
   // be handled.
-  bool AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset);
+  bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset);
 
   // Finds all instructions that start a new block, and populates branch_targets_ with
   // the newly created blocks.
@@ -70,6 +70,7 @@
   HLocal* GetLocalAt(int register_index) const;
   void UpdateLocal(int register_index, HInstruction* instruction) const;
   HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
+  void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
 
   // Temporarily returns whether the compiler supports the parameters
   // of the method.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 7731e6e..2547a29 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -48,11 +48,11 @@
                      + 1 /* current method */);
   GenerateFrameEntry();
 
+  HGraphVisitor* location_builder = GetLocationBuilder();
+  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
   for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
     HBasicBlock* block = blocks.Get(i);
     Bind(GetLabelOf(block));
-    HGraphVisitor* location_builder = GetLocationBuilder();
-    HGraphVisitor* instruction_visitor = GetInstructionVisitor();
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       current->Accept(location_builder);
@@ -77,10 +77,10 @@
   block_labels_.SetSize(blocks.Size());
 
   GenerateFrameEntry();
+  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
   for (size_t i = 0, e = blocks.Size(); i < e; ++i) {
     HBasicBlock* block = blocks.Get(i);
     Bind(GetLabelOf(block));
-    HGraphVisitor* instruction_visitor = GetInstructionVisitor();
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
       current->Accept(instruction_visitor);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e72e39b..ad62279 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -20,6 +20,7 @@
 #include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
+#include "mirror/class.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/arm/assembler_arm.h"
@@ -90,6 +91,29 @@
   DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathARM);
 };
 
+class SuspendCheckSlowPathARM : public SlowPathCode {
+ public:
+  explicit SuspendCheckSlowPathARM(HSuspendCheck* instruction)
+      : instruction_(instruction) {}
+
+  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    __ Bind(GetEntryLabel());
+    int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
+    __ ldr(LR, Address(TR, offset));
+    __ blx(LR);
+    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    __ b(GetReturnLabel());
+  }
+
+  Label* GetReturnLabel() { return &return_label_; }
+
+ private:
+  HSuspendCheck* const instruction_;
+  Label return_label_;
+
+  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
+};
+
 class BoundsCheckSlowPathARM : public SlowPathCode {
  public:
   explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction,
@@ -795,6 +819,47 @@
 }
 
 void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+  __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+  Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+      invoke->GetIndexInDexCache() * kArmWordSize;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+  // temp = temp[index_in_cache]
+  __ ldr(temp, Address(temp, index_in_cache));
+  // LR = temp[offset_of_quick_compiled_code]
+  __ ldr(LR, Address(temp,
+                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+  // LR()
+  __ blx(LR);
+
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
   locations->AddTemp(ArmCoreLocation(R0));
@@ -829,37 +894,30 @@
   }
 }
 
-void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
-  __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
-}
 
-void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
-  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
-  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
-      invoke->GetIndexInDexCache() * kArmWordSize;
-
-  // TODO: Implement all kinds of calls:
-  // 1) boot -> boot
-  // 2) app -> boot
-  // 3) app -> app
-  //
-  // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-  // temp = temp[index_in_cache]
-  __ ldr(temp, Address(temp, index_in_cache));
-  // LR = temp[offset_of_quick_compiled_code]
-  __ ldr(LR, Address(temp,
-                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
-  // LR()
+  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+          invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ ldr(temp, Address(SP, receiver.GetStackIndex()));
+    __ ldr(temp, Address(temp, class_offset));
+  } else {
+    __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+  }
+  uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+  // temp = temp->GetMethodAt(method_offset);
+  __ ldr(temp, Address(temp, method_offset));
+  // LR = temp->GetEntryPoint();
+  __ ldr(LR, Address(temp, entry_point));
+  // LR();
   __ blx(LR);
-
-  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
   DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
 }
 
 void LocationsBuilderARM::VisitAdd(HAdd* add) {
@@ -1494,6 +1552,21 @@
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
+void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
+  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
+  SuspendCheckSlowPathARM* slow_path =
+      new (GetGraph()->GetArena()) SuspendCheckSlowPathARM(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  __ AddConstant(R4, R4, -1);
+  __ cmp(R4, ShifterOperand(0));
+  __ b(slow_path->GetEntryLabel(), LE);
+  __ Bind(slow_path->GetReturnLabel());
+}
+
 ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
   return codegen_->GetAssembler();
 }
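
The VisitInvokeVirtual sequences above boil down to three dependent loads followed by an indirect call. A schematic C++ rendering with a hypothetical, much simplified object layout (the real embedded-vtable and entry-point offsets come from mirror::Class and mirror::ArtMethod):

    #include <cstddef>

    struct ArtMethodSketch { void (*quick_entry_point)(); };
    struct ClassSketch { ArtMethodSketch* embedded_vtable[16]; };
    struct ObjectSketch { ClassSketch* klass; };

    static void InvokeVirtualSketch(ObjectSketch* receiver, size_t vtable_index) {
      ClassSketch* klass = receiver->klass;                            // temp = object->GetClass()
      ArtMethodSketch* method = klass->embedded_vtable[vtable_index];  // temp = GetMethodAt(method_offset)
      method->quick_entry_point();                                     // LR = entry point; blx LR
    }

    static void Hello() {}

    int main() {
      ArtMethodSketch method{&Hello};
      ClassSketch klass{};
      klass.embedded_vtable[3] = &method;
      ObjectSketch obj{&klass};
      InvokeVirtualSketch(&obj, 3);  // dispatches to Hello through the embedded "vtable"
      return 0;
    }
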
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 610625c..2480960 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -93,6 +93,8 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void HandleInvoke(HInvoke* invoke);
+
  private:
   CodeGeneratorARM* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -124,7 +126,7 @@
 class CodeGeneratorARM : public CodeGenerator {
  public:
   explicit CodeGeneratorARM(HGraph* graph);
-  virtual ~CodeGeneratorARM() { }
+  virtual ~CodeGeneratorARM() {}
 
   virtual void GenerateFrameEntry() OVERRIDE;
   virtual void GenerateFrameExit() OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6602d3f..3383cb2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -20,6 +20,7 @@
 #include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
+#include "mirror/class.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/stack_checks.h"
@@ -114,6 +115,27 @@
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
 };
 
+class SuspendCheckSlowPathX86 : public SlowPathCode {
+ public:
+  explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction)
+      : instruction_(instruction) {}
+
+  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    __ Bind(GetEntryLabel());
+    __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend)));
+    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    __ jmp(GetReturnLabel());
+  }
+
+  Label* GetReturnLabel() { return &return_label_; }
+
+ private:
+  HSuspendCheck* const instruction_;
+  Label return_label_;
+
+  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
+};
+
 #undef __
 #define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
 
@@ -742,6 +764,40 @@
 }
 
 void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+  Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+      invoke->GetIndexInDexCache() * kX86WordSize;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+  // temp = temp[index_in_cache]
+  __ movl(temp, Address(temp, index_in_cache));
+  // (temp + offset_of_quick_compiled_code)()
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
   locations->AddTemp(X86CpuLocation(EAX));
@@ -778,26 +834,23 @@
   invoke->SetLocations(locations);
 }
 
-void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
-  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
-  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
-      invoke->GetIndexInDexCache() * kX86WordSize;
-
-  // TODO: Implement all kinds of calls:
-  // 1) boot -> boot
-  // 2) app -> boot
-  // 3) app -> app
-  //
-  // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-  // temp = temp[index_in_cache]
-  __ movl(temp, Address(temp, index_in_cache));
-  // (temp + offset_of_quick_compiled_code)()
+  uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+          invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+    __ movl(temp, Address(temp, class_offset));
+  } else {
+    __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+  }
+  // temp = temp->GetMethodAt(method_offset);
+  __ movl(temp, Address(temp, method_offset));
+  // call temp->GetEntryPoint();
   __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
 
   DCHECK(!codegen_->IsLeafMethod());
@@ -1483,6 +1536,20 @@
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
+void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
+  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
+  SuspendCheckSlowPathX86* slow_path =
+      new (GetGraph()->GetArena()) SuspendCheckSlowPathX86(instruction);
+  codegen_->AddSlowPath(slow_path);
+  __ fs()->cmpl(Address::Absolute(
+      Thread::ThreadFlagsOffset<kX86WordSize>().Int32Value()), Immediate(0));
+  __ j(kNotEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetReturnLabel());
+}
+
 X86Assembler* ParallelMoveResolverX86::GetAssembler() const {
   return codegen_->GetAssembler();
 }
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c50204..f1be0ad 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -94,6 +94,8 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void HandleInvoke(HInvoke* invoke);
+
  private:
   CodeGeneratorX86* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b2d81e3..ca03af8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -20,6 +20,7 @@
 #include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
+#include "mirror/class.h"
 #include "mirror/object_reference.h"
 #include "thread.h"
 #include "utils/assembler.h"
@@ -95,6 +96,27 @@
   DISALLOW_COPY_AND_ASSIGN(StackOverflowCheckSlowPathX86_64);
 };
 
+class SuspendCheckSlowPathX86_64 : public SlowPathCode {
+ public:
+  explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction)
+      : instruction_(instruction) {}
+
+  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    __ Bind(GetEntryLabel());
+    __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true));
+    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+    __ jmp(GetReturnLabel());
+  }
+
+  Label* GetReturnLabel() { return &return_label_; }
+
+ private:
+  HSuspendCheck* const instruction_;
+  Label return_label_;
+
+  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86_64);
+};
+
 class BoundsCheckSlowPathX86_64 : public SlowPathCode {
  public:
   explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
@@ -688,12 +710,46 @@
 }
 
 void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+  CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
+      invoke->GetIndexInDexCache() * heap_reference_size;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+  // temp = temp[index_in_cache]
+  __ movl(temp, Address(temp, index_in_cache));
+  // (temp + offset_of_quick_compiled_code)()
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
   locations->AddTemp(X86_64CpuLocation(RDI));
 
   InvokeDexCallingConventionVisitor calling_convention_visitor;
-  for (size_t i = 0; i < invoke->InputCount(); ++i) {
+  for (size_t i = 0; i < invoke->InputCount(); i++) {
     HInstruction* input = invoke->InputAt(i);
     locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
   }
@@ -719,26 +775,23 @@
   }
 }
 
-void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
   CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
-  uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
-  size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
-      invoke->GetIndexInDexCache() * heap_reference_size;
-
-  // TODO: Implement all kinds of calls:
-  // 1) boot -> boot
-  // 2) app -> boot
-  // 3) app -> app
-  //
-  // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
-  // temp = method;
-  LoadCurrentMethod(temp);
-  // temp = temp->dex_cache_resolved_methods_;
-  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
-  // temp = temp[index_in_cache]
-  __ movl(temp, Address(temp, index_in_cache));
-  // (temp + offset_of_quick_compiled_code)()
+  size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+          invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+    __ movq(temp, Address(temp, class_offset));
+  } else {
+    __ movq(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+  }
+  // temp = temp->GetMethodAt(method_offset);
+  __ movl(temp, Address(temp, method_offset));
+  // call temp->GetEntryPoint();
   __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
 
   DCHECK(!codegen_->IsLeafMethod());
@@ -1329,6 +1382,20 @@
   codegen_->GetMoveResolver()->EmitNativeCode(instruction);
 }
 
+void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
+  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
+  SuspendCheckSlowPathX86_64* slow_path =
+      new (GetGraph()->GetArena()) SuspendCheckSlowPathX86_64(instruction);
+  codegen_->AddSlowPath(slow_path);
+  __ gs()->cmpl(Address::Absolute(
+      Thread::ThreadFlagsOffset<kX86_64WordSize>().Int32Value(), true), Immediate(0));
+  __ j(kNotEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetReturnLabel());
+}
+
 X86_64Assembler* ParallelMoveResolverX86_64::GetAssembler() const {
   return codegen_->GetAssembler();
 }
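
The x86-64 fast path polls the thread's flags word and enters the slow path only when it is nonzero. A rough model of that shape, with the flag protocol and the pTestSuspend behavior both simplified assumptions:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static std::atomic<uint32_t> thread_flags{0};  // stand-in for the gs-relative flags word

    static void TestSuspendStub() { std::puts("slow path: pTestSuspend"); }

    static void SuspendCheckSketch() {
      if (thread_flags.load(std::memory_order_relaxed) != 0) {  // cmpl ...; jne slow_path
        TestSuspendStub();  // the slow path records the PC and jumps back here
      }
    }

    int main() {
      SuspendCheckSketch();   // flags clear: fast path falls straight through
      thread_flags.store(1);
      SuspendCheckSketch();   // flags set: the slow path runs once
      return 0;
    }
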
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 44552ea..78b60fe 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -91,6 +91,8 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void HandleInvoke(HInvoke* invoke);
+
  private:
   CodeGeneratorX86_64* const codegen_;
   InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d7ac10d..7161eed 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -15,7 +15,9 @@
  */
 
 #include "builder.h"
-#include "code_generator.h"
+#include "code_generator_arm.h"
+#include "code_generator_x86.h"
+#include "code_generator_x86_64.h"
 #include "common_compiler_test.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
@@ -47,7 +49,6 @@
   DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
 };
 
-#if defined(__i386__) || defined(__arm__) || defined(__x86_64__)
 static void Run(const InternalCodeAllocator& allocator,
                 const CodeGenerator& codegen,
                 bool has_result,
@@ -64,7 +65,6 @@
     CHECK_EQ(result, expected);
   }
 }
-#endif
 
 static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
   ArenaPool pool;
@@ -72,28 +72,30 @@
   HGraphBuilder builder(&arena);
   const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
   HGraph* graph = builder.BuildGraph(*item);
   ASSERT_NE(graph, nullptr);
+  // Remove suspend checks; they cannot be executed in this context.
+  RemoveSuspendChecks(graph);
   InternalCodeAllocator allocator;
 
-  CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, kX86);
+  x86::CodeGeneratorX86 codegenX86(graph);
   // We avoid doing a stack overflow check that requires the runtime being setup,
   // by making sure the compiler knows the methods we are running are leaf methods.
-  codegen->CompileBaseline(&allocator, true);
-#if defined(__i386__)
-  Run(allocator, *codegen, has_result, expected);
-#endif
+  codegenX86.CompileBaseline(&allocator, true);
+  if (kRuntimeISA == kX86) {
+    Run(allocator, codegenX86, has_result, expected);
+  }
 
-  codegen = CodeGenerator::Create(&arena, graph, kArm);
-  codegen->CompileBaseline(&allocator, true);
-#if defined(__arm__)
-  Run(allocator, *codegen, has_result, expected);
-#endif
+  arm::CodeGeneratorARM codegenARM(graph);
+  codegenARM.CompileBaseline(&allocator, true);
+  if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
+    Run(allocator, codegenARM, has_result, expected);
+  }
 
-  codegen = CodeGenerator::Create(&arena, graph, kX86_64);
-  codegen->CompileBaseline(&allocator, true);
-#if defined(__x86_64__)
-  Run(allocator, *codegen, has_result, expected);
-#endif
+  x86_64::CodeGeneratorX86_64 codegenX86_64(graph);
+  codegenX86_64.CompileBaseline(&allocator, true);
+  if (kRuntimeISA == kX86_64) {
+    Run(allocator, codegenX86_64, has_result, expected);
+  }
 }
 
 TEST(CodegenTest, ReturnVoid) {
diff --git a/compiler/optimizing/constant_propagation.cc b/compiler/optimizing/constant_propagation.cc
new file mode 100644
index 0000000..d675164
--- /dev/null
+++ b/compiler/optimizing/constant_propagation.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_propagation.h"
+
+namespace art {
+
+void ConstantPropagation::Run() {
+  // Process basic blocks in reverse post-order in the dominator tree,
+  // so that an instruction turned into a constant, used as input of
+  // another instruction, may possibly be used to turn that second
+  // instruction into a constant as well.
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    HBasicBlock* block = it.Current();
+    // Traverse this block's instructions in (forward) order and
+    // replace the ones that can be statically evaluated by a
+    // compile-time counterpart.
+    for (HInstructionIterator it(block->GetInstructions());
+         !it.Done(); it.Advance()) {
+      HInstruction* inst = it.Current();
+      // Constant folding: replace `c <- a op b' with a compile-time
+      // evaluation of `a op b' if `a' and `b' are constant.
+      if (inst->IsBinaryOperation()) {
+        HConstant* constant =
+          inst->AsBinaryOperation()->TryStaticEvaluation(graph_->GetArena());
+        if (constant != nullptr) {
+          inst->GetBlock()->ReplaceAndRemoveInstructionWith(inst, constant);
+        }
+      }
+    }
+  }
+}
+
+}  // namespace art
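
The pass visits blocks in reverse post-order and asks each binary operation whether both of its inputs are already constants. A self-contained sketch of the folding step itself, on a deliberately simplified IR rather than ART's HInstruction:

    #include <cstdio>
    #include <memory>

    struct Inst {
      enum Kind { kConst, kAdd } kind;
      int value = 0;          // for kConst
      Inst* lhs = nullptr;    // for kAdd
      Inst* rhs = nullptr;
    };

    // Returns a constant replacement if inst can be statically evaluated,
    // mirroring the role of TryStaticEvaluation above; otherwise nullptr.
    static std::unique_ptr<Inst> TryFold(const Inst& inst) {
      if (inst.kind == Inst::kAdd &&
          inst.lhs->kind == Inst::kConst && inst.rhs->kind == Inst::kConst) {
        return std::unique_ptr<Inst>(new Inst{Inst::kConst, inst.lhs->value + inst.rhs->value});
      }
      return nullptr;
    }

    int main() {
      Inst a{Inst::kConst, 1}, b{Inst::kConst, 2};
      Inst add{Inst::kAdd, 0, &a, &b};
      if (auto folded = TryFold(add)) {
        std::printf("folded to %d\n", folded->value);  // prints "folded to 3"
      }
      return 0;
    }
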
diff --git a/compiler/optimizing/constant_propagation.h b/compiler/optimizing/constant_propagation.h
new file mode 100644
index 0000000..0729881
--- /dev/null
+++ b/compiler/optimizing/constant_propagation.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_PROPAGATION_H_
+#define ART_COMPILER_OPTIMIZING_CONSTANT_PROPAGATION_H_
+
+#include "nodes.h"
+
+namespace art {
+
+/**
+ * Optimization pass performing a simple constant propagation on the
+ * SSA form.
+ */
+class ConstantPropagation : public ValueObject {
+ public:
+  explicit ConstantPropagation(HGraph* graph)
+    : graph_(graph) {}
+
+  void Run();
+
+ private:
+  HGraph* const graph_;
+
+  DISALLOW_COPY_AND_ASSIGN(ConstantPropagation);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_CONSTANT_PROPAGATION_H_
diff --git a/compiler/optimizing/constant_propagation_test.cc b/compiler/optimizing/constant_propagation_test.cc
new file mode 100644
index 0000000..5c8c709
--- /dev/null
+++ b/compiler/optimizing/constant_propagation_test.cc
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_propagation.h"
+#include "dead_code_elimination.h"
+#include "pretty_printer.h"
+#include "graph_checker.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+static void TestCode(const uint16_t* data,
+                     const std::string& expected_before,
+                     const std::string& expected_after_cp,
+                     const std::string& expected_after_dce) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = CreateCFG(&allocator, data);
+  ASSERT_NE(graph, nullptr);
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+
+  StringPrettyPrinter printer_before(graph);
+  printer_before.VisitInsertionOrder();
+  std::string actual_before = printer_before.str();
+  ASSERT_EQ(expected_before, actual_before);
+
+  ConstantPropagation(graph).Run();
+
+  StringPrettyPrinter printer_after_cp(graph);
+  printer_after_cp.VisitInsertionOrder();
+  std::string actual_after_cp = printer_after_cp.str();
+  ASSERT_EQ(expected_after_cp, actual_after_cp);
+
+  DeadCodeElimination(graph).Run();
+
+  StringPrettyPrinter printer_after_dce(graph);
+  printer_after_dce.VisitInsertionOrder();
+  std::string actual_after_dce = printer_after_dce.str();
+  ASSERT_EQ(expected_after_dce, actual_after_dce);
+
+  SSAChecker ssa_checker(&allocator, graph);
+  ssa_checker.VisitInsertionOrder();
+  ASSERT_TRUE(ssa_checker.IsValid());
+}
+
+
+/**
+ * Tiny three-register program exercising int constant folding on addition.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v0 <- 1                  0.      const/4 v0, #+1
+ *     v1 <- 2                  1.      const/4 v1, #+2
+ *     v2 <- v0 + v1            2.      add-int v2, v0, v1
+ *     return v2                4.      return v2
+ */
+TEST(ConstantPropagation, IntConstantFoldingOnAddition1) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 << 8 | 1 << 12,
+    Instruction::CONST_4 | 1 << 8 | 2 << 12,
+    Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::RETURN | 2 << 8);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [9]\n"
+    "  5: IntConstant [9]\n"
+    "  14: SuspendCheck\n"
+    "  15: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 2\n"
+    "  9: Add(3, 5) [12]\n"
+    "  12: Return(9)\n"
+    "BasicBlock 2, pred: 1\n"
+    "  13: Exit\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  3: IntConstant [9]\n", "  3: IntConstant\n" },
+    { "  5: IntConstant [9]\n", "  5: IntConstant\n" },
+    { "  9: Add(3, 5) [12]\n",  "  16: IntConstant [12]\n" },
+    { "  12: Return(9)\n",      "  12: Return(16)\n" }
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  3: IntConstant\n", removed },
+    { "  5: IntConstant\n", removed }
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+/**
+ * Small three-register program exercising int constant folding on addition.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v0 <- 1                  0.      const/4 v0, #+1
+ *     v1 <- 2                  1.      const/4 v1, #+2
+ *     v0 <- v0 + v1            2.      add-int/2addr v0, v1
+ *     v1 <- 3                  3.      const/4 v1, #+3
+ *     v2 <- 4                  4.      const/4 v2, #+4
+ *     v1 <- v1 + v2            5.      add-int/2addr v1, v2
+ *     v2 <- v0 + v1            6.      add-int v2, v0, v1
+ *     return v2                8.      return v2
+ */
+TEST(ConstantPropagation, IntConstantFoldingOnAddition2) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 << 8 | 1 << 12,
+    Instruction::CONST_4 | 1 << 8 | 2 << 12,
+    Instruction::ADD_INT_2ADDR | 0 << 8 | 1 << 12,
+    Instruction::CONST_4 | 1 << 8 | 3 << 12,
+    Instruction::CONST_4 | 2 << 8 | 4 << 12,
+    Instruction::ADD_INT_2ADDR | 1 << 8 | 2 << 12,
+    Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::RETURN | 2 << 8);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [9]\n"
+    "  5: IntConstant [9]\n"
+    "  11: IntConstant [17]\n"
+    "  13: IntConstant [17]\n"
+    "  26: SuspendCheck\n"
+    "  27: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 2\n"
+    "  9: Add(3, 5) [21]\n"
+    "  17: Add(11, 13) [21]\n"
+    "  21: Add(9, 17) [24]\n"
+    "  24: Return(21)\n"
+    "BasicBlock 2, pred: 1\n"
+    "  25: Exit\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  3: IntConstant [9]\n",   "  3: IntConstant\n" },
+    { "  5: IntConstant [9]\n",   "  5: IntConstant\n" },
+    { "  11: IntConstant [17]\n", "  11: IntConstant\n" },
+    { "  13: IntConstant [17]\n", "  13: IntConstant\n" },
+    { "  9: Add(3, 5) [21]\n",    "  28: IntConstant\n" },
+    { "  17: Add(11, 13) [21]\n", "  29: IntConstant\n" },
+    { "  21: Add(9, 17) [24]\n",  "  30: IntConstant [24]\n" },
+    { "  24: Return(21)\n",       "  24: Return(30)\n" }
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  3: IntConstant\n",  removed },
+    { "  5: IntConstant\n",  removed },
+    { "  11: IntConstant\n", removed },
+    { "  13: IntConstant\n", removed },
+    { "  28: IntConstant\n", removed },
+    { "  29: IntConstant\n", removed }
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+/**
+ * Tiny three-register program exercising int constant folding on subtraction.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v0 <- 3                  0.      const/4 v0, #+3
+ *     v1 <- 2                  1.      const/4 v1, #+2
+ *     v2 <- v0 - v1            2.      sub-int v2, v0, v1
+ *     return v2                4.      return v2
+ */
+TEST(ConstantPropagation, IntConstantFoldingOnSubtraction) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 << 8 | 3 << 12,
+    Instruction::CONST_4 | 1 << 8 | 2 << 12,
+    Instruction::SUB_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::RETURN | 2 << 8);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [9]\n"
+    "  5: IntConstant [9]\n"
+    "  14: SuspendCheck\n"
+    "  15: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 2\n"
+    "  9: Sub(3, 5) [12]\n"
+    "  12: Return(9)\n"
+    "BasicBlock 2, pred: 1\n"
+    "  13: Exit\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  3: IntConstant [9]\n", "  3: IntConstant\n" },
+    { "  5: IntConstant [9]\n", "  5: IntConstant\n" },
+    { "  9: Sub(3, 5) [12]\n",  "  16: IntConstant [12]\n" },
+    { "  12: Return(9)\n",      "  12: Return(16)\n" }
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  3: IntConstant\n", removed },
+    { "  5: IntConstant\n", removed }
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+#define SIX_REGISTERS_CODE_ITEM(...)                                     \
+    { 6, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+
+/**
+ * Tiny three-register-pair program exercising long constant folding
+ * on addition.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     (v0, v1) <- 1            0.      const-wide/16 v0, #+1
+ *     (v2, v3) <- 2            2.      const-wide/16 v2, #+2
+ *     (v4, v5) <-
+ *       (v0, v1) + (v2, v3)    4.      add-long v4, v0, v2
+ *     return (v4, v5)          6.      return-wide v4
+ */
+TEST(ConstantPropagation, LongConstantFoldingOnAddition) {
+  const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+    Instruction::CONST_WIDE_16 | 0 << 8, 1,
+    Instruction::CONST_WIDE_16 | 2 << 8, 2,
+    Instruction::ADD_LONG | 4 << 8, 0 | 2 << 8,
+    Instruction::RETURN_WIDE | 4 << 8);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  6: LongConstant [12]\n"
+    "  8: LongConstant [12]\n"
+    "  17: SuspendCheck\n"
+    "  18: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 2\n"
+    "  12: Add(6, 8) [15]\n"
+    "  15: Return(12)\n"
+    "BasicBlock 2, pred: 1\n"
+    "  16: Exit\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  6: LongConstant [12]\n", "  6: LongConstant\n" },
+    { "  8: LongConstant [12]\n", "  8: LongConstant\n" },
+    { "  12: Add(6, 8) [15]\n",   "  19: LongConstant [15]\n" },
+    { "  15: Return(12)\n",       "  15: Return(19)\n" }
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  6: LongConstant\n", removed },
+    { "  8: LongConstant\n", removed }
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+/**
+ * Tiny three-register-pair program exercising long constant folding
+ * on subtraction.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     (v0, v1) <- 3            0.      const-wide/16 v0, #+3
+ *     (v2, v3) <- 2            2.      const-wide/16 v2, #+2
+ *     (v4, v5) <-
+ *       (v0, v1) - (v2, v3)    4.      sub-long v4, v0, v2
+ *     return (v4, v5)          6.      return-wide v4
+ */
+TEST(ConstantPropagation, LongConstantFoldingOnSubtraction) {
+  const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+    Instruction::CONST_WIDE_16 | 0 << 8, 3,
+    Instruction::CONST_WIDE_16 | 2 << 8, 2,
+    Instruction::SUB_LONG | 4 << 8, 0 | 2 << 8,
+    Instruction::RETURN_WIDE | 4 << 8);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  6: LongConstant [12]\n"
+    "  8: LongConstant [12]\n"
+    "  17: SuspendCheck\n"
+    "  18: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 2\n"
+    "  12: Sub(6, 8) [15]\n"
+    "  15: Return(12)\n"
+    "BasicBlock 2, pred: 1\n"
+    "  16: Exit\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  6: LongConstant [12]\n", "  6: LongConstant\n" },
+    { "  8: LongConstant [12]\n", "  8: LongConstant\n" },
+    { "  12: Sub(6, 8) [15]\n",   "  19: LongConstant [15]\n" },
+    { "  15: Return(12)\n",       "  15: Return(19)\n" }
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  6: LongConstant\n", removed },
+    { "  8: LongConstant\n", removed }
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+/**
+ * Three-register program with jumps leading to the creation of many
+ * blocks.
+ *
+ * The intent of this test is to ensure that all constant expressions
+ * are actually evaluated at compile-time, thanks to the reverse
+ * (forward) post-order traversal of the dominator tree.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v0 <- 0                   0.     const/4 v0, #+0
+ *     v1 <- 1                   1.     const/4 v1, #+1
+ *     v2 <- v0 + v1             2.     add-int v2, v0, v1
+ *     goto L2                   4.     goto +4
+ * L1: v1 <- v0 + 3              5.     add-int/lit16 v1, v0, #+3
+ *     goto L3                   7.     goto +4
+ * L2: v0 <- v2 + 2              8.     add-int/lit16 v0, v2, #+2
+ *     goto L1                  10.     goto +(-5)
+ * L3: v2 <- v1 + 4             11.     add-int/lit16 v2, v1, #+4
+ *     return v2                13.     return v2
+ */
+TEST(ConstantPropagation, IntConstantFoldingAndJumps) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 << 8 | 0 << 12,
+    Instruction::CONST_4 | 1 << 8 | 1 << 12,
+    Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::GOTO | 4 << 8,
+    Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 3,
+    Instruction::GOTO | 4 << 8,
+    Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 2,
+    static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+    Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 4,
+    Instruction::RETURN | 2 << 8);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [9]\n"
+    "  5: IntConstant [9]\n"
+    "  13: IntConstant [14]\n"
+    "  18: IntConstant [19]\n"
+    "  24: IntConstant [25]\n"
+    "  30: SuspendCheck\n"
+    "  31: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 3\n"
+    "  9: Add(3, 5) [19]\n"
+    "  11: Goto 3\n"
+    "BasicBlock 2, pred: 3, succ: 4\n"
+    "  14: Add(19, 13) [25]\n"
+    "  16: Goto 4\n"
+    "BasicBlock 3, pred: 1, succ: 2\n"
+    "  19: Add(9, 18) [14]\n"
+    "  21: SuspendCheck\n"
+    "  22: Goto 2\n"
+    "BasicBlock 4, pred: 2, succ: 5\n"
+    "  25: Add(14, 24) [28]\n"
+    "  28: Return(25)\n"
+    "BasicBlock 5, pred: 4\n"
+    "  29: Exit\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  3: IntConstant [9]\n",   "  3: IntConstant\n" },
+    { "  5: IntConstant [9]\n",   "  5: IntConstant []\n" },
+    { "  13: IntConstant [14]\n", "  13: IntConstant\n" },
+    { "  18: IntConstant [19]\n", "  18: IntConstant\n" },
+    { "  24: IntConstant [25]\n", "  24: IntConstant\n" },
+    { "  9: Add(3, 5) [19]\n",    "  32: IntConstant []\n" },
+    { "  14: Add(19, 13) [25]\n", "  34: IntConstant\n" },
+    { "  19: Add(9, 18) [14]\n",  "  33: IntConstant []\n" },
+    { "  25: Add(14, 24) [28]\n", "  35: IntConstant [28]\n" },
+    { "  28: Return(25)\n",       "  28: Return(35)\n"}
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  3: IntConstant\n",     removed },
+    { "  13: IntConstant\n",    removed },
+    { "  18: IntConstant\n",    removed },
+    { "  24: IntConstant\n",    removed },
+    { "  34: IntConstant\n",    removed },
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+
+/**
+ * Three-register program with a constant (static) condition.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v1 <- 1                  0.      const/4 v1, #+1
+ *     v0 <- 0                  1.      const/4 v0, #+0
+ *     if v1 >= 0 goto L1       2.      if-gez v1, +3
+ *     v0 <- v1                 4.      move v0, v1
+ * L1: v2 <- v0 + v1            5.      add-int v2, v0, v1
+ *     return-void              7.      return
+ */
+TEST(ConstantPropagation, ConstantCondition) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 1 << 8 | 1 << 12,
+    Instruction::CONST_4 | 0 << 8 | 0 << 12,
+    Instruction::IF_GEZ | 1 << 8, 3,
+    Instruction::MOVE | 0 << 8 | 1 << 12,
+    Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::RETURN_VOID);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [15, 22, 8]\n"
+    "  5: IntConstant [22, 8]\n"
+    "  19: SuspendCheck\n"
+    "  20: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 5, 2\n"
+    "  8: GreaterThanOrEqual(3, 5) [9]\n"
+    "  9: If(8)\n"
+    "BasicBlock 2, pred: 1, succ: 3\n"
+    "  12: Goto 3\n"
+    "BasicBlock 3, pred: 2, 5, succ: 4\n"
+    "  22: Phi(3, 5) [15]\n"
+    "  15: Add(22, 3)\n"
+    "  17: ReturnVoid\n"
+    "BasicBlock 4, pred: 3\n"
+    "  18: Exit\n"
+    "BasicBlock 5, pred: 1, succ: 3\n"
+    "  21: Goto 3\n";
+
+  // Expected difference after constant propagation.
+  diff_t expected_cp_diff = {
+    { "  3: IntConstant [15, 22, 8]\n",      "  3: IntConstant [15, 22]\n" },
+    { "  5: IntConstant [22, 8]\n",          "  5: IntConstant [22]\n" },
+    { "  8: GreaterThanOrEqual(3, 5) [9]\n", "  23: IntConstant [9]\n" },
+    { "  9: If(8)\n",                        "  9: If(23)\n" }
+  };
+  std::string expected_after_cp = Patch(expected_before, expected_cp_diff);
+
+  // Expected difference after dead code elimination.
+  diff_t expected_dce_diff = {
+    { "  3: IntConstant [15, 22]\n", "  3: IntConstant [22]\n" },
+    { "  22: Phi(3, 5) [15]\n",      "  22: Phi(3, 5)\n" },
+    { "  15: Add(22, 3)\n",          removed }
+  };
+  std::string expected_after_dce = Patch(expected_after_cp, expected_dce_diff);
+
+  TestCode(data, expected_before, expected_after_cp, expected_after_dce);
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
new file mode 100644
index 0000000..2f881d1
--- /dev/null
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dead_code_elimination.h"
+
+#include "base/bit_vector-inl.h"
+
+namespace art {
+
+void DeadCodeElimination::Run() {
+  // Process basic blocks in post-order in the dominator tree, so that
+  // a dead instruction depending on another dead instruction is
+  // removed.
+  for (HPostOrderIterator b(*graph_); !b.Done(); b.Advance()) {
+    HBasicBlock* block = b.Current();
+    // Traverse this block's instructions in backward order and remove
+    // the unused ones.
+    HBackwardInstructionIterator i(block->GetInstructions());
+    // Skip the first iteration, as the last instruction of a block is
+    // a branching instruction.
+    DCHECK(i.Current()->IsControlFlow());
+    for (i.Advance(); !i.Done(); i.Advance()) {
+      HInstruction* inst = i.Current();
+      DCHECK(!inst->IsControlFlow());
+      if (!inst->HasSideEffects() && !inst->HasUses()) {
+        block->RemoveInstruction(inst);
+      }
+    }
+  }
+}
+
+}  // namespace art
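
Sweeping instructions backward means that deleting a dead use also lowers the use counts of its inputs, so a producer can become dead within the same pass. A small sketch on a simplified IR (the real pass additionally skips each block's terminating control-flow instruction, as noted above):

    #include <cstdio>
    #include <vector>

    struct Inst {
      const char* name;
      int inputs[2] = {-1, -1};  // indexes of producing instructions, or -1
      int uses = 0;
      bool has_side_effects = false;
      bool removed = false;
    };

    static void EliminateDeadCode(std::vector<Inst>& block) {
      for (int i = static_cast<int>(block.size()) - 1; i >= 0; --i) {
        Inst& inst = block[i];
        if (!inst.removed && !inst.has_side_effects && inst.uses == 0) {
          inst.removed = true;
          std::printf("removed: %s\n", inst.name);
          for (int input : inst.inputs) {
            if (input >= 0) {
              --block[input].uses;  // the producer may now be dead too
            }
          }
        }
      }
    }

    int main() {
      // c0 = const 1; c1 = const 2; add = c0 + c1 (never used) -> all three die.
      std::vector<Inst> block = {
        {"c0 = const 1", {-1, -1}, 1},
        {"c1 = const 2", {-1, -1}, 1},
        {"add = c0 + c1", {0, 1}, 0},
      };
      EliminateDeadCode(block);
      return 0;
    }
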
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
new file mode 100644
index 0000000..48739be
--- /dev/null
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_DEAD_CODE_ELIMINATION_H_
+#define ART_COMPILER_OPTIMIZING_DEAD_CODE_ELIMINATION_H_
+
+#include "nodes.h"
+
+namespace art {
+
+/**
+ * Optimization pass performing dead code elimination (removal of
+ * unused variables/instructions) on the SSA form.
+ */
+class DeadCodeElimination : public ValueObject {
+ public:
+  explicit DeadCodeElimination(HGraph* graph)
+      : graph_(graph) {}
+
+  void Run();
+
+ private:
+  HGraph* const graph_;
+
+  DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_DEAD_CODE_ELIMINATION_H_
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
new file mode 100644
index 0000000..245bcb2
--- /dev/null
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dead_code_elimination.h"
+#include "pretty_printer.h"
+#include "graph_checker.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+static void TestCode(const uint16_t* data,
+                     const std::string& expected_before,
+                     const std::string& expected_after) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = CreateCFG(&allocator, data);
+  ASSERT_NE(graph, nullptr);
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+
+  StringPrettyPrinter printer_before(graph);
+  printer_before.VisitInsertionOrder();
+  std::string actual_before = printer_before.str();
+  ASSERT_EQ(actual_before, expected_before);
+
+  DeadCodeElimination(graph).Run();
+
+  StringPrettyPrinter printer_after(graph);
+  printer_after.VisitInsertionOrder();
+  std::string actual_after = printer_after.str();
+  ASSERT_EQ(actual_after, expected_after);
+
+  SSAChecker ssa_checker(&allocator, graph);
+  ssa_checker.VisitInsertionOrder();
+  ASSERT_TRUE(ssa_checker.IsValid());
+}
+
+
+/**
+ * Small three-register program.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v1 <- 1                  0.      const/4 v1, #+1
+ *     v0 <- 0                  1.      const/4 v0, #+0
+ *     if v1 >= 0 goto L1       2.      if-gez v1, +3
+ *     v0 <- v1                 4.      move v0, v1
+ * L1: v2 <- v0 + v1            5.      add-int v2, v0, v1
+ *     return-void              7.      return
+ */
+TEST(DeadCodeElimination, AdditionAndConditionalJump) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 1 << 8 | 1 << 12,
+    Instruction::CONST_4 | 0 << 8 | 0 << 12,
+    Instruction::IF_GEZ | 1 << 8, 3,
+    Instruction::MOVE | 0 << 8 | 1 << 12,
+    Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::RETURN_VOID);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [15, 22, 8]\n"
+    "  5: IntConstant [22, 8]\n"
+    "  19: SuspendCheck\n"
+    "  20: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 5, 2\n"
+    "  8: GreaterThanOrEqual(3, 5) [9]\n"
+    "  9: If(8)\n"
+    "BasicBlock 2, pred: 1, succ: 3\n"
+    "  12: Goto 3\n"
+    "BasicBlock 3, pred: 2, 5, succ: 4\n"
+    "  22: Phi(3, 5) [15]\n"
+    "  15: Add(22, 3)\n"
+    "  17: ReturnVoid\n"
+    "BasicBlock 4, pred: 3\n"
+    "  18: Exit\n"
+    "BasicBlock 5, pred: 1, succ: 3\n"
+    "  21: Goto 3\n";
+
+  diff_t expected_diff = {
+    { "  3: IntConstant [15, 22, 8]\n", "  3: IntConstant [22, 8]\n" },
+    { "  22: Phi(3, 5) [15]\n",         "  22: Phi(3, 5)\n" },
+    { "  15: Add(22, 3)\n",             removed }
+  };
+  std::string expected_after = Patch(expected_before, expected_diff);
+
+  TestCode(data, expected_before, expected_after);
+}
+
+/**
+ * Three-register program with jumps leading to the creation of many
+ * blocks.
+ *
+ * The intent of this test is to ensure that all dead instructions are
+ * actually pruned at compile-time, thanks to the (backward)
+ * post-order traversal of the dominator tree.
+ *
+ *                              16-bit
+ *                              offset
+ *                              ------
+ *     v0 <- 0                   0.     const/4 v0, #+0
+ *     v1 <- 1                   1.     const/4 v1, #+1
+ *     v2 <- v0 + v1             2.     add-int v2, v0, v1
+ *     goto L2                   4.     goto +4
+ * L1: v1 <- v0 + 3              5.     add-int/lit16 v1, v0, #+3
+ *     goto L3                   7.     goto +4
+ * L2: v0 <- v2 + 2              8.     add-int/lit16 v0, v2, #+2
+ *     goto L1                  10.     goto +(-5)
+ * L3: v2 <- v1 + 4             11.     add-int/lit16 v2, v1, #+4
+ *     return                   13.     return-void
+ */
+TEST(DeadCodeElimination, AdditionsAndUnconditionalJumps) {
+  const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 << 8 | 0 << 12,
+    Instruction::CONST_4 | 1 << 8 | 1 << 12,
+    Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
+    Instruction::GOTO | 4 << 8,
+    Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 3,
+    Instruction::GOTO | 4 << 8,
+    Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 2,
+    static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+    Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 4,
+    Instruction::RETURN_VOID);
+
+  std::string expected_before =
+    "BasicBlock 0, succ: 1\n"
+    "  3: IntConstant [9]\n"
+    "  5: IntConstant [9]\n"
+    "  13: IntConstant [14]\n"
+    "  18: IntConstant [19]\n"
+    "  24: IntConstant [25]\n"
+    "  29: SuspendCheck\n"
+    "  30: Goto 1\n"
+    "BasicBlock 1, pred: 0, succ: 3\n"
+    "  9: Add(3, 5) [19]\n"
+    "  11: Goto 3\n"
+    "BasicBlock 2, pred: 3, succ: 4\n"
+    "  14: Add(19, 13) [25]\n"
+    "  16: Goto 4\n"
+    "BasicBlock 3, pred: 1, succ: 2\n"
+    "  19: Add(9, 18) [14]\n"
+    "  21: SuspendCheck\n"
+    "  22: Goto 2\n"
+    "BasicBlock 4, pred: 2, succ: 5\n"
+    "  25: Add(14, 24)\n"
+    "  27: ReturnVoid\n"
+    "BasicBlock 5, pred: 4\n"
+    "  28: Exit\n";
+
+  // Expected difference after dead code elimination.
+  diff_t expected_diff = {
+    { "  13: IntConstant [14]\n", removed },
+    { "  24: IntConstant [25]\n", removed },
+    { "  14: Add(19, 13) [25]\n", removed },
+    // The SuspendCheck instruction following this Add instruction
+    // inserts the latter into an environment, thus making it "used"
+    // and therefore not removable.  As a consequence, some other Add
+    // and IntConstant instructions cannot be removed either, as they
+    // are direct or indirect inputs of that Add instruction.
+    { "  19: Add(9, 18) [14]\n",  "  19: Add(9, 18) []\n" },
+    { "  25: Add(14, 24)\n",      removed },
+  };
+  std::string expected_after = Patch(expected_before, expected_diff);
+
+  TestCode(data, expected_before, expected_after);
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
new file mode 100644
index 0000000..ad9ed0c
--- /dev/null
+++ b/compiler/optimizing/graph_checker.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph_checker.h"
+
+#include <string>
+#include <map>
+#include <sstream>
+
+namespace art {
+
+void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
+  current_block_ = block;
+
+  // Check consistency with respect to predecessors of `block`.
+  const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
+  std::map<HBasicBlock*, size_t> predecessors_count;
+  for (size_t i = 0, e = predecessors.Size(); i < e; ++i) {
+    HBasicBlock* p = predecessors.Get(i);
+    ++predecessors_count[p];
+  }
+  for (auto& pc : predecessors_count) {
+    HBasicBlock* p = pc.first;
+    size_t p_count_in_block_predecessors = pc.second;
+    const GrowableArray<HBasicBlock*>& p_successors = p->GetSuccessors();
+    size_t block_count_in_p_successors = 0;
+    for (size_t j = 0, f = p_successors.Size(); j < f; ++j) {
+      if (p_successors.Get(j) == block) {
+        ++block_count_in_p_successors;
+      }
+    }
+    if (p_count_in_block_predecessors != block_count_in_p_successors) {
+      std::stringstream error;
+      error << "Block " << block->GetBlockId()
+            << " lists " << p_count_in_block_predecessors
+            << " occurrences of block " << p->GetBlockId()
+            << " in its predecessors, whereas block " << p->GetBlockId()
+            << " lists " << block_count_in_p_successors
+            << " occurrences of block " << block->GetBlockId()
+            << " in its successors.";
+      errors_.Insert(error.str());
+    }
+  }
+
+  // Check consistency with respect to successors of `block`.
+  const GrowableArray<HBasicBlock*>& successors = block->GetSuccessors();
+  std::map<HBasicBlock*, size_t> successors_count;
+  for (size_t i = 0, e = successors.Size(); i < e; ++i) {
+    HBasicBlock* s = successors.Get(i);
+    ++successors_count[s];
+  }
+  for (auto& sc : successors_count) {
+    HBasicBlock* s = sc.first;
+    size_t s_count_in_block_successors = sc.second;
+    const GrowableArray<HBasicBlock*>& s_predecessors = s->GetPredecessors();
+    size_t block_count_in_s_predecessors = 0;
+    for (size_t j = 0, f = s_predecessors.Size(); j < f; ++j) {
+      if (s_predecessors.Get(j) == block) {
+        ++block_count_in_s_predecessors;
+      }
+    }
+    if (s_count_in_block_successors != block_count_in_s_predecessors) {
+      std::stringstream error;
+      error << "Block " << block->GetBlockId()
+            << " lists " << s_count_in_block_successors
+            << " occurrences of block " << s->GetBlockId()
+            << " in its successors, whereas block " << s->GetBlockId()
+            << " lists " << block_count_in_s_predecessors
+            << " occurrences of block " << block->GetBlockId()
+            << " in its predecessors.";
+      errors_.Insert(error.str());
+    }
+  }
+
+  // Ensure `block` ends with a branch instruction.
+  HInstruction* last_inst = block->GetLastInstruction();
+  if (last_inst == nullptr || !last_inst->IsControlFlow()) {
+    std::stringstream error;
+    error  << "Block " << block->GetBlockId()
+           << " does not end with a branch instruction.";
+    errors_.Insert(error.str());
+  }
+
+  // Visit this block's list of phis.
+  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+    // Ensure this block's list of phis contains only phis.
+    if (!it.Current()->IsPhi()) {
+      std::stringstream error;
+      error << "Block " << current_block_->GetBlockId()
+            << " has a non-phi in its phi list.";
+      errors_.Insert(error.str());
+    }
+    it.Current()->Accept(this);
+  }
+
+  // Visit this block's list of instructions.
+  for (HInstructionIterator it(block->GetInstructions()); !it.Done();
+       it.Advance()) {
+    // Ensure this block's list of instructions does not contain phis.
+    if (it.Current()->IsPhi()) {
+      std::stringstream error;
+      error << "Block " << current_block_->GetBlockId()
+            << " has a phi in its non-phi list.";
+      errors_.Insert(error.str());
+    }
+    it.Current()->Accept(this);
+  }
+}
+
+void GraphChecker::VisitInstruction(HInstruction* instruction) {
+  // Ensure `instruction` is associated with `current_block_`.
+  if (instruction->GetBlock() != current_block_) {
+    std::stringstream error;
+    if (instruction->IsPhi()) {
+      error << "Phi ";
+    } else {
+      error << "Instruction ";
+    }
+    error << instruction->GetId() << " in block "
+          << current_block_->GetBlockId();
+    if (instruction->GetBlock() != nullptr) {
+      error << " associated with block "
+            << instruction->GetBlock()->GetBlockId() << ".";
+    } else {
+      error << " not associated with any block.";
+    }
+    errors_.Insert(error.str());
+  }
+}
+
+void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
+  super_type::VisitBasicBlock(block);
+
+  // Ensure there is no critical edge (i.e., an edge connecting a
+  // block with multiple successors to a block with multiple
+  // predecessors).
+  if (block->GetSuccessors().Size() > 1) {
+    for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
+      HBasicBlock* successor = block->GetSuccessors().Get(j);
+      if (successor->GetPredecessors().Size() > 1) {
+        std::stringstream error;
+        error << "Critical edge between blocks " << block->GetBlockId()
+              << " and "  << successor->GetBlockId() << ".";
+        errors_.Insert(error.str());
+      }
+    }
+  }
+}
+
+void SSAChecker::VisitInstruction(HInstruction* instruction) {
+  super_type::VisitInstruction(instruction);
+
+  // Ensure an instruction dominates all its uses; here we check the
+  // equivalent property that each input of `instruction` dominates
+  // this use of it.
+  for (HInputIterator input_it(instruction); !input_it.Done();
+       input_it.Advance()) {
+    HInstruction* input = input_it.Current();
+    if (!input->Dominates(instruction)) {
+      std::stringstream error;
+      error << "Instruction " << input->GetId()
+            << " in block " << input->GetBlock()->GetBlockId()
+            << " does not dominate use " << instruction->GetId()
+            << " in block " << current_block_->GetBlockId() << ".";
+      errors_.Insert(error.str());
+    }
+  }
+}
+
+}  // namespace art
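
The predecessor/successor check above is a multiset invariant: a block P must occur in B's predecessor list exactly as many times as B occurs in P's successor list, which tolerates parallel CFG edges where simple set membership would not. The same invariant on toy adjacency lists (illustration only, not the ART types):

    #include <algorithm>
    #include <cstddef>
    #include <map>
    #include <vector>

    using BlockId = int;
    // Maps each block to its predecessor (resp. successor) list; every block
    // is assumed to be present in both maps.
    using Cfg = std::map<BlockId, std::vector<BlockId>>;

    static bool EdgesConsistent(const Cfg& predecessors, const Cfg& successors) {
      for (const auto& entry : predecessors) {
        BlockId block = entry.first;
        for (BlockId pred : entry.second) {
          size_t in_preds =
              std::count(entry.second.begin(), entry.second.end(), pred);
          const std::vector<BlockId>& succs = successors.at(pred);
          size_t in_succs = std::count(succs.begin(), succs.end(), block);
          if (in_preds != in_succs) {
            return false;
          }
        }
      }
      return true;
    }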
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
new file mode 100644
index 0000000..8ddd399
--- /dev/null
+++ b/compiler/optimizing/graph_checker.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
+#define ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
+
+#include "nodes.h"
+
+namespace art {
+
+// A control-flow graph visitor performing various checks.
+class GraphChecker : public HGraphVisitor {
+ public:
+  GraphChecker(ArenaAllocator* allocator, HGraph* graph)
+    : HGraphVisitor(graph),
+      allocator_(allocator),
+      errors_(allocator, 0) {}
+
+  // Check `block`.
+  virtual void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+
+  // Check `instruction`.
+  virtual void VisitInstruction(HInstruction* instruction) OVERRIDE;
+
+  // Was the last visit of the graph valid?
+  bool IsValid() const {
+    return errors_.IsEmpty();
+  }
+
+  // Get the list of detected errors.
+  const GrowableArray<std::string>& GetErrors() const {
+    return errors_;
+  }
+
+ protected:
+  ArenaAllocator* const allocator_;
+  // The block currently visited.
+  HBasicBlock* current_block_ = nullptr;
+  // Errors encountered while checking the graph.
+  GrowableArray<std::string> errors_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(GraphChecker);
+};
+
+
+// An SSA graph visitor performing various checks.
+class SSAChecker : public GraphChecker {
+ public:
+  typedef GraphChecker super_type;
+
+  SSAChecker(ArenaAllocator* allocator, HGraph* graph)
+    : GraphChecker(allocator, graph) {}
+
+  // Perform SSA form checks on `block`.
+  virtual void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+
+  // Perform SSA form checks on `instruction`.
+  virtual void VisitInstruction(HInstruction* instruction) OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SSAChecker);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
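
A typical way to drive these checkers (mirrored by the tests below) is to run them after a pass and dump whatever they found. A sketch using only the interface declared above:

    static void CheckGraph(ArenaAllocator* allocator, HGraph* graph) {
      GraphChecker checker(allocator, graph);
      checker.VisitInsertionOrder();
      if (!checker.IsValid()) {
        const GrowableArray<std::string>& errors = checker.GetErrors();
        for (size_t i = 0, e = errors.Size(); i < e; ++i) {
          LOG(ERROR) << errors.Get(i);
        }
      }
    }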
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
new file mode 100644
index 0000000..ea06920
--- /dev/null
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph_checker.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+/**
+ * Create a simple control-flow graph composed of two blocks:
+ *
+ *   BasicBlock 0, succ: 1
+ *     0: Goto 1
+ *   BasicBlock 1, pred: 0
+ *     1: Exit
+ */
+HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
+  HGraph* graph = new (allocator) HGraph(allocator);
+  HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
+  entry_block->AddInstruction(new (allocator) HGoto());
+  graph->AddBlock(entry_block);
+  graph->SetEntryBlock(entry_block);
+  HBasicBlock* exit_block = new (allocator) HBasicBlock(graph);
+  exit_block->AddInstruction(new (allocator) HExit());
+  graph->AddBlock(exit_block);
+  graph->SetExitBlock(exit_block);
+  entry_block->AddSuccessor(exit_block);
+  return graph;
+}
+
+
+static void TestCode(const uint16_t* data) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = CreateCFG(&allocator, data);
+  ASSERT_NE(graph, nullptr);
+
+  GraphChecker graph_checker(&allocator, graph);
+  graph_checker.VisitInsertionOrder();
+  ASSERT_TRUE(graph_checker.IsValid());
+}
+
+static void TestCodeSSA(const uint16_t* data) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = CreateCFG(&allocator, data);
+  ASSERT_NE(graph, nullptr);
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+
+  SSAChecker ssa_checker(&allocator, graph);
+  ssa_checker.VisitInsertionOrder();
+  ASSERT_TRUE(ssa_checker.IsValid());
+}
+
+
+TEST(GraphChecker, ReturnVoid) {
+  const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+      Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+
+TEST(GraphChecker, CFG1) {
+  const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+      Instruction::GOTO | 0x100,
+      Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+
+TEST(GraphChecker, CFG2) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0x100,
+    Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+
+TEST(GraphChecker, CFG3) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0x100,
+    Instruction::GOTO | 0xFF00);
+
+  TestCode(data);
+}
+
+// Test case with an invalid graph containing inconsistent
+// predecessor/successor arcs in the CFG.
+TEST(GraphChecker, InconsistentPredecessorsAndSuccessors) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  HGraph* graph = CreateSimpleCFG(&allocator);
+  GraphChecker graph_checker(&allocator, graph);
+  graph_checker.VisitInsertionOrder();
+  ASSERT_TRUE(graph_checker.IsValid());
+
+  // Remove the entry block from the exit block's predecessors, to create an
+  // inconsistent successor/predecessor relation.
+  graph->GetExitBlock()->RemovePredecessor(graph->GetEntryBlock());
+  graph_checker.VisitInsertionOrder();
+  ASSERT_FALSE(graph_checker.IsValid());
+}
+
+// Test case with an invalid graph containing a non-branch last
+// instruction in a block.
+TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  HGraph* graph = CreateSimpleCFG(&allocator);
+  GraphChecker graph_checker(&allocator, graph);
+  graph_checker.VisitInsertionOrder();
+  ASSERT_TRUE(graph_checker.IsValid());
+
+  // Remove the sole instruction of the exit block (composed of a
+  // single Exit instruction) to make it invalid (i.e., not ending
+  // with a branch instruction).
+  HBasicBlock* exit_block = graph->GetExitBlock();
+  HInstruction* last_inst = exit_block->GetLastInstruction();
+  exit_block->RemoveInstruction(last_inst);
+
+  graph_checker.VisitInsertionOrder();
+  ASSERT_FALSE(graph_checker.IsValid());
+}
+
+TEST(SSAChecker, SSAPhi) {
+  // This code creates one Phi function during the conversion to SSA form.
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::CONST_4 | 4 << 12 | 0,
+    Instruction::RETURN | 0 << 8);
+
+  TestCodeSSA(data);
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index f011e85..7f64be4 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -81,6 +81,23 @@
     }
   }
 
+  char GetTypeId(Primitive::Type type) {
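+    // These one-character tags appear to mirror the dex "shorty" type
+    // descriptors, lowercased for the c1visualizer output.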
+    switch (type) {
+      case Primitive::kPrimBoolean: return 'z';
+      case Primitive::kPrimByte: return 'b';
+      case Primitive::kPrimChar: return 'c';
+      case Primitive::kPrimShort: return 's';
+      case Primitive::kPrimInt: return 'i';
+      case Primitive::kPrimLong: return 'j';
+      case Primitive::kPrimFloat: return 'f';
+      case Primitive::kPrimDouble: return 'd';
+      case Primitive::kPrimNot: return 'l';
+      case Primitive::kPrimVoid: return 'v';
+    }
+    LOG(FATAL) << "Unreachable";
+    return 'v';
+  }
+
   void PrintPredecessors(HBasicBlock* block) {
     AddIndent();
     output_ << "predecessors";
@@ -140,7 +157,7 @@
     if (instruction->InputCount() > 0) {
       output_ << " [ ";
       for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) {
-        output_ << "v" << inputs.Current()->GetId() << " ";
+        output_ << GetTypeId(inputs.Current()->GetType()) << inputs.Current()->GetId() << " ";
       }
       output_ << "]";
     }
@@ -175,7 +192,8 @@
       HInstruction* instruction = it.Current();
       AddIndent();
       int bci = 0;
-      output_ << bci << " " << instruction->NumberOfUses() << " v" << instruction->GetId() << " ";
+      output_ << bci << " " << instruction->NumberOfUses()
+              << " " << GetTypeId(instruction->GetType()) << instruction->GetId() << " ";
       instruction->Accept(this);
       output_ << kEndInstructionMarker << std::endl;
     }
@@ -214,7 +232,8 @@
     for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       AddIndent();
       HInstruction* instruction = it.Current();
-      output_ << instruction->GetId() << " v" << instruction->GetId() << "[ ";
+      output_ << instruction->GetId() << " " << GetTypeId(instruction->GetType())
+              << instruction->GetId() << "[ ";
       for (HInputIterator inputs(instruction); !inputs.Done(); inputs.Advance()) {
         output_ << inputs.Current()->GetId() << " ";
       }
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 7cd74e9..6e2c6fd 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -25,8 +25,10 @@
 class DexCompilationUnit;
 class HGraph;
 
+// TODO: Create an analysis/optimization abstraction.
 static const char* kLivenessPassName = "liveness";
 static const char* kRegisterAllocatorPassName = "register";
+static const char* kGVNPassName = "gvn";
 
 /**
  * If enabled, emits compilation information suitable for the c1visualizer tool
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
new file mode 100644
index 0000000..027b3d4
--- /dev/null
+++ b/compiler/optimizing/gvn.cc
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gvn.h"
+
+namespace art {
+
+void GlobalValueNumberer::Run() {
+  ComputeSideEffects();
+
+  sets_.Put(graph_->GetEntryBlock()->GetBlockId(), new (allocator_) ValueSet(allocator_));
+
+  // Do reverse post order to ensure the non back-edge predecessors of a block are
+  // visited before the block itself.
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    VisitBasicBlock(it.Current());
+  }
+}
+
+void GlobalValueNumberer::UpdateLoopEffects(HLoopInformation* info, SideEffects effects) {
+  int id = info->GetHeader()->GetBlockId();
+  loop_effects_.Put(id, loop_effects_.Get(id).Union(effects));
+}
+
+void GlobalValueNumberer::ComputeSideEffects() {
+  if (kIsDebugBuild) {
+    for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+      HBasicBlock* block = it.Current();
+      SideEffects effects = GetBlockEffects(block);
+      DCHECK(!effects.HasSideEffects() && !effects.HasDependencies());
+      if (block->IsLoopHeader()) {
+        effects = GetLoopEffects(block);
+        DCHECK(!effects.HasSideEffects() && !effects.HasDependencies());
+      }
+    }
+  }
+
+  // Do a post order visit to ensure we visit a loop header after its loop body.
+  for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    HBasicBlock* block = it.Current();
+
+    SideEffects effects = SideEffects::None();
+    // Update `effects` with the side effects of all instructions in this block.
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      effects = effects.Union(instruction->GetSideEffects());
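+      // Once every side-effect bit is set, the union cannot grow further,
+      // so stop early.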
+      if (effects.HasAllSideEffects()) {
+        break;
+      }
+    }
+
+    block_effects_.Put(block->GetBlockId(), effects);
+
+    if (block->IsLoopHeader()) {
+      // The side effects of the loop header are part of the loop.
+      UpdateLoopEffects(block->GetLoopInformation(), effects);
+      HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+      if (pre_header->IsInLoop()) {
+        // Update the side effects of the outer loop with the side effects of the inner loop.
+        // Note that this works because we know all the blocks of the inner loop are visited
+        // before the loop header of the outer loop.
+        UpdateLoopEffects(pre_header->GetLoopInformation(), GetLoopEffects(block));
+      }
+    } else if (block->IsInLoop()) {
+      // Update the side effects of the loop with the side effects of this block.
+      UpdateLoopEffects(block->GetLoopInformation(), effects);
+    }
+  }
+}
+
+SideEffects GlobalValueNumberer::GetLoopEffects(HBasicBlock* block) const {
+  DCHECK(block->IsLoopHeader());
+  return loop_effects_.Get(block->GetBlockId());
+}
+
+SideEffects GlobalValueNumberer::GetBlockEffects(HBasicBlock* block) const {
+  return block_effects_.Get(block->GetBlockId());
+}
+
+static bool IsLoopExit(HBasicBlock* block, HBasicBlock* successor) {
+  HLoopInformation* block_info = block->GetLoopInformation();
+  HLoopInformation* other_info = successor->GetLoopInformation();
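+  // The edge exits a loop if the successor is in no loop at all, or only in
+  // a loop that encloses `block`'s loop.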
+  return block_info != other_info && (other_info == nullptr || block_info->IsIn(*other_info));
+}
+
+void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
+  if (kIsDebugBuild) {
+    // Check that all non back-edge predecessors have been visited.
+    for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
+      HBasicBlock* predecessor = block->GetPredecessors().Get(i);
+      DCHECK(visited_.Get(predecessor->GetBlockId())
+             || (block->GetLoopInformation() != nullptr
+                 && (block->GetLoopInformation()->GetBackEdges().Get(0) == predecessor)));
+    }
+    visited_.Put(block->GetBlockId(), true);
+  }
+
+  ValueSet* set = sets_.Get(block->GetBlockId());
+
+  if (block->IsLoopHeader()) {
+    set->Kill(GetLoopEffects(block));
+  }
+
+  HInstruction* current = block->GetFirstInstruction();
+  while (current != nullptr) {
+    set->Kill(current->GetSideEffects());
+    // Save the next instruction in case `current` is removed from the graph.
+    HInstruction* next = current->GetNext();
+    if (current->CanBeMoved()) {
+      HInstruction* existing = set->Lookup(current);
+      if (existing != nullptr) {
+        current->ReplaceWith(existing);
+        current->GetBlock()->RemoveInstruction(current);
+      } else {
+        set->Add(current);
+      }
+    }
+    current = next;
+  }
+
+  if (block == graph_->GetEntryBlock()) {
+    // The entry block should only accumulate constant instructions, and
+    // the builder puts constants only in the entry block.
+    // Therefore, there is no need to propagate the value set to the next block.
+    DCHECK_EQ(block->GetDominatedBlocks().Size(), 1u);
+    HBasicBlock* dominated = block->GetDominatedBlocks().Get(0);
+    sets_.Put(dominated->GetBlockId(), new (allocator_) ValueSet(allocator_));
+    return;
+  }
+
+  // Copy the value set to dominated blocks. We can re-use
+  // the current set for the last dominated block because we are done visiting
+  // this block.
+  for (size_t i = 0, e = block->GetDominatedBlocks().Size(); i < e; ++i) {
+    HBasicBlock* dominated = block->GetDominatedBlocks().Get(i);
+    sets_.Put(dominated->GetBlockId(), i == e - 1 ? set : set->Copy());
+  }
+
+  // Kill instructions in the value set of each successor. If the successor
+  // is a loop exit, then we use the side effects of the loop. If not, we use
+  // the side effects of this block.
+  for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) {
+    HBasicBlock* successor = block->GetSuccessors().Get(i);
+    if (successor->IsLoopHeader()
+        && successor->GetLoopInformation()->GetBackEdges().Get(0) == block) {
+      // In case of a back edge, we already have visited the loop header.
+      // We should not update its value set, because the last dominated block
+      // of the loop header uses the same value set.
+      DCHECK(visited_.Get(successor->GetBlockId()));
+      continue;
+    }
+    DCHECK(!visited_.Get(successor->GetBlockId()));
+    ValueSet* successor_set = sets_.Get(successor->GetBlockId());
+    // The set was created by the successor's dominator, which is guaranteed
+    // to have been visited already.
+    DCHECK(successor_set != nullptr);
+
+    // If this block dominates this successor, there is nothing to do.
+    // Also, if the set is empty, there is nothing to kill.
+    if (successor->GetDominator() != block && !successor_set->IsEmpty()) {
+      if (block->IsInLoop() && IsLoopExit(block, successor)) {
+        // All instructions killed in the loop must be killed for a loop exit.
+        SideEffects effects = GetLoopEffects(block->GetLoopInformation()->GetHeader());
+        sets_.Get(successor->GetBlockId())->Kill(effects);
+      } else {
+        // Following block (that might be in the same loop).
+        // Just kill instructions based on this block's side effects.
+        sets_.Get(successor->GetBlockId())->Kill(GetBlockEffects(block));
+      }
+    }
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
new file mode 100644
index 0000000..41b3ceb
--- /dev/null
+++ b/compiler/optimizing/gvn.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_GVN_H_
+#define ART_COMPILER_OPTIMIZING_GVN_H_
+
+#include <gtest/gtest.h>
+#include "nodes.h"
+
+namespace art {
+
+/**
+ * A node in the collision list of a ValueSet. Encodes the instruction,
+ * the hash code, and the next node in the collision list.
+ */
+class ValueSetNode : public ArenaObject {
+ public:
+  ValueSetNode(HInstruction* instruction, size_t hash_code, ValueSetNode* next)
+      : instruction_(instruction), hash_code_(hash_code), next_(next) {}
+
+  size_t GetHashCode() const { return hash_code_; }
+  HInstruction* GetInstruction() const { return instruction_; }
+  ValueSetNode* GetNext() const { return next_; }
+  void SetNext(ValueSetNode* node) { next_ = node; }
+
+ private:
+  HInstruction* const instruction_;
+  const size_t hash_code_;
+  ValueSetNode* next_;
+
+  DISALLOW_COPY_AND_ASSIGN(ValueSetNode);
+};
+
+/**
+ * A ValueSet holds instructions that can replace other instructions. It is updated
+ * through the `Add` method, and the `Kill` method. The `Kill` method removes
+ * instructions that are affected by the given side effect.
+ *
+ * The `Lookup` method returns an equivalent instruction to the given instruction
+ * if there is one in the set. In GVN, we would say those instructions have the
+ * same "number".
+ */
+class ValueSet : public ArenaObject {
+ public:
+  explicit ValueSet(ArenaAllocator* allocator)
+      : allocator_(allocator), number_of_entries_(0), collisions_(nullptr) {
+    for (size_t i = 0; i < kDefaultNumberOfEntries; ++i) {
+      table_[i] = nullptr;
+    }
+  }
+
+  // Adds an instruction to the set.
+  void Add(HInstruction* instruction) {
+    DCHECK(Lookup(instruction) == nullptr);
+    size_t hash_code = instruction->ComputeHashCode();
+    size_t index = hash_code % kDefaultNumberOfEntries;
+    if (table_[index] == nullptr) {
+      table_[index] = instruction;
+    } else {
+      collisions_ = new (allocator_) ValueSetNode(instruction, hash_code, collisions_);
+    }
+    ++number_of_entries_;
+  }
+
+  // Returns an instruction from the set that is equivalent to the given
+  // instruction, or null if the set contains no such instruction.
+  HInstruction* Lookup(HInstruction* instruction) const {
+    size_t hash_code = instruction->ComputeHashCode();
+    size_t index = hash_code % kDefaultNumberOfEntries;
+    HInstruction* existing = table_[index];
+    if (existing != nullptr && existing->Equals(instruction)) {
+      return existing;
+    }
+
+    for (ValueSetNode* node = collisions_; node != nullptr; node = node->GetNext()) {
+      if (node->GetHashCode() == hash_code) {
+        existing = node->GetInstruction();
+        if (existing->Equals(instruction)) {
+          return existing;
+        }
+      }
+    }
+    return nullptr;
+  }
+
+  // Removes all instructions in the set that are affected by the given side effects.
+  void Kill(SideEffects side_effects) {
+    for (size_t i = 0; i < kDefaultNumberOfEntries; ++i) {
+      HInstruction* instruction = table_[i];
+      if (instruction != nullptr && instruction->GetSideEffects().DependsOn(side_effects)) {
+        table_[i] = nullptr;
+        --number_of_entries_;
+      }
+    }
+
+    ValueSetNode* current = collisions_;
+    ValueSetNode* previous = nullptr;
+    while (current != nullptr) {
+      HInstruction* instruction = current->GetInstruction();
+      if (instruction->GetSideEffects().DependsOn(side_effects)) {
+        if (previous == nullptr) {
+          collisions_ = current->GetNext();
+        } else {
+          previous->SetNext(current->GetNext());
+        }
+        --number_of_entries_;
+      } else {
+        previous = current;
+      }
+      current = current->GetNext();
+    }
+  }
+
+  // Returns a copy of this set.
+  ValueSet* Copy() const {
+    ValueSet* copy = new (allocator_) ValueSet(allocator_);
+
+    for (size_t i = 0; i < kDefaultNumberOfEntries; ++i) {
+      copy->table_[i] = table_[i];
+    }
+
+    // Note that the order will be inverted in the copy. This is fine, as the order is not
+    // relevant for a ValueSet.
+    for (ValueSetNode* node = collisions_; node != nullptr; node = node->GetNext()) {
+      copy->collisions_ = new (allocator_) ValueSetNode(
+          node->GetInstruction(), node->GetHashCode(), copy->collisions_);
+    }
+
+    copy->number_of_entries_ = number_of_entries_;
+    return copy;
+  }
+
+  bool IsEmpty() const { return number_of_entries_ == 0; }
+  size_t GetNumberOfEntries() const { return number_of_entries_; }
+
+ private:
+  static constexpr size_t kDefaultNumberOfEntries = 8;
+
+  ArenaAllocator* const allocator_;
+
+  // The number of entries in the set.
+  size_t number_of_entries_;
+
+  // The internal implementation of the set. It uses a combination of a hash code based
+  // fixed-size list, and a linked list to handle hash code collisions.
+  // TODO: Tune the fixed size list original size, and support growing it.
+  ValueSetNode* collisions_;
+  HInstruction* table_[kDefaultNumberOfEntries];
+
+  DISALLOW_COPY_AND_ASSIGN(ValueSet);
+};
+
+/**
+ * Optimization pass that removes redundant instructions.
+ */
+class GlobalValueNumberer : public ValueObject {
+ public:
+  GlobalValueNumberer(ArenaAllocator* allocator, HGraph* graph)
+      : allocator_(allocator),
+        graph_(graph),
+        block_effects_(allocator, graph->GetBlocks().Size()),
+        loop_effects_(allocator, graph->GetBlocks().Size()),
+        sets_(allocator, graph->GetBlocks().Size()),
+        visited_(allocator, graph->GetBlocks().Size()) {
+    size_t number_of_blocks = graph->GetBlocks().Size();
+    block_effects_.SetSize(number_of_blocks);
+    loop_effects_.SetSize(number_of_blocks);
+    sets_.SetSize(number_of_blocks);
+    visited_.SetSize(number_of_blocks);
+
+    for (size_t i = 0; i < number_of_blocks; ++i) {
+      block_effects_.Put(i, SideEffects::None());
+      loop_effects_.Put(i, SideEffects::None());
+    }
+  }
+
+  void Run();
+
+ private:
+  // Per-block GVN. Will also update the ValueSet of the dominated and
+  // successor blocks.
+  void VisitBasicBlock(HBasicBlock* block);
+
+  // Compute side effects of individual blocks and loops. The GVN algorithm
+  // will use these side effects to update the ValueSet of individual blocks.
+  void ComputeSideEffects();
+
+  void UpdateLoopEffects(HLoopInformation* info, SideEffects effects);
+  SideEffects GetLoopEffects(HBasicBlock* block) const;
+  SideEffects GetBlockEffects(HBasicBlock* block) const;
+
+  ArenaAllocator* const allocator_;
+  HGraph* const graph_;
+
+  // Side effects of individual blocks, that is the union of the side effects
+  // of the instructions in the block.
+  GrowableArray<SideEffects> block_effects_;
+
+  // Side effects of loops, that is the union of the side effects of the
+  // blocks contained in that loop.
+  GrowableArray<SideEffects> loop_effects_;
+
+  // Value sets, one per block. Each set starts out null; it is allocated and
+  // populated by the block's dominator, and then updated by all blocks on the
+  // path from the dominator to the block.
+  GrowableArray<ValueSet*> sets_;
+
+  // Marks visited blocks. Only used for debugging.
+  GrowableArray<bool> visited_;
+
+  FRIEND_TEST(GVNTest, LoopSideEffects);
+  DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_GVN_H_
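
ValueSet is a fixed-size, eight-slot hash table with a single shared overflow list rather than per-bucket chaining: a newcomer whose slot is occupied goes to the collision list regardless of which entry occupies the slot. A self-contained model of that layout, with strings standing in for instructions (illustration only; the empty string serves as the free-slot sentinel, and nodes are never freed, mirroring arena allocation in the real code):

    #include <cstddef>
    #include <functional>
    #include <string>

    struct OverflowNode {
      std::string value;
      size_t hash;
      OverflowNode* next;
    };

    class StringValueSet {
     public:
      void Add(const std::string& s) {
        size_t hash = std::hash<std::string>()(s);
        size_t index = hash % kEntries;
        if (table_[index].empty()) {
          table_[index] = s;
        } else {
          // Slot taken (by any entry): push onto the shared overflow list.
          overflow_ = new OverflowNode{s, hash, overflow_};
        }
      }

      bool Lookup(const std::string& s) const {
        size_t hash = std::hash<std::string>()(s);
        if (table_[hash % kEntries] == s) {
          return true;
        }
        // The overflow list is shared by all slots, so filter on the full
        // hash before comparing values.
        for (OverflowNode* node = overflow_; node != nullptr; node = node->next) {
          if (node->hash == hash && node->value == s) {
            return true;
          }
        }
        return false;
      }

     private:
      static constexpr size_t kEntries = 8;
      std::string table_[kEntries];
      OverflowNode* overflow_ = nullptr;
    };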
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
new file mode 100644
index 0000000..ad6e338
--- /dev/null
+++ b/compiler/optimizing/gvn_test.cc
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "builder.h"
+#include "gvn.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(GVNTest, LocalFieldElimination) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  HGraph* graph = new (&allocator) HGraph(&allocator);
+  HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(entry);
+  graph->SetEntryBlock(entry);
+  HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+  entry->AddInstruction(parameter);
+
+  HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(block);
+  entry->AddSuccessor(block);
+
+  block->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(42)));
+  block->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(42)));
+  HInstruction* to_remove = block->GetLastInstruction();
+  block->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(43)));
+  HInstruction* different_offset = block->GetLastInstruction();
+  // Kill the value.
+  block->AddInstruction(new (&allocator) HInstanceFieldSet(
+      parameter, parameter, Primitive::kPrimNot, MemberOffset(42)));
+  block->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimNot, MemberOffset(42)));
+  HInstruction* use_after_kill = block->GetLastInstruction();
+  block->AddInstruction(new (&allocator) HExit());
+
+  ASSERT_EQ(to_remove->GetBlock(), block);
+  ASSERT_EQ(different_offset->GetBlock(), block);
+  ASSERT_EQ(use_after_kill->GetBlock(), block);
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  GlobalValueNumberer(&allocator, graph).Run();
+
+  ASSERT_TRUE(to_remove->GetBlock() == nullptr);
+  ASSERT_EQ(different_offset->GetBlock(), block);
+  ASSERT_EQ(use_after_kill->GetBlock(), block);
+}
+
+TEST(GVNTest, GlobalFieldElimination) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  HGraph* graph = new (&allocator) HGraph(&allocator);
+  HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(entry);
+  graph->SetEntryBlock(entry);
+  HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+  entry->AddInstruction(parameter);
+
+  HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(block);
+  entry->AddSuccessor(block);
+  block->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+
+  block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
+  HBasicBlock* then = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* join = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(then);
+  graph->AddBlock(else_);
+  graph->AddBlock(join);
+
+  block->AddSuccessor(then);
+  block->AddSuccessor(else_);
+  then->AddSuccessor(join);
+  else_->AddSuccessor(join);
+
+  then->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  then->AddInstruction(new (&allocator) HGoto());
+  else_->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  else_->AddInstruction(new (&allocator) HGoto());
+  join->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  join->AddInstruction(new (&allocator) HExit());
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  GlobalValueNumberer(&allocator, graph).Run();
+
+  // Check that all field get instructions have been GVN'ed.
+  ASSERT_TRUE(then->GetFirstInstruction()->IsGoto());
+  ASSERT_TRUE(else_->GetFirstInstruction()->IsGoto());
+  ASSERT_TRUE(join->GetFirstInstruction()->IsExit());
+}
+
+TEST(GVNTest, LoopFieldElimination) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  HGraph* graph = new (&allocator) HGraph(&allocator);
+  HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(entry);
+  graph->SetEntryBlock(entry);
+
+  HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+  entry->AddInstruction(parameter);
+
+  HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(block);
+  entry->AddSuccessor(block);
+  block->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  block->AddInstruction(new (&allocator) HGoto());
+
+  HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* loop_body = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
+
+  graph->AddBlock(loop_header);
+  graph->AddBlock(loop_body);
+  graph->AddBlock(exit);
+  block->AddSuccessor(loop_header);
+  loop_header->AddSuccessor(loop_body);
+  loop_header->AddSuccessor(exit);
+  loop_body->AddSuccessor(loop_header);
+
+  loop_header->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
+  loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
+
+  // Kill inside the loop body to prevent the field gets inside the loop
+  // header and the body from being GVN'ed.
+  loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(
+      parameter, parameter, Primitive::kPrimNot, MemberOffset(42)));
+  HInstruction* field_set = loop_body->GetLastInstruction();
+  loop_body->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
+  loop_body->AddInstruction(new (&allocator) HGoto());
+
+  exit->AddInstruction(
+      new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
+  HInstruction* field_get_in_exit = exit->GetLastInstruction();
+  exit->AddInstruction(new (&allocator) HExit());
+
+  ASSERT_EQ(field_get_in_loop_header->GetBlock(), loop_header);
+  ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
+  ASSERT_EQ(field_get_in_exit->GetBlock(), exit);
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  graph->FindNaturalLoops();
+  GlobalValueNumberer(&allocator, graph).Run();
+
+  // Check that the field get instructions inside the loop are still there.
+  ASSERT_EQ(field_get_in_loop_header->GetBlock(), loop_header);
+  ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
+  // The exit block is dominated by the loop header, whose field get
+  // does not get killed by the loop's side effects, so the field get
+  // in the exit block is GVN'ed with it.
+  ASSERT_TRUE(field_get_in_exit->GetBlock() == nullptr);
+
+  // Now remove the field set, and check that all field get instructions have been GVN'ed.
+  loop_body->RemoveInstruction(field_set);
+  GlobalValueNumberer(&allocator, graph).Run();
+
+  ASSERT_TRUE(field_get_in_loop_header->GetBlock() == nullptr);
+  ASSERT_TRUE(field_get_in_loop_body->GetBlock() == nullptr);
+  ASSERT_TRUE(field_get_in_exit->GetBlock() == nullptr);
+}
+
+// Test that inner loops affect the side effects of the outer loop.
+TEST(GVNTest, LoopSideEffects) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+
+  HGraph* graph = new (&allocator) HGraph(&allocator);
+  HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+  graph->AddBlock(entry);
+  graph->SetEntryBlock(entry);
+
+  HBasicBlock* outer_loop_header = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* outer_loop_body = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* outer_loop_exit = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* inner_loop_header = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* inner_loop_body = new (&allocator) HBasicBlock(graph);
+  HBasicBlock* inner_loop_exit = new (&allocator) HBasicBlock(graph);
+
+  graph->AddBlock(outer_loop_header);
+  graph->AddBlock(outer_loop_body);
+  graph->AddBlock(outer_loop_exit);
+  graph->AddBlock(inner_loop_header);
+  graph->AddBlock(inner_loop_body);
+  graph->AddBlock(inner_loop_exit);
+
+  entry->AddSuccessor(outer_loop_header);
+  outer_loop_header->AddSuccessor(outer_loop_body);
+  outer_loop_header->AddSuccessor(outer_loop_exit);
+  outer_loop_body->AddSuccessor(inner_loop_header);
+  inner_loop_header->AddSuccessor(inner_loop_body);
+  inner_loop_header->AddSuccessor(inner_loop_exit);
+  inner_loop_body->AddSuccessor(inner_loop_header);
+  inner_loop_exit->AddSuccessor(outer_loop_header);
+
+  HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimBoolean);
+  entry->AddInstruction(parameter);
+  entry->AddInstruction(new (&allocator) HGoto());
+  outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
+  outer_loop_body->AddInstruction(new (&allocator) HGoto());
+  inner_loop_header->AddInstruction(new (&allocator) HIf(parameter));
+  inner_loop_body->AddInstruction(new (&allocator) HGoto());
+  inner_loop_exit->AddInstruction(new (&allocator) HGoto());
+  outer_loop_exit->AddInstruction(new (&allocator) HExit());
+
+  graph->BuildDominatorTree();
+  graph->TransformToSSA();
+  graph->FindNaturalLoops();
+
+  ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn(
+      *outer_loop_header->GetLoopInformation()));
+
+  // Check that the loops don't have side effects.
+  {
+    // Make one block with a side effect.
+    entry->AddInstruction(new (&allocator) HInstanceFieldSet(
+        parameter, parameter, Primitive::kPrimNot, MemberOffset(42)));
+
+    GlobalValueNumberer gvn(&allocator, graph);
+    gvn.Run();
+
+    ASSERT_TRUE(gvn.GetBlockEffects(entry).HasSideEffects());
+    ASSERT_FALSE(gvn.GetLoopEffects(outer_loop_header).HasSideEffects());
+    ASSERT_FALSE(gvn.GetLoopEffects(inner_loop_header).HasSideEffects());
+  }
+
+  // Check that the side effects of the outer loop do not affect the inner loop.
+  {
+    outer_loop_body->InsertInstructionBefore(
+        new (&allocator) HInstanceFieldSet(
+            parameter, parameter, Primitive::kPrimNot, MemberOffset(42)),
+        outer_loop_body->GetLastInstruction());
+
+    GlobalValueNumberer gvn(&allocator, graph);
+    gvn.Run();
+
+    ASSERT_TRUE(gvn.GetBlockEffects(entry).HasSideEffects());
+    ASSERT_TRUE(gvn.GetBlockEffects(outer_loop_body).HasSideEffects());
+    ASSERT_TRUE(gvn.GetLoopEffects(outer_loop_header).HasSideEffects());
+    ASSERT_FALSE(gvn.GetLoopEffects(inner_loop_header).HasSideEffects());
+  }
+
+  // Check that the side effects of the inner loop affect the outer loop.
+  {
+    outer_loop_body->RemoveInstruction(outer_loop_body->GetFirstInstruction());
+    inner_loop_body->InsertInstructionBefore(
+        new (&allocator) HInstanceFieldSet(
+            parameter, parameter, Primitive::kPrimNot, MemberOffset(42)),
+        inner_loop_body->GetLastInstruction());
+
+    GlobalValueNumberer gvn(&allocator, graph);
+    gvn.Run();
+
+    ASSERT_TRUE(gvn.GetBlockEffects(entry).HasSideEffects());
+    ASSERT_FALSE(gvn.GetBlockEffects(outer_loop_body).HasSideEffects());
+    ASSERT_TRUE(gvn.GetLoopEffects(outer_loop_header).HasSideEffects());
+    ASSERT_TRUE(gvn.GetLoopEffects(inner_loop_header).HasSideEffects());
+  }
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index e4f9371..6dd4207 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -19,6 +19,7 @@
 #include "base/stringprintf.h"
 #include "builder.h"
 #include "code_generator.h"
+#include "code_generator_x86.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "graph_visualizer.h"
@@ -45,8 +46,8 @@
   graph->TransformToSSA();
   graph->FindNaturalLoops();
 
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, InstructionSet::kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   ASSERT_EQ(liveness.GetLinearPostOrder().Size(), number_of_blocks);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index a6e5ca9..a81a30e 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -16,6 +16,7 @@
 
 #include "builder.h"
 #include "code_generator.h"
+#include "code_generator_x86.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "nodes.h"
@@ -31,6 +32,9 @@
   HGraphBuilder builder(allocator);
   const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
   HGraph* graph = builder.BuildGraph(*item);
+  // The suspend check implementation may change in the future, and this test
+  // relies on how instructions are ordered.
+  RemoveSuspendChecks(graph);
   graph->BuildDominatorTree();
   graph->TransformToSSA();
   graph->FindNaturalLoops();
@@ -58,8 +62,8 @@
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildGraph(data, &allocator);
 
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, InstructionSet::kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -104,8 +108,8 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildGraph(data, &allocator);
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, InstructionSet::kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -153,8 +157,8 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildGraph(data, &allocator);
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, InstructionSet::kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   // Test for the 4 constant.
@@ -229,8 +233,8 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildGraph(data, &allocator);
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, InstructionSet::kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   // Test for the 0 constant.
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 1a4d745..84b2e33 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -16,6 +16,7 @@
 
 #include "builder.h"
 #include "code_generator.h"
+#include "code_generator_x86.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "nodes.h"
@@ -49,8 +50,8 @@
   graph->BuildDominatorTree();
   graph->TransformToSSA();
   graph->FindNaturalLoops();
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, InstructionSet::kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   std::ostringstream buffer;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 207c605..1a24677 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -124,6 +124,7 @@
   // dominator of the block. We can then start visiting its successors.
   if (visits->Get(block->GetBlockId()) ==
       block->GetPredecessors().Size() - block->NumberOfBackEdges()) {
+    block->GetDominator()->AddDominatedBlock(block);
     reverse_post_order_.Add(block);
     for (size_t i = 0; i < block->GetSuccessors().Size(); i++) {
       VisitBlockForDominatorTree(block->GetSuccessors().Get(i), block, visits);
@@ -194,6 +195,11 @@
     }
     pre_header->AddSuccessor(header);
   }
+
+  // Make sure the second predecessor of a loop header is the back edge.
+  if (header->GetPredecessors().Get(1) != info->GetBackEdges().Get(0)) {
+    header->SwapPredecessors();
+  }
 }
 
 void HGraph::SimplifyCFG() {
@@ -307,6 +313,14 @@
   instruction->SetId(GetGraph()->GetNextInstructionId());
 }
 
+void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
+                                                  HInstruction* replacement) {
+  DCHECK(initial->GetBlock() == this);
+  InsertInstructionBefore(replacement, initial);
+  initial->ReplaceWith(replacement);
+  RemoveInstruction(initial);
+}
+
 static void Add(HInstructionList* instruction_list,
                 HBasicBlock* block,
                 HInstruction* instruction) {
@@ -392,6 +406,54 @@
   }
 }
 
+bool HInstructionList::FoundBefore(const HInstruction* instruction1,
+                                   const HInstruction* instruction2) const {
+  DCHECK_EQ(instruction1->GetBlock(), instruction2->GetBlock());
+  for (HInstructionIterator it(*this); !it.Done(); it.Advance()) {
+    if (it.Current() == instruction1) {
+      return true;
+    }
+    if (it.Current() == instruction2) {
+      return false;
+    }
+  }
+  LOG(FATAL) << "Did not find an order between two instructions of the same block.";
+  return true;
+}
+
+bool HInstruction::Dominates(HInstruction* other_instruction) const {
+  HBasicBlock* block = GetBlock();
+  HBasicBlock* other_block = other_instruction->GetBlock();
+  if (block != other_block) {
+    return GetBlock()->Dominates(other_instruction->GetBlock());
+  } else {
+    // If both instructions are in the same block, ensure this
+    // instruction comes before `other_instruction`.
+    if (IsPhi()) {
+      if (!other_instruction->IsPhi()) {
+        // Phis appear before non-phi instructions, so this instruction
+        // dominates `other_instruction`.
+        return true;
+      } else {
+        // There is no order among phis.
+        LOG(FATAL) << "There is no dominance between phis of a same block.";
+        return false;
+      }
+    } else {
+      // `this` is not a phi.
+      if (other_instruction->IsPhi()) {
+        // Phis appear before non-phi instructions, so this instruction
+        // does not dominate `other_instruction`.
+        return false;
+      } else {
+        // Check whether this instruction comes before
+        // `other_instruction` in the instruction list.
+        return block->GetInstructions().FoundBefore(this, other_instruction);
+      }
+    }
+  }
+}
+
 void HInstruction::ReplaceWith(HInstruction* other) {
   DCHECK(other != nullptr);
   for (HUseIterator<HInstruction> it(GetUses()); !it.Done(); it.Advance()) {
@@ -449,6 +511,18 @@
   }
 }
 
+HConstant* HBinaryOperation::TryStaticEvaluation(ArenaAllocator* allocator) const {
+  if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) {
+    int32_t value = Evaluate(GetLeft()->AsIntConstant()->GetValue(),
+                             GetRight()->AsIntConstant()->GetValue());
+    return new (allocator) HIntConstant(value);
+  } else if (GetLeft()->IsLongConstant() && GetRight()->IsLongConstant()) {
+    int64_t value = Evaluate(GetLeft()->AsLongConstant()->GetValue(),
+                             GetRight()->AsLongConstant()->GetValue());
+    return new (allocator) HLongConstant(value);
+  }
+  return nullptr;
+}
 
 bool HCondition::NeedsMaterialization() const {
   if (!HasOnlyOneUse()) {
@@ -470,6 +544,7 @@
 
 bool HInstruction::Equals(HInstruction* other) const {
   if (!InstructionTypeEquals(other)) return false;
+  DCHECK_EQ(GetKind(), other->GetKind());
   if (!InstructionDataEquals(other)) return false;
   if (GetType() != other->GetType()) return false;
   if (InputCount() != other->InputCount()) return false;
@@ -477,6 +552,7 @@
   for (size_t i = 0, e = InputCount(); i < e; ++i) {
     if (InputAt(i) != other->InputAt(i)) return false;
   }
+  DCHECK_EQ(ComputeHashCode(), other->ComputeHashCode());
   return true;
 }
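TryStaticEvaluation and ReplaceAndRemoveInstructionWith are meant to be used together by the constant propagation pass this patch adds. A hedged sketch of that pairing (the function name here is invented for illustration; the actual pass lives in constant_propagation.cc):

    // Fold a binary operation whose two inputs are constants of the same kind.
    void FoldIfConstant(ArenaAllocator* allocator, HBinaryOperation* operation) {
      HConstant* constant = operation->TryStaticEvaluation(allocator);
      if (constant != nullptr) {
        // Inserts `constant` before `operation`, rewires every use of
        // `operation` to `constant`, then removes `operation` from the block.
        operation->GetBlock()->ReplaceAndRemoveInstructionWith(operation, constant);
      }
    }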
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9018fee..af173c8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -38,6 +38,7 @@
 static const int kDefaultNumberOfBlocks = 8;
 static const int kDefaultNumberOfSuccessors = 2;
 static const int kDefaultNumberOfPredecessors = 2;
+static const int kDefaultNumberOfDominatedBlocks = 1;
 static const int kDefaultNumberOfBackEdges = 1;
 
 enum IfCondition {
@@ -56,6 +57,12 @@
   void AddInstruction(HInstruction* instruction);
   void RemoveInstruction(HInstruction* instruction);
 
+  // Returns true if `instruction1` is found before `instruction2` in
+  // this instruction list and false otherwise. Aborts if neither
+  // instruction is found.
+  bool FoundBefore(const HInstruction* instruction1,
+                   const HInstruction* instruction2) const;
+
  private:
   HInstruction* first_instruction_;
   HInstruction* last_instruction_;
@@ -192,7 +199,8 @@
   HLoopInformation(HBasicBlock* header, HGraph* graph)
       : header_(header),
         back_edges_(graph->GetArena(), kDefaultNumberOfBackEdges),
-        blocks_(graph->GetArena(), graph->GetBlocks().Size(), false) {}
+        // Make bit vector growable, as the number of blocks may change.
+        blocks_(graph->GetArena(), graph->GetBlocks().Size(), true) {}
 
   HBasicBlock* GetHeader() const {
     return header_;
@@ -265,6 +273,7 @@
         successors_(graph->GetArena(), kDefaultNumberOfSuccessors),
         loop_information_(nullptr),
         dominator_(nullptr),
+        dominated_blocks_(graph->GetArena(), kDefaultNumberOfDominatedBlocks),
         block_id_(-1),
         lifetime_start_(kNoLifetime),
         lifetime_end_(kNoLifetime) {}
@@ -277,6 +286,10 @@
     return successors_;
   }
 
+  const GrowableArray<HBasicBlock*>& GetDominatedBlocks() const {
+    return dominated_blocks_;
+  }
+
   void AddBackEdge(HBasicBlock* back_edge) {
     if (loop_information_ == nullptr) {
       loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_);
@@ -292,6 +305,7 @@
 
   HBasicBlock* GetDominator() const { return dominator_; }
   void SetDominator(HBasicBlock* dominator) { dominator_ = dominator; }
+  void AddDominatedBlock(HBasicBlock* block) { dominated_blocks_.Add(block); }
 
   int NumberOfBackEdges() const {
     return loop_information_ == nullptr
@@ -331,6 +345,13 @@
     block->successors_.Add(this);
   }
 
+  void SwapPredecessors() {
+    DCHECK_EQ(predecessors_.Size(), 2u);
+    HBasicBlock* temp = predecessors_.Get(0);
+    predecessors_.Put(0, predecessors_.Get(1));
+    predecessors_.Put(1, temp);
+  }
+
   size_t GetPredecessorIndexOf(HBasicBlock* predecessor) {
     for (size_t i = 0, e = predecessors_.Size(); i < e; ++i) {
       if (predecessors_.Get(i) == predecessor) {
@@ -352,6 +373,9 @@
   void AddInstruction(HInstruction* instruction);
   void RemoveInstruction(HInstruction* instruction);
   void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
+  // Replace instruction `initial` with `replacement` within this block.
+  void ReplaceAndRemoveInstructionWith(HInstruction* initial,
+                                       HInstruction* replacement);
   void AddPhi(HPhi* phi);
   void RemovePhi(HPhi* phi);
 
@@ -401,6 +425,7 @@
   HInstructionList phis_;
   HLoopInformation* loop_information_;
   HBasicBlock* dominator_;
+  GrowableArray<HBasicBlock*> dominated_blocks_;
   int block_id_;
   size_t lifetime_start_;
   size_t lifetime_end_;
@@ -422,6 +447,7 @@
   M(If)                                                    \
   M(IntConstant)                                           \
   M(InvokeStatic)                                          \
+  M(InvokeVirtual)                                         \
   M(LoadLocal)                                             \
   M(Local)                                                 \
   M(LongConstant)                                          \
@@ -443,22 +469,26 @@
   M(BoundsCheck)                                           \
   M(NullCheck)                                             \
   M(Temporary)                                             \
+  M(SuspendCheck)                                          \
 
 #define FOR_EACH_INSTRUCTION(M)                            \
   FOR_EACH_CONCRETE_INSTRUCTION(M)                         \
-  M(Constant)
+  M(Constant)                                              \
+  M(BinaryOperation)
 
 #define FORWARD_DECLARATION(type) class H##type;
 FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
 #undef FORWARD_DECLARATION
 
-#define DECLARE_INSTRUCTION(type)                          \
-  virtual const char* DebugName() const { return #type; }  \
-  virtual H##type* As##type() { return this; }             \
-  virtual bool InstructionTypeEquals(HInstruction* other) const {     \
-    return other->Is##type();                              \
-  }                                                        \
-  virtual void Accept(HGraphVisitor* visitor)              \
+#define DECLARE_INSTRUCTION(type)                                       \
+  virtual InstructionKind GetKind() const { return k##type; }           \
+  virtual const char* DebugName() const { return #type; }               \
+  virtual const H##type* As##type() const OVERRIDE { return this; }     \
+  virtual H##type* As##type() OVERRIDE { return this; }                 \
+  virtual bool InstructionTypeEquals(HInstruction* other) const {       \
+    return other->Is##type();                                           \
+  }                                                                     \
+  virtual void Accept(HGraphVisitor* visitor)
 
 template <typename T>
 class HUseListNode : public ArenaObject {
@@ -483,6 +513,8 @@
 // Represents the side effects an instruction may have.
 class SideEffects : public ValueObject {
  public:
+  SideEffects() : flags_(0) {}
+
   static SideEffects None() {
     return SideEffects(0);
   }
@@ -500,6 +532,31 @@
     return SideEffects(((1 << count) - 1) << kFlagChangesCount);
   }
 
+  SideEffects Union(SideEffects other) const {
+    return SideEffects(flags_ | other.flags_);
+  }
+
+  bool HasSideEffects() const {
+    size_t all_bits_set = (1 << kFlagChangesCount) - 1;
+    return (flags_ & all_bits_set) != 0;
+  }
+
+  bool HasAllSideEffects() const {
+    size_t all_bits_set = (1 << kFlagChangesCount) - 1;
+    return all_bits_set == (flags_ & all_bits_set);
+  }
+
+  bool DependsOn(SideEffects other) const {
+    size_t depends_flags = other.ComputeDependsFlags();
+    return (flags_ & depends_flags) != 0;
+  }
+
+  bool HasDependencies() const {
+    int count = kFlagDependsOnCount - kFlagChangesCount;
+    size_t all_bits_set = (1 << count) - 1;
+    return ((flags_ >> kFlagChangesCount) & all_bits_set) != 0;
+  }
+
  private:
   static constexpr int kFlagChangesSomething = 0;
   static constexpr int kFlagChangesCount = kFlagChangesSomething + 1;
@@ -507,10 +564,13 @@
   static constexpr int kFlagDependsOnSomething = kFlagChangesCount;
   static constexpr int kFlagDependsOnCount = kFlagDependsOnSomething + 1;
 
- private:
   explicit SideEffects(size_t flags) : flags_(flags) {}
 
-  const size_t flags_;
+  size_t ComputeDependsFlags() const {
+    return flags_ << kFlagChangesCount;
+  }
+
+  size_t flags_;
 };
 
 class HInstruction : public ArenaObject {
@@ -531,6 +591,12 @@
 
   virtual ~HInstruction() {}
 
+#define DECLARE_KIND(type) k##type,
+  enum InstructionKind {
+    FOR_EACH_INSTRUCTION(DECLARE_KIND)
+  };
+#undef DECLARE_KIND
+
   HInstruction* GetNext() const { return next_; }
   HInstruction* GetPrevious() const { return previous_; }
 
@@ -551,6 +617,7 @@
 
   virtual bool NeedsEnvironment() const { return false; }
   virtual bool IsControlFlow() const { return false; }
+  bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
 
   void AddUseAt(HInstruction* user, size_t index) {
     uses_ = new (block_->GetGraph()->GetArena()) HUseListNode<HInstruction>(user, index, uses_);
@@ -580,6 +647,10 @@
     return result;
   }
 
+  // Does this instruction dominate `other_instruction`?  Aborts if
+  // this instruction and `other_instruction` are both phis.
+  bool Dominates(HInstruction* other_instruction) const;
+
   int GetId() const { return id_; }
   void SetId(int id) { id_ = id; }
 
@@ -605,7 +676,8 @@
   }
 
 #define INSTRUCTION_TYPE_CHECK(type)                                           \
-  bool Is##type() { return (As##type() != nullptr); }                          \
+  bool Is##type() const { return (As##type() != nullptr); }                    \
+  virtual const H##type* As##type() const { return nullptr; }                  \
   virtual H##type* As##type() { return nullptr; }
 
   FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
@@ -627,6 +699,18 @@
   // 2) Their inputs are identical.
   bool Equals(HInstruction* other) const;
 
+  virtual InstructionKind GetKind() const = 0;
+
+  virtual size_t ComputeHashCode() const {
+    size_t result = GetKind();
+    for (size_t i = 0, e = InputCount(); i < e; ++i) {
+      result = (result * 31) + InputAt(i)->GetId();
+    }
+    return result;
+  }
+
+  SideEffects GetSideEffects() const { return side_effects_; }
+
   size_t GetLifetimePosition() const { return lifetime_position_; }
   void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
   LiveInterval* GetLiveInterval() const { return live_interval_; }
@@ -982,6 +1066,17 @@
   virtual bool CanBeMoved() const { return true; }
   virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
 
+  // Try to statically evaluate this operation and return an HConstant
+  // containing the result of the evaluation. If the operation cannot
+  // be evaluated as a constant, return nullptr.
+  HConstant* TryStaticEvaluation(ArenaAllocator* allocator) const;
+
+  // Apply this operation to `x` and `y`.
+  virtual int32_t Evaluate(int32_t x, int32_t y) const = 0;
+  virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
+
+  DECLARE_INSTRUCTION(BinaryOperation);
+
  private:
   DISALLOW_COPY_AND_ASSIGN(HBinaryOperation);
 };
@@ -1008,6 +1103,9 @@
   HEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x == y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x == y; }
+
   DECLARE_INSTRUCTION(Equal);
 
   virtual IfCondition GetCondition() const {
@@ -1023,6 +1121,9 @@
   HNotEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x != y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x != y; }
+
   DECLARE_INSTRUCTION(NotEqual);
 
   virtual IfCondition GetCondition() const {
@@ -1038,6 +1139,9 @@
   HLessThan(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x < y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x < y; }
+
   DECLARE_INSTRUCTION(LessThan);
 
   virtual IfCondition GetCondition() const {
@@ -1053,6 +1157,9 @@
   HLessThanOrEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x <= y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x <= y; }
+
   DECLARE_INSTRUCTION(LessThanOrEqual);
 
   virtual IfCondition GetCondition() const {
@@ -1068,6 +1175,9 @@
   HGreaterThan(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x > y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x > y; }
+
   DECLARE_INSTRUCTION(GreaterThan);
 
   virtual IfCondition GetCondition() const {
@@ -1083,6 +1193,9 @@
   HGreaterThanOrEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x >= y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x >= y; }
+
   DECLARE_INSTRUCTION(GreaterThanOrEqual);
 
   virtual IfCondition GetCondition() const {
@@ -1104,6 +1217,19 @@
     DCHECK_EQ(type, second->GetType());
   }
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const {
+    return
+      x == y ? 0 :
+      x > y ? 1 :
+      -1;
+  }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const {
+    return
+      x == y ? 0 :
+      x > y ? 1 :
+      -1;
+  }
+
   DECLARE_INSTRUCTION(Compare);
 
  private:
@@ -1184,6 +1310,8 @@
     return other->AsIntConstant()->value_ == value_;
   }
 
+  virtual size_t ComputeHashCode() const { return GetValue(); }
+
   DECLARE_INSTRUCTION(IntConstant);
 
  private:
@@ -1202,6 +1330,8 @@
     return other->AsLongConstant()->value_ == value_;
   }
 
+  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+
   DECLARE_INSTRUCTION(LongConstant);
 
  private:
@@ -1271,6 +1401,26 @@
   DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
 };
 
+class HInvokeVirtual : public HInvoke {
+ public:
+  HInvokeVirtual(ArenaAllocator* arena,
+                 uint32_t number_of_arguments,
+                 Primitive::Type return_type,
+                 uint32_t dex_pc,
+                 uint32_t vtable_index)
+      : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+        vtable_index_(vtable_index) {}
+
+  uint32_t GetVTableIndex() const { return vtable_index_; }
+
+  DECLARE_INSTRUCTION(InvokeVirtual);
+
+ private:
+  const uint32_t vtable_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
+};
+
 class HNewInstance : public HExpression<0> {
  public:
   HNewInstance(uint32_t dex_pc, uint16_t type_index)
@@ -1300,6 +1450,9 @@
 
   virtual bool IsCommutative() { return true; }
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x + y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x + y; }
+
   DECLARE_INSTRUCTION(Add);
 
  private:
@@ -1313,6 +1466,9 @@
 
   virtual bool IsCommutative() { return false; }
 
+  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x - y; }
+  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x - y; }
+
   DECLARE_INSTRUCTION(Sub);
 
  private:
@@ -1445,6 +1601,10 @@
     return other_offset == GetFieldOffset().SizeValue();
   }
 
+  virtual size_t ComputeHashCode() const {
+    return (HInstruction::ComputeHashCode() << 7) | GetFieldOffset().SizeValue();
+  }
+
   MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
   Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
 
@@ -1593,6 +1753,25 @@
   DISALLOW_COPY_AND_ASSIGN(HTemporary);
 };
 
+class HSuspendCheck : public HTemplateInstruction<0> {
+ public:
+  explicit HSuspendCheck(uint32_t dex_pc)
+      : HTemplateInstruction(SideEffects::ChangesSomething()), dex_pc_(dex_pc) {}
+
+  virtual bool NeedsEnvironment() const {
+    return true;
+  }
+
+  uint32_t GetDexPc() const { return dex_pc_; }
+
+  DECLARE_INSTRUCTION(SuspendCheck);
+
+ private:
+  const uint32_t dex_pc_;
+
+  DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
+};
+
 class MoveOperands : public ArenaObject {
  public:
   MoveOperands(Location source, Location destination)
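The two DCHECKs added to HInstruction::Equals in nodes.cc pin down the contract between Equals() and the new ComputeHashCode(): instructions that compare equal must hash identically, because value numbering keys its lookups on the hash. A toy lookup of the shape that contract enables (the plain map here is a simplified stand-in; collisions overwrite, which the real GVN storage would not do):

    #include <unordered_map>

    // Returns the instruction that now carries this value number: either an
    // existing equivalent instruction, or `instruction` itself.
    HInstruction* LookupOrAdd(std::unordered_map<size_t, HInstruction*>* values,
                              HInstruction* instruction) {
      size_t hash = instruction->ComputeHashCode();
      auto it = values->find(hash);
      if (it != values->end() && it->second->Equals(instruction)) {
        return it->second;  // reuse the previously numbered value
      }
      (*values)[hash] = instruction;
      return instruction;
    }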
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 75f4155..702eba1 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -25,6 +25,7 @@
 #include "driver/compiler_driver.h"
 #include "driver/dex_compilation_unit.h"
 #include "graph_visualizer.h"
+#include "gvn.h"
 #include "nodes.h"
 #include "register_allocator.h"
 #include "ssa_phi_elimination.h"
@@ -38,7 +39,7 @@
  */
 class CodeVectorAllocator FINAL : public CodeAllocator {
  public:
-  CodeVectorAllocator() { }
+  CodeVectorAllocator() {}
 
   virtual uint8_t* Allocate(size_t size) {
     size_ = size;
@@ -70,6 +71,7 @@
 class OptimizingCompiler FINAL : public Compiler {
  public:
   explicit OptimizingCompiler(CompilerDriver* driver);
+  ~OptimizingCompiler();
 
   bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
       OVERRIDE;
@@ -113,6 +115,13 @@
   void UnInit() const OVERRIDE;
 
  private:
+  // Whether we should run any optimization or register allocation. If false, we
+  // only run code generation after the graph is built.
+  const bool run_optimizations_;
+  mutable AtomicInteger total_compiled_methods_;
+  mutable AtomicInteger unoptimized_compiled_methods_;
+  mutable AtomicInteger optimized_compiled_methods_;
+
   std::unique_ptr<std::ostream> visualizer_output_;
 
   // Delegate to another compiler in case the optimizing compiler cannot compile a method.
@@ -122,8 +131,16 @@
   DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
 };
 
-OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : Compiler(driver, 100),
-    delegate_(Create(driver, Compiler::Kind::kQuick)) {
+static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
+
+OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
+    : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
+      run_optimizations_(
+          driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
+      total_compiled_methods_(0),
+      unoptimized_compiled_methods_(0),
+      optimized_compiled_methods_(0),
+      delegate_(Create(driver, Compiler::Kind::kQuick)) {
   if (kIsVisualizerEnabled) {
     visualizer_output_.reset(new std::ofstream("art.cfg"));
   }
@@ -137,6 +154,18 @@
   delegate_->UnInit();
 }
 
+OptimizingCompiler::~OptimizingCompiler() {
+  if (total_compiled_methods_ == 0) {
+    LOG(INFO) << "Did not compile any method.";
+  } else {
+    size_t unoptimized_percent = (unoptimized_compiled_methods_ * 100 / total_compiled_methods_);
+    size_t optimized_percent = (optimized_compiled_methods_ * 100 / total_compiled_methods_);
+    LOG(INFO) << "Compiled " << total_compiled_methods_ << " methods: "
+              << unoptimized_percent << "% (" << unoptimized_compiled_methods_ << ") unoptimized, "
+              << optimized_percent << "% (" << optimized_compiled_methods_ << ") optimized.";
+  }
+}
+
 bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
                                           CompilationUnit* cu) const {
   return delegate_->CanCompileMethod(method_idx, dex_file, cu);
@@ -173,6 +202,7 @@
                                                uint32_t method_idx,
                                                jobject class_loader,
                                                const DexFile& dex_file) const {
+  total_compiled_methods_++;
   InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
   // Always use the thumb2 assembler: some runtime functionality (like implicit stack
   // overflow checks) assume thumb2.
@@ -222,7 +252,8 @@
 
   CodeVectorAllocator allocator;
 
-  if (RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+  if (run_optimizations_ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+    optimized_compiled_methods_++;
     graph->BuildDominatorTree();
     graph->TransformToSSA();
     visualizer.DumpGraph("ssa");
@@ -230,6 +261,8 @@
 
     SsaRedundantPhiElimination(graph).Run();
     SsaDeadPhiElimination(graph).Run();
+    GlobalValueNumberer(graph->GetArena(), graph).Run();
+    visualizer.DumpGraph(kGVNPassName);
 
     SsaLivenessAnalysis liveness(*graph, codegen);
     liveness.Analyze();
@@ -262,6 +295,7 @@
     LOG(FATAL) << "Could not allocate registers in optimizing compiler";
     return nullptr;
   } else {
+    unoptimized_compiled_methods_++;
     codegen->CompileBaseline(&allocator);
 
     // Run these phases to get some test coverage.
@@ -269,6 +303,9 @@
     graph->TransformToSSA();
     visualizer.DumpGraph("ssa");
     graph->FindNaturalLoops();
+    SsaRedundantPhiElimination(graph).Run();
+    SsaDeadPhiElimination(graph).Run();
+    GlobalValueNumberer(graph->GetArena(), graph).Run();
     SsaLivenessAnalysis liveness(*graph, codegen);
     liveness.Analyze();
     visualizer.DumpGraph(kLivenessPassName);
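The new counters are declared `mutable AtomicInteger` because TryCompile() is a const method that the compiler driver runs concurrently on several worker threads; plain ints would race. A minimal standalone rendering of the same pattern (an illustration, not this patch's code):

    #include <atomic>

    class CompilationStats {
     public:
      // Safe to call from several threads, even through a const object.
      void RecordMethod() const { total_compiled_methods_++; }
      int Total() const { return total_compiled_methods_.load(); }

     private:
      mutable std::atomic<int> total_compiled_methods_{0};
    };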
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 36a6a21..6dd53e5 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,8 +17,14 @@
 #ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
 #define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
 
+#include "nodes.h"
+#include "builder.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
 #include "ssa_liveness_analysis.h"
 
+#include "gtest/gtest.h"
+
 namespace art {
 
 #define NUM_INSTRUCTIONS(...)  \
@@ -48,6 +54,46 @@
   return interval;
 }
 
+inline void RemoveSuspendChecks(HGraph* graph) {
+  for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+    for (HInstructionIterator it(graph->GetBlocks().Get(i)->GetInstructions());
+         !it.Done();
+         it.Advance()) {
+      HInstruction* current = it.Current();
+      if (current->IsSuspendCheck()) {
+        current->GetBlock()->RemoveInstruction(current);
+      }
+    }
+  }
+}
+
+// Create a control-flow graph from Dex instructions.
+inline HGraph* CreateCFG(ArenaAllocator* allocator, const uint16_t* data) {
+  HGraphBuilder builder(allocator);
+  const DexFile::CodeItem* item =
+    reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  return graph;
+}
+
+// Naive string diff data type.
+typedef std::list<std::pair<std::string, std::string>> diff_t;
+
+// An alias for the empty string used to make it clear that a line is
+// removed in a diff.
+static const std::string removed = "";
+
+// Naive patch command: apply a diff to a string.
+inline std::string Patch(const std::string& original, const diff_t& diff) {
+  std::string result = original;
+  for (const auto& p : diff) {
+    std::string::size_type pos = result.find(p.first);
+    EXPECT_NE(pos, std::string::npos);
+    result.replace(pos, p.first.size(), p.second);
+  }
+  return result;
+}
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
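The Patch helper lets a test express its expected post-optimization dump as a diff against the pre-optimization dump instead of duplicating the whole string. A hypothetical use, where `expected_before` and the instruction strings are made up for illustration:

    const diff_t expected_diff = {
      { "  3: Add(1, 2) [4]\n", removed },             // instruction folded away
      { "  4: Return(3)\n",     "  4: Return(5)\n" },  // use rewired to a constant
    };
    std::string expected_after = Patch(expected_before, expected_diff);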
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 7e604e9..da6b294 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -45,7 +45,8 @@
 
   const char* expected =
       "BasicBlock 0, succ: 1\n"
-      "  2: Goto 1\n"
+      "  2: SuspendCheck\n"
+      "  3: Goto 1\n"
       "BasicBlock 1, pred: 0, succ: 2\n"
       "  0: ReturnVoid\n"
       "BasicBlock 2, pred: 1\n"
@@ -57,7 +58,8 @@
 TEST(PrettyPrinterTest, CFG1) {
   const char* expected =
     "BasicBlock 0, succ: 1\n"
-    "  3: Goto 1\n"
+    "  3: SuspendCheck\n"
+    "  4: Goto 1\n"
     "BasicBlock 1, pred: 0, succ: 2\n"
     "  0: Goto 2\n"
     "BasicBlock 2, pred: 1, succ: 3\n"
@@ -76,7 +78,8 @@
 TEST(PrettyPrinterTest, CFG2) {
   const char* expected =
     "BasicBlock 0, succ: 1\n"
-    "  4: Goto 1\n"
+    "  4: SuspendCheck\n"
+    "  5: Goto 1\n"
     "BasicBlock 1, pred: 0, succ: 2\n"
     "  0: Goto 2\n"
     "BasicBlock 2, pred: 1, succ: 3\n"
@@ -97,15 +100,17 @@
 TEST(PrettyPrinterTest, CFG3) {
   const char* expected =
     "BasicBlock 0, succ: 1\n"
-    "  4: Goto 1\n"
+    "  5: SuspendCheck\n"
+    "  6: Goto 1\n"
     "BasicBlock 1, pred: 0, succ: 3\n"
     "  0: Goto 3\n"
     "BasicBlock 2, pred: 3, succ: 4\n"
     "  1: ReturnVoid\n"
     "BasicBlock 3, pred: 1, succ: 2\n"
-    "  2: Goto 2\n"
+    "  2: SuspendCheck\n"
+    "  3: Goto 2\n"
     "BasicBlock 4, pred: 2\n"
-    "  3: Exit\n";
+    "  4: Exit\n";
 
   const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
     Instruction::GOTO | 0x200,
@@ -132,11 +137,13 @@
 TEST(PrettyPrinterTest, CFG4) {
   const char* expected =
     "BasicBlock 0, succ: 1\n"
-    "  2: Goto 1\n"
+    "  3: SuspendCheck\n"
+    "  4: Goto 1\n"
     "BasicBlock 1, pred: 0, 1, succ: 1\n"
-    "  0: Goto 1\n"
+    "  0: SuspendCheck\n"
+    "  1: Goto 1\n"
     "BasicBlock 2\n"
-    "  1: Exit\n";
+    "  2: Exit\n";
 
   const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
     Instruction::NOP,
@@ -153,13 +160,15 @@
 TEST(PrettyPrinterTest, CFG5) {
   const char* expected =
     "BasicBlock 0, succ: 1\n"
-    "  3: Goto 1\n"
+    "  4: SuspendCheck\n"
+    "  5: Goto 1\n"
     "BasicBlock 1, pred: 0, 2, succ: 3\n"
     "  0: ReturnVoid\n"
     "BasicBlock 2, succ: 1\n"
-    "  1: Goto 1\n"
+    "  1: SuspendCheck\n"
+    "  2: Goto 1\n"
     "BasicBlock 3, pred: 1\n"
-    "  2: Exit\n";
+    "  3: Exit\n";
 
   const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
     Instruction::RETURN_VOID,
@@ -174,7 +183,8 @@
     "BasicBlock 0, succ: 1\n"
     "  0: Local [4, 3, 2]\n"
     "  1: IntConstant [2]\n"
-    "  10: Goto 1\n"
+    "  10: SuspendCheck\n"
+    "  11: Goto 1\n"
     "BasicBlock 1, pred: 0, succ: 3, 2\n"
     "  2: StoreLocal(0, 1)\n"
     "  3: LoadLocal(0) [5]\n"
@@ -202,7 +212,8 @@
     "BasicBlock 0, succ: 1\n"
     "  0: Local [4, 3, 2]\n"
     "  1: IntConstant [2]\n"
-    "  10: Goto 1\n"
+    "  11: SuspendCheck\n"
+    "  12: Goto 1\n"
     "BasicBlock 1, pred: 0, succ: 3, 2\n"
     "  2: StoreLocal(0, 1)\n"
     "  3: LoadLocal(0) [5]\n"
@@ -212,9 +223,10 @@
     "BasicBlock 2, pred: 1, 3, succ: 3\n"
     "  7: Goto 3\n"
     "BasicBlock 3, pred: 1, 2, succ: 2\n"
-    "  8: Goto 2\n"
+    "  8: SuspendCheck\n"
+    "  9: Goto 2\n"
     "BasicBlock 4\n"
-    "  9: Exit\n";
+    "  10: Exit\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -230,7 +242,8 @@
     "BasicBlock 0, succ: 1\n"
     "  0: Local [2]\n"
     "  1: IntConstant [2]\n"
-    "  5: Goto 1\n"
+    "  5: SuspendCheck\n"
+    "  6: Goto 1\n"
     "BasicBlock 1, pred: 0, succ: 2\n"
     "  2: StoreLocal(0, 1)\n"
     "  3: ReturnVoid\n"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 54888ba..7862611 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -440,7 +440,8 @@
     DCHECK(inactive->HasRegister());
     size_t next_intersection = inactive->FirstIntersectionWith(current);
     if (next_intersection != kNoLifetime) {
-      free_until[inactive->GetRegister()] = next_intersection;
+      free_until[inactive->GetRegister()] =
+          std::min(free_until[inactive->GetRegister()], next_intersection);
     }
   }
 
@@ -738,9 +739,14 @@
   return instruction->GetLifetimePosition() == kInputMoveLifetimePosition;
 }
 
+static bool IsValidDestination(Location destination) {
+  return destination.IsRegister() || destination.IsStackSlot() || destination.IsDoubleStackSlot();
+}
+
 void RegisterAllocator::AddInputMoveFor(HInstruction* instruction,
                                         Location source,
                                         Location destination) const {
+  DCHECK(IsValidDestination(destination));
   if (source.Equals(destination)) return;
 
   DCHECK(instruction->AsPhi() == nullptr);
@@ -763,6 +769,7 @@
 void RegisterAllocator::InsertParallelMoveAt(size_t position,
                                              Location source,
                                              Location destination) const {
+  DCHECK(IsValidDestination(destination));
   if (source.Equals(destination)) return;
 
   HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
@@ -806,6 +813,7 @@
 void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
                                                    Location source,
                                                    Location destination) const {
+  DCHECK(IsValidDestination(destination));
   if (source.Equals(destination)) return;
 
   DCHECK_EQ(block->GetSuccessors().Size(), 1u);
@@ -828,6 +836,7 @@
 void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block,
                                                     Location source,
                                                     Location destination) const {
+  DCHECK(IsValidDestination(destination));
   if (source.Equals(destination)) return;
 
   HInstruction* first = block->GetFirstInstruction();
@@ -845,6 +854,7 @@
 void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
                                         Location source,
                                         Location destination) const {
+  DCHECK(IsValidDestination(destination));
   if (source.Equals(destination)) return;
 
   if (instruction->AsPhi() != nullptr) {
@@ -892,7 +902,7 @@
         Location expected_location = locations->InAt(use->GetInputIndex());
         if (expected_location.IsUnallocated()) {
           locations->SetInAt(use->GetInputIndex(), source);
-        } else {
+        } else if (!expected_location.IsConstant()) {
           AddInputMoveFor(use->GetUser(), source, expected_location);
         }
       }
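The std::min above matters when several inactive intervals hold the same register: the old assignment let whichever interval was processed last overwrite free_until, so a later intersection could mask an earlier conflict. A standalone sketch with made-up positions shows the difference:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      size_t free_until = static_cast<size_t>(-1);  // register looks free forever
      // Two inactive intervals on the same register, next intersecting the
      // current interval at positions 20 and 40; iteration order is arbitrary.
      const size_t intersections[] = {20, 40};
      for (size_t next_intersection : intersections) {
        // Old code: free_until = next_intersection;  // would end up as 40
        free_until = std::min(free_until, next_intersection);
      }
      assert(free_until == 20);  // only free until the first conflict
      return 0;
    }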
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index f737491..7d397e3 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -21,6 +21,8 @@
 #include "primitive.h"
 #include "utils/growable_array.h"
 
+#include "gtest/gtest.h"
+
 namespace art {
 
 class CodeGenerator;
@@ -177,6 +179,8 @@
   // Slots reserved for out arguments.
   size_t reserved_out_slots_;
 
+  FRIEND_TEST(RegisterAllocatorTest, FreeUntil);
+
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
 };
 
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 7539d44..3e3b6b1 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -16,6 +16,7 @@
 
 #include "builder.h"
 #include "code_generator.h"
+#include "code_generator_x86.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "nodes.h"
@@ -41,10 +42,10 @@
   graph->BuildDominatorTree();
   graph->TransformToSSA();
   graph->FindNaturalLoops();
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, codegen, liveness);
+  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
   register_allocator.AllocateRegisters();
   return register_allocator.Validate(false);
 }
@@ -57,7 +58,7 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = new (&allocator) HGraph(&allocator);
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  x86::CodeGeneratorX86 codegen(graph);
   GrowableArray<LiveInterval*> intervals(&allocator, 0);
 
   // Test with two intervals of the same range.
@@ -66,11 +67,11 @@
     intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
     intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
 
     intervals.Get(1)->SetRegister(0);
     ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
     intervals.Reset();
   }
 
@@ -81,11 +82,11 @@
     static constexpr size_t ranges2[][2] = {{42, 43}};
     intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
 
     intervals.Get(1)->SetRegister(0);
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
     intervals.Reset();
   }
 
@@ -96,11 +97,11 @@
     static constexpr size_t ranges2[][2] = {{42, 43}};
     intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
 
     intervals.Get(1)->SetRegister(0);
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
     intervals.Reset();
   }
 
@@ -111,11 +112,11 @@
     static constexpr size_t ranges2[][2] = {{42, 47}};
     intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
 
     intervals.Get(1)->SetRegister(0);
     ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
     intervals.Reset();
   }
 
@@ -127,16 +128,16 @@
     static constexpr size_t ranges2[][2] = {{42, 47}};
     intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
 
     intervals.Get(1)->SetRegister(0);
     // Sibling of the first interval has no register allocated to it.
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
 
     intervals.Get(0)->GetNextSibling()->SetRegister(0);
     ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
-        intervals, 0, 0, *codegen, &allocator, true, false));
+        intervals, 0, 0, codegen, &allocator, true, false));
   }
 }
 
@@ -298,10 +299,10 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildSSAGraph(data, &allocator);
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, codegen, liveness);
+  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
   register_allocator.AllocateRegisters();
   ASSERT_TRUE(register_allocator.Validate(false));
 
@@ -330,8 +331,8 @@
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildSSAGraph(data, &allocator);
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kArm);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
   HAdd* first_add = graph->GetBlocks().Get(1)->GetFirstInstruction()->AsAdd();
@@ -383,12 +384,67 @@
   ArenaAllocator allocator(&pool);
   HGraph* graph = BuildSSAGraph(data, &allocator);
   SsaDeadPhiElimination(graph).Run();
-  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
-  SsaLivenessAnalysis liveness(*graph, codegen);
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
-  RegisterAllocator register_allocator(&allocator, codegen, liveness);
+  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
   register_allocator.AllocateRegisters();
   ASSERT_TRUE(register_allocator.Validate(false));
 }
 
+/**
+ * Test that the TryAllocateFreeReg method works in the presence of inactive intervals
+ * that share the same register. It should split the interval it is currently
+ * allocating for at the minimum next intersection position among those intervals.
+ */
+TEST(RegisterAllocatorTest, FreeUntil) {
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::RETURN);
+
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = BuildSSAGraph(data, &allocator);
+  SsaDeadPhiElimination(graph).Run();
+  x86::CodeGeneratorX86 codegen(graph);
+  SsaLivenessAnalysis liveness(*graph, &codegen);
+  liveness.Analyze();
+  RegisterAllocator register_allocator(&allocator, &codegen, liveness);
+
+  // Add an artificial range to cover the temps that will be put in the unhandled list.
+  LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
+  unhandled->AddLoopRange(0, 60);
+
+  // Add three temps holding the same register, and starting at different positions.
+  // Put the one that should be picked in the middle of the inactive list to ensure
+  // we do not depend on an order.
+  LiveInterval* interval = LiveInterval::MakeTempInterval(&allocator, nullptr, Primitive::kPrimInt);
+  interval->SetRegister(0);
+  interval->AddRange(40, 50);
+  register_allocator.inactive_.Add(interval);
+
+  interval = LiveInterval::MakeTempInterval(&allocator, nullptr, Primitive::kPrimInt);
+  interval->SetRegister(0);
+  interval->AddRange(20, 30);
+  register_allocator.inactive_.Add(interval);
+
+  interval = LiveInterval::MakeTempInterval(&allocator, nullptr, Primitive::kPrimInt);
+  interval->SetRegister(0);
+  interval->AddRange(60, 70);
+  register_allocator.inactive_.Add(interval);
+
+  register_allocator.number_of_registers_ = 1;
+  register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
+  register_allocator.processing_core_registers_ = true;
+  register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
+
+  register_allocator.TryAllocateFreeReg(unhandled);
+
+  // Check that we have split the interval.
+  ASSERT_EQ(1u, register_allocator.unhandled_->Size());
+  // Check that we now need to find a new register where the next interval
+  // that uses the register starts.
+  ASSERT_EQ(20u, register_allocator.unhandled_->Get(0)->GetStart());
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 5de1ab9..680cc0a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -102,13 +102,14 @@
   // to differentiate between the start and end of an instruction. Adding 2 to
   // the lifetime position for each instruction ensures the start of an
   // instruction is different than the end of the previous instruction.
+  HGraphVisitor* location_builder = codegen_->GetLocationBuilder();
   for (HLinearOrderIterator it(*this); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
     block->SetLifetimeStart(lifetime_position);
 
     for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
-      current->Accept(codegen_->GetLocationBuilder());
+      current->Accept(location_builder);
       LocationSummary* locations = current->GetLocations();
       if (locations != nullptr && locations->Out().IsValid()) {
         instructions_from_ssa_index_.Add(current);
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 65675dc..d541a62 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -83,6 +83,10 @@
   }
 }
 
+static bool LoopPreHeaderIsFirstPredecessor(HBasicBlock* block) {
+  return block->GetPredecessors().Get(0) == block->GetLoopInformation()->GetPreHeader();
+}
+
 void SsaRedundantPhiElimination::Run() {
   // Add all phis in the worklist.
   for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
@@ -102,7 +106,10 @@
 
     // Find if the inputs of the phi are the same instruction.
     HInstruction* candidate = phi->InputAt(0);
-    // A loop phi cannot have itself as the first phi.
+    // A loop phi cannot have itself as its first input. Note that this
+    // check relies on our simplification pass ensuring the pre-header
+    // block is first in the list of predecessors of the loop header.
+    DCHECK(!phi->IsLoopHeaderPhi() || LoopPreHeaderIsFirstPredecessor(phi->GetBlock()));
     DCHECK_NE(phi, candidate);
 
     for (size_t i = 1; i < phi->InputCount(); ++i) {
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 088a5c4..ad3b205 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -83,6 +83,9 @@
   HGraph* graph = builder.BuildGraph(*item);
   ASSERT_NE(graph, nullptr);
 
+  // The suspend check implementation may change in the future, and this test
+  // relies on how instructions are ordered.
+  RemoveSuspendChecks(graph);
   graph->BuildDominatorTree();
   graph->TransformToSSA();
   ReNumberInstructions(graph);
@@ -204,8 +207,8 @@
     "BasicBlock 2, pred: 3, 6, succ: 3\n"
     "  4: Phi(6, 0) [6]\n"
     "  5: Goto\n"
-    "BasicBlock 3, pred: 2, 5, succ: 2\n"
-    "  6: Phi(4, 0) [4]\n"
+    "BasicBlock 3, pred: 5, 2, succ: 2\n"
+    "  6: Phi(0, 4) [4]\n"
     "  7: Goto\n"
     "BasicBlock 4\n"
     // Synthesized blocks to avoid critical edge.
@@ -295,8 +298,8 @@
     "  2: Goto\n"
     "BasicBlock 1, pred: 0, succ: 4\n"
     "  3: Goto\n"
-    "BasicBlock 2, pred: 3, 4, succ: 5, 3\n"
-    "  4: Phi(1, 0) [9, 5, 5]\n"
+    "BasicBlock 2, pred: 4, 3, succ: 5, 3\n"
+    "  4: Phi(0, 1) [9, 5, 5]\n"
     "  5: Equal(4, 4) [6]\n"
     "  6: If(5)\n"
     "BasicBlock 3, pred: 2, succ: 2\n"
@@ -336,8 +339,8 @@
     "  6: Goto\n"
     "BasicBlock 3, pred: 1, succ: 8\n"
     "  7: Goto\n"
-    "BasicBlock 4, pred: 5, 8, succ: 6, 5\n"
-    "  8: Phi(8, 14) [8, 12, 9, 9]\n"
+    "BasicBlock 4, pred: 8, 5, succ: 6, 5\n"
+    "  8: Phi(14, 8) [8, 12, 9, 9]\n"
     "  9: Equal(8, 8) [10]\n"
     "  10: If(9)\n"
     "BasicBlock 5, pred: 4, succ: 4\n"
diff --git a/compiler/optimizing/ssa_type_propagation.cc b/compiler/optimizing/ssa_type_propagation.cc
index 53fa74e..a860cb7 100644
--- a/compiler/optimizing/ssa_type_propagation.cc
+++ b/compiler/optimizing/ssa_type_propagation.cc
@@ -28,7 +28,11 @@
     case Primitive::kPrimNot:
       return existing;
     default:
-      return new_type;
+      // Phis are initialized with a void type, so if we are asked
+      // to merge with a void type, we should use the existing one.
+      return new_type == Primitive::kPrimVoid
+          ? existing
+          : new_type;
   }
 }
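The intent of the change: phis start out with a void type, so merging in the type of a not-yet-typed phi input must not erase information already gathered. A reduced standalone model of the merge (an assumed simplification; the kPrimNot arm mirrors the case kept above):

    #include <cassert>

    enum class Type { kVoid, kInt, kNot };

    Type MergeTypes(Type existing, Type new_type) {
      if (existing == Type::kNot) return existing;  // reference types stick
      // Void only comes from not-yet-typed phis; keep what we already know.
      return (new_type == Type::kVoid) ? existing : new_type;
    }

    int main() {
      assert(MergeTypes(Type::kInt, Type::kVoid) == Type::kInt);  // the fixed case
      assert(MergeTypes(Type::kVoid, Type::kInt) == Type::kInt);
      return 0;
    }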
 
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
new file mode 100644
index 0000000..2e48ee8
--- /dev/null
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "builder.h"
+#include "dex_instruction.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+/**
+ * Check that the HGraphBuilder adds suspend checks to backward branches.
+ */
+
+static void TestCode(const uint16_t* data) {
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraphBuilder builder(&allocator);
+  const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  ASSERT_NE(graph, nullptr);
+
+  HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+  HInstruction* first_instruction = first_block->GetFirstInstruction();
+  // Account for some tests having a store local as the first instruction.
+  ASSERT_TRUE(first_instruction->IsSuspendCheck()
+              || first_instruction->GetNext()->IsSuspendCheck());
+}
+
+TEST(SuspendCheckTest, CFG1) {
+  const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+    Instruction::NOP,
+    Instruction::GOTO | 0xFF00);
+
+  TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG2) {
+  const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+    Instruction::GOTO_32, 0, 0);
+
+  TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG3) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 0xFFFF,
+    Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG4) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_NE, 0xFFFF,
+    Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG5) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQZ, 0xFFFF,
+    Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+
+TEST(SuspendCheckTest, CFG6) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_NEZ, 0xFFFF,
+    Instruction::RETURN_VOID);
+
+  TestCode(data);
+}
+}  // namespace art
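All six tests assert the same invariant: the builder plants an HSuspendCheck at every backward branch, so a loop reaches a safepoint on each iteration. A hedged sketch, not the actual HGraphBuilder code, of the decision these tests pin down:

    // Called when the builder emits a goto/if; names and shape are assumed.
    void EmitBranch(HBasicBlock* block, ArenaAllocator* arena,
                    uint32_t dex_pc, uint32_t target_dex_pc) {
      if (target_dex_pc <= dex_pc) {
        // Backward branch: give the runtime a chance to suspend the thread
        // once per loop iteration, before the jump itself.
        block->AddInstruction(new (arena) HSuspendCheck(dex_pc));
      }
      // ... the HGoto/HIf follows here.
    }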
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0e7da55..afc01dc 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -160,7 +160,14 @@
   UsageError("      Example: --compiler-backend=Portable");
   UsageError("      Default: Quick");
   UsageError("");
-  UsageError("  --compiler-filter=(verify-none|interpret-only|space|balanced|speed|everything):");
+  UsageError("  --compiler-filter="
+                "(verify-none"
+                "|interpret-only"
+                "|space"
+                "|balanced"
+                "|speed"
+                "|everything"
+                "|time):");
   UsageError("      select compiler filter.");
   UsageError("      Example: --compiler-filter=everything");
 #if ART_SMALL_MODE
@@ -435,6 +442,11 @@
       return nullptr;
     }
 
+    // Flush result to disk. Patching code will re-open the file (mmap), so ensure that our view
+    // of the file already made it there and won't be re-ordered with writes from PatchOat or
+    // image patching.
+    oat_file->Flush();
+
     if (!driver->IsImage() && driver->GetCompilerOptions().GetIncludePatchInformation()) {
       t2.NewTiming("Patching ELF");
       std::string error_msg;
@@ -1029,6 +1041,7 @@
       include_debug_symbols = true;
     } else if (option == "--no-include-debug-symbols" || option == "--strip-symbols") {
       include_debug_symbols = false;
+      generate_gdb_information = false;  // Depends on debug symbols, see above.
     } else if (option.starts_with("--profile-file=")) {
       profile_file = option.substr(strlen("--profile-file=")).data();
       VLOG(compiler) << "dex2oat: profile file is " << profile_file;
@@ -1180,6 +1193,8 @@
     compiler_filter = CompilerOptions::kSpeed;
   } else if (strcmp(compiler_filter_string, "everything") == 0) {
     compiler_filter = CompilerOptions::kEverything;
+  } else if (strcmp(compiler_filter_string, "time") == 0) {
+    compiler_filter = CompilerOptions::kTime;
   } else {
     Usage("Unknown --compiler-filter value %s", compiler_filter_string);
   }
@@ -1375,7 +1390,7 @@
    * If we're not in interpret-only or verify-none mode, go ahead and compile small applications.
    * Don't bother to check if we're doing the image.
    */
-  if (!image && compiler_options->IsCompilationEnabled()) {
+  if (!image && compiler_options->IsCompilationEnabled() && compiler_kind == Compiler::kQuick) {
     size_t num_methods = 0;
     for (size_t i = 0; i != dex_files.size(); ++i) {
       const DexFile* dex_file = dex_files[i];
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 41ee213..c97bf64 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -19,6 +19,7 @@
 #include <iostream>
 
 #include "base/logging.h"
+#include "base/stringprintf.h"
 #include "disassembler_arm.h"
 #include "disassembler_arm64.h"
 #include "disassembler_mips.h"
@@ -26,21 +27,30 @@
 
 namespace art {
 
-Disassembler* Disassembler::Create(InstructionSet instruction_set) {
+Disassembler* Disassembler::Create(InstructionSet instruction_set, DisassemblerOptions* options) {
   if (instruction_set == kArm || instruction_set == kThumb2) {
-    return new arm::DisassemblerArm();
+    return new arm::DisassemblerArm(options);
   } else if (instruction_set == kArm64) {
-    return new arm64::DisassemblerArm64();
+    return new arm64::DisassemblerArm64(options);
   } else if (instruction_set == kMips) {
-    return new mips::DisassemblerMips();
+    return new mips::DisassemblerMips(options);
   } else if (instruction_set == kX86) {
-    return new x86::DisassemblerX86(false);
+    return new x86::DisassemblerX86(options, false);
   } else if (instruction_set == kX86_64) {
-    return new x86::DisassemblerX86(true);
+    return new x86::DisassemblerX86(options, true);
   } else {
     UNIMPLEMENTED(FATAL) << "no disassembler for " << instruction_set;
     return NULL;
   }
 }
 
+std::string Disassembler::FormatInstructionPointer(const uint8_t* begin) {
+  if (disassembler_options_->absolute_addresses_) {
+    return StringPrintf("%p", begin);
+  } else {
+    size_t offset = begin - disassembler_options_->base_address_;
+    return StringPrintf("0x%08zx", offset);
+  }
+}
+
 }  // namespace art
diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h
index 183e692..487f433 100644
--- a/disassembler/disassembler.h
+++ b/disassembler/disassembler.h
@@ -26,10 +26,31 @@
 
 namespace art {
 
+class DisassemblerOptions {
+ public:
+  // Should the disassembler print absolute or relative addresses.
+  const bool absolute_addresses_;
+
+  // Base address for calculating relative code offsets when absolute_addresses_ is false.
+  const uint8_t* const base_address_;
+
+  DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address)
+      : absolute_addresses_(absolute_addresses), base_address_(base_address) {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DisassemblerOptions);
+};
+
 class Disassembler {
  public:
-  static Disassembler* Create(InstructionSet instruction_set);
-  virtual ~Disassembler() {}
+  // Creates a Disassembler for the given InstructionSet with the
+  // non-null DisassemblerOptions, which becomes owned by the
+  // Disassembler.
+  static Disassembler* Create(InstructionSet instruction_set, DisassemblerOptions* options);
+
+  virtual ~Disassembler() {
+    delete disassembler_options_;
+  }
 
   // Dump a single instruction returning the length of that instruction.
   virtual size_t Dump(std::ostream& os, const uint8_t* begin) = 0;
@@ -37,9 +58,15 @@
   virtual void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) = 0;
 
  protected:
-  Disassembler() {}
+  explicit Disassembler(DisassemblerOptions* disassembler_options)
+      : disassembler_options_(disassembler_options) {
+    CHECK(disassembler_options_ != nullptr);
+  }
+
+  std::string FormatInstructionPointer(const uint8_t* begin);
 
  private:
+  DisassemblerOptions* disassembler_options_;
   DISALLOW_COPY_AND_ASSIGN(Disassembler);
 };
 
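Note: a hedged usage sketch of the new factory contract (assumes the ART headers; `code_begin` is a hypothetical pointer). The options object must be heap-allocated by the caller, because the Disassembler deletes it in its destructor:

```cpp
// Sketch only: Create() takes ownership of the heap-allocated options,
// so they must not be stack-allocated or freed by the caller.
const uint8_t* code_begin = /* start of code to disassemble */ nullptr;
Disassembler* disasm = Disassembler::Create(
    kThumb2, new DisassemblerOptions(/* absolute_addresses */ false, code_begin));
disasm->Dump(std::cout, code_begin, code_begin + 16);
delete disasm;  // also deletes the DisassemblerOptions
```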
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 56023c1..54e7761 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -94,7 +94,7 @@
 }
 
 void DisassemblerArm::DumpBranchTarget(std::ostream& os, const uint8_t* instr_ptr, int32_t imm32) {
-  os << StringPrintf("%+d (%p)", imm32, instr_ptr + imm32);
+  os << StringPrintf("%+d (", imm32) << FormatInstructionPointer(instr_ptr + imm32) << ")";
 }
 
 static uint32_t ReadU16(const uint8_t* ptr) {
@@ -356,7 +356,9 @@
     opcode += kConditionCodeNames[cond];
     opcode += suffixes;
     // TODO: a more complete ARM disassembler could generate wider opcodes.
-    os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instruction, opcode.c_str()) << args.str() << '\n';
+    os << FormatInstructionPointer(instr_ptr)
+       << StringPrintf(": %08x\t%-7s ", instruction, opcode.c_str())
+       << args.str() << '\n';
 }
 
 int32_t ThumbExpand(int32_t imm12) {
@@ -1608,7 +1610,9 @@
     opcode << "UNKNOWN " << op2;
   }
 
-  os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instr, opcode.str().c_str()) << args.str() << '\n';
+  os << FormatInstructionPointer(instr_ptr)
+     << StringPrintf(": %08x\t%-7s ", instr, opcode.str().c_str())
+     << args.str() << '\n';
   return 4;
 }  // NOLINT(readability/fn_size)
 
@@ -1936,7 +1940,9 @@
       it_conditions_.pop_back();
     }
 
-    os << StringPrintf("%p: %04x    \t%-7s ", instr_ptr, instr, opcode.str().c_str()) << args.str() << '\n';
+    os << FormatInstructionPointer(instr_ptr)
+       << StringPrintf(": %04x    \t%-7s ", instr, opcode.str().c_str())
+       << args.str() << '\n';
   }
   return 2;
 }
diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h
index f6d7fda..f870e8e 100644
--- a/disassembler/disassembler_arm.h
+++ b/disassembler/disassembler_arm.h
@@ -26,8 +26,7 @@
 
 class DisassemblerArm FINAL : public Disassembler {
  public:
-  DisassemblerArm() {
-  }
+  explicit DisassemblerArm(DisassemblerOptions* options) : Disassembler(options) {}
 
   size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
   void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 864d22d..5d0c218 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -34,7 +34,8 @@
 size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
   uint32_t instruction = ReadU32(begin);
   decoder.Decode(reinterpret_cast<vixl::Instruction*>(&instruction));
-  os << StringPrintf("%p: %08x\t%s\n", begin, instruction, disasm.GetOutput());
+  os << FormatInstructionPointer(begin)
+     << StringPrintf(": %08x\t%s\n", instruction, disasm.GetOutput());
   return vixl::kInstructionSize;
 }
 
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 28c0fa7..ad20c70 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -27,7 +27,7 @@
 
 class DisassemblerArm64 FINAL : public Disassembler {
  public:
-  DisassemblerArm64() {
+  explicit DisassemblerArm64(DisassemblerOptions* options) : Disassembler(options) {
     decoder.AppendVisitor(&disasm);
   }
 
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 5e89f6f..bd5fac7 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -168,7 +168,7 @@
   return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
 }
 
-static void DumpMips(std::ostream& os, const uint8_t* instr_ptr) {
+size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
   uint32_t instruction = ReadU32(instr_ptr);
 
   uint32_t rs = (instruction >> 21) & 0x1f;  // I-type, R-type.
@@ -197,7 +197,8 @@
               int32_t offset = static_cast<int16_t>(instruction & 0xffff);
               offset <<= 2;
               offset += 4;  // Delay slot.
-              args << StringPrintf("%p  ; %+d", instr_ptr + offset, offset);
+              args << FormatInstructionPointer(instr_ptr + offset)
+                   << StringPrintf("  ; %+d", offset);
             }
             break;
           case 'D': args << 'r' << rd; break;
@@ -254,17 +255,15 @@
     }
   }
 
-  os << StringPrintf("%p: %08x\t%-7s ", instr_ptr, instruction, opcode.c_str()) << args.str() << '\n';
-}
-
-size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* begin) {
-  DumpMips(os, begin);
+  os << FormatInstructionPointer(instr_ptr)
+     << StringPrintf(": %08x\t%-7s ", instruction, opcode.c_str())
+     << args.str() << '\n';
   return 4;
 }
 
 void DisassemblerMips::Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) {
   for (const uint8_t* cur = begin; cur < end; cur += 4) {
-    DumpMips(os, cur);
+    Dump(os, cur);
   }
 }
 
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
index e1fb034..00b2f8d 100644
--- a/disassembler/disassembler_mips.h
+++ b/disassembler/disassembler_mips.h
@@ -26,8 +26,7 @@
 
 class DisassemblerMips FINAL : public Disassembler {
  public:
-  DisassemblerMips() {
-  }
+  explicit DisassemblerMips(DisassemblerOptions* options) : Disassembler(options) {}
 
   size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
   void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 1848abe..1d29765 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -1215,7 +1215,9 @@
       displacement = *reinterpret_cast<const int32_t*>(instr);
       instr += 4;
     }
-    args << StringPrintf("%+d (%p)", displacement, instr + displacement);
+    args << StringPrintf("%+d (", displacement)
+         << FormatInstructionPointer(instr + displacement)
+         << ")";
   }
   if (prefix[1] == kFs && !supports_rex_) {
     args << "  ; ";
@@ -1238,8 +1240,8 @@
     default: LOG(FATAL) << "Unreachable";
   }
   prefixed_opcode << opcode.str();
-  os << StringPrintf("%p: %22s    \t%-7s ", begin_instr, hex.str().c_str(),
-                     prefixed_opcode.str().c_str())
+  os << FormatInstructionPointer(begin_instr)
+     << StringPrintf(": %22s    \t%-7s ", hex.str().c_str(), prefixed_opcode.str().c_str())
      << args.str() << '\n';
   return instr - begin_instr;
 }  // NOLINT(readability/fn_size)
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index 2565bb1..f448662 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -24,8 +24,8 @@
 
 class DisassemblerX86 FINAL : public Disassembler {
  public:
-  explicit DisassemblerX86(bool supports_rex) : supports_rex_(supports_rex) {
-  }
+  DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
+      : Disassembler(options), supports_rex_(supports_rex) {}
 
   size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE;
   void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 4cdf618..87de529 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -79,8 +79,8 @@
           "      Example: --boot-image=/system/framework/boot.art\n"
           "\n");
   fprintf(stderr,
-          "  --instruction-set=(arm|arm64|mips|x86|x86_64): for locating the image file based on the image location\n"
-          "      set.\n"
+          "  --instruction-set=(arm|arm64|mips|x86|x86_64): for locating the image\n"
+          "      file based on the image location set.\n"
           "      Example: --instruction-set=x86\n"
           "      Default: %s\n"
           "\n",
@@ -90,9 +90,20 @@
           "      Example: --output=/tmp/oatdump.txt\n"
           "\n");
   fprintf(stderr,
-          "  --dump:[raw_mapping_table|raw_gc_map]\n"
-          "    Example: --dump:raw_gc_map\n"
-          "    Default: neither\n"
+          "  --dump:raw_mapping_table enables dumping of the mapping table.\n"
+          "      Example: --dump:raw_mapping_table\n"
+          "\n");
+  fprintf(stderr,
+          "  --dump:raw_mapping_table enables dumping of the GC map.\n"
+          "      Example: --dump:raw_gc_map\n"
+          "\n");
+  fprintf(stderr,
+          "  --no-dump:vmap may be used to disable vmap dumping.\n"
+          "      Example: --no-dump:vmap\n"
+          "\n");
+  fprintf(stderr,
+          "  --no-disassemble may be used to disable disassembly.\n"
+          "      Example: --no-disassemble\n"
           "\n");
   exit(EXIT_FAILURE);
 }
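Note: with the split flag documentation above, the dump toggles compose independently; a hypothetical invocation (file paths made up):

      Example: oatdump --oat-file=/data/app/example.oat --dump:raw_gc_map \
          --no-dump:vmap --no-disassemble --output=/tmp/oatdump.txt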
@@ -220,14 +231,14 @@
     while (it.HasNextDirectMethod()) {
       const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx);
       WalkOatMethod(class_def, class_method_idx, oat_method, dex_file, it.GetMemberIndex(),
-                    it.GetMethodCodeItem(), it.GetMemberAccessFlags(), callback);
+                    it.GetMethodCodeItem(), it.GetMethodAccessFlags(), callback);
       class_method_idx++;
       it.Next();
     }
     while (it.HasNextVirtualMethod()) {
       const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx);
       WalkOatMethod(class_def, class_method_idx, oat_method, dex_file, it.GetMemberIndex(),
-                    it.GetMethodCodeItem(), it.GetMemberAccessFlags(), callback);
+                    it.GetMethodCodeItem(), it.GetMethodAccessFlags(), callback);
       class_method_idx++;
       it.Next();
     }
@@ -326,18 +337,45 @@
   std::string output_name_;
 };
 
+class OatDumperOptions {
+ public:
+  OatDumperOptions(bool dump_raw_mapping_table,
+                   bool dump_raw_gc_map,
+                   bool dump_vmap,
+                   bool disassemble_code,
+                   bool absolute_addresses)
+    : dump_raw_mapping_table_(dump_raw_mapping_table),
+      dump_raw_gc_map_(dump_raw_gc_map),
+      dump_vmap_(dump_vmap),
+      disassemble_code_(disassemble_code),
+      absolute_addresses_(absolute_addresses) {}
+
+  const bool dump_raw_mapping_table_;
+  const bool dump_raw_gc_map_;
+  const bool dump_vmap_;
+  const bool disassemble_code_;
+  const bool absolute_addresses_;
+};
+
 class OatDumper {
  public:
-  explicit OatDumper(const OatFile& oat_file, bool dump_raw_mapping_table, bool dump_raw_gc_map)
+  explicit OatDumper(const OatFile& oat_file, OatDumperOptions* options)
     : oat_file_(oat_file),
       oat_dex_files_(oat_file.GetOatDexFiles()),
-      dump_raw_mapping_table_(dump_raw_mapping_table),
-      dump_raw_gc_map_(dump_raw_gc_map),
-      disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet())) {
+      options_(options),
+      disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet(),
+                                         new DisassemblerOptions(options_->absolute_addresses_,
+                                                                 oat_file.Begin()))) {
     AddAllOffsets();
   }
 
-  void Dump(std::ostream& os) {
+  ~OatDumper() {
+    delete options_;
+    delete disassembler_;
+  }
+
+  bool Dump(std::ostream& os) {
+    bool success = true;
     const OatHeader& oat_header = oat_file_.GetOatHeader();
 
     os << "MAGIC:\n";
@@ -358,7 +396,7 @@
 #define DUMP_OAT_HEADER_OFFSET(label, offset) \
     os << label " OFFSET:\n"; \
     os << StringPrintf("0x%08x", oat_header.offset()); \
-    if (oat_header.offset() != 0) { \
+    if (oat_header.offset() != 0 && options_->absolute_addresses_) { \
       os << StringPrintf(" (%p)", oat_file_.Begin() + oat_header.offset()); \
     } \
     os << StringPrintf("\n\n");
@@ -386,7 +424,10 @@
                            GetQuickToInterpreterBridgeOffset);
 #undef DUMP_OAT_HEADER_OFFSET
 
-    os << "IMAGE PATCH DELTA:\n" << oat_header.GetImagePatchDelta() << "\n\n";
+    os << "IMAGE PATCH DELTA:\n";
+    os << StringPrintf("%d (0x%08x)\n\n",
+                       oat_header.GetImagePatchDelta(),
+                       oat_header.GetImagePatchDelta());
 
     os << "IMAGE FILE LOCATION OAT CHECKSUM:\n";
     os << StringPrintf("0x%08x\n\n", oat_header.GetImageFileLocationOatChecksum());
@@ -407,19 +448,28 @@
       os << "\n";
     }
 
-    os << "BEGIN:\n";
-    os << reinterpret_cast<const void*>(oat_file_.Begin()) << "\n\n";
+    if (options_->absolute_addresses_) {
+      os << "BEGIN:\n";
+      os << reinterpret_cast<const void*>(oat_file_.Begin()) << "\n\n";
 
-    os << "END:\n";
-    os << reinterpret_cast<const void*>(oat_file_.End()) << "\n\n";
+      os << "END:\n";
+      os << reinterpret_cast<const void*>(oat_file_.End()) << "\n\n";
+    }
+
+    os << "SIZE:\n";
+    os << oat_file_.Size() << "\n\n";
 
     os << std::flush;
 
     for (size_t i = 0; i < oat_dex_files_.size(); i++) {
       const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
-      CHECK(oat_dex_file != NULL);
-      DumpOatDexFile(os, *oat_dex_file);
+      CHECK(oat_dex_file != nullptr);
+      if (!DumpOatDexFile(os, *oat_dex_file)) {
+        success = false;
+      }
     }
+    os << std::flush;
+    return success;
   }
 
   size_t ComputeSize(const void* oat_data) {
@@ -451,7 +501,7 @@
       } else {
         const DexFile::ClassDef* class_def =
             dex_file->FindClassDef(m->GetDeclaringClassDescriptor());
-        if (class_def != NULL) {
+        if (class_def != nullptr) {
           uint16_t class_def_index = dex_file->GetIndexForClassDef(*class_def);
           const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
           size_t method_index = m->GetMethodIndex();
@@ -459,7 +509,7 @@
         }
       }
     }
-    return NULL;
+    return nullptr;
   }
 
  private:
@@ -470,7 +520,7 @@
     // of a piece of code by using upper_bound to find the start of the next region.
     for (size_t i = 0; i < oat_dex_files_.size(); i++) {
       const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
-      CHECK(oat_dex_file != NULL);
+      CHECK(oat_dex_file != nullptr);
       std::string error_msg;
       std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(&error_msg));
       if (dex_file.get() == nullptr) {
@@ -485,7 +535,7 @@
         const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
         const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
         const byte* class_data = dex_file->GetClassData(class_def);
-        if (class_data != NULL) {
+        if (class_data != nullptr) {
           ClassDataItemIterator it(*dex_file, class_data);
           SkipAllFields(it);
           uint32_t class_method_index = 0;
@@ -507,6 +557,10 @@
     offsets_.insert(oat_file_.Size());
   }
 
+  static uint32_t AlignCodeOffset(uint32_t maybe_thumb_offset) {
+    return maybe_thumb_offset & ~0x1;  // TODO: Make this Thumb2 specific.
+  }
+
   void AddOffsets(const OatFile::OatMethod& oat_method) {
     uint32_t code_offset = oat_method.GetCodeOffset();
     if (oat_file_.GetOatHeader().GetInstructionSet() == kThumb2) {
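Note: Thumb2 code offsets carry the Thumb execution state in bit 0, so AlignCodeOffset above masks it off before any range checks; a worked example:

```cpp
// AlignCodeOffset(0x00001235) == 0x00001234: bit 0 only marks the Thumb state,
// it is not part of the file offset.
static_assert((0x00001235u & ~0x1u) == 0x00001234u, "thumb bit is stripped");
```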
@@ -518,8 +572,9 @@
     offsets_.insert(oat_method.GetNativeGcMapOffset());
   }
 
-  void DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
-    os << "OAT DEX FILE:\n";
+  bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
+    bool success = true;
+    os << "OatDexFile:\n";
     os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
     os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
 
@@ -527,26 +582,32 @@
 
     std::string error_msg;
     std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
-    if (dex_file.get() == NULL) {
+    if (dex_file.get() == nullptr) {
       os << "NOT FOUND: " << error_msg << "\n\n";
-      return;
+      os << std::flush;
+      return false;
     }
     for (size_t class_def_index = 0;
          class_def_index < dex_file->NumClassDefs();
          class_def_index++) {
       const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
       const char* descriptor = dex_file->GetClassDescriptor(class_def);
+      uint32_t oat_class_offset = oat_dex_file.GetOatClassOffset(class_def_index);
       const OatFile::OatClass oat_class = oat_dex_file.GetOatClass(class_def_index);
-      os << StringPrintf("%zd: %s (type_idx=%d)", class_def_index, descriptor, class_def.class_idx_)
+      os << StringPrintf("%zd: %s (offset=0x%08x) (type_idx=%d)",
+                         class_def_index, descriptor, oat_class_offset, class_def.class_idx_)
          << " (" << oat_class.GetStatus() << ")"
          << " (" << oat_class.GetType() << ")\n";
       // TODO: include bitmap here if type is kOatClassSomeCompiled?
       Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
       std::ostream indented_os(&indent_filter);
-      DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def);
+      if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def)) {
+        success = false;
+      }
     }
 
     os << std::flush;
+    return success;
   }
 
   static void SkipAllFields(ClassDataItemIterator& it) {
@@ -558,38 +619,51 @@
     }
   }
 
-  void DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
+  bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
                     const DexFile::ClassDef& class_def) {
+    bool success = true;
     const byte* class_data = dex_file.GetClassData(class_def);
-    if (class_data == NULL) {  // empty class such as a marker interface?
-      return;
+    if (class_data == nullptr) {  // empty class such as a marker interface?
+      os << std::flush;
+      return success;
     }
     ClassDataItemIterator it(dex_file, class_data);
     SkipAllFields(it);
-    uint32_t class_method_idx = 0;
+    uint32_t class_method_index = 0;
     while (it.HasNextDirectMethod()) {
-      const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx);
-      DumpOatMethod(os, class_def, class_method_idx, oat_method, dex_file,
-                    it.GetMemberIndex(), it.GetMethodCodeItem(), it.GetMemberAccessFlags());
-      class_method_idx++;
+      if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
+                         it.GetMemberIndex(), it.GetMethodCodeItem(),
+                         it.GetRawMemberAccessFlags())) {
+        success = false;
+      }
+      class_method_index++;
       it.Next();
     }
     while (it.HasNextVirtualMethod()) {
-      const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx);
-      DumpOatMethod(os, class_def, class_method_idx, oat_method, dex_file,
-                    it.GetMemberIndex(), it.GetMethodCodeItem(), it.GetMemberAccessFlags());
-      class_method_idx++;
+      if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
+                         it.GetMemberIndex(), it.GetMethodCodeItem(),
+                         it.GetRawMemberAccessFlags())) {
+        success = false;
+      }
+      class_method_index++;
       it.Next();
     }
     DCHECK(!it.HasNext());
     os << std::flush;
+    return success;
   }
 
-  void DumpOatMethod(std::ostream& os, const DexFile::ClassDef& class_def,
+  static constexpr uint32_t kPrologueBytes = 16;
+
+  // When this was picked, the largest arm method was 55,256 bytes and arm64 was 50,412 bytes.
+  static constexpr uint32_t kMaxCodeSize = 100 * 1000;
+
+  bool DumpOatMethod(std::ostream& os, const DexFile::ClassDef& class_def,
                      uint32_t class_method_index,
-                     const OatFile::OatMethod& oat_method, const DexFile& dex_file,
+                     const OatFile::OatClass& oat_class, const DexFile& dex_file,
                      uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
                      uint32_t method_access_flags) {
+    bool success = true;
     os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
                        class_method_index, PrettyMethod(dex_method_idx, dex_file, true).c_str(),
                        dex_method_idx);
@@ -601,71 +675,196 @@
       *indent1_os << "DEX CODE:\n";
       DumpDexCode(*indent2_os, dex_file, code_item);
     }
-    if (Runtime::Current() != NULL) {
+
+    std::unique_ptr<verifier::MethodVerifier> verifier;
+    if (Runtime::Current() != nullptr) {
       *indent1_os << "VERIFIER TYPE ANALYSIS:\n";
-      DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item,
-                   method_access_flags);
+      verifier.reset(DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item,
+                                  method_access_flags));
     }
+
+    uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
+    const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
+    const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
     {
-      *indent1_os << "OAT DATA:\n";
-
-      *indent2_os << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes());
-      *indent2_os << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask());
-      DumpSpillMask(*indent2_os, oat_method.GetCoreSpillMask(), false);
-      *indent2_os << StringPrintf("\nfp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask());
-      DumpSpillMask(*indent2_os, oat_method.GetFpSpillMask(), true);
-      *indent2_os << StringPrintf("\nvmap_table: %p (offset=0x%08x)\n",
-                                  oat_method.GetVmapTable(), oat_method.GetVmapTableOffset());
-
-      if (oat_method.GetNativeGcMap() != nullptr) {
-        // The native GC map is null for methods compiled with the optimizing compiler.
-        DumpVmap(*indent2_os, oat_method);
+      *indent1_os << "OatMethodOffsets ";
+      if (options_->absolute_addresses_) {
+        *indent1_os << StringPrintf("%p ", oat_method_offsets);
       }
-      DumpVregLocations(*indent2_os, oat_method, code_item);
-      *indent2_os << StringPrintf("mapping_table: %p (offset=0x%08x)\n",
-                                  oat_method.GetMappingTable(), oat_method.GetMappingTableOffset());
-      if (dump_raw_mapping_table_) {
-        Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
-        std::ostream indent3_os(&indent3_filter);
-        DumpMappingTable(indent3_os, oat_method);
+      *indent1_os << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset);
+      if (oat_method_offsets_offset > oat_file_.Size()) {
+        *indent1_os << StringPrintf(
+            "WARNING: oat method offsets offset 0x%08x is past end of file 0x%08zx.\n",
+            oat_method_offsets_offset, oat_file_.Size());
+        // If we can't read OatMethodOffsets, the rest of the data is dangerous to read.
+        os << std::flush;
+        return false;
       }
-      *indent2_os << StringPrintf("gc_map: %p (offset=0x%08x)\n",
-                                  oat_method.GetNativeGcMap(), oat_method.GetNativeGcMapOffset());
-      if (dump_raw_gc_map_) {
+
+      uint32_t code_offset = oat_method.GetCodeOffset();
+      *indent2_os << StringPrintf("code_offset: 0x%08x ", code_offset);
+      uint32_t aligned_code_begin = AlignCodeOffset(oat_method.GetCodeOffset());
+      if (aligned_code_begin > oat_file_.Size()) {
+        *indent2_os << StringPrintf("WARNING: "
+                                    "code offset 0x%08x is past end of file 0x%08zx.\n",
+                                    aligned_code_begin, oat_file_.Size());
+        success = false;
+      }
+      *indent2_os << "\n";
+
+      *indent2_os << "gc_map: ";
+      if (options_->absolute_addresses_) {
+        *indent2_os << StringPrintf("%p ", oat_method.GetNativeGcMap());
+      }
+      uint32_t gc_map_offset = oat_method.GetNativeGcMapOffset();
+      *indent2_os << StringPrintf("(offset=0x%08x)\n", gc_map_offset);
+      if (gc_map_offset > oat_file_.Size()) {
+        *indent2_os << StringPrintf("WARNING: "
+                                    "gc map table offset 0x%08x is past end of file 0x%08zx.\n",
+                                    gc_map_offset, oat_file_.Size());
+        success = false;
+      } else if (options_->dump_raw_gc_map_) {
         Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
         std::ostream indent3_os(&indent3_filter);
         DumpGcMap(indent3_os, oat_method, code_item);
       }
     }
     {
-      const void* code = oat_method.GetQuickCode();
-      uint32_t code_size = oat_method.GetQuickCodeSize();
-      if (code == nullptr) {
-        code = oat_method.GetPortableCode();
-        code_size = oat_method.GetPortableCodeSize();
-      }
-      *indent1_os << StringPrintf("CODE: %p (offset=0x%08x size=%d)%s\n",
-                                 code,
-                                 oat_method.GetCodeOffset(),
-                                 code_size,
-                                 code != nullptr ? "..." : "");
+      *indent1_os << "OatQuickMethodHeader ";
+      uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
+      const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
 
-      Runtime* runtime = Runtime::Current();
-      if (runtime != nullptr) {
-        ScopedObjectAccess soa(Thread::Current());
-        StackHandleScope<1> hs(soa.Self());
-        Handle<mirror::DexCache> dex_cache(
-            hs.NewHandle(runtime->GetClassLinker()->FindDexCache(dex_file)));
-        verifier::MethodVerifier verifier(&dex_file, dex_cache, NullHandle<mirror::ClassLoader>(),
-                                          &class_def, code_item, dex_method_idx,
-                                          NullHandle<mirror::ArtMethod>(), method_access_flags,
-                                          true, true, true);
-        verifier.Verify();
-        DumpCode(*indent2_os, &verifier, oat_method, code_item);
-      } else {
-        DumpCode(*indent2_os, nullptr, oat_method, code_item);
+      if (options_->absolute_addresses_) {
+        *indent1_os << StringPrintf("%p ", method_header);
+      }
+      *indent1_os << StringPrintf("(offset=0x%08x)\n", method_header_offset);
+      if (method_header_offset > oat_file_.Size()) {
+        *indent1_os << StringPrintf(
+            "WARNING: oat quick method header offset 0x%08x is past end of file 0x%08zx.\n",
+            method_header_offset, oat_file_.Size());
+        // If we can't read the OatQuickMethodHeader, the rest of the data is dangerous to read.
+        os << std::flush;
+        return false;
+      }
+
+      *indent2_os << "mapping_table: ";
+      if (options_->absolute_addresses_) {
+        *indent2_os << StringPrintf("%p ", oat_method.GetMappingTable());
+      }
+      uint32_t mapping_table_offset = oat_method.GetMappingTableOffset();
+      *indent2_os << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset());
+      if (mapping_table_offset > oat_file_.Size()) {
+        *indent2_os << StringPrintf("WARNING: "
+                                    "mapping table offset 0x%08x is past end of file 0x%08zx. "
+                                    "mapping table offset was loaded from offset 0x%08x.\n",
+                                    mapping_table_offset, oat_file_.Size(),
+                                    oat_method.GetMappingTableOffsetOffset());
+        success = false;
+      } else if (options_->dump_raw_mapping_table_) {
+        Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
+        std::ostream indent3_os(&indent3_filter);
+        DumpMappingTable(indent3_os, oat_method);
+      }
+
+      *indent2_os << "vmap_table: ";
+      if (options_->absolute_addresses_) {
+        *indent2_os << StringPrintf("%p ", oat_method.GetVmapTable());
+      }
+      uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
+      *indent2_os << StringPrintf("(offset=0x%08x)\n", vmap_table_offset);
+      if (vmap_table_offset > oat_file_.Size()) {
+        *indent2_os << StringPrintf("WARNING: "
+                                    "vmap table offset 0x%08x is past end of file 0x%08zx. "
+                                    "vmap table offset was loaded from offset 0x%08x.\n",
+                                    vmap_table_offset, oat_file_.Size(),
+                                    oat_method.GetVmapTableOffsetOffset());
+        success = false;
+      } else if (options_->dump_vmap_) {
+        DumpVmap(*indent2_os, oat_method);
       }
     }
+    {
+      *indent1_os << "QuickMethodFrameInfo\n";
+
+      *indent2_os << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes());
+      *indent2_os << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask());
+      DumpSpillMask(*indent2_os, oat_method.GetCoreSpillMask(), false);
+      *indent2_os << "\n";
+      *indent2_os << StringPrintf("fp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask());
+      DumpSpillMask(*indent2_os, oat_method.GetFpSpillMask(), true);
+      *indent2_os << "\n";
+    }
+    {
+      // Vreg locations are derived from the spill masks in QuickMethodFrameInfo,
+      // so they are dumped after it, but they are most useful for understanding
+      // the quick code, so they are dumped here.
+      DumpVregLocations(*indent2_os, oat_method, code_item);
+    }
+    {
+      *indent1_os << "CODE: ";
+      uint32_t code_size_offset = oat_method.GetQuickCodeSizeOffset();
+      if (code_size_offset > oat_file_.Size()) {
+        *indent2_os << StringPrintf("WARNING: "
+                                    "code size offset 0x%08x is past end of file 0x%08zx.",
+                                    code_size_offset, oat_file_.Size());
+        success = false;
+      } else {
+        const void* code = oat_method.GetQuickCode();
+        uint32_t code_size = oat_method.GetQuickCodeSize();
+        if (code == nullptr) {
+          code = oat_method.GetPortableCode();
+          code_size = oat_method.GetPortableCodeSize();
+          code_size_offset = 0;
+        }
+        uint32_t code_offset = oat_method.GetCodeOffset();
+        uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
+        uint64_t aligned_code_end = aligned_code_begin + code_size;
+
+        if (options_->absolute_addresses_) {
+          *indent1_os << StringPrintf("%p ", code);
+        }
+        *indent1_os << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n",
+                                    code_offset,
+                                    code_size_offset,
+                                    code_size,
+                                    code != nullptr ? "..." : "");
+
+        if (aligned_code_begin > oat_file_.Size()) {
+          *indent2_os << StringPrintf("WARNING: "
+                                      "start of code at 0x%08x is past end of file 0x%08zx.",
+                                      aligned_code_begin, oat_file_.Size());
+          success = false;
+        } else if (aligned_code_end > oat_file_.Size()) {
+          *indent2_os << StringPrintf("WARNING: "
+                                      "end of code at 0x%08" PRIx64 " is past end of file 0x%08zx. "
+                                      "code size is 0x%08x loaded from offset 0x%08x.\n",
+                                      aligned_code_end, oat_file_.Size(),
+                                      code_size, code_size_offset);
+          success = false;
+          if (options_->disassemble_code_) {
+            if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
+              DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+            }
+          }
+        } else if (code_size > kMaxCodeSize) {
+          *indent2_os << StringPrintf("WARNING: "
+                                      "code size %d is bigger than max expected threshold of %d. "
+                                      "code size is 0x%08x loaded from offset 0x%08x.\n",
+                                      code_size, kMaxCodeSize,
+                                      code_size, code_size_offset);
+          success = false;
+          if (options_->disassemble_code_) {
+            if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
+              DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
+            }
+          }
+        } else if (options_->disassemble_code_) {
+          DumpCode(*indent2_os, verifier.get(), oat_method, code_item, !success, 0);
+        }
+      }
+    }
+    os << std::flush;
+    return success;
   }
 
   void DumpSpillMask(std::ostream& os, uint32_t spill_mask, bool is_float) {
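Note: every table in DumpOatMethod above is validated with the same report-and-continue shape; a hedged distillation of that pattern (not part of the patch — `CheckOffset` is a made-up helper name):

```cpp
// Sketch: warn about an out-of-range offset but let the caller keep dumping
// the remaining tables instead of aborting the whole dump.
static bool CheckOffset(std::ostream& os, const char* what,
                        uint32_t offset, size_t file_size) {
  if (offset > file_size) {
    os << StringPrintf("WARNING: %s offset 0x%08x is past end of file 0x%08zx.\n",
                       what, offset, file_size);
    return false;  // caller records success = false and continues
  }
  return true;
}
```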
@@ -692,8 +891,14 @@
   }
 
   void DumpVmap(std::ostream& os, const OatFile::OatMethod& oat_method) {
+    // If the native GC map is null, then this method has been compiled with the
+    // optimizing compiler. The optimizing compiler currently outputs its stack map
+    // in the vmap table, and the code below does not work with such a stack map.
+    if (oat_method.GetNativeGcMap() == nullptr) {
+      return;
+    }
     const uint8_t* raw_table = oat_method.GetVmapTable();
-    if (raw_table != NULL) {
+    if (raw_table != nullptr) {
       const VmapTable vmap_table(raw_table);
       bool first = true;
       bool processing_fp = false;
@@ -760,7 +965,7 @@
   void DescribeVReg(std::ostream& os, const OatFile::OatMethod& oat_method,
                     const DexFile::CodeItem* code_item, size_t reg, VRegKind kind) {
     const uint8_t* raw_table = oat_method.GetVmapTable();
-    if (raw_table != NULL) {
+    if (raw_table != nullptr) {
       const VmapTable vmap_table(raw_table);
       uint32_t vmap_offset;
       if (vmap_table.IsInContext(reg, kind, &vmap_offset)) {
@@ -883,7 +1088,7 @@
   void DumpGcMapAtNativePcOffset(std::ostream& os, const OatFile::OatMethod& oat_method,
                                  const DexFile::CodeItem* code_item, size_t native_pc_offset) {
     const uint8_t* gc_map_raw = oat_method.GetNativeGcMap();
-    if (gc_map_raw != NULL) {
+    if (gc_map_raw != nullptr) {
       NativePcOffsetToReferenceMap map(gc_map_raw);
       if (map.HasEntry(native_pc_offset)) {
         size_t num_regs = map.RegWidth() * 8;
@@ -948,7 +1153,7 @@
 
 
   void DumpDexCode(std::ostream& os, const DexFile& dex_file, const DexFile::CodeItem* code_item) {
-    if (code_item != NULL) {
+    if (code_item != nullptr) {
       size_t i = 0;
       while (i < code_item->insns_size_in_code_units_) {
         const Instruction* instruction = Instruction::At(&code_item->insns_[i]);
@@ -958,27 +1163,36 @@
     }
   }
 
-  void DumpVerifier(std::ostream& os, uint32_t dex_method_idx, const DexFile* dex_file,
-                    const DexFile::ClassDef& class_def, const DexFile::CodeItem* code_item,
-                    uint32_t method_access_flags) {
+  verifier::MethodVerifier* DumpVerifier(std::ostream& os, uint32_t dex_method_idx,
+                                         const DexFile* dex_file,
+                                         const DexFile::ClassDef& class_def,
+                                         const DexFile::CodeItem* code_item,
+                                         uint32_t method_access_flags) {
     if ((method_access_flags & kAccNative) == 0) {
       ScopedObjectAccess soa(Thread::Current());
       StackHandleScope<1> hs(soa.Self());
       Handle<mirror::DexCache> dex_cache(
           hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
-      verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache,
-                                                    NullHandle<mirror::ClassLoader>(), &class_def,
-                                                    code_item, NullHandle<mirror::ArtMethod>(),
-                                                    method_access_flags);
+      return verifier::MethodVerifier::VerifyMethodAndDump(soa.Self(), os, dex_method_idx, dex_file,
+                                                           dex_cache,
+                                                           NullHandle<mirror::ClassLoader>(),
+                                                           &class_def, code_item,
+                                                           NullHandle<mirror::ArtMethod>(),
+                                                           method_access_flags);
     }
+
+    return nullptr;
   }
 
   void DumpCode(std::ostream& os, verifier::MethodVerifier* verifier,
-                const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item) {
+                const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item,
+                bool bad_input, size_t code_size) {
     const void* portable_code = oat_method.GetPortableCode();
     const void* quick_code = oat_method.GetQuickCode();
 
-    size_t code_size = oat_method.GetQuickCodeSize();
+    if (code_size == 0) {
+      code_size = oat_method.GetQuickCodeSize();
+    }
     if ((code_size == 0) || ((portable_code == nullptr) && (quick_code == nullptr))) {
       os << "NO CODE!\n";
       return;
@@ -986,13 +1200,17 @@
       const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
       size_t offset = 0;
       while (offset < code_size) {
-        DumpMappingAtOffset(os, oat_method, offset, false);
+        if (!bad_input) {
+          DumpMappingAtOffset(os, oat_method, offset, false);
+        }
         offset += disassembler_->Dump(os, quick_native_pc + offset);
-        uint32_t dex_pc = DumpMappingAtOffset(os, oat_method, offset, true);
-        if (dex_pc != DexFile::kDexNoIndex) {
-          DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset);
-          if (verifier != nullptr) {
-            DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc);
+        if (!bad_input) {
+          uint32_t dex_pc = DumpMappingAtOffset(os, oat_method, offset, true);
+          if (dex_pc != DexFile::kDexNoIndex) {
+            DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset);
+            if (verifier != nullptr) {
+              DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc);
+            }
           }
         }
       }
@@ -1003,23 +1221,22 @@
   }
 
   const OatFile& oat_file_;
-  std::vector<const OatFile::OatDexFile*> oat_dex_files_;
-  bool dump_raw_mapping_table_;
-  bool dump_raw_gc_map_;
+  const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
+  const OatDumperOptions* options_;
   std::set<uintptr_t> offsets_;
-  std::unique_ptr<Disassembler> disassembler_;
+  Disassembler* disassembler_;
 };
 
 class ImageDumper {
  public:
   explicit ImageDumper(std::ostream* os, gc::space::ImageSpace& image_space,
-                       const ImageHeader& image_header, bool dump_raw_mapping_table,
-                       bool dump_raw_gc_map)
-      : os_(os), image_space_(image_space), image_header_(image_header),
-        dump_raw_mapping_table_(dump_raw_mapping_table),
-        dump_raw_gc_map_(dump_raw_gc_map) {}
+                       const ImageHeader& image_header, OatDumperOptions* oat_dumper_options)
+      : os_(os),
+        image_space_(image_space),
+        image_header_(image_header),
+        oat_dumper_options_(oat_dumper_options) {}
 
-  void Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     std::ostream& os = *os_;
     os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
 
@@ -1071,7 +1288,7 @@
               indent2_os << StringPrintf("%d to %zd: ", i, i + run);
               i = i + run;
             }
-            if (value != NULL) {
+            if (value != nullptr) {
               PrettyObjectValue(indent2_os, value->GetClass(), value);
             } else {
               indent2_os << i << ": null\n";
@@ -1090,21 +1307,20 @@
     std::string error_msg;
     const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
     if (oat_file == nullptr) {
-      oat_file = OatFile::Open(oat_location, oat_location, NULL, false, &error_msg);
+      oat_file = OatFile::Open(oat_location, oat_location, nullptr, false, &error_msg);
       if (oat_file == nullptr) {
         os << "NOT FOUND: " << error_msg << "\n";
-        return;
+        return false;
       }
     }
     os << "\n";
 
     stats_.oat_file_bytes = oat_file->Size();
 
-    oat_dumper_.reset(new OatDumper(*oat_file, dump_raw_mapping_table_,
-        dump_raw_gc_map_));
+    oat_dumper_.reset(new OatDumper(*oat_file, oat_dumper_options_.release()));
 
     for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
-      CHECK(oat_dex_file != NULL);
+      CHECK(oat_dex_file != nullptr);
       stats_.oat_dex_file_sizes.push_back(std::make_pair(oat_dex_file->GetDexFileLocation(),
                                                          oat_dex_file->FileSize()));
     }
@@ -1152,10 +1368,10 @@
     }
     os << "STATS:\n" << std::flush;
     std::unique_ptr<File> file(OS::OpenFileForReading(image_filename.c_str()));
-    if (file.get() == NULL) {
+    if (file.get() == nullptr) {
       LOG(WARNING) << "Failed to find image in " << image_filename;
     }
-    if (file.get() != NULL) {
+    if (file.get() != nullptr) {
       stats_.file_bytes = file->GetLength();
     }
     size_t header_bytes = sizeof(ImageHeader);
@@ -1169,14 +1385,14 @@
 
     os << std::flush;
 
-    oat_dumper_->Dump(os);
+    return oat_dumper_->Dump(os);
   }
 
  private:
   static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(type != NULL);
-    if (value == NULL) {
+    CHECK(type != nullptr);
+    if (value == nullptr) {
       os << StringPrintf("null   %s\n", PrettyDescriptor(type).c_str());
     } else if (type->IsStringClass()) {
       mirror::String* string = value->AsString();
@@ -1229,14 +1445,14 @@
       // Get the value, don't compute the type unless it is non-null as we don't want
       // to cause class loading.
       mirror::Object* value = field->GetObj(obj);
-      if (value == NULL) {
+      if (value == nullptr) {
         os << StringPrintf("null   %s\n", PrettyDescriptor(descriptor).c_str());
       } else {
         // Grab the field type without causing resolution.
         StackHandleScope<1> hs(Thread::Current());
         FieldHelper fh(hs.NewHandle(field));
         mirror::Class* field_type = fh.GetType(false);
-        if (field_type != NULL) {
+        if (field_type != nullptr) {
           PrettyObjectValue(os, field_type, value);
         } else {
           os << StringPrintf("%p   %s\n", value, PrettyDescriptor(descriptor).c_str());
@@ -1248,11 +1464,11 @@
   static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::Class* super = klass->GetSuperClass();
-    if (super != NULL) {
+    if (super != nullptr) {
       DumpFields(os, obj, super);
     }
     mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
-    if (fields != NULL) {
+    if (fields != nullptr) {
       for (int32_t i = 0; i < fields->GetLength(); i++) {
         mirror::ArtField* field = fields->Get(i);
         PrintField(os, field, obj);
@@ -1288,16 +1504,16 @@
   const void* GetQuickOatCodeEnd(mirror::ArtMethod* m)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m));
-    if (oat_code_begin == NULL) {
-      return NULL;
+    if (oat_code_begin == nullptr) {
+      return nullptr;
     }
     return oat_code_begin + GetQuickOatCodeSize(m);
   }
 
   static void Callback(mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(obj != NULL);
-    DCHECK(arg != NULL);
+    DCHECK(obj != nullptr);
+    DCHECK(arg != nullptr);
     ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
     if (!state->InDumpSpace(obj)) {
       return;
@@ -1352,12 +1568,12 @@
           i = i + run;
         }
         mirror::Class* value_class =
-            (value == NULL) ? obj_class->GetComponentType() : value->GetClass();
+            (value == nullptr) ? obj_class->GetComponentType() : value->GetClass();
         PrettyObjectValue(indent_os, value_class, value);
       }
     } else if (obj->IsClass()) {
       mirror::ObjectArray<mirror::ArtField>* sfields = obj->AsClass()->GetSFields();
-      if (sfields != NULL) {
+      if (sfields != nullptr) {
         indent_os << "STATICS:\n";
         Indenter indent2_filter(indent_os.rdbuf(), kIndentChar, kIndentBy1Count);
         std::ostream indent2_os(&indent2_filter);
@@ -1385,8 +1601,8 @@
       } else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
           method->IsResolutionMethod() || method->IsImtConflictMethod() ||
           method->IsClassInitializer()) {
-        DCHECK(method->GetNativeGcMap() == NULL) << PrettyMethod(method);
-        DCHECK(method->GetMappingTable() == NULL) << PrettyMethod(method);
+        DCHECK(method->GetNativeGcMap() == nullptr) << PrettyMethod(method);
+        DCHECK(method->GetMappingTable() == nullptr) << PrettyMethod(method);
       } else {
         const DexFile::CodeItem* code_item = method->GetCodeItem();
         size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
@@ -1733,12 +1949,11 @@
     // threshold, we assume 2 bytes per instruction and 2 instructions per block.
     kLargeMethodDexBytes = 16000
   };
-  std::unique_ptr<OatDumper> oat_dumper_;
   std::ostream* os_;
   gc::space::ImageSpace& image_space_;
   const ImageHeader& image_header_;
-  bool dump_raw_mapping_table_;
-  bool dump_raw_gc_map_;
+  std::unique_ptr<OatDumper> oat_dumper_;
+  std::unique_ptr<OatDumperOptions> oat_dumper_options_;
 
   DISALLOW_COPY_AND_ASSIGN(ImageDumper);
 };
@@ -1755,9 +1970,9 @@
     usage();
   }
 
-  const char* oat_filename = NULL;
-  const char* image_location = NULL;
-  const char* boot_image_location = NULL;
+  const char* oat_filename = nullptr;
+  const char* image_location = nullptr;
+  const char* boot_image_location = nullptr;
   InstructionSet instruction_set = kRuntimeISA;
   std::string elf_filename_prefix;
   std::ostream* os = &std::cout;
@@ -1765,6 +1980,8 @@
   std::string output_name;
   bool dump_raw_mapping_table = false;
   bool dump_raw_gc_map = false;
+  bool dump_vmap = true;
+  bool disassemble_code = true;
   bool symbolize = false;
 
   for (int i = 0; i < argc; i++) {
@@ -1788,15 +2005,14 @@
       } else if (instruction_set_str == "x86_64") {
         instruction_set = kX86_64;
       }
-    } else if (option.starts_with("--dump:")) {
-        if (option == "--dump:raw_mapping_table") {
-          dump_raw_mapping_table = true;
-        } else if (option == "--dump:raw_gc_map") {
-          dump_raw_gc_map = true;
-        } else {
-          fprintf(stderr, "Unknown argument %s\n", option.data());
-          usage();
-        }
+    } else if (option =="--dump:raw_mapping_table") {
+      dump_raw_mapping_table = true;
+    } else if (option == "--dump:raw_gc_map") {
+      dump_raw_gc_map = true;
+    } else if (option == "--no-dump:vmap") {
+      dump_vmap = false;
+    } else if (option == "--no-disassemble") {
+      disassemble_code = false;
     } else if (option.starts_with("--output=")) {
       output_name = option.substr(strlen("--output=")).ToString();
       const char* filename = output_name.c_str();
@@ -1815,21 +2031,28 @@
     }
   }
 
-  if (image_location == NULL && oat_filename == NULL) {
+  if (image_location == nullptr && oat_filename == nullptr) {
     fprintf(stderr, "Either --image or --oat must be specified\n");
     return EXIT_FAILURE;
   }
 
-  if (image_location != NULL && oat_filename != NULL) {
+  if (image_location != nullptr && oat_filename != nullptr) {
     fprintf(stderr, "Either --image or --oat must be specified but not both\n");
     return EXIT_FAILURE;
   }
 
-  if (oat_filename != NULL) {
+  // If we are only doing the oat file, disable absolute_addresses. Keep them for image dumping.
+  bool absolute_addresses = (oat_filename == nullptr);
+  std::unique_ptr<OatDumperOptions> oat_dumper_options(new OatDumperOptions(dump_raw_mapping_table,
+                                                                            dump_raw_gc_map,
+                                                                            dump_vmap,
+                                                                            disassemble_code,
+                                                                            absolute_addresses));
+  if (oat_filename != nullptr) {
     std::string error_msg;
     OatFile* oat_file =
-        OatFile::Open(oat_filename, oat_filename, NULL, false, &error_msg);
-    if (oat_file == NULL) {
+        OatFile::Open(oat_filename, oat_filename, nullptr, false, &error_msg);
+    if (oat_file == nullptr) {
       fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
       return EXIT_FAILURE;
     }
@@ -1844,8 +2067,9 @@
         return EXIT_FAILURE;
       }
     } else {
-      OatDumper oat_dumper(*oat_file, dump_raw_mapping_table, dump_raw_gc_map);
-      oat_dumper.Dump(*os);
+      OatDumper oat_dumper(*oat_file, oat_dumper_options.release());
+      bool success = oat_dumper.Dump(*os);
+      return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
     }
     return EXIT_SUCCESS;
   }
@@ -1860,15 +2084,15 @@
   NoopCompilerCallbacks callbacks;
   options.push_back(std::make_pair("compilercallbacks", &callbacks));
 
-  if (boot_image_location != NULL) {
+  if (boot_image_location != nullptr) {
     boot_image_option += "-Ximage:";
     boot_image_option += boot_image_location;
-    options.push_back(std::make_pair(boot_image_option.c_str(), reinterpret_cast<void*>(NULL)));
+    options.push_back(std::make_pair(boot_image_option.c_str(), nullptr));
   }
-  if (image_location != NULL) {
+  if (image_location != nullptr) {
     image_option += "-Ximage:";
     image_option += image_location;
-    options.push_back(std::make_pair(image_option.c_str(), reinterpret_cast<void*>(NULL)));
+    options.push_back(std::make_pair(image_option.c_str(), nullptr));
   }
   options.push_back(
       std::make_pair("imageinstructionset",
@@ -1885,16 +2109,15 @@
   ScopedObjectAccess soa(Thread::Current());
   gc::Heap* heap = Runtime::Current()->GetHeap();
   gc::space::ImageSpace* image_space = heap->GetImageSpace();
-  CHECK(image_space != NULL);
+  CHECK(image_space != nullptr);
   const ImageHeader& image_header = image_space->GetImageHeader();
   if (!image_header.IsValid()) {
     fprintf(stderr, "Invalid image header %s\n", image_location);
     return EXIT_FAILURE;
   }
-  ImageDumper image_dumper(os, *image_space, image_header,
-                           dump_raw_mapping_table, dump_raw_gc_map);
-  image_dumper.Dump();
-  return EXIT_SUCCESS;
+  ImageDumper image_dumper(os, *image_space, image_header, oat_dumper_options.release());
+  bool success = image_dumper.Dump();
+  return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
 }
 
 }  // namespace art
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index bbdf3a3..50b4ece 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -79,9 +79,10 @@
 
   bool have_android_data = false;
   bool dalvik_cache_exists = false;
+  bool is_global_cache = false;
   std::string dalvik_cache;
   GetDalvikCache(GetInstructionSetString(isa), false, &dalvik_cache,
-                 &have_android_data, &dalvik_cache_exists);
+                 &have_android_data, &dalvik_cache_exists, &is_global_cache);
 
   std::string cache_filename;
   if (have_android_data && dalvik_cache_exists) {
@@ -522,14 +523,15 @@
   t.NewTiming("Fixup Elf Headers");
   // Fixup Phdr's
   for (unsigned int i = 0; i < oat_file_->GetProgramHeaderNum(); i++) {
-    Elf32_Phdr& hdr = oat_file_->GetProgramHeader(i);
-    if (hdr.p_vaddr != 0 && hdr.p_vaddr != hdr.p_offset) {
+    Elf32_Phdr* hdr = oat_file_->GetProgramHeader(i);
+    CHECK(hdr != nullptr);
+    if (hdr->p_vaddr != 0 && hdr->p_vaddr != hdr->p_offset) {
       need_fixup = true;
-      hdr.p_vaddr += delta_;
+      hdr->p_vaddr += delta_;
     }
-    if (hdr.p_paddr != 0 && hdr.p_paddr != hdr.p_offset) {
+    if (hdr->p_paddr != 0 && hdr->p_paddr != hdr->p_offset) {
       need_fixup = true;
-      hdr.p_paddr += delta_;
+      hdr->p_paddr += delta_;
     }
   }
   if (!need_fixup) {
@@ -539,9 +541,10 @@
   }
   t.NewTiming("Fixup Section Headers");
   for (unsigned int i = 0; i < oat_file_->GetSectionHeaderNum(); i++) {
-    Elf32_Shdr& hdr = oat_file_->GetSectionHeader(i);
-    if (hdr.sh_addr != 0) {
-      hdr.sh_addr += delta_;
+    Elf32_Shdr* hdr = oat_file_->GetSectionHeader(i);
+    CHECK(hdr != nullptr);
+    if (hdr->sh_addr != 0) {
+      hdr->sh_addr += delta_;
     }
   }
 
@@ -984,9 +987,11 @@
     std::string cache_filename;
     bool has_cache = false;
     bool has_android_data_unused = false;
+    bool is_global_cache = false;
     if (!gc::space::ImageSpace::FindImageFilename(patched_image_location.c_str(), isa,
                                                   &system_filename, &has_system, &cache_filename,
-                                                  &has_android_data_unused, &has_cache)) {
+                                                  &has_android_data_unused, &has_cache,
+                                                  &is_global_cache)) {
       Usage("Unable to determine image file for location %s", patched_image_location.c_str());
     }
     if (has_cache) {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 5065c58..61bc9ff 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -120,6 +120,7 @@
   native/java_lang_Thread.cc \
   native/java_lang_Throwable.cc \
   native/java_lang_VMClassLoader.cc \
+  native/java_lang_ref_FinalizerReference.cc \
   native/java_lang_ref_Reference.cc \
   native/java_lang_reflect_Array.cc \
   native/java_lang_reflect_Constructor.cc \
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index fb26f5f..ee005e8 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -150,8 +150,8 @@
 
   if (two_byte) {
     switch (opcode) {
-      case 0x10:            // vmovsd/ss
-      case 0x11:            // vmovsd/ss
+      case 0x10:        // vmovsd/ss
+      case 0x11:        // vmovsd/ss
       case 0xb6:        // movzx
       case 0xb7:
       case 0xbe:        // movsx
@@ -165,7 +165,8 @@
     }
   } else {
     switch (opcode) {
-      case 0x89:            // mov
+      case 0x88:        // mov byte
+      case 0x89:        // mov
       case 0x8b:
       case 0x38:        // cmp with memory.
       case 0x39:
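Note: 0x88 is the byte-wide store opcode (`mov r/m8, r8`), the 8-bit counterpart of the 0x89 dword form already handled, so faulting byte stores (e.g. writes to byte fields through a null object) are now decoded too. Example encodings in AT&T syntax (worked out by hand, not taken from the patch):

```cpp
// 88 04 24   movb %al,  (%esp)   // opcode 0x88: byte store, newly recognized
// 89 04 24   movl %eax, (%esp)   // opcode 0x89: dword store, already recognized
```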
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 5f43bec..b8edad3 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -57,17 +57,19 @@
   }
 }
 
-void Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
+bool Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
   MutexLock mu(self, lock_);
   SetCountLocked(self, count_ + delta);
+  bool timed_out = false;
   if (count_ != 0) {
-    condition_.TimedWait(self, timeout_ms, 0);
+    timed_out = condition_.TimedWait(self, timeout_ms, 0);
   }
+  return timed_out;
 }
 
 void Barrier::SetCountLocked(Thread* self, int count) {
   count_ = count;
-  if (count_ == 0) {
+  if (count == 0) {
     condition_.Broadcast(self);
   }
 }
diff --git a/runtime/barrier.h b/runtime/barrier.h
index a433cac..167e1d6 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -38,10 +38,11 @@
   void Init(Thread* self, int count);
 
   // Increment the count by delta, wait on condition if count is non zero.
-  void Increment(Thread* self, int delta);
+  void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_);
 
-  // Increment the count by delta, wait on condition if count is non zero, with a timeout
-  void Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
+  // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
+  // true if the wait timed out.
+  bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
 
  private:
   void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
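
For illustration, a minimal sketch of how a caller might consume the new return value; the wrapper function and timeout constant below are hypothetical, not part of this change:

    // Wait for `pending` workers to pass the barrier, but bound the wait and
    // report a timeout instead of blocking forever.
    static bool WaitWithTimeout(Thread* self, Barrier* barrier, int pending) {
      static constexpr uint32_t kWaitTimeoutMs = 10000;  // Hypothetical bound.
      bool timed_out = barrier->Increment(self, pending, kWaitTimeoutMs);
      if (timed_out) {
        LOG(WARNING) << "Barrier wait timed out after " << kWaitTimeoutMs << " ms";
      }
      return !timed_out;
    }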
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index a7adb02..2c3e966 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -66,6 +66,7 @@
   kAllocatorTagCompileTimeClassPath,
   kAllocatorTagOatFile,
   kAllocatorTagDexFileVerifier,
+  kAllocatorTagRosAlloc,
   kAllocatorTagCount,  // Must always be last element.
 };
 std::ostream& operator<<(std::ostream& os, const AllocatorTag& tag);
@@ -149,6 +150,10 @@
     Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
 };
 
+template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
+class AllocationTrackingSet : public std::set<Key, Compare, TrackingAllocator<Key, kTag>> {
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_ALLOCATOR_H_
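
For illustration, the new alias is used like the existing map alias shown above; the member declaration below is hypothetical:

    // A std::set whose allocations are charged to the new kAllocatorTagRosAlloc
    // tag whenever tracking allocators are enabled.
    AllocationTrackingSet<void*, kAllocatorTagRosAlloc> allocated_page_set_;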
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index f01ea0c..2c95ede 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -37,19 +37,25 @@
 ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
 Mutex* Locks::deoptimization_lock_ = nullptr;
 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::intern_table_lock_ = nullptr;
 Mutex* Locks::jni_libraries_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
 Mutex* Locks::mem_maps_lock_ = nullptr;
 Mutex* Locks::modify_ldt_lock_ = nullptr;
 ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
 Mutex* Locks::profiler_lock_ = nullptr;
+Mutex* Locks::reference_processor_lock_ = nullptr;
+Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
+Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
 Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
 Mutex* Locks::thread_suspend_count_lock_ = nullptr;
 Mutex* Locks::trace_lock_ = nullptr;
 Mutex* Locks::unexpected_signal_lock_ = nullptr;
-Mutex* Locks::intern_table_lock_ = nullptr;
 
 struct AllMutexData {
   // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -773,8 +779,9 @@
   guard_.recursion_count_ = old_recursion_count;
 }
 
-void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
+bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
   DCHECK(self == NULL || self == Thread::Current());
+  bool timed_out = false;
   guard_.AssertExclusiveHeld(self);
   guard_.CheckSafeToWait(self);
   unsigned int old_recursion_count = guard_.recursion_count_;
@@ -790,6 +797,7 @@
   if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
     if (errno == ETIMEDOUT) {
       // Timed out we're done.
+      timed_out = true;
     } else if ((errno == EAGAIN) || (errno == EINTR)) {
       // A signal or ConditionVariable::Signal/Broadcast has come in.
     } else {
@@ -814,13 +822,16 @@
   timespec ts;
   InitTimeSpec(true, clock, ms, ns, &ts);
   int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
-  if (rc != 0 && rc != ETIMEDOUT) {
+  if (rc == ETIMEDOUT) {
+    timed_out = true;
+  } else if (rc != 0) {
     errno = rc;
     PLOG(FATAL) << "TimedWait failed for " << name_;
   }
   guard_.exclusive_owner_ = old_owner;
 #endif
   guard_.recursion_count_ = old_recursion_count;
+  return timed_out;
 }
 
 void Locks::Init() {
@@ -873,6 +884,10 @@
     DCHECK(heap_bitmap_lock_ == nullptr);
     heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+    DCHECK(trace_lock_ == nullptr);
+    trace_lock_ = new Mutex("trace lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
     DCHECK(runtime_shutdown_lock_ == nullptr);
     runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
@@ -881,10 +896,6 @@
     DCHECK(profiler_lock_ == nullptr);
     profiler_lock_ = new Mutex("profiler lock", current_lock_level);
 
-    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
-    DCHECK(trace_lock_ == nullptr);
-    trace_lock_ = new Mutex("trace lock", current_lock_level);
-
     UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
     DCHECK(deoptimization_lock_ == nullptr);
     deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
@@ -928,6 +939,30 @@
     DCHECK(intern_table_lock_ == nullptr);
     intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
+    DCHECK(reference_processor_lock_ == nullptr);
+    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
+    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
+    reference_queue_cleared_references_lock_ =
+        new Mutex("ReferenceQueue cleared references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
+    DCHECK(reference_queue_weak_references_lock_ == nullptr);
+    reference_queue_weak_references_lock_ =
+        new Mutex("ReferenceQueue weak references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
+    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
+    reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
+    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
+    reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
+
+    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
+    DCHECK(reference_queue_soft_references_lock_ == nullptr);
+    reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
     DCHECK(abort_lock_ == nullptr);
     abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 6642b1e..8d2cdce 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,11 +60,16 @@
   kThreadSuspendCountLock,
   kAbortLock,
   kJdwpSocketLock,
+  kReferenceQueueSoftReferencesLock,
+  kReferenceQueuePhantomReferencesLock,
+  kReferenceQueueFinalizerReferencesLock,
+  kReferenceQueueWeakReferencesLock,
+  kReferenceQueueClearedReferencesLock,
+  kReferenceProcessorLock,
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
   kRosAllocBulkFreeLock,
   kAllocSpaceLock,
-  kReferenceProcessorLock,
   kDexFileMethodInlinerLock,
   kDexFileToMethodInlinerMapLock,
   kMarkSweepMarkStackLock,
@@ -87,12 +92,12 @@
   kBreakpointInvokeLock,
   kAllocTrackerLock,
   kDeoptimizationLock,
-  kTraceLock,
   kProfilerLock,
   kJdwpEventListLock,
   kJdwpAttachLock,
   kJdwpStartLock,
   kRuntimeShutdownLock,
+  kTraceLock,
   kHeapBitmapLock,
   kMutatorLock,
   kThreadListSuspendThreadLock,
@@ -382,7 +387,7 @@
   // TODO: No thread safety analysis on Wait and TimedWait as they call mutex operations via their
   //       pointer copy, thereby defeating annotalysis.
   void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
-  void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
+  bool TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
   // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
   // when waiting.
   // TODO: remove this.
@@ -594,8 +599,26 @@
   // Guards intern table.
   static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
 
+  // Guards reference processor.
+  static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
+
+  // Guards cleared references queue.
+  static Mutex* reference_queue_cleared_references_lock_ ACQUIRED_AFTER(reference_processor_lock_);
+
+  // Guards weak references queue.
+  static Mutex* reference_queue_weak_references_lock_ ACQUIRED_AFTER(reference_queue_cleared_references_lock_);
+
+  // Guards finalizer references queue.
+  static Mutex* reference_queue_finalizer_references_lock_ ACQUIRED_AFTER(reference_queue_weak_references_lock_);
+
+  // Guards phantom references queue.
+  static Mutex* reference_queue_phantom_references_lock_ ACQUIRED_AFTER(reference_queue_finalizer_references_lock_);
+
+  // Guards soft references queue.
+  static Mutex* reference_queue_soft_references_lock_ ACQUIRED_AFTER(reference_queue_phantom_references_lock_);
+
   // Have an exclusive aborting thread.
-  static Mutex* abort_lock_ ACQUIRED_AFTER(intern_table_lock_);
+  static Mutex* abort_lock_ ACQUIRED_AFTER(reference_queue_soft_references_lock_);
 
   // Allow mutual exclusion when manipulating Thread::suspend_count_.
   // TODO: Does the trade-off of a per-thread lock make sense?
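
The ACQUIRED_AFTER chain above fixes the only legal nesting order for the new locks; a minimal sketch of a conforming acquisition (the critical-section body is hypothetical):

    {
      // Per the declared levels, the reference processor lock is taken first,
      // then any individual reference queue lock.
      MutexLock processor_mu(self, *Locks::reference_processor_lock_);
      MutexLock queue_mu(self, *Locks::reference_queue_cleared_references_lock_);
      // ... hand cleared references over to the queue ...
    }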
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index 91b83f6..2dde245 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -184,10 +184,22 @@
   return memcmp(p1, p2, len) == 0;
 }
 
+inline bool operator==(const StringPiece& x, const char* y) {
+  if (y == nullptr) {
+    return x.size() == 0;
+  } else {
+    return strncmp(x.data(), y, x.size()) == 0 && y[x.size()] == '\0';
+  }
+}
+
 inline bool operator!=(const StringPiece& x, const StringPiece& y) {
   return !(x == y);
 }
 
+inline bool operator!=(const StringPiece& x, const char* y) {
+  return !(x == y);
+}
+
 inline bool operator<(const StringPiece& x, const StringPiece& y) {
   const int r = memcmp(x.data(), y.data(),
                        std::min(x.size(), y.size()));
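
For illustration of the new overloads, which compare against a C string without building a temporary StringPiece and treat nullptr as empty:

    StringPiece sp("abc");
    CHECK(sp == "abc");   // Same three chars, and y ends right after them.
    CHECK(sp != "ab");    // A strict prefix does not compare equal.
    CHECK(sp != "abcd");  // Nor does a longer string.
    const char* null_str = nullptr;
    CHECK(StringPiece("") == null_str);  // nullptr matches only an empty piece.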
diff --git a/runtime/base/stringprintf_test.cc b/runtime/base/stringprintf_test.cc
new file mode 100644
index 0000000..0bfde33
--- /dev/null
+++ b/runtime/base/stringprintf_test.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stringprintf.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(StringPrintfTest, HexSizeT) {
+  size_t size = 0x00107e59;
+  EXPECT_STREQ("00107e59", StringPrintf("%08zx", size).c_str());
+  EXPECT_STREQ("0x00107e59", StringPrintf("0x%08zx", size).c_str());
+}
+
+}  // namespace art
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
new file mode 100644
index 0000000..1a78d72
--- /dev/null
+++ b/runtime/check_reference_map_visitor.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+
+#include "gc_map.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack_map.h"
+
+namespace art {
+
+// Helper class for tests checking that the compiler keeps track of dex registers
+// holding references.
+class CheckReferenceMapVisitor : public StackVisitor {
+ public:
+  explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : StackVisitor(thread, nullptr) {}
+
+  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = GetMethod();
+    if (m != nullptr && (m->IsCalleeSaveMethod() || m->IsNative())) {
+      CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
+    }
+
+    if (m == nullptr || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
+      return true;
+    }
+
+    LOG(INFO) << "At " << PrettyMethod(m, false);
+
+    if (m->IsCalleeSaveMethod()) {
+      LOG(WARNING) << "no PC for " << PrettyMethod(m);
+      return true;
+    }
+
+    return false;
+  }
+
+  void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (GetMethod()->IsOptimized()) {
+      CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
+    } else {
+      CheckQuickMethod(registers, number_of_references, native_pc_offset);
+    }
+  }
+
+ private:
+  void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = GetMethod();
+    CodeInfo code_info = m->GetOptimizedCodeInfo();
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+    DexRegisterMap dex_register_map =
+        code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
+    MemoryRegion stack_mask = stack_map.GetStackMask();
+    uint32_t register_mask = stack_map.GetRegisterMask();
+    for (int i = 0; i < number_of_references; ++i) {
+      int reg = registers[i];
+      CHECK(reg < m->GetCodeItem()->registers_size_);
+      DexRegisterMap::LocationKind location = dex_register_map.GetLocationKind(reg);
+      switch (location) {
+        case DexRegisterMap::kNone:
+          // Not set, should not be a reference.
+          CHECK(false);
+          break;
+        case DexRegisterMap::kInStack:
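+          // The location's value is a byte offset in the frame; >> 2 converts
+          // it to the stack mask's per-slot bit index.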
+          CHECK(stack_mask.LoadBit(dex_register_map.GetValue(reg) >> 2));
+          break;
+        case DexRegisterMap::kInRegister:
+          CHECK_NE(register_mask & dex_register_map.GetValue(reg), 0u);
+          break;
+        case DexRegisterMap::kConstant:
+          // A reference in a constant location can only be the null constant.
+          CHECK_EQ(dex_register_map.GetValue(reg), 0);
+          break;
+      }
+    }
+  }
+
+  void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* m = GetMethod();
+    NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
+    const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
+    CHECK(ref_bitmap);
+    for (int i = 0; i < number_of_references; ++i) {
+      int reg = registers[i];
+      CHECK(reg < m->GetCodeItem()->registers_size_);
+      CHECK((*(ref_bitmap + reg / 8) >> (reg % 8)) & 0x01)
+          << "Error: Reg @" << i << " is not in GC map";
+    }
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
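
A test exercises this helper by subclassing it and calling CheckReferences() on the frames the base visitor does not skip; a minimal sketch, with the register list and PC offset purely hypothetical:

    class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
     public:
      TestReferenceMapVisitor(Thread* thread, uint32_t native_pc_offset)
          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
          : CheckReferenceMapVisitor(thread), native_pc_offset_(native_pc_offset) {}

      bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        if (CheckReferenceMapVisitor::VisitFrame()) {
          return true;  // A frame kind the helper skips; keep walking.
        }
        // Expect dex registers v1 and v3 to hold references at this point.
        int registers[] = { 1, 3 };
        CheckReferences(registers, 2, native_pc_offset_);
        return true;
      }

     private:
      const uint32_t native_pc_offset_;
    };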
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4342234..cb0fe0a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -64,7 +64,7 @@
 #include "ScopedLocalRef.h"
 #include "scoped_thread_state_change.h"
 #include "handle_scope-inl.h"
-#include "thread.h"
+#include "thread-inl.h"
 #include "utils.h"
 #include "verifier/method_verifier.h"
 #include "well_known_classes.h"
@@ -89,21 +89,29 @@
   // a NoClassDefFoundError (v2 2.17.5).  The exception to this rule is if we
   // failed in verification, in which case v2 5.4.1 says we need to re-throw
   // the previous error.
-  if (!Runtime::Current()->IsCompiler()) {  // Give info if this occurs at runtime.
+  Runtime* runtime = Runtime::Current();
+  bool is_compiler = runtime->IsCompiler();
+  if (!is_compiler) {  // Give info if this occurs at runtime.
     LOG(INFO) << "Rejecting re-init on previously-failed class " << PrettyClass(c);
   }
 
   CHECK(c->IsErroneous()) << PrettyClass(c) << " " << c->GetStatus();
   Thread* self = Thread::Current();
-  ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-  if (c->GetVerifyErrorClass() != NULL) {
-    // TODO: change the verifier to store an _instance_, with a useful detail message?
-    std::string temp;
-    self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
-                            PrettyDescriptor(c).c_str());
+  if (is_compiler) {
+    // At compile time, accurate errors and NoClassDefFoundError (NCDFE) details
+    // are disabled to speed up compilation.
+    mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
+    self->SetException(ThrowLocation(), pre_allocated);
   } else {
-    self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
-                            PrettyDescriptor(c).c_str());
+    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+    if (c->GetVerifyErrorClass() != NULL) {
+      // TODO: change the verifier to store an _instance_, with a useful detail message?
+      std::string temp;
+      self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
+                              PrettyDescriptor(c).c_str());
+    } else {
+      self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
+                              PrettyDescriptor(c).c_str());
+    }
   }
 }
 
@@ -112,7 +120,7 @@
   JNIEnv* env = self->GetJniEnv();
 
   ScopedLocalRef<jthrowable> cause(env, env->ExceptionOccurred());
-  CHECK(cause.get() != NULL);
+  CHECK(cause.get() != nullptr);
 
   env->ExceptionClear();
   bool is_error = env->IsInstanceOf(cause.get(), WellKnownClasses::java_lang_Error);
@@ -121,7 +129,8 @@
   // We only wrap non-Error exceptions; an Error can just be used as-is.
   if (!is_error) {
     ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    self->ThrowNewWrappedException(throw_location, "Ljava/lang/ExceptionInInitializerError;", NULL);
+    self->ThrowNewWrappedException(throw_location, "Ljava/lang/ExceptionInInitializerError;",
+                                   nullptr);
   }
 }
 
@@ -298,7 +307,7 @@
       heap->AllocNonMovableObject<true>(self, nullptr,
                                         mirror::Class::ClassClassSize(),
                                         VoidFunctor()))));
-  CHECK(java_lang_Class.Get() != NULL);
+  CHECK(java_lang_Class.Get() != nullptr);
   mirror::Class::SetClassClass(java_lang_Class.Get());
   java_lang_Class->SetClass(java_lang_Class.Get());
   if (kUseBakerOrBrooksReadBarrier) {
@@ -316,7 +325,7 @@
   // java_lang_Object comes next so that object_array_class can be created.
   Handle<mirror::Class> java_lang_Object(hs.NewHandle(
       AllocClass(self, java_lang_Class.Get(), mirror::Object::ClassSize())));
-  CHECK(java_lang_Object.Get() != NULL);
+  CHECK(java_lang_Object.Get() != nullptr);
   // backfill Object as the super class of Class.
   java_lang_Class->SetSuperClass(java_lang_Object.Get());
   java_lang_Object->SetStatus(mirror::Class::kStatusLoaded, self);
@@ -397,7 +406,7 @@
   // that FindClass can link members.
   Handle<mirror::Class> java_lang_reflect_ArtField(hs.NewHandle(
       AllocClass(self, java_lang_Class.Get(), mirror::ArtField::ClassSize())));
-  CHECK(java_lang_reflect_ArtField.Get() != NULL);
+  CHECK(java_lang_reflect_ArtField.Get() != nullptr);
   java_lang_reflect_ArtField->SetObjectSize(mirror::ArtField::InstanceSize());
   SetClassRoot(kJavaLangReflectArtField, java_lang_reflect_ArtField.Get());
   java_lang_reflect_ArtField->SetStatus(mirror::Class::kStatusResolved, self);
@@ -405,7 +414,7 @@
 
   Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
     AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
-  CHECK(java_lang_reflect_ArtMethod.Get() != NULL);
+  CHECK(java_lang_reflect_ArtMethod.Get() != nullptr);
   java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize());
   SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
   java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
@@ -437,8 +446,8 @@
   CHECK_NE(0U, boot_class_path.size());
   for (size_t i = 0; i != boot_class_path.size(); ++i) {
     const DexFile* dex_file = boot_class_path[i];
-    CHECK(dex_file != NULL);
-    AppendToBootClassPath(*dex_file);
+    CHECK(dex_file != nullptr);
+    AppendToBootClassPath(self, *dex_file);
   }
 
   // now we can use FindSystemClass
@@ -514,9 +523,9 @@
 
   // Setup the single, global copy of "iftable".
   mirror::Class* java_lang_Cloneable = FindSystemClass(self, "Ljava/lang/Cloneable;");
-  CHECK(java_lang_Cloneable != NULL);
+  CHECK(java_lang_Cloneable != nullptr);
   mirror::Class* java_io_Serializable = FindSystemClass(self, "Ljava/io/Serializable;");
-  CHECK(java_io_Serializable != NULL);
+  CHECK(java_io_Serializable != nullptr);
   // We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to
   // crawl up and explicitly list all of the supers as well.
   {
@@ -644,8 +653,8 @@
   for (size_t i = 0; i < kClassRootsMax; i++) {
     ClassRoot class_root = static_cast<ClassRoot>(i);
     mirror::Class* klass = GetClassRoot(class_root);
-    CHECK(klass != NULL);
-    DCHECK(klass->IsArrayClass() || klass->IsPrimitive() || klass->GetDexCache() != NULL);
+    CHECK(klass != nullptr);
+    DCHECK(klass->IsArrayClass() || klass->IsPrimitive() || klass->GetDexCache() != nullptr);
     // note SetClassRoot does additional validation.
     // if possible add new checks there to catch errors early
   }
@@ -666,7 +675,7 @@
     if (!c->IsArrayClass() && !c->IsPrimitive()) {
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(GetClassRoot(ClassRoot(i))));
-      EnsureInitialized(h_class, true, true);
+      EnsureInitialized(self, h_class, true, true);
       self->AssertNoPendingException();
     }
   }
@@ -1043,7 +1052,7 @@
                                                                uint32_t dex_location_checksum,
                                                                const char* oat_location,
                                                                std::string* error_msg) {
-  std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, NULL,
+  std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr,
                                             !Runtime::Current()->IsCompiler(),
                                             error_msg));
   if (oat_file.get() == nullptr) {
@@ -1115,7 +1124,7 @@
     error_msgs->push_back(error_msg);
     return nullptr;
   }
-  std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, NULL,
+  std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr,
                                             !Runtime::Current()->IsCompiler(),
                                             &error_msg));
   if (oat_file.get() == nullptr) {
@@ -1201,7 +1210,7 @@
 
   const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
                                                                     &dex_location_checksum);
-  if (oat_dex_file == NULL) {
+  if (oat_dex_file == nullptr) {
     *error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
                               oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
     for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
@@ -1228,7 +1237,7 @@
     // If no classes.dex found in dex_location, it has been stripped or is corrupt, assume oat is
     // up-to-date. This is the common case in user builds for jar's and apk's in the /system
     // directory.
-    const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, NULL);
+    const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, nullptr);
     if (oat_dex_file == nullptr) {
       *error_msg = StringPrintf("Dex checksum mismatch for location '%s' and failed to find oat "
                                 "dex file '%s': %s", oat_file->GetLocation().c_str(), dex_location,
@@ -1314,8 +1323,9 @@
   std::string dalvik_cache;
   bool have_android_data = false;
   bool have_dalvik_cache = false;
+  bool is_global_cache = false;
   GetDalvikCache(GetInstructionSetString(kRuntimeISA), false, &dalvik_cache,
-                 &have_android_data, &have_dalvik_cache);
+                 &have_android_data, &have_dalvik_cache, &is_global_cache);
   std::string cache_filename;
   if (have_dalvik_cache) {
     cache_filename = GetDalvikCacheFilenameOrDie(dex_location.c_str(), dalvik_cache.c_str());
@@ -1361,7 +1371,7 @@
   {
     // There is a high probability that these both these oat files map similar/the same address
     // spaces so we must scope them like this so they each gets its turn.
-    std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, NULL,
+    std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, nullptr,
                                                          executable, &odex_error_msg));
     if (odex_oat_file.get() != nullptr && CheckOatFile(odex_oat_file.get(), isa,
                                                        &odex_checksum_verified,
@@ -1383,7 +1393,7 @@
   bool should_patch_cache = false;
   bool cache_checksum_verified = false;
   if (have_dalvik_cache) {
-    std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, NULL,
+    std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, nullptr,
                                                           executable, &cache_error_msg));
     if (cache_oat_file.get() != nullptr && CheckOatFile(cache_oat_file.get(), isa,
                                                         &cache_checksum_verified,
@@ -1460,7 +1470,7 @@
                                                   InstructionSet isa,
                                                   std::string* error_msg) {
   // We open it non-executable
-  std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, NULL, false, error_msg));
+  std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, false, error_msg));
   if (output.get() == nullptr) {
     return nullptr;
   }
@@ -1515,7 +1525,7 @@
   LOG(INFO) << "Relocate Oat File: " << command_line;
   bool success = Exec(argv, error_msg);
   if (success) {
-    std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, NULL,
+    std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr,
                                                   !Runtime::Current()->IsCompiler(), error_msg));
     bool checksum_verified = false;
     if (output.get() != nullptr && CheckOatFile(output.get(), isa, &checksum_verified, error_msg)) {
@@ -1624,20 +1634,16 @@
     return oat_file;
   }
 
-  oat_file = OatFile::Open(oat_location, oat_location, NULL, !Runtime::Current()->IsCompiler(),
-                           error_msg);
-  if (oat_file == NULL) {
-    return NULL;
-  }
-  return oat_file;
+  return OatFile::Open(oat_location, oat_location, nullptr, !Runtime::Current()->IsCompiler(),
+                       error_msg);
 }
 
 static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
 
-  DCHECK(obj != NULL);
-  DCHECK(class_linker != NULL);
+  DCHECK(obj != nullptr);
+  DCHECK(class_linker != nullptr);
 
   if (obj->IsArtMethod()) {
     mirror::ArtMethod* method = obj->AsArtMethod();
@@ -1659,7 +1665,7 @@
   gc::Heap* heap = Runtime::Current()->GetHeap();
   gc::space::ImageSpace* space = heap->GetImageSpace();
   dex_cache_image_class_lookup_required_ = true;
-  CHECK(space != NULL);
+  CHECK(space != nullptr);
   OatFile& oat_file = GetImageOatFile(space);
   CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
   CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
@@ -1694,10 +1700,10 @@
     const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
     const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
                                                                      nullptr);
-    CHECK(oat_dex_file != NULL) << oat_file.GetLocation() << " " << dex_file_location;
+    CHECK(oat_dex_file != nullptr) << oat_file.GetLocation() << " " << dex_file_location;
     std::string error_msg;
     const DexFile* dex_file = oat_dex_file->OpenDexFile(&error_msg);
-    if (dex_file == NULL) {
+    if (dex_file == nullptr) {
       LOG(FATAL) << "Failed to open dex file " << dex_file_location
                  << " from within oat file " << oat_file.GetLocation()
                  << " error '" << error_msg << "'";
@@ -1870,7 +1876,7 @@
   } else {
     Thread* self = Thread::Current();
     StackHandleScope<1> hs(self);
-    Handle<mirror::ObjectArray<mirror::Class>> classes =
+    MutableHandle<mirror::ObjectArray<mirror::Class>> classes =
         hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr);
     GetClassesVisitorArrayArg local_arg;
     local_arg.classes = &classes;
@@ -1880,7 +1886,7 @@
     while (!local_arg.success) {
       size_t class_table_size;
       {
-        ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+        ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
         class_table_size = class_table_.size();
       }
       mirror::Class* class_type = mirror::Class::GetJavaLangClass();
@@ -1932,33 +1938,33 @@
       hs.NewHandle(down_cast<mirror::DexCache*>(
           heap->AllocObject<true>(self, dex_cache_class.Get(), dex_cache_class->GetObjectSize(),
                                   VoidFunctor()))));
-  if (dex_cache.Get() == NULL) {
-    return NULL;
+  if (dex_cache.Get() == nullptr) {
+    return nullptr;
   }
   Handle<mirror::String>
       location(hs.NewHandle(intern_table_->InternStrong(dex_file.GetLocation().c_str())));
-  if (location.Get() == NULL) {
-    return NULL;
+  if (location.Get() == nullptr) {
+    return nullptr;
   }
   Handle<mirror::ObjectArray<mirror::String>>
       strings(hs.NewHandle(AllocStringArray(self, dex_file.NumStringIds())));
-  if (strings.Get() == NULL) {
-    return NULL;
+  if (strings.Get() == nullptr) {
+    return nullptr;
   }
   Handle<mirror::ObjectArray<mirror::Class>>
       types(hs.NewHandle(AllocClassArray(self, dex_file.NumTypeIds())));
-  if (types.Get() == NULL) {
-    return NULL;
+  if (types.Get() == nullptr) {
+    return nullptr;
   }
   Handle<mirror::ObjectArray<mirror::ArtMethod>>
       methods(hs.NewHandle(AllocArtMethodArray(self, dex_file.NumMethodIds())));
-  if (methods.Get() == NULL) {
-    return NULL;
+  if (methods.Get() == nullptr) {
+    return nullptr;
   }
   Handle<mirror::ObjectArray<mirror::ArtField>>
       fields(hs.NewHandle(AllocArtFieldArray(self, dex_file.NumFieldIds())));
-  if (fields.Get() == NULL) {
-    return NULL;
+  if (fields.Get() == nullptr) {
+    return nullptr;
   }
   dex_cache->Init(&dex_file, location.Get(), strings.Get(), types.Get(), methods.Get(),
                   fields.Get());
@@ -2002,7 +2008,7 @@
 
 mirror::Class* ClassLinker::EnsureResolved(Thread* self, const char* descriptor,
                                            mirror::Class* klass) {
-  DCHECK(klass != NULL);
+  DCHECK(klass != nullptr);
 
   // For temporary classes we must wait for them to be retired.
   if (init_done_ && klass->IsTemp()) {
@@ -2024,7 +2030,7 @@
     }
     CHECK(h_class->IsRetired());
     // Get the updated class from class table.
-    klass = LookupClass(descriptor, h_class.Get()->GetClassLoader());
+    klass = LookupClass(self, descriptor, h_class.Get()->GetClassLoader());
   }
 
   // Wait for the class if it has not already been linked.
@@ -2062,17 +2068,102 @@
   for (size_t i = 0; i != class_path.size(); ++i) {
     const DexFile* dex_file = class_path[i];
     const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
-    if (dex_class_def != NULL) {
+    if (dex_class_def != nullptr) {
       return ClassPathEntry(dex_file, dex_class_def);
     }
   }
   // TODO: remove reinterpret_cast when issue with -std=gnu++0x host issue resolved
-  return ClassPathEntry(reinterpret_cast<const DexFile*>(NULL),
-                        reinterpret_cast<const DexFile::ClassDef*>(NULL));
+  return ClassPathEntry(static_cast<const DexFile*>(nullptr),
+                        static_cast<const DexFile::ClassDef*>(nullptr));
+}
+
+mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                                       Thread* self, const char* descriptor,
+                                                       Handle<mirror::ClassLoader> class_loader) {
+  if (class_loader->GetClass() !=
+      soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) ||
+      class_loader->GetParent()->GetClass() !=
+          soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)) {
+    return nullptr;
+  }
+  ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
+  // Check if this would be found in the parent boot class loader.
+  if (pair.second != nullptr) {
+    mirror::Class* klass = LookupClass(self, descriptor, nullptr);
+    if (klass != nullptr) {
+      return EnsureResolved(self, descriptor, klass);
+    }
+    klass = DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+                        *pair.second);
+    if (klass != nullptr) {
+      return klass;
+    }
+    CHECK(self->IsExceptionPending()) << descriptor;
+    self->ClearException();
+  } else {
+    // RegisterDexFile may allocate dex caches (and cause thread suspension).
+    StackHandleScope<3> hs(self);
+    // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
+    // We need to get the DexPathList and loop through it.
+    Handle<mirror::ArtField> cookie_field =
+        hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
+    Handle<mirror::ArtField> dex_file_field =
+        hs.NewHandle(
+            soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList$Element_dexFile));
+    mirror::Object* dex_path_list =
+        soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
+        GetObject(class_loader.Get());
+    if (dex_path_list != nullptr && dex_file_field.Get() != nullptr &&
+        cookie_field.Get() != nullptr) {
+      // DexPathList has an array dexElements of Elements[] which each contain a dex file.
+      mirror::Object* dex_elements_obj =
+          soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+          GetObject(dex_path_list);
+      // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
+      // at the mCookie which is a DexFile vector.
+      if (dex_elements_obj != nullptr) {
+        Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
+            hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
+        for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+          mirror::Object* element = dex_elements->GetWithoutChecks(i);
+          if (element == nullptr) {
+            // Should never happen; fall back to java code to throw an NPE.
+            break;
+          }
+          mirror::Object* dex_file = dex_file_field->GetObject(element);
+          if (dex_file != nullptr) {
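+            // DexFile.mCookie stores a std::vector<const DexFile*>* encoded in
+            // a Java long, decoded below.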
+            const uint64_t cookie = cookie_field->GetLong(dex_file);
+            auto* dex_files =
+                reinterpret_cast<std::vector<const DexFile*>*>(static_cast<uintptr_t>(cookie));
+            if (dex_files == nullptr) {
+              // This should never happen so log a warning.
+              LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
+              break;
+            }
+            for (const DexFile* dex_file : *dex_files) {
+              const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
+              if (dex_class_def != nullptr) {
+                RegisterDexFile(*dex_file);
+                mirror::Class* klass =
+                    DefineClass(self, descriptor, class_loader, *dex_file, *dex_class_def);
+                if (klass == nullptr) {
+                  CHECK(self->IsExceptionPending()) << descriptor;
+                  self->ClearException();
+                  return nullptr;
+                }
+                return klass;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  return nullptr;
 }
 
 mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
-                                      ConstHandle<mirror::ClassLoader> class_loader) {
+                                      Handle<mirror::ClassLoader> class_loader) {
   DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
   DCHECK(self != nullptr);
   self->AssertNoPendingException();
@@ -2082,7 +2173,7 @@
     return FindPrimitiveClass(descriptor[0]);
   }
   // Find the class in the loaded classes table.
-  mirror::Class* klass = LookupClass(descriptor, class_loader.Get());
+  mirror::Class* klass = LookupClass(self, descriptor, class_loader.Get());
   if (klass != nullptr) {
     return EnsureResolved(self, descriptor, klass);
   }
@@ -2093,7 +2184,8 @@
     // The boot class loader, search the boot class path.
     ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
     if (pair.second != nullptr) {
-      return DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first, *pair.second);
+      return DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+                         *pair.second);
     } else {
       // The boot class loader is searched ahead of the application class loader, failures are
       // expected and will be wrapped in a ClassNotFoundException. Use the pre-allocated error to
@@ -2105,7 +2197,7 @@
   } else if (Runtime::Current()->UseCompileTimeClassPath()) {
     // First try with the bootstrap class loader.
     if (class_loader.Get() != nullptr) {
-      klass = LookupClass(descriptor, nullptr);
+      klass = LookupClass(self, descriptor, nullptr);
       if (klass != nullptr) {
         return EnsureResolved(self, descriptor, klass);
       }
@@ -2114,8 +2206,8 @@
     // a NoClassDefFoundError being allocated.
     ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
     if (pair.second != nullptr) {
-      StackHandleScope<1> hs(self);
-      return DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first, *pair.second);
+      return DefineClass(self, descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
+                         *pair.second);
     }
     // Next try the compile time class path.
     const std::vector<const DexFile*>* class_path;
@@ -2127,90 +2219,18 @@
     }
     pair = FindInClassPath(descriptor, *class_path);
     if (pair.second != nullptr) {
-      return DefineClass(descriptor, class_loader, *pair.first, *pair.second);
+      return DefineClass(self, descriptor, class_loader, *pair.first, *pair.second);
+    } else {
+      // Use the pre-allocated NCDFE at compile time to avoid wasting time constructing exceptions.
+      mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+      self->SetException(ThrowLocation(), pre_allocated);
+      return nullptr;
     }
   } else {
     ScopedObjectAccessUnchecked soa(self);
-    if (class_loader->GetClass() ==
-            soa.Decode<mirror::Class*>(WellKnownClasses::dalvik_system_PathClassLoader) &&
-        class_loader->GetParent()->GetClass() ==
-            soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader)) {
-      ClassPathEntry pair = FindInClassPath(descriptor, boot_class_path_);
-      // Check if this would be found in the parent boot class loader.
-      if (pair.second != nullptr) {
-        mirror::Class* klass = LookupClass(descriptor, nullptr);
-        if (klass != nullptr) {
-          return EnsureResolved(self, descriptor, klass);
-        }
-        klass = DefineClass(descriptor, NullHandle<mirror::ClassLoader>(), *pair.first,
-                            *pair.second);
-        if (klass == nullptr) {
-          CHECK(self->IsExceptionPending()) << descriptor;
-          self->ClearException();
-        } else {
-          return klass;
-        }
-      } else {
-        // RegisterDexFile may allocate dex caches (and cause thread suspension).
-        StackHandleScope<3> hs(self);
-        // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
-        // We need to get the DexPathList and loop through it.
-        Handle<mirror::ArtField> cookie_field =
-            hs.NewHandle(soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie));
-        Handle<mirror::ArtField> dex_file_field =
-            hs.NewHandle(
-                soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList$Element_dexFile));
-        mirror::Object* dex_path_list =
-            soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)->
-            GetObject(class_loader.Get());
-        if (dex_path_list != nullptr && dex_file_field.Get() != nullptr &&
-            cookie_field.Get() != nullptr) {
-          // DexPathList has an array dexElements of Elements[] which each contain a dex file.
-          mirror::Object* dex_elements_obj =
-              soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
-              GetObject(dex_path_list);
-          // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
-          // at the mCookie which is a DexFile vector.
-          if (dex_elements_obj != nullptr) {
-            Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
-                hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
-            for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-              mirror::Object* element = dex_elements->GetWithoutChecks(i);
-              if (element == nullptr) {
-                // Should never happen, fall back to java code to throw a NPE.
-                break;
-              }
-              mirror::Object* dex_file = dex_file_field->GetObject(element);
-              if (dex_file != nullptr) {
-                const uint64_t cookie = cookie_field->GetLong(dex_file);
-                auto* dex_files =
-                    reinterpret_cast<std::vector<const DexFile*>*>(static_cast<uintptr_t>(cookie));
-                if (dex_files == nullptr) {
-                  // This should never happen so log a warning.
-                  LOG(WARNING) << "Null DexFile::mCookie for " << descriptor;
-                  break;
-                }
-                for (const DexFile* dex_file : *dex_files) {
-                  const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor);
-                  if (dex_class_def != nullptr) {
-                    RegisterDexFile(*dex_file);
-                    mirror::Class* klass =
-                        DefineClass(descriptor, class_loader, *dex_file, *dex_class_def);
-                    if (klass == nullptr) {
-                      CHECK(self->IsExceptionPending()) << descriptor;
-                      self->ClearException();
-                      // Exit the loop to make the java code generate an exception.
-                      i = dex_elements->GetLength();
-                      break;
-                    }
-                    return klass;
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
+    mirror::Class* klass = FindClassInPathClassLoader(soa, self, descriptor, class_loader);
+    if (klass != nullptr) {
+      return klass;
     }
     ScopedLocalRef<jobject> class_loader_object(soa.Env(),
                                                 soa.AddLocalReference<jobject>(class_loader.Get()));
@@ -2247,11 +2267,10 @@
   return nullptr;
 }
 
-mirror::Class* ClassLinker::DefineClass(const char* descriptor,
-                                        ConstHandle<mirror::ClassLoader> class_loader,
+mirror::Class* ClassLinker::DefineClass(Thread* self, const char* descriptor,
+                                        Handle<mirror::ClassLoader> class_loader,
                                         const DexFile& dex_file,
                                         const DexFile::ClassDef& dex_class_def) {
-  Thread* self = Thread::Current();
   StackHandleScope<3> hs(self);
   auto klass = hs.NewHandle<mirror::Class>(nullptr);
   bool should_allocate = false;
@@ -2292,7 +2311,7 @@
     return nullptr;
   }
   klass->SetDexCache(FindDexCache(dex_file));
-  LoadClass(dex_file, dex_class_def, klass, class_loader.Get());
+  LoadClass(self, dex_file, dex_class_def, klass, class_loader.Get());
   ObjectLock<mirror::Class> lock(self, klass);
   if (self->IsExceptionPending()) {
     // An exception occured during load, set status to erroneous while holding klass' lock in case
@@ -2365,7 +2384,7 @@
   size_t num_16 = 0;
   size_t num_32 = 0;
   size_t num_64 = 0;
-  if (class_data != NULL) {
+  if (class_data != nullptr) {
     for (ClassDataItemIterator it(dex_file, class_data); it.HasNextStaticField(); it.Next()) {
       const DexFile::FieldId& field_id = dex_file.GetFieldId(it.GetMemberIndex());
       const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id);
@@ -2415,7 +2434,7 @@
                                                  uint32_t method_idx) {
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
   const byte* class_data = dex_file.GetClassData(class_def);
-  CHECK(class_data != NULL);
+  CHECK(class_data != nullptr);
   ClassDataItemIterator it(dex_file, class_data);
   // Skip fields
   while (it.HasNextStaticField()) {
@@ -2676,7 +2695,7 @@
   // Ignore virtual methods on the iterator.
 }
 
-void ClassLinker::LinkCode(ConstHandle<mirror::ArtMethod> method,
+void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
                            const OatFile::OatClass* oat_class,
                            const DexFile& dex_file, uint32_t dex_method_index,
                            uint32_t method_index) {
@@ -2757,33 +2776,32 @@
 
 
 
-void ClassLinker::LoadClass(const DexFile& dex_file,
+void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
                             const DexFile::ClassDef& dex_class_def,
-                            ConstHandle<mirror::Class> klass,
+                            Handle<mirror::Class> klass,
                             mirror::ClassLoader* class_loader) {
-  CHECK(klass.Get() != NULL);
-  CHECK(klass->GetDexCache() != NULL);
+  CHECK(klass.Get() != nullptr);
+  CHECK(klass->GetDexCache() != nullptr);
   CHECK_EQ(mirror::Class::kStatusNotReady, klass->GetStatus());
   const char* descriptor = dex_file.GetClassDescriptor(dex_class_def);
-  CHECK(descriptor != NULL);
+  CHECK(descriptor != nullptr);
 
   klass->SetClass(GetClassRoot(kJavaLangClass));
   if (kUseBakerOrBrooksReadBarrier) {
     klass->AssertReadBarrierPointer();
   }
-  uint32_t access_flags = dex_class_def.access_flags_;
-  // Make sure that none of our runtime-only flags are set.
+  uint32_t access_flags = dex_class_def.GetJavaAccessFlags();
   CHECK_EQ(access_flags & ~kAccJavaFlagsMask, 0U);
   klass->SetAccessFlags(access_flags);
   klass->SetClassLoader(class_loader);
   DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
-  klass->SetStatus(mirror::Class::kStatusIdx, NULL);
+  klass->SetStatus(mirror::Class::kStatusIdx, nullptr);
 
   klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
   klass->SetDexTypeIndex(dex_class_def.class_idx_);
 
   const byte* class_data = dex_file.GetClassData(dex_class_def);
-  if (class_data == NULL) {
+  if (class_data == nullptr) {
     return;  // no fields or methods - for example a marker interface
   }
 
@@ -2793,25 +2811,24 @@
     OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
                                                &has_oat_class);
     if (has_oat_class) {
-      LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
+      LoadClassMembers(self, dex_file, class_data, klass, class_loader, &oat_class);
     }
   }
   if (!has_oat_class) {
-    LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
+    LoadClassMembers(self, dex_file, class_data, klass, class_loader, nullptr);
   }
 }
 
-void ClassLinker::LoadClassMembers(const DexFile& dex_file,
+void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
                                    const byte* class_data,
-                                   ConstHandle<mirror::Class> klass,
+                                   Handle<mirror::Class> klass,
                                    mirror::ClassLoader* class_loader,
                                    const OatFile::OatClass* oat_class) {
   // Load fields.
   ClassDataItemIterator it(dex_file, class_data);
-  Thread* self = Thread::Current();
   if (it.NumStaticFields() != 0) {
     mirror::ObjectArray<mirror::ArtField>* statics = AllocArtFieldArray(self, it.NumStaticFields());
-    if (UNLIKELY(statics == NULL)) {
+    if (UNLIKELY(statics == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2820,16 +2837,17 @@
   if (it.NumInstanceFields() != 0) {
     mirror::ObjectArray<mirror::ArtField>* fields =
         AllocArtFieldArray(self, it.NumInstanceFields());
-    if (UNLIKELY(fields == NULL)) {
+    if (UNLIKELY(fields == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
     klass->SetIFields(fields);
   }
   for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtField> sfield(hs.NewHandle(AllocArtField(self)));
-    if (UNLIKELY(sfield.Get() == NULL)) {
+    if (UNLIKELY(sfield.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2837,9 +2855,10 @@
     LoadField(dex_file, it, klass, sfield);
   }
   for (size_t i = 0; it.HasNextInstanceField(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtField> ifield(hs.NewHandle(AllocArtField(self)));
-    if (UNLIKELY(ifield.Get() == NULL)) {
+    if (UNLIKELY(ifield.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2852,7 +2871,7 @@
     // TODO: append direct methods to class object
     mirror::ObjectArray<mirror::ArtMethod>* directs =
          AllocArtMethodArray(self, it.NumDirectMethods());
-    if (UNLIKELY(directs == NULL)) {
+    if (UNLIKELY(directs == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2862,7 +2881,7 @@
     // TODO: append direct methods to class object
     mirror::ObjectArray<mirror::ArtMethod>* virtuals =
         AllocArtMethodArray(self, it.NumVirtualMethods());
-    if (UNLIKELY(virtuals == NULL)) {
+    if (UNLIKELY(virtuals == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2872,9 +2891,10 @@
   uint32_t last_dex_method_index = DexFile::kDexNoIndex;
   size_t last_class_def_method_index = 0;
   for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
-    if (UNLIKELY(method.Get() == NULL)) {
+    if (UNLIKELY(method.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2892,9 +2912,10 @@
     class_def_method_index++;
   }
   for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) {
+    self->AllowThreadSuspension();
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> method(hs.NewHandle(LoadMethod(self, dex_file, it, klass)));
-    if (UNLIKELY(method.Get() == NULL)) {
+    if (UNLIKELY(method.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return;
     }
@@ -2907,25 +2928,25 @@
 }
 
 void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
-                            ConstHandle<mirror::Class> klass,
-                            ConstHandle<mirror::ArtField> dst) {
+                            Handle<mirror::Class> klass,
+                            Handle<mirror::ArtField> dst) {
   uint32_t field_idx = it.GetMemberIndex();
   dst->SetDexFieldIndex(field_idx);
   dst->SetDeclaringClass(klass.Get());
-  dst->SetAccessFlags(it.GetMemberAccessFlags());
+  dst->SetAccessFlags(it.GetFieldAccessFlags());
 }
 
 mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
                                            const ClassDataItemIterator& it,
-                                           ConstHandle<mirror::Class> klass) {
+                                           Handle<mirror::Class> klass) {
   uint32_t dex_method_idx = it.GetMemberIndex();
   const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
   const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
 
   mirror::ArtMethod* dst = AllocArtMethod(self);
-  if (UNLIKELY(dst == NULL)) {
+  if (UNLIKELY(dst == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
-    return NULL;
+    return nullptr;
   }
   DCHECK(dst->IsArtMethod()) << PrettyDescriptor(dst->GetClass());
 
@@ -2938,13 +2959,13 @@
   dst->SetDexCacheResolvedMethods(klass->GetDexCache()->GetResolvedMethods());
   dst->SetDexCacheResolvedTypes(klass->GetDexCache()->GetResolvedTypes());
 
-  uint32_t access_flags = it.GetMemberAccessFlags();
+  uint32_t access_flags = it.GetMethodAccessFlags();
 
   if (UNLIKELY(strcmp("finalize", method_name) == 0)) {
     // Set finalizable flag on declaring class.
     if (strcmp("V", dex_file.GetShorty(method_id.proto_idx_)) == 0) {
       // Void return type.
-      if (klass->GetClassLoader() != NULL) {  // All non-boot finalizer methods are flagged.
+      if (klass->GetClassLoader() != nullptr) {  // All non-boot finalizer methods are flagged.
         klass->SetFinalizable();
       } else {
         std::string temp;
@@ -2980,17 +3001,17 @@
   return dst;
 }
 
-void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) {
-  Thread* self = Thread::Current();
+void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
   StackHandleScope<1> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
-  CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+  CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
+                                    << dex_file.GetLocation();
   AppendToBootClassPath(dex_file, dex_cache);
 }
 
 void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
-                                        ConstHandle<mirror::DexCache> dex_cache) {
-  CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
+                                        Handle<mirror::DexCache> dex_cache) {
+  CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
   boot_class_path_.push_back(&dex_file);
   RegisterDexFile(dex_file, dex_cache);
 }
@@ -3012,9 +3033,9 @@
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
-                                        ConstHandle<mirror::DexCache> dex_cache) {
+                                        Handle<mirror::DexCache> dex_cache) {
   dex_lock_.AssertExclusiveHeld(Thread::Current());
-  CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
+  CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
   CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
       << dex_cache->GetLocation()->ToModifiedUtf8() << " " << dex_file.GetLocation();
   dex_caches_.push_back(GcRoot<mirror::DexCache>(dex_cache.Get()));
@@ -3038,7 +3059,8 @@
   // get to a suspend point.
   StackHandleScope<1> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
-  CHECK(dex_cache.Get() != NULL) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+  CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
+                                    << dex_file.GetLocation();
   {
     WriterMutexLock mu(self, dex_lock_);
     if (IsDexFileRegisteredLocked(dex_file)) {
@@ -3049,7 +3071,7 @@
 }
 
 void ClassLinker::RegisterDexFile(const DexFile& dex_file,
-                                  ConstHandle<mirror::DexCache> dex_cache) {
+                                  Handle<mirror::DexCache> dex_cache) {
   WriterMutexLock mu(Thread::Current(), dex_lock_);
   RegisterDexFileLocked(dex_file, dex_cache);
 }
@@ -3077,7 +3099,7 @@
     LOG(ERROR) << "Registered dex file " << i << " = " << dex_cache->GetDexFile()->GetLocation();
   }
   LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
-  return NULL;
+  return nullptr;
 }
 
 void ClassLinker::FixupDexCaches(mirror::ArtMethod* resolution_method) {
@@ -3090,15 +3112,15 @@
 
 mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
   mirror::Class* klass = AllocClass(self, mirror::Class::PrimitiveClassSize());
-  if (UNLIKELY(klass == NULL)) {
-    return NULL;
+  if (UNLIKELY(klass == nullptr)) {
+    return nullptr;
   }
   return InitializePrimitiveClass(klass, type);
 }
 
 mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_class,
                                                      Primitive::Type type) {
-  CHECK(primitive_class != NULL);
+  CHECK(primitive_class != nullptr);
   // Must hold lock on object when initializing.
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
@@ -3109,7 +3131,7 @@
   primitive_class->SetStatus(mirror::Class::kStatusInitialized, self);
   const char* descriptor = Primitive::Descriptor(type);
   mirror::Class* existing = InsertClass(descriptor, primitive_class, Hash(descriptor));
-  CHECK(existing == NULL) << "InitPrimitiveClass(" << type << ") failed";
+  CHECK(existing == nullptr) << "InitPrimitiveClass(" << type << ") failed";
   return primitive_class;
 }
 
@@ -3125,17 +3147,18 @@
 // the right context.  It does NOT become the class loader for the
 // array class; that always comes from the base element class.
 //
-// Returns NULL with an exception raised on failure.
+// Returns nullptr with an exception raised on failure.
 mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor,
-                                             ConstHandle<mirror::ClassLoader> class_loader) {
+                                             Handle<mirror::ClassLoader> class_loader) {
   // Identify the underlying component type
   CHECK_EQ('[', descriptor[0]);
   StackHandleScope<2> hs(self);
-  Handle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1, class_loader)));
+  MutableHandle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1,
+                                                                     class_loader)));
   if (component_type.Get() == nullptr) {
     DCHECK(self->IsExceptionPending());
     // We need to accept erroneous classes as component types.
-    component_type.Assign(LookupClass(descriptor + 1, class_loader.Get()));
+    component_type.Assign(LookupClass(self, descriptor + 1, class_loader.Get()));
     if (component_type.Get() == nullptr) {
       DCHECK(self->IsExceptionPending());
       return nullptr;
@@ -3165,8 +3188,8 @@
   // class to the hash table --- necessary because of possible races with
   // other threads.)
   if (class_loader.Get() != component_type->GetClassLoader()) {
-    mirror::Class* new_class = LookupClass(descriptor, component_type->GetClassLoader());
-    if (new_class != NULL) {
+    mirror::Class* new_class = LookupClass(self, descriptor, component_type->GetClassLoader());
+    if (new_class != nullptr) {
       return new_class;
     }
   }
@@ -3208,7 +3231,7 @@
     new_class->SetComponentType(component_type.Get());
   }
   ObjectLock<mirror::Class> lock(self, new_class);  // Must hold lock on object when initializing.
-  DCHECK(new_class->GetComponentType() != NULL);
+  DCHECK(new_class->GetComponentType() != nullptr);
   mirror::Class* java_lang_Object = GetClassRoot(kJavaLangObject);
   new_class->SetSuperClass(java_lang_Object);
   new_class->SetVTable(java_lang_Object->GetVTable());
@@ -3288,7 +3311,7 @@
   }
   std::string printable_type(PrintableChar(type));
   ThrowNoClassDefFoundError("Not a primitive type: %s", printable_type.c_str());
-  return NULL;
+  return nullptr;
 }
 
 mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass,
@@ -3296,7 +3319,7 @@
   if (VLOG_IS_ON(class_linker)) {
     mirror::DexCache* dex_cache = klass->GetDexCache();
     std::string source;
-    if (dex_cache != NULL) {
+    if (dex_cache != nullptr) {
       source += " from ";
       source += dex_cache->GetLocation()->ToModifiedUtf8();
     }
@@ -3305,15 +3328,15 @@
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
   mirror::Class* existing =
       LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
-  if (existing != NULL) {
+  if (existing != nullptr) {
     return existing;
   }
-  if (kIsDebugBuild && !klass->IsTemp() && klass->GetClassLoader() == NULL &&
+  if (kIsDebugBuild && !klass->IsTemp() && klass->GetClassLoader() == nullptr &&
       dex_cache_image_class_lookup_required_) {
     // Check that a class loaded with the system class loader matches one in the image if the
     // class is in the image.
     existing = LookupClassFromImage(descriptor);
-    if (existing != NULL) {
+    if (existing != nullptr) {
       CHECK(klass == existing);
     }
   }
@@ -3322,7 +3345,7 @@
   if (log_new_class_table_roots_) {
     new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
   }
-  return NULL;
+  return nullptr;
 }
 
 mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass,
@@ -3340,8 +3363,8 @@
   CHECK(!existing->IsResolved()) << descriptor;
   CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
 
-  for (auto it = class_table_.lower_bound(hash), end = class_table_.end(); it != end && it->first == hash;
-       ++it) {
+  for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
+       it != end && it->first == hash; ++it) {
     mirror::Class* klass = it->second.Read();
     if (klass == existing) {
       class_table_.erase(it);
@@ -3384,22 +3407,22 @@
   return false;
 }
 
-mirror::Class* ClassLinker::LookupClass(const char* descriptor,
+mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor,
                                         const mirror::ClassLoader* class_loader) {
   size_t hash = Hash(descriptor);
   {
-    ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+    ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
     mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash);
-    if (result != NULL) {
+    if (result != nullptr) {
       return result;
     }
   }
-  if (class_loader != NULL || !dex_cache_image_class_lookup_required_) {
-    return NULL;
+  if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) {
+    return nullptr;
   } else {
     // Lookup failed, but we need to search dex_caches_.
     mirror::Class* result = LookupClassFromImage(descriptor);
-    if (result != NULL) {
+    if (result != nullptr) {
       InsertClass(descriptor, result, hash);
     } else {
       // Searching the image dex files/caches failed, we don't want to get into this situation
@@ -3434,13 +3457,13 @@
       return klass;
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace();
-  CHECK(image != NULL);
+  CHECK(image != nullptr);
   mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
   return root->AsObjectArray<mirror::DexCache>();
 }
@@ -3460,12 +3483,12 @@
     mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes();
     for (int32_t j = 0; j < types->GetLength(); j++) {
       mirror::Class* klass = types->Get(j);
-      if (klass != NULL) {
-        DCHECK(klass->GetClassLoader() == NULL);
+      if (klass != nullptr) {
+        DCHECK(klass->GetClassLoader() == nullptr);
         const char* descriptor = klass->GetDescriptor(&temp);
         size_t hash = Hash(descriptor);
-        mirror::Class* existing = LookupClassFromTableLocked(descriptor, NULL, hash);
-        if (existing != NULL) {
+        mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash);
+        if (existing != nullptr) {
           CHECK(existing == klass) << PrettyClassAndClassLoader(existing) << " != "
               << PrettyClassAndClassLoader(klass);
         } else {
@@ -3491,13 +3514,13 @@
     const DexFile* dex_file = dex_cache->GetDexFile();
     // Try binary searching the string/type index.
     const DexFile::StringId* string_id = dex_file->FindStringId(descriptor);
-    if (string_id != NULL) {
+    if (string_id != nullptr) {
       const DexFile::TypeId* type_id =
           dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
-      if (type_id != NULL) {
+      if (type_id != nullptr) {
         uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
         mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
-        if (klass != NULL) {
+        if (klass != nullptr) {
           self->EndAssertNoThreadSuspension(old_no_suspend_cause);
           return klass;
         }
@@ -3505,7 +3528,7 @@
     }
   }
   self->EndAssertNoThreadSuspension(old_no_suspend_cause);
-  return NULL;
+  return nullptr;
 }
 
 void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Class*>& result) {
@@ -3524,14 +3547,16 @@
   }
 }
 
-void ClassLinker::VerifyClass(ConstHandle<mirror::Class> klass) {
+void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
   // TODO: assert that the monitor on the Class is held
-  Thread* self = Thread::Current();
   ObjectLock<mirror::Class> lock(self, klass);
 
   // Don't attempt to re-verify if already sufficiently verified.
-  if (klass->IsVerified() ||
-      (klass->IsCompileTimeVerified() && Runtime::Current()->IsCompiler())) {
+  if (klass->IsVerified()) {
+    EnsurePreverifiedMethods(klass);
+    return;
+  }
+  if (klass->IsCompileTimeVerified() && Runtime::Current()->IsCompiler()) {
     return;
   }
 
@@ -3554,18 +3579,19 @@
   // Skip verification if disabled.
   if (!Runtime::Current()->IsVerificationEnabled()) {
     klass->SetStatus(mirror::Class::kStatusVerified, self);
+    EnsurePreverifiedMethods(klass);
     return;
   }
 
   // Verify super class.
   StackHandleScope<2> hs(self);
   Handle<mirror::Class> super(hs.NewHandle(klass->GetSuperClass()));
-  if (super.Get() != NULL) {
+  if (super.Get() != nullptr) {
     // Acquire lock to prevent races on verifying the super class.
     ObjectLock<mirror::Class> lock(self, super);
 
     if (!super->IsVerified() && !super->IsErroneous()) {
-      VerifyClass(super);
+      VerifyClass(self, super);
     }
     if (!super->IsCompileTimeVerified()) {
       std::string error_msg(
@@ -3606,7 +3632,7 @@
   verifier::MethodVerifier::FailureKind verifier_failure = verifier::MethodVerifier::kNoFailure;
   std::string error_msg;
   if (!preverified) {
-    verifier_failure = verifier::MethodVerifier::VerifyClass(klass.Get(),
+    verifier_failure = verifier::MethodVerifier::VerifyClass(self, klass.Get(),
                                                              Runtime::Current()->IsCompiler(),
                                                              &error_msg);
   }
@@ -3622,7 +3648,7 @@
     if (verifier_failure == verifier::MethodVerifier::kNoFailure) {
       // Even though there were no verifier failures we need to respect whether the super-class
       // was verified or requiring runtime reverification.
-      if (super.Get() == NULL || super->IsVerified()) {
+      if (super.Get() == nullptr || super->IsVerified()) {
         klass->SetStatus(mirror::Class::kStatusVerified, self);
       } else {
         CHECK_EQ(super->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
@@ -3639,6 +3665,9 @@
         klass->SetStatus(mirror::Class::kStatusRetryVerificationAtRuntime, self);
       } else {
         klass->SetStatus(mirror::Class::kStatusVerified, self);
+        // As this is a fake verified status, make sure the methods are _not_ marked preverified
+        // later.
+        klass->SetPreverified();
       }
     }
   } else {
@@ -3656,7 +3685,14 @@
     // Note: we get here both during compilation and at runtime. When we set the
     // kAccPreverified flag while compiling image classes, the flag is recorded
     // in the image and restored when the image is loaded.
+    EnsurePreverifiedMethods(klass);
+  }
+}
+
+void ClassLinker::EnsurePreverifiedMethods(Handle<mirror::Class> klass) {
+  if (!klass->IsPreverified()) {
     klass->SetPreverifiedFlagOnAllMethods();
+    klass->SetPreverified();
   }
 }
 
@@ -3674,19 +3710,17 @@
     // We are compiling an app (not the image).
 
     // Is this an app class? (I.e. not a bootclasspath class)
-    if (klass->GetClassLoader() != NULL) {
+    if (klass->GetClassLoader() != nullptr) {
       return false;
     }
   }
 
   const OatFile::OatDexFile* oat_dex_file = FindOpenedOatDexFileForDexFile(dex_file);
-  // Make this work with gtests, which do not set up the image properly.
-  // TODO: we should clean up gtests to set up the image path properly.
-  if (Runtime::Current()->IsCompiler() || (oat_dex_file == nullptr)) {
+  // In case we run without an image, there won't be a backing oat file.
+  if (oat_dex_file == nullptr) {
     return false;
   }
 
-  CHECK(oat_dex_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
   uint16_t class_def_index = klass->GetDexClassDefIndex();
   oat_file_class_status = oat_dex_file->GetOatClass(class_def_index).GetStatus();
   if (oat_file_class_status == mirror::Class::kStatusVerified ||
@@ -3734,7 +3768,7 @@
 }
 
 void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
-                                                    ConstHandle<mirror::Class> klass) {
+                                                    Handle<mirror::Class> klass) {
   for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
     ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
   }
@@ -3747,7 +3781,7 @@
                                                      mirror::ArtMethod* method) {
   // similar to DexVerifier::ScanTryCatchBlocks and dex2oat's ResolveExceptionsForMethod.
   const DexFile::CodeItem* code_item = dex_file.GetCodeItem(method->GetCodeItemOffset());
-  if (code_item == NULL) {
+  if (code_item == nullptr) {
     return;  // native or abstract method
   }
   if (code_item->tries_size_ == 0) {
@@ -3763,7 +3797,7 @@
       // unresolved exception types will be ignored by exception delivery
       if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
         mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method);
-        if (exception_type == NULL) {
+        if (exception_type == nullptr) {
           DCHECK(Thread::Current()->IsExceptionPending());
           Thread::Current()->ClearException();
         }
@@ -3774,23 +3808,24 @@
 }
 
 static void CheckProxyConstructor(mirror::ArtMethod* constructor);
-static void CheckProxyMethod(ConstHandle<mirror::ArtMethod> method,
-                             ConstHandle<mirror::ArtMethod> prototype);
+static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
+                             Handle<mirror::ArtMethod> prototype);
 
 mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
                                              jobjectArray interfaces, jobject loader,
                                              jobjectArray methods, jobjectArray throws) {
   Thread* self = soa.Self();
   StackHandleScope<8> hs(self);
-  Handle<mirror::Class> klass(hs.NewHandle(
+  MutableHandle<mirror::Class> klass(hs.NewHandle(
       AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class))));
-  if (klass.Get() == NULL) {
+  if (klass.Get() == nullptr) {
     CHECK(self->IsExceptionPending());  // OOME.
-    return NULL;
+    return nullptr;
   }
-  DCHECK(klass->GetClass() != NULL);
+  DCHECK(klass->GetClass() != nullptr);
   klass->SetObjectSize(sizeof(mirror::Proxy));
-  klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal);
+  // Set the class access flags incl. preverified, so we do not try to set the flag on the methods.
+  klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccPreverified);
   klass->SetClassLoader(soa.Decode<mirror::ClassLoader*>(loader));
   DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
   klass->SetName(soa.Decode<mirror::String*>(name));
@@ -3801,9 +3836,9 @@
   // Instance fields are inherited, but we add a couple of static fields...
   {
     mirror::ObjectArray<mirror::ArtField>* sfields = AllocArtFieldArray(self, 2);
-    if (UNLIKELY(sfields == NULL)) {
+    if (UNLIKELY(sfields == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
-      return NULL;
+      return nullptr;
     }
     klass->SetSFields(sfields);
   }
@@ -3851,9 +3886,9 @@
   {
     mirror::ObjectArray<mirror::ArtMethod>* virtuals = AllocArtMethodArray(self,
                                                                            num_virtual_methods);
-    if (UNLIKELY(virtuals == NULL)) {
+    if (UNLIKELY(virtuals == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
-      return NULL;
+      return nullptr;
     }
     klass->SetVirtualMethods(virtuals);
   }
@@ -3877,7 +3912,8 @@
   std::string descriptor(GetDescriptorForProxy(klass.Get()));
   mirror::Class* new_class = nullptr;
   {
-    ObjectLock<mirror::Class> resolution_lock(self, klass);  // Must hold lock on object when resolved.
+    // Must hold lock on object when resolved.
+    ObjectLock<mirror::Class> resolution_lock(self, klass);
     // Link the fields and virtual methods, creating vtable and iftables
     Handle<mirror::ObjectArray<mirror::Class> > h_interfaces(
         hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
@@ -3892,9 +3928,11 @@
   klass.Assign(new_class);
 
   CHECK_EQ(interfaces_sfield->GetDeclaringClass(), new_class);
-  interfaces_sfield->SetObject<false>(klass.Get(), soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
+  interfaces_sfield->SetObject<false>(klass.Get(),
+                                      soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
   CHECK_EQ(throws_sfield->GetDeclaringClass(), new_class);
-  throws_sfield->SetObject<false>(klass.Get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
+  throws_sfield->SetObject<false>(klass.Get(),
+      soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
 
   {
     // Lock on klass is released. Lock new class object.
@@ -3937,7 +3975,7 @@
 std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
   DCHECK(proxy_class->IsProxyClass());
   mirror::String* name = proxy_class->GetName();
-  DCHECK(name != NULL);
+  DCHECK(name != nullptr);
   return DotToDescriptor(name->ToModifiedUtf8().c_str());
 }
 
@@ -3966,7 +4004,7 @@
 
 
 mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
-                                                       ConstHandle<mirror::Class> klass,
+                                                       Handle<mirror::Class> klass,
                                                        mirror::Class* proxy_class) {
   // Create a constructor for Proxy that must initialize the InvocationHandler field h.
   mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods =
@@ -4000,8 +4038,8 @@
 }
 
 mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
-                                                  ConstHandle<mirror::Class> klass,
-                                                  ConstHandle<mirror::ArtMethod> prototype) {
+                                                  Handle<mirror::Class> klass,
+                                                  Handle<mirror::ArtMethod> prototype) {
   // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
   // prototype method
   prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
@@ -4009,9 +4047,9 @@
   // We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
   // as necessary
   mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(prototype->Clone(self));
-  if (UNLIKELY(method == NULL)) {
+  if (UNLIKELY(method == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
-    return NULL;
+    return nullptr;
   }
 
   // Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to
@@ -4028,8 +4066,8 @@
   return method;
 }
 
-static void CheckProxyMethod(ConstHandle<mirror::ArtMethod> method,
-                             ConstHandle<mirror::ArtMethod> prototype)
+static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
+                             Handle<mirror::ArtMethod> prototype)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Basic sanity
   CHECK(!prototype->IsFinal());
@@ -4061,13 +4099,13 @@
   if (!can_init_statics) {
     // Check if there's a class initializer.
     mirror::ArtMethod* clinit = klass->FindClassInitializer();
-    if (clinit != NULL) {
+    if (clinit != nullptr) {
       return false;
     }
     // Check if there are encoded static values needing initialization.
     if (klass->NumStaticFields() != 0) {
       const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
-      DCHECK(dex_class_def != NULL);
+      DCHECK(dex_class_def != nullptr);
       if (dex_class_def->static_values_off_ != 0) {
         return false;
       }
@@ -4086,12 +4124,8 @@
   return true;
 }
 
-bool ClassLinker::IsInitialized() const {
-  return init_done_;
-}
-
-bool ClassLinker::InitializeClass(ConstHandle<mirror::Class> klass, bool can_init_statics,
-                                  bool can_init_parents) {
+bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
+                                  bool can_init_statics, bool can_init_parents) {
   // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
 
   // Are we already initialized and therefore done?
@@ -4106,7 +4140,7 @@
     return false;
   }
 
-  Thread* self = Thread::Current();
+  self->AllowThreadSuspension();
   uint64_t t0;
   {
     ObjectLock<mirror::Class> lock(self, klass);
@@ -4125,7 +4159,7 @@
     CHECK(klass->IsResolved()) << PrettyClass(klass.Get()) << ": state=" << klass->GetStatus();
 
     if (!klass->IsVerified()) {
-      VerifyClass(klass);
+      VerifyClass(self, klass);
       if (!klass->IsVerified()) {
         // We failed to verify, expect either the klass to be erroneous or verification failed at
         // compile time.
@@ -4164,6 +4198,7 @@
       klass->SetStatus(mirror::Class::kStatusError, self);
       return false;
     }
+    self->AllowThreadSuspension();
 
     CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusVerified) << PrettyClass(klass.Get());
 
@@ -4183,7 +4218,7 @@
       CHECK(can_init_parents);
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
-      bool super_initialized = InitializeClass(handle_scope_super, can_init_statics, true);
+      bool super_initialized = InitializeClass(self, handle_scope_super, can_init_statics, true);
       if (!super_initialized) {
         // The super class was verified ahead of entering initializing, we should only be here if
         // the super class became erroneous due to initialization.
@@ -4192,7 +4227,7 @@
             << PrettyDescriptor(handle_scope_super.Get())
             << " that has unexpected status " << handle_scope_super->GetStatus()
             << "\nPending exception:\n"
-            << (self->GetException(NULL) != NULL ? self->GetException(NULL)->Dump() : "");
+            << (self->GetException(nullptr) != nullptr ? self->GetException(nullptr)->Dump() : "");
         ObjectLock<mirror::Class> lock(self, klass);
         // Initialization failed because the super-class is erroneous.
         klass->SetStatus(mirror::Class::kStatusError, self);
@@ -4203,7 +4238,7 @@
 
   if (klass->NumStaticFields() > 0) {
     const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
-    CHECK(dex_class_def != NULL);
+    CHECK(dex_class_def != nullptr);
     const DexFile& dex_file = klass->GetDexFile();
     StackHandleScope<3> hs(self);
     Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
@@ -4230,12 +4265,13 @@
   }
 
   mirror::ArtMethod* clinit = klass->FindClassInitializer();
-  if (clinit != NULL) {
+  if (clinit != nullptr) {
     CHECK(can_init_statics);
     JValue result;
-    clinit->Invoke(self, NULL, 0, &result, "V");
+    clinit->Invoke(self, nullptr, 0, &result, "V");
   }
 
+  self->AllowThreadSuspension();
   uint64_t t1 = NanoTime();
 
   bool success = true;
@@ -4267,7 +4303,7 @@
   return success;
 }
 
-bool ClassLinker::WaitForInitializeClass(ConstHandle<mirror::Class> klass, Thread* self,
+bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
                                          ObjectLock<mirror::Class>& lock)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   while (true) {
@@ -4307,14 +4343,14 @@
   LOG(FATAL) << "Not Reached" << PrettyClass(klass.Get());
 }
 
-bool ClassLinker::ValidateSuperClassDescriptors(ConstHandle<mirror::Class> klass) {
+bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
   if (klass->IsInterface()) {
     return true;
   }
   // Begin with the methods local to the superclass.
   StackHandleScope<2> hs(Thread::Current());
-  MethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-  MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+  MutableMethodHelper mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+  MutableMethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
   if (klass->HasSuperClass() &&
       klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) {
     for (int i = klass->GetSuperClass()->GetVTableLength() - 1; i >= 0; --i) {
@@ -4352,14 +4388,14 @@
   return true;
 }
 
-bool ClassLinker::EnsureInitialized(ConstHandle<mirror::Class> c, bool can_init_fields,
+bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields,
                                     bool can_init_parents) {
   DCHECK(c.Get() != nullptr);
   if (c->IsInitialized()) {
+    EnsurePreverifiedMethods(c);
     return true;
   }
-  const bool success = InitializeClass(c, can_init_fields, can_init_parents);
-  Thread* self = Thread::Current();
+  const bool success = InitializeClass(self, c, can_init_fields, can_init_parents);
   if (!success) {
     if (can_init_fields && can_init_parents) {
       CHECK(self->IsExceptionPending()) << PrettyClass(c.Get());
@@ -4408,8 +4444,8 @@
   }
 }
 
-bool ClassLinker::LinkClass(Thread* self, const char* descriptor, ConstHandle<mirror::Class> klass,
-                            ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces,
+bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
+                            Handle<mirror::ObjectArray<mirror::Class>> interfaces,
                             mirror::Class** new_class) {
   CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
 
@@ -4419,11 +4455,11 @@
   if (!LinkMethods(self, klass, interfaces)) {
     return false;
   }
-  if (!LinkInstanceFields(klass)) {
+  if (!LinkInstanceFields(self, klass)) {
     return false;
   }
   size_t class_size;
-  if (!LinkStaticFields(klass, &class_size)) {
+  if (!LinkStaticFields(self, klass, &class_size)) {
     return false;
   }
   CreateReferenceInstanceOffsets(klass);
@@ -4446,7 +4482,7 @@
     CHECK(!klass->IsResolved());
     // Retire the temporary class and create the correctly sized resolved class.
     *new_class = klass->CopyOf(self, class_size);
-    if (UNLIKELY(*new_class == NULL)) {
+    if (UNLIKELY(*new_class == nullptr)) {
       CHECK(self->IsExceptionPending());  // Expect an OOME.
       klass->SetStatus(mirror::Class::kStatusError, self);
       return false;
@@ -4460,7 +4496,7 @@
     FixupTemporaryDeclaringClass(klass.Get(), new_class_h.Get());
 
     mirror::Class* existing = UpdateClass(descriptor, new_class_h.Get(), Hash(descriptor));
-    CHECK(existing == NULL || existing == klass.Get());
+    CHECK(existing == nullptr || existing == klass.Get());
 
     // This will notify waiters on temp class that saw the not yet resolved class in the
     // class_table_ during EnsureResolved.
@@ -4474,13 +4510,13 @@
   return true;
 }
 
-bool ClassLinker::LoadSuperAndInterfaces(ConstHandle<mirror::Class> klass, const DexFile& dex_file) {
+bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) {
   CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus());
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
   uint16_t super_class_idx = class_def.superclass_idx_;
   if (super_class_idx != DexFile::kDexNoIndex16) {
     mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.Get());
-    if (super_class == NULL) {
+    if (super_class == nullptr) {
       DCHECK(Thread::Current()->IsExceptionPending());
       return false;
     }
@@ -4495,11 +4531,11 @@
     klass->SetSuperClass(super_class);
   }
   const DexFile::TypeList* interfaces = dex_file.GetInterfacesList(class_def);
-  if (interfaces != NULL) {
+  if (interfaces != nullptr) {
     for (size_t i = 0; i < interfaces->Size(); i++) {
       uint16_t idx = interfaces->GetTypeItem(i).type_idx_;
       mirror::Class* interface = ResolveType(dex_file, idx, klass.Get());
-      if (interface == NULL) {
+      if (interface == nullptr) {
         DCHECK(Thread::Current()->IsExceptionPending());
         return false;
       }
@@ -4514,21 +4550,21 @@
     }
   }
   // Mark the class as loaded.
-  klass->SetStatus(mirror::Class::kStatusLoaded, NULL);
+  klass->SetStatus(mirror::Class::kStatusLoaded, nullptr);
   return true;
 }
 
-bool ClassLinker::LinkSuperClass(ConstHandle<mirror::Class> klass) {
+bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
   CHECK(!klass->IsPrimitive());
   mirror::Class* super = klass->GetSuperClass();
   if (klass.Get() == GetClassRoot(kJavaLangObject)) {
-    if (super != NULL) {
+    if (super != nullptr) {
       ThrowClassFormatError(klass.Get(), "java.lang.Object must not have a superclass");
       return false;
     }
     return true;
   }
-  if (super == NULL) {
+  if (super == nullptr) {
     ThrowLinkageError(klass.Get(), "No superclass defined for class %s",
                       PrettyDescriptor(klass.Get()).c_str());
     return false;
@@ -4569,7 +4605,7 @@
 
   if (kIsDebugBuild) {
     // Ensure super classes are fully resolved prior to resolving fields.
-    while (super != NULL) {
+    while (super != nullptr) {
       CHECK(super->IsResolved());
       super = super->GetSuperClass();
     }
@@ -4578,8 +4614,9 @@
 }
 
 // Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(Thread* self, ConstHandle<mirror::Class> klass,
-                              ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces) {
+bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
+                              Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+  self->AllowThreadSuspension();
   if (klass->IsInterface()) {
     // No vtable.
     size_t count = klass->NumVirtualMethods();
@@ -4599,7 +4636,7 @@
   return true;
 }
 
-bool ClassLinker::LinkVirtualMethods(Thread* self, ConstHandle<mirror::Class> klass) {
+bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
   if (klass->HasSuperClass()) {
     uint32_t max_count = klass->NumVirtualMethods() +
         klass->GetSuperClass()->GetVTableLength();
@@ -4607,7 +4644,7 @@
     CHECK_LE(actual_count, max_count);
     StackHandleScope<4> hs(self);
     Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
-    Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
+    MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
     if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
       vtable = hs.NewHandle(AllocArtMethodArray(self, max_count));
       if (UNLIKELY(vtable.Get() == nullptr)) {
@@ -4628,8 +4665,8 @@
     }
 
     // See if any of our virtual methods override methods in the superclass.
-    MethodHelper local_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-    MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+    MutableMethodHelper local_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+    MutableMethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
     for (size_t i = 0; i < klass->NumVirtualMethods(); ++i) {
       mirror::ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i);
       local_mh.ChangeMethod(local_method);
@@ -4671,7 +4708,7 @@
     CHECK_LE(actual_count, max_count);
     if (actual_count < max_count) {
       vtable.Assign(vtable->CopyOf(self, actual_count));
-      if (UNLIKELY(vtable.Get() == NULL)) {
+      if (UNLIKELY(vtable.Get() == nullptr)) {
         CHECK(self->IsExceptionPending());  // OOME.
         return false;
       }
@@ -4687,7 +4724,7 @@
     StackHandleScope<1> hs(self);
     Handle<mirror::ObjectArray<mirror::ArtMethod>>
         vtable(hs.NewHandle(AllocArtMethodArray(self, num_virtual_methods)));
-    if (UNLIKELY(vtable.Get() == NULL)) {
+    if (UNLIKELY(vtable.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return false;
     }
@@ -4701,8 +4738,8 @@
   return true;
 }
 
-bool ClassLinker::LinkInterfaceMethods(ConstHandle<mirror::Class> klass,
-                                       ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces) {
+bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
+                                       Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
   Thread* const self = Thread::Current();
   Runtime* const runtime = Runtime::Current();
   // Set the imt table to be all conflicts by default.
@@ -4725,7 +4762,7 @@
   if (ifcount == 0) {
     // Class implements no interfaces.
     DCHECK_EQ(klass->GetIfTableCount(), 0);
-    DCHECK(klass->GetIfTable() == NULL);
+    DCHECK(klass->GetIfTable() == nullptr);
     return true;
   }
   if (ifcount == super_ifcount) {
@@ -4745,8 +4782,8 @@
     }
   }
   StackHandleScope<5> hs(self);
-  Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
-  if (UNLIKELY(iftable.Get() == NULL)) {
+  MutableHandle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
+  if (UNLIKELY(iftable.Get() == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
     return false;
   }
@@ -4757,13 +4794,14 @@
       iftable->SetInterface(i, super_interface);
     }
   }
+  self->AllowThreadSuspension();
   // Flatten the interface inheritance hierarchy.
   size_t idx = super_ifcount;
   for (size_t i = 0; i < num_interfaces; i++) {
     mirror::Class* interface =
         interfaces.Get() == nullptr ? mirror::Class::GetDirectInterface(self, klass, i) :
             interfaces->Get(i);
-    DCHECK(interface != NULL);
+    DCHECK(interface != nullptr);
     if (!interface->IsInterface()) {
       std::string temp;
       ThrowIncompatibleClassChangeError(klass.Get(), "Class %s implements non-interface class %s",
@@ -4800,10 +4838,11 @@
       }
     }
   }
+  self->AllowThreadSuspension();
   // Shrink iftable in case duplicates were found
   if (idx < ifcount) {
     iftable.Assign(down_cast<mirror::IfTable*>(iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
-    if (UNLIKELY(iftable.Get() == NULL)) {
+    if (UNLIKELY(iftable.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return false;
     }
@@ -4817,16 +4856,17 @@
   if (klass->IsInterface()) {
     return true;
   }
+  self->AllowThreadSuspension();
   // Allocate imtable
   bool imtable_changed = false;
   Handle<mirror::ObjectArray<mirror::ArtMethod>> imtable(
       hs.NewHandle(AllocArtMethodArray(self, mirror::Class::kImtSize)));
-  if (UNLIKELY(imtable.Get() == NULL)) {
+  if (UNLIKELY(imtable.Get() == nullptr)) {
     CHECK(self->IsExceptionPending());  // OOME.
     return false;
   }
-  MethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-  MethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+  MutableMethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+  MutableMethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
   size_t max_miranda_methods = 0;  // The max size of miranda_list.
   for (size_t i = 0; i < ifcount; ++i) {
     max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods();
@@ -4835,6 +4875,7 @@
       miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
   size_t miranda_list_size = 0;  // The current size of miranda_list.
   for (size_t i = 0; i < ifcount; ++i) {
+    self->AllowThreadSuspension();
     size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
     if (num_methods > 0) {
       StackHandleScope<2> hs(self);
@@ -4872,7 +4913,7 @@
             method_array->Set<false>(j, vtable_mh.Get());
             // Place method in imt if entry is empty, place conflict otherwise.
             uint32_t imt_index = interface_mh.Get()->GetDexMethodIndex() % mirror::Class::kImtSize;
-            if (imtable->Get(imt_index) == NULL) {
+            if (imtable->Get(imt_index) == nullptr) {
               imtable->Set<false>(imt_index, vtable_mh.Get());
               imtable_changed = true;
             } else {
@@ -4893,10 +4934,10 @@
               break;
             }
           }
-          if (miranda_method.Get() == NULL) {
+          if (miranda_method.Get() == nullptr) {
             // Point the interface table at a phantom slot.
             miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_mh.Get()->Clone(self)));
-            if (UNLIKELY(miranda_method.Get() == NULL)) {
+            if (UNLIKELY(miranda_method.Get() == nullptr)) {
               CHECK(self->IsExceptionPending());  // OOME.
               return false;
             }
@@ -4912,7 +4953,7 @@
     // Fill in empty entries in interface method table with conflict.
     mirror::ArtMethod* imt_conflict_method = runtime->GetImtConflictMethod();
     for (size_t i = 0; i < mirror::Class::kImtSize; i++) {
-      if (imtable->Get(i) == NULL) {
+      if (imtable->Get(i) == nullptr) {
         imtable->Set<false>(i, imt_conflict_method);
       }
     }
@@ -4927,20 +4968,20 @@
     } else {
       virtuals = klass->GetVirtualMethods()->CopyOf(self, new_method_count);
     }
-    if (UNLIKELY(virtuals == NULL)) {
+    if (UNLIKELY(virtuals == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return false;
     }
     klass->SetVirtualMethods(virtuals);
 
     StackHandleScope<1> hs(self);
-    Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
+    MutableHandle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
         hs.NewHandle(klass->GetVTableDuringLinking()));
-    CHECK(vtable.Get() != NULL);
+    CHECK(vtable.Get() != nullptr);
     int old_vtable_count = vtable->GetLength();
     int new_vtable_count = old_vtable_count + miranda_list_size;
     vtable.Assign(vtable->CopyOf(self, new_vtable_count));
-    if (UNLIKELY(vtable.Get() == NULL)) {
+    if (UNLIKELY(vtable.Get() == nullptr)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return false;
     }
@@ -4958,22 +4999,22 @@
 
   mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking();
   for (int i = 0; i < vtable->GetLength(); ++i) {
-    CHECK(vtable->Get(i) != NULL);
+    CHECK(vtable->Get(i) != nullptr);
   }
 
-//  klass->DumpClass(std::cerr, Class::kDumpClassFullDetail);
+  self->AllowThreadSuspension();
 
   return true;
 }
 
-bool ClassLinker::LinkInstanceFields(ConstHandle<mirror::Class> klass) {
-  CHECK(klass.Get() != NULL);
-  return LinkFields(klass, false, nullptr);
+bool ClassLinker::LinkInstanceFields(Thread* self, Handle<mirror::Class> klass) {
+  CHECK(klass.Get() != nullptr);
+  return LinkFields(self, klass, false, nullptr);
 }
 
-bool ClassLinker::LinkStaticFields(ConstHandle<mirror::Class> klass, size_t* class_size) {
-  CHECK(klass.Get() != NULL);
-  return LinkFields(klass, true, class_size);
+bool ClassLinker::LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size) {
+  CHECK(klass.Get() != nullptr);
+  return LinkFields(self, klass, true, class_size);
 }
 
 struct LinkFieldsComparator {
@@ -5003,7 +5044,9 @@
   }
 };
 
-bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, size_t* class_size) {
+bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static,
+                             size_t* class_size) {
+  self->AllowThreadSuspension();
   size_t num_fields =
       is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
 
@@ -5022,23 +5065,23 @@
     field_offset = MemberOffset(base);
   } else {
     mirror::Class* super_class = klass->GetSuperClass();
-    if (super_class != NULL) {
+    if (super_class != nullptr) {
       CHECK(super_class->IsResolved())
           << PrettyClass(klass.Get()) << " " << PrettyClass(super_class);
       field_offset = MemberOffset(super_class->GetObjectSize());
     }
   }
 
-  CHECK_EQ(num_fields == 0, fields == NULL) << PrettyClass(klass.Get());
+  CHECK_EQ(num_fields == 0, fields == nullptr) << PrettyClass(klass.Get());
 
   // We want a relatively stable order so that adding new fields
   // minimizes disruption of the C++ versions of classes such as Class and Method.
   std::deque<mirror::ArtField*> grouped_and_sorted_fields;
-  const char* old_no_suspend_cause  = Thread::Current()->StartAssertNoThreadSuspension(
+  const char* old_no_suspend_cause = self->StartAssertNoThreadSuspension(
       "Naked ArtField references in deque");
   for (size_t i = 0; i < num_fields; i++) {
     mirror::ArtField* f = fields->Get(i);
-    CHECK(f != NULL) << PrettyClass(klass.Get());
+    CHECK(f != nullptr) << PrettyClass(klass.Get());
     grouped_and_sorted_fields.push_back(f);
   }
   std::sort(grouped_and_sorted_fields.begin(), grouped_and_sorted_fields.end(),
@@ -5080,8 +5123,7 @@
                     fields, &grouped_and_sorted_fields, &gaps);
   CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size() <<
       " fields.";
-
-  Thread::Current()->EndAssertNoThreadSuspension(old_no_suspend_cause);
+  self->EndAssertNoThreadSuspension(old_no_suspend_cause);
 
   // We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
   if (!is_static && klass->DescriptorEquals("Ljava/lang/ref/Reference;")) {
@@ -5148,10 +5190,10 @@
 
 //  Set the bitmap of reference offsets, refOffsets, from the ifields
 //  list.
-void ClassLinker::CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass) {
+void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
   uint32_t reference_offsets = 0;
   mirror::Class* super_class = klass->GetSuperClass();
-  if (super_class != NULL) {
+  if (super_class != nullptr) {
     reference_offsets = super_class->GetReferenceInstanceOffsets();
     // If our superclass overflowed, we don't stand a chance.
     if (reference_offsets == CLASS_WALK_SUPER) {
@@ -5162,7 +5204,7 @@
   CreateReferenceOffsets(klass, reference_offsets);
 }
 
-void ClassLinker::CreateReferenceOffsets(ConstHandle<mirror::Class> klass,
+void ClassLinker::CreateReferenceOffsets(Handle<mirror::Class> klass,
                                          uint32_t reference_offsets) {
   size_t num_reference_fields = klass->NumReferenceInstanceFieldsDuringLinking();
   mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
@@ -5187,10 +5229,10 @@
 }
 
 mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
-                                           ConstHandle<mirror::DexCache> dex_cache) {
+                                           Handle<mirror::DexCache> dex_cache) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
-  if (resolved != NULL) {
+  if (resolved != nullptr) {
     return resolved;
   }
   uint32_t utf16_length;
@@ -5209,15 +5251,15 @@
 }
 
 mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
-                                        ConstHandle<mirror::DexCache> dex_cache,
-                                        ConstHandle<mirror::ClassLoader> class_loader) {
-  DCHECK(dex_cache.Get() != NULL);
+                                        Handle<mirror::DexCache> dex_cache,
+                                        Handle<mirror::ClassLoader> class_loader) {
+  DCHECK(dex_cache.Get() != nullptr);
   mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
-  if (resolved == NULL) {
+  if (resolved == nullptr) {
     Thread* self = Thread::Current();
     const char* descriptor = dex_file.StringByTypeIdx(type_idx);
     resolved = FindClass(self, descriptor, class_loader);
-    if (resolved != NULL) {
+    if (resolved != nullptr) {
       // TODO: we used to throw here if resolved's class loader was not the
       //       boot class loader. This was to permit different classes with the
       //       same name to be loaded simultaneously by different loaders
@@ -5229,22 +5271,22 @@
       StackHandleScope<1> hs(self);
       Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
       if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) {
-        DCHECK(resolved == NULL);  // No Handle needed to preserve resolved.
+        DCHECK(resolved == nullptr);  // No Handle needed to preserve resolved.
         self->ClearException();
         ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor);
-        self->GetException(NULL)->SetCause(cause.Get());
+        self->GetException(nullptr)->SetCause(cause.Get());
       }
     }
   }
-  DCHECK((resolved == NULL) || resolved->IsResolved() || resolved->IsErroneous())
+  DCHECK((resolved == nullptr) || resolved->IsResolved() || resolved->IsErroneous())
           << PrettyDescriptor(resolved) << " " << resolved->GetStatus();
   return resolved;
 }
 
 mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
-                                              ConstHandle<mirror::DexCache> dex_cache,
-                                              ConstHandle<mirror::ClassLoader> class_loader,
-                                              ConstHandle<mirror::ArtMethod> referrer,
+                                              Handle<mirror::DexCache> dex_cache,
+                                              Handle<mirror::ClassLoader> class_loader,
+                                              Handle<mirror::ArtMethod> referrer,
                                               InvokeType type) {
   DCHECK(dex_cache.Get() != nullptr);
   // Check for hit in the dex cache.
@@ -5395,8 +5437,8 @@
 }
 
 mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
-                                            ConstHandle<mirror::DexCache> dex_cache,
-                                            ConstHandle<mirror::ClassLoader> class_loader,
+                                            Handle<mirror::DexCache> dex_cache,
+                                            Handle<mirror::ClassLoader> class_loader,
                                             bool is_static) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
@@ -5429,7 +5471,7 @@
     }
     if (resolved == nullptr) {
       ThrowNoSuchFieldError(is_static ? "static " : "instance ", klass.Get(), type, name);
-      return NULL;
+      return nullptr;
     }
   }
   dex_cache->SetResolvedField(field_idx, resolved);
@@ -5438,8 +5480,8 @@
 
 mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
                                                uint32_t field_idx,
-                                               ConstHandle<mirror::DexCache> dex_cache,
-                                               ConstHandle<mirror::ClassLoader> class_loader) {
+                                               Handle<mirror::DexCache> dex_cache,
+                                               Handle<mirror::ClassLoader> class_loader) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
   if (resolved != nullptr) {
@@ -5450,16 +5492,16 @@
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> klass(
       hs.NewHandle(ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader)));
-  if (klass.Get() == NULL) {
+  if (klass.Get() == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
-    return NULL;
+    return nullptr;
   }
 
   StringPiece name(dex_file.StringDataByIdx(field_id.name_idx_));
   StringPiece type(dex_file.StringDataByIdx(
       dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
   resolved = mirror::Class::FindField(self, klass, name, type);
-  if (resolved != NULL) {
+  if (resolved != nullptr) {
     dex_cache->SetResolvedField(field_idx, resolved);
   } else {
     ThrowNoSuchFieldError("", klass.Get(), type, name);
@@ -5497,10 +5539,12 @@
 }
 
 void ClassLinker::DumpForSigQuit(std::ostream& os) {
+  Thread* self = Thread::Current();
   if (dex_cache_image_class_lookup_required_) {
+    ScopedObjectAccess soa(self);
     MoveImageClassesToClassTable();
   }
-  ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
   os << "Loaded classes: " << class_table_.size() << " allocated classes\n";
 }
 
@@ -5523,12 +5567,12 @@
 void ClassLinker::SetClassRoot(ClassRoot class_root, mirror::Class* klass) {
   DCHECK(!init_done_);
 
-  DCHECK(klass != NULL);
-  DCHECK(klass->GetClassLoader() == NULL);
+  DCHECK(klass != nullptr);
+  DCHECK(klass->GetClassLoader() == nullptr);
 
   mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
-  DCHECK(class_roots != NULL);
-  DCHECK(class_roots->Get(class_root) == NULL);
+  DCHECK(class_roots != nullptr);
+  DCHECK(class_roots->Get(class_root) == nullptr);
   class_roots->Set<false>(class_root, klass);
 }
 
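Note for reviewers: the dominant mechanical change above is the ConstHandle -> Handle
rename, with MutableHandle introduced at exactly the call sites that rebind a handle in
place (component_type.Assign(...) in CreateArrayClass, klass.Assign(new_class) in
CreateProxyClass, and the vtable/iftable Assign() calls in the linking paths). A minimal
sketch of that contract, using a plain pointer slot as a stand-in for ART's GC-visible
HandleScope entry:

    // Sketch only: Handle is a read-only view of a slot; MutableHandle adds Assign().
    // The slot representation here is an illustration, not ART's implementation.
    #include <cassert>

    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T* Get() const { return *slot_; }
     protected:
      T** slot_;  // stand-in for a HandleScope entry scanned by the GC
    };

    template <typename T>
    class MutableHandle : public Handle<T> {
     public:
      explicit MutableHandle(T** slot) : Handle<T>(slot) {}
      void Assign(T* new_value) { *this->slot_ = new_value; }  // rebind in place
    };

    struct Class { int id; };

    int main() {
      Class a{1}, b{2};
      Class* slot = &a;
      MutableHandle<Class> h(&slot);
      assert(h.Get()->id == 1);
      h.Assign(&b);  // only a MutableHandle may repoint the slot
      assert(h.Get()->id == 2);
      return 0;
    }

Passing plain Handle keeps callees from silently repointing a caller's handle; only loops
that genuinely replace the object (e.g. after vtable->CopyOf) need the mutable flavor.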
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 158816d..3ea74e0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -48,7 +48,7 @@
   class StackTraceElement;
 }  // namespace mirror
 
-template<class T> class ConstHandle;
+template<class T> class Handle;
 class InternTable;
 template<class T> class ObjectLock;
 class ScopedObjectAccessAlreadyRunnable;
@@ -73,7 +73,13 @@
   // Finds a class by its descriptor, loading it if necessary.
   // If class_loader is null, searches boot_class_path_.
   mirror::Class* FindClass(Thread* self, const char* descriptor,
-                           ConstHandle<mirror::ClassLoader> class_loader)
+                           Handle<mirror::ClassLoader> class_loader)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Find a class in the path class loader, loading it if necessary.
+  mirror::Class* FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                            Thread* self, const char* descriptor,
+                                            Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds a class by its descriptor using the "system" class loader, i.e. by searching the
@@ -86,17 +92,20 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the class linker is initialized.
-  bool IsInitialized() const;
+  bool IsInitialized() const {
+    return init_done_;
+  }
 
   // Define a new class based on a ClassDef from a DexFile
-  mirror::Class* DefineClass(const char* descriptor,
-                             ConstHandle<mirror::ClassLoader> class_loader,
+  mirror::Class* DefineClass(Thread* self, const char* descriptor,
+                             Handle<mirror::ClassLoader> class_loader,
                              const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds a class by its descriptor, returning NULL if it wasn't loaded
   // by the given 'class_loader'.
-  mirror::Class* LookupClass(const char* descriptor, const mirror::ClassLoader* class_loader)
+  mirror::Class* LookupClass(Thread* self, const char* descriptor,
+                             const mirror::ClassLoader* class_loader)
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -118,8 +127,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
 
   size_t NumLoadedClasses()
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
@@ -134,7 +142,7 @@
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache.
   mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
-                                ConstHandle<mirror::DexCache> dex_cache)
+                                Handle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a Type with the given index from the DexFile, storing the
@@ -157,8 +165,8 @@
   // type, since it may be referenced from but not contained within
   // the given DexFile.
   mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
-                             ConstHandle<mirror::DexCache> dex_cache,
-                             ConstHandle<mirror::ClassLoader> class_loader)
+                             Handle<mirror::DexCache> dex_cache,
+                             Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a method with a given ID from the DexFile, storing the
@@ -168,9 +176,9 @@
   // virtual method.
   mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
                                    uint32_t method_idx,
-                                   ConstHandle<mirror::DexCache> dex_cache,
-                                   ConstHandle<mirror::ClassLoader> class_loader,
-                                   ConstHandle<mirror::ArtMethod> referrer,
+                                   Handle<mirror::DexCache> dex_cache,
+                                   Handle<mirror::ClassLoader> class_loader,
+                                   Handle<mirror::ArtMethod> referrer,
                                    InvokeType type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -194,8 +202,8 @@
   // field.
   mirror::ArtField* ResolveField(const DexFile& dex_file,
                                  uint32_t field_idx,
-                                 ConstHandle<mirror::DexCache> dex_cache,
-                                 ConstHandle<mirror::ClassLoader> class_loader,
+                                 Handle<mirror::DexCache> dex_cache,
+                                 Handle<mirror::ClassLoader> class_loader,
                                  bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -204,8 +212,8 @@
   // in ResolveType. No is_static argument is provided so that Java
   // field resolution semantics are followed.
   mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
-                                    ConstHandle<mirror::DexCache> dex_cache,
-                                    ConstHandle<mirror::ClassLoader> class_loader)
+                                    Handle<mirror::DexCache> dex_cache,
+                                    Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get shorty from method index without resolution. Used to do handlerization.
@@ -215,7 +223,8 @@
   // Returns true on success, false if there's an exception pending.
   // can_init_fields and can_init_parents let the compiler attempt to init a class under the
   // restriction that no <clinit> execution is possible.
-  bool EnsureInitialized(ConstHandle<mirror::Class> c, bool can_init_fields, bool can_init_parents)
+  bool EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields,
+                         bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Initializes classes that have instances in the image but that have
@@ -225,7 +234,7 @@
   void RegisterDexFile(const DexFile& dex_file)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void RegisterDexFile(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
+  void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -317,12 +326,13 @@
                                                                               size_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void VerifyClass(ConstHandle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VerifyClass(Thread* self, Handle<mirror::Class> klass)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
                                mirror::Class::Status& oat_file_class_status)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
-                                         ConstHandle<mirror::Class> klass)
+                                         Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -435,12 +445,12 @@
 
 
   mirror::Class* CreateArrayClass(Thread* self, const char* descriptor,
-                                  ConstHandle<mirror::ClassLoader> class_loader)
+                                  Handle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void AppendToBootClassPath(const DexFile& dex_file)
+  void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AppendToBootClassPath(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
+  void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Precomputes size needed for Class, in the case of a non-temporary class this size must be
@@ -448,25 +458,21 @@
   uint32_t SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
                                             const DexFile::ClassDef& dex_class_def);
 
-  void LoadClass(const DexFile& dex_file,
-                 const DexFile::ClassDef& dex_class_def,
-                 ConstHandle<mirror::Class> klass,
-                 mirror::ClassLoader* class_loader)
+  void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
+                 Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void LoadClassMembers(const DexFile& dex_file,
-                        const byte* class_data,
-                        ConstHandle<mirror::Class> klass,
-                        mirror::ClassLoader* class_loader,
+  void LoadClassMembers(Thread* self, const DexFile& dex_file, const byte* class_data,
+                        Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
                         const OatFile::OatClass* oat_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
-                 ConstHandle<mirror::Class> klass, ConstHandle<mirror::ArtField> dst)
+                 Handle<mirror::Class> klass, Handle<mirror::ArtField> dst)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
                                 const ClassDataItemIterator& dex_method,
-                                ConstHandle<mirror::Class> klass)
+                                Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -476,23 +482,23 @@
   OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void RegisterDexFileLocked(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
+  void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
       EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsDexFileRegisteredLocked(const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
 
-  bool InitializeClass(ConstHandle<mirror::Class> klass, bool can_run_clinit,
+  bool InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_run_clinit,
                        bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool WaitForInitializeClass(ConstHandle<mirror::Class> klass, Thread* self,
+  bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
                               ObjectLock<mirror::Class>& lock);
-  bool ValidateSuperClassDescriptors(ConstHandle<mirror::Class> klass)
+  bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
-                                                ConstHandle<mirror::ClassLoader> class_loader1,
-                                                ConstHandle<mirror::ClassLoader> class_loader2)
+                                                Handle<mirror::ClassLoader> class_loader1,
+                                                Handle<mirror::ClassLoader> class_loader2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method,
@@ -500,40 +506,40 @@
                                                      mirror::Class* klass2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkClass(Thread* self, const char* descriptor, ConstHandle<mirror::Class> klass,
-                 ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces,
+  bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
+                 Handle<mirror::ObjectArray<mirror::Class>> interfaces,
                  mirror::Class** new_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkSuperClass(ConstHandle<mirror::Class> klass)
+  bool LinkSuperClass(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LoadSuperAndInterfaces(ConstHandle<mirror::Class> klass, const DexFile& dex_file)
+  bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkMethods(Thread* self, ConstHandle<mirror::Class> klass,
-                   ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces)
+  bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
+                   Handle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkVirtualMethods(Thread* self, ConstHandle<mirror::Class> klass)
+  bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkInterfaceMethods(ConstHandle<mirror::Class> klass,
-                            ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces)
+  bool LinkInterfaceMethods(Handle<mirror::Class> klass,
+                            Handle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkStaticFields(ConstHandle<mirror::Class> klass, size_t* class_size)
+  bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkInstanceFields(ConstHandle<mirror::Class> klass)
+  bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkFields(ConstHandle<mirror::Class> klass, bool is_static, size_t* class_size)
+  bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void LinkCode(ConstHandle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
+  void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
                 const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass)
+  void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CreateReferenceOffsets(ConstHandle<mirror::Class> klass, uint32_t reference_offsets)
+  void CreateReferenceOffsets(Handle<mirror::Class> klass, uint32_t reference_offsets)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // For use by ImageWriter to find DexCaches for its roots
@@ -625,11 +631,16 @@
                             const uint32_t* dex_location_checksum,
                             std::string* error_msg);
 
-  mirror::ArtMethod* CreateProxyConstructor(Thread* self, ConstHandle<mirror::Class> klass,
+  mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
                                             mirror::Class* proxy_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::ArtMethod* CreateProxyMethod(Thread* self, ConstHandle<mirror::Class> klass,
-                                       ConstHandle<mirror::ArtMethod> prototype)
+  mirror::ArtMethod* CreateProxyMethod(Thread* self, Handle<mirror::Class> klass,
+                                       Handle<mirror::ArtMethod> prototype)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverified bit on the
+  // class access flags to determine whether this has been done before.
+  void EnsurePreverifiedMethods(Handle<mirror::Class> c)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::Class* LookupClassFromTableLocked(const char* descriptor,
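
A note on the recurring signature change in this file: the ConstHandle parameters become
Handle, and several entry points gain an explicit Thread* self. Threading the current-thread
pointer through the call chain means callees stop re-resolving it via a thread-local lookup.
A minimal sketch of that shape, with invented names rather than the real ART types:

    struct ExecContext {
      int id;
    };

    thread_local ExecContext g_context{0};

    // Before: every callee pays for a thread-local access.
    int CalleeImplicit() { return g_context.id; }

    // After: the caller resolves the context once and passes it down.
    int CalleeExplicit(ExecContext* self) { return self->id; }

    int EntryPoint() {
      ExecContext* self = &g_context;  // One lookup at the boundary.
      int sum = 0;
      for (int i = 0; i < 3; ++i) {
        sum += CalleeExplicit(self);   // No further TLS reads in the loop.
      }
      return sum;
    }
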
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index b250918..613ac66 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -278,10 +278,11 @@
     // Confirm that all instances fields are packed together at the start
     EXPECT_GE(klass->NumInstanceFields(), klass->NumReferenceInstanceFields());
     StackHandleScope<1> hs(Thread::Current());
-    FieldHelper fh(hs.NewHandle<mirror::ArtField>(nullptr));
+    MutableHandle<mirror::ArtField> fhandle = hs.NewHandle<mirror::ArtField>(nullptr);
     for (size_t i = 0; i < klass->NumReferenceInstanceFields(); i++) {
       mirror::ArtField* field = klass->GetInstanceField(i);
-      fh.ChangeField(field);
+      fhandle.Assign(field);
+      FieldHelper fh(fhandle);
       ASSERT_TRUE(!field->IsPrimitiveType());
       mirror::Class* field_type = fh.GetType();
       ASSERT_TRUE(field_type != NULL);
@@ -289,7 +290,8 @@
     }
     for (size_t i = klass->NumReferenceInstanceFields(); i < klass->NumInstanceFields(); i++) {
       mirror::ArtField* field = klass->GetInstanceField(i);
-      fh.ChangeField(field);
+      fhandle.Assign(field);
+      FieldHelper fh(fhandle);
       mirror::Class* field_type = fh.GetType();
       ASSERT_TRUE(field_type != NULL);
       if (!fh.GetField()->IsPrimitiveType() || !field_type->IsPrimitive()) {
@@ -855,7 +857,7 @@
       hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
   Handle<mirror::Class> statics(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
-  class_linker_->EnsureInitialized(statics, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), statics, true, true);
 
   // Static final primitives that are initialized by a compile-time constant
   // expression resolve to a copy of a constant value from the constant pool.
@@ -1091,4 +1093,65 @@
   EXPECT_EQ(c->GetClassSize(), mirror::ArtMethod::ClassSize());
 }
 
+static void CheckMethod(mirror::ArtMethod* method, bool verified)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (!method->IsNative() && !method->IsAbstract()) {
+    EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified)
+        << PrettyMethod(method, true);
+  }
+}
+
+static void CheckPreverified(mirror::Class* c, bool preverified)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified)
+      << "Class " << PrettyClass(c) << " not as expected";
+  for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+    CheckMethod(c->GetDirectMethod(i), preverified);
+  }
+  for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+    CheckMethod(c->GetVirtualMethod(i), preverified);
+  }
+}
+
+TEST_F(ClassLinkerTest, Preverified_InitializedBoot) {
+  ScopedObjectAccess soa(Thread::Current());
+
+  mirror::Class* JavaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
+  ASSERT_TRUE(JavaLangObject != NULL);
+  EXPECT_TRUE(JavaLangObject->IsInitialized()) << "Not testing already initialized class from the "
+                                                  "core";
+  CheckPreverified(JavaLangObject, true);
+}
+
+TEST_F(ClassLinkerTest, Preverified_UninitializedBoot) {
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<1> hs(soa.Self());
+
+  Handle<mirror::Class> security_manager(hs.NewHandle(class_linker_->FindSystemClass(
+      soa.Self(), "Ljava/lang/SecurityManager;")));
+  EXPECT_FALSE(security_manager->IsInitialized()) << "Not testing uninitialized class from the "
+                                                     "core";
+
+  CheckPreverified(security_manager.Get(), false);
+
+  class_linker_->EnsureInitialized(soa.Self(), security_manager, true, true);
+  CheckPreverified(security_manager.Get(), true);
+}
+
+TEST_F(ClassLinkerTest, Preverified_App) {
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<2> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("Statics"))));
+  Handle<mirror::Class> statics(
+      hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStatics;", class_loader)));
+
+  CheckPreverified(statics.Get(), false);
+
+  class_linker_->EnsureInitialized(soa.Self(), statics, true, true);
+  CheckPreverified(statics.Get(), true);
+}
+
 }  // namespace art
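
The three tests added above exercise the kAccPreverified flow end to end: an already
initialized boot class carries the bit, while an uninitialized boot class and an app class
only gain it once EnsureInitialized runs. The underlying idempotency trick, per the new
EnsurePreverifiedMethods comment, is a "done" bit on the class that guards the per-method
stamping. A generic sketch with a placeholder flag value, not the real constant:

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kDoneFlag = 0x00080000;  // Placeholder value for the sketch.

    struct Member { uint32_t access_flags = 0; };
    struct Owner {
      uint32_t access_flags = 0;
      std::vector<Member> members;
    };

    void EnsureFlagged(Owner* o) {
      if ((o->access_flags & kDoneFlag) != 0) {
        return;  // Already stamped; the owner bit makes re-runs cheap.
      }
      for (Member& m : o->members) {
        m.access_flags |= kDoneFlag;
      }
      o->access_flags |= kDoneFlag;  // Mark the whole owner as processed.
    }
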
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 488e6e7..aced954 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -311,7 +311,7 @@
 static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
 static Dbg::HpsgWhat gDdmNhsgWhat;
 
-static ObjectRegistry* gRegistry = nullptr;
+ObjectRegistry* Dbg::gRegistry = nullptr;
 
 // Recent allocation tracking.
 AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
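The first debugger.cc change promotes gRegistry from a file-static global to a static member
of Dbg with a GetObjectRegistry() accessor (see the debugger.h hunks further down), which is
what lets code outside this translation unit reach the registry in the later hunks. The shape
of that refactor, with invented names:

    class Registry { /* ... */ };

    class DebuggerFacade {
     public:
      static Registry* GetRegistry() { return registry_; }
     private:
      static Registry* registry_;  // Was a file-static global in the .cc.
    };

    Registry* DebuggerFacade::registry_ = nullptr;
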
@@ -401,7 +401,7 @@
 
 static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, error);
+  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
   if (o == nullptr) {
     *error = JDWP::ERR_INVALID_OBJECT;
     return nullptr;
@@ -416,7 +416,7 @@
 
 static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, error);
+  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
   if (o == nullptr) {
     *error = JDWP::ERR_INVALID_OBJECT;
     return nullptr;
@@ -434,7 +434,7 @@
     EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
     LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id, error);
+  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
   if (thread_peer == nullptr) {
     // This isn't even an object.
     *error = JDWP::ERR_INVALID_OBJECT;
@@ -511,8 +511,7 @@
  *
  * Null objects are tagged JT_OBJECT.
  */
-static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
   return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
 }
 
@@ -842,8 +841,13 @@
   if (!o->IsClass()) {
     return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
   }
+  return GetClassName(o->AsClass());
+}
+
+std::string Dbg::GetClassName(mirror::Class* klass) {
+  DCHECK(klass != nullptr);
   std::string temp;
-  return DescriptorToName(o->AsClass()->GetDescriptor(&temp));
+  return DescriptorToName(klass->GetDescriptor(&temp));
 }
 
 JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
@@ -1108,8 +1112,7 @@
   gRegistry->DisposeObject(object_id, reference_count);
 }
 
-static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
   DCHECK(klass != nullptr);
   if (klass->IsArrayClass()) {
     return JDWP::TT_ARRAY;
@@ -1158,7 +1161,8 @@
   };
 
   ClassListCreator clc(classes);
-  Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc);
+  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
+                                                                       &clc);
 }
 
 JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
@@ -1421,17 +1425,7 @@
   return JDWP::ERR_NONE;
 }
 
-bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
-  JDWP::JdwpError error;
-  mirror::Class* c1 = DecodeClass(instance_class_id, &error);
-  CHECK(c1 != nullptr);
-  mirror::Class* c2 = DecodeClass(class_id, &error);
-  CHECK(c2 != nullptr);
-  return c2->IsAssignableFrom(c1);
-}
-
-static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
   CHECK(!kMovingFields);
   return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
 }
@@ -1454,10 +1448,49 @@
   return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
 }
 
-static void SetLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
+bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
+  CHECK(event_thread != nullptr);
+  JDWP::JdwpError error;
+  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id,
+                                                                         &error);
+  return expected_thread_peer == event_thread->GetPeer();
+}
+
+bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
+                        const JDWP::EventLocation& event_location) {
+  if (expected_location.dex_pc != event_location.dex_pc) {
+    return false;
+  }
+  mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
+  return m == event_location.method;
+}
+
+bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
+  JDWP::JdwpError error;
+  mirror::Class* expected_class = DecodeClass(class_id, &error);
+  CHECK(expected_class != nullptr);
+  return expected_class->IsAssignableFrom(event_class);
+}
+
+bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
+                     mirror::ArtField* event_field) {
+  mirror::ArtField* expected_field = FromFieldId(expected_field_id);
+  if (expected_field != event_field) {
+    return false;
+  }
+  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
+}
+
+bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
+  JDWP::JdwpError error;
+  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
+  return modifier_instance == event_instance;
+}
+
+void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (m == nullptr) {
-    memset(&location, 0, sizeof(location));
+    memset(location, 0, sizeof(*location));
   } else {
     mirror::Class* c = m->GetDeclaringClass();
     location->type_tag = GetTypeTag(c);
@@ -1756,7 +1789,7 @@
     return error;
   }
 
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
+  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
   if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
     return JDWP::ERR_INVALID_OBJECT;
   }
@@ -1818,7 +1851,7 @@
                                          uint64_t value, int width, bool is_static)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
+  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
   if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
     return JDWP::ERR_INVALID_OBJECT;
   }
@@ -1852,7 +1885,7 @@
       f->Set32<false>(o, value);
     }
   } else {
-    mirror::Object* v = gRegistry->Get<mirror::Object*>(value, &error);
+    mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
     if (error != JDWP::ERR_NONE) {
       return JDWP::ERR_INVALID_OBJECT;
     }
@@ -1885,11 +1918,25 @@
   return SetFieldValueImpl(0, field_id, value, width, true);
 }
 
-std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
+JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
   JDWP::JdwpError error;
-  mirror::String* s = gRegistry->Get<mirror::String*>(string_id, &error);
-  CHECK(s != nullptr) << error;
-  return s->ToModifiedUtf8();
+  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
+  if (error != JDWP::ERR_NONE) {
+    return error;
+  }
+  if (obj == nullptr) {
+    return JDWP::ERR_INVALID_OBJECT;
+  }
+  {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
+    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
+      // This isn't a string.
+      return JDWP::ERR_INVALID_STRING;
+    }
+  }
+  *str = obj->AsString()->ToModifiedUtf8();
+  return JDWP::ERR_NONE;
 }
 
 void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
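
Note the error-handling upgrade in StringToUtf8 above: the old code CHECK-crashed on a bad
id, while the new version reports ERR_INVALID_OBJECT for unknown ids and ERR_INVALID_STRING
for non-string objects, returning the text through an out-parameter. The same
decode-validate-use shape in a self-contained sketch (invented names, std types only):

    #include <map>
    #include <string>

    enum class Err { kNone, kInvalidObject, kInvalidString };

    struct Obj { bool is_string; std::string value; };

    Err DecodeString(const std::map<int, Obj>& registry, int id, std::string* out) {
      auto it = registry.find(id);
      if (it == registry.end()) {
        return Err::kInvalidObject;   // Unknown id: reject, don't crash.
      }
      if (!it->second.is_string) {
        return Err::kInvalidString;   // Valid object, wrong type.
      }
      *out = it->second.value;        // Only now is the read safe.
      return Err::kNone;
    }
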
@@ -1938,7 +1985,7 @@
 }
 
 JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
-  ScopedObjectAccess soa(Thread::Current());
+  ScopedObjectAccessUnchecked soa(Thread::Current());
   JDWP::JdwpError error;
   mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
   if (error != JDWP::ERR_NONE) {
@@ -1969,26 +2016,55 @@
   return error;
 }
 
-std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
-  ScopedObjectAccess soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
-  CHECK(thread_group != nullptr) << error;
-  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
+static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
+                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
+                                                                                error);
+  if (*error != JDWP::ERR_NONE) {
+    return nullptr;
+  }
+  if (thread_group == nullptr) {
+    *error = JDWP::ERR_INVALID_OBJECT;
+    return nullptr;
+  }
   mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
   CHECK(c != nullptr);
+  if (!c->IsAssignableFrom(thread_group->GetClass())) {
+    // This is not a java.lang.ThreadGroup.
+    *error = JDWP::ERR_INVALID_THREAD_GROUP;
+    return nullptr;
+  }
+  *error = JDWP::ERR_NONE;
+  return thread_group;
+}
+
+JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  JDWP::JdwpError error;
+  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+  if (error != JDWP::ERR_NONE) {
+    return error;
+  }
+  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
+  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
   mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
   CHECK(f != nullptr);
   mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
   soa.Self()->EndAssertNoThreadSuspension(old_cause);
-  return s->ToModifiedUtf8();
+
+  std::string thread_group_name(s->ToModifiedUtf8());
+  expandBufAddUtf8String(pReply, thread_group_name);
+  return JDWP::ERR_NONE;
 }
 
-JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
+JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
   ScopedObjectAccessUnchecked soa(Thread::Current());
   JDWP::JdwpError error;
-  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
-  CHECK(thread_group != nullptr) << error;
+  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+  if (error != JDWP::ERR_NONE) {
+    return error;
+  }
   const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
   mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
   CHECK(c != nullptr);
@@ -1996,7 +2072,65 @@
   CHECK(f != nullptr);
   mirror::Object* parent = f->GetObject(thread_group);
   soa.Self()->EndAssertNoThreadSuspension(old_cause);
-  return gRegistry->Add(parent);
+
+  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
+  expandBufAddObjectId(pReply, parent_group_id);
+  return JDWP::ERR_NONE;
+}
+
+static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
+                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  CHECK(thread_group != nullptr);
+
+  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
+  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
+  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
+
+  // Get the array and size out of the ArrayList<ThreadGroup>...
+  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
+  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
+  mirror::ObjectArray<mirror::Object>* groups_array =
+      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
+  const int32_t size = size_field->GetInt(groups_array_list);
+
+  // Copy the first 'size' elements out of the array into the result.
+  ObjectRegistry* registry = Dbg::GetObjectRegistry();
+  for (int32_t i = 0; i < size; ++i) {
+    child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
+  }
+}
+
+JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
+                                            JDWP::ExpandBuf* pReply) {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  JDWP::JdwpError error;
+  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+  if (error != JDWP::ERR_NONE) {
+    return error;
+  }
+
+  // Add child threads.
+  {
+    std::vector<JDWP::ObjectId> child_thread_ids;
+    GetThreads(thread_group, &child_thread_ids);
+    expandBufAdd4BE(pReply, child_thread_ids.size());
+    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
+      expandBufAddObjectId(pReply, child_thread_id);
+    }
+  }
+
+  // Add child thread groups.
+  {
+    std::vector<JDWP::ObjectId> child_thread_groups_ids;
+    GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
+    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
+    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
+      expandBufAddObjectId(pReply, child_thread_group_id);
+    }
+  }
+
+  return JDWP::ERR_NONE;
 }
 
 JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
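
GetThreadGroupChildren writes its reply as two length-prefixed id lists, child threads first
and child thread groups second. A sketch of that framing, assuming JDWP's big-endian wire
format and 8-byte object ids as ART uses; the function and parameter names are invented:

    #include <cstdint>
    #include <vector>

    void AppendIdList(std::vector<uint8_t>* reply, const std::vector<uint64_t>& ids) {
      uint32_t n = static_cast<uint32_t>(ids.size());
      for (int shift = 24; shift >= 0; shift -= 8) {    // 4-byte big-endian count.
        reply->push_back(static_cast<uint8_t>(n >> shift));
      }
      for (uint64_t id : ids) {
        for (int shift = 56; shift >= 0; shift -= 8) {  // 8-byte big-endian id.
          reply->push_back(static_cast<uint8_t>(id >> shift));
        }
      }
    }
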
@@ -2006,13 +2140,6 @@
   return gRegistry->Add(group);
 }
 
-JDWP::ObjectId Dbg::GetMainThreadGroupId() {
-  ScopedObjectAccess soa(Thread::Current());
-  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
-  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
-  return gRegistry->Add(group);
-}
-
 JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
   switch (state) {
     case kBlocked:
@@ -2097,91 +2224,49 @@
   return JDWP::ERR_NONE;
 }
 
-void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids) {
-  class ThreadListVisitor {
-   public:
-    ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group,
-                      std::vector<JDWP::ObjectId>* thread_ids)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-        : soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {}
-
-    static void Visit(Thread* t, void* arg) {
-      reinterpret_cast<ThreadListVisitor*>(arg)->Visit(t);
-    }
-
-    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-    // annotalysis.
-    void Visit(Thread* t) NO_THREAD_SAFETY_ANALYSIS {
-      if (t == Dbg::GetDebugThread()) {
-        // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
-        // query all threads, so it's easier if we just don't tell them about this thread.
-        return;
-      }
-      if (t->IsStillStarting()) {
-        // This thread is being started (and has been registered in the thread list). However, it is
-        // not completely started yet so we must ignore it.
-        return;
-      }
-      mirror::Object* peer = t->GetPeer();
-      if (IsInDesiredThreadGroup(peer)) {
-        thread_ids_->push_back(gRegistry->Add(peer));
-      }
-    }
-
-   private:
-    bool IsInDesiredThreadGroup(mirror::Object* peer)
-        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-      // peer might be nullptr if the thread is still starting up.
-      if (peer == nullptr) {
-        // We can't tell the debugger about this thread yet.
-        // TODO: if we identified threads to the debugger by their Thread*
-        // rather than their peer's mirror::Object*, we could fix this.
-        // Doing so might help us report ZOMBIE threads too.
-        return false;
-      }
-      // Do we want threads from all thread groups?
-      if (desired_thread_group_ == nullptr) {
-        return true;
-      }
-      mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer);
-      return (group == desired_thread_group_);
-    }
-
-    const ScopedObjectAccessUnchecked& soa_;
-    mirror::Object* const desired_thread_group_;
-    std::vector<JDWP::ObjectId>* const thread_ids_;
-  };
-
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
-  CHECK_EQ(error, JDWP::ERR_NONE);
-  ThreadListVisitor tlv(soa, thread_group, thread_ids);
-  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
-  Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
+static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
+                                   mirror::Object* desired_thread_group, mirror::Object* peer)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Do we want threads from all thread groups?
+  if (desired_thread_group == nullptr) {
+    return true;
+  }
+  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
+  DCHECK(thread_group_field != nullptr);
+  mirror::Object* group = thread_group_field->GetObject(peer);
+  return (group == desired_thread_group);
 }
 
-void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id,
-                               std::vector<JDWP::ObjectId>* child_thread_group_ids) {
-  ScopedObjectAccess soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
-  CHECK(thread_group != nullptr) << error;
-
-  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
-  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
-  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
-
-  // Get the array and size out of the ArrayList<ThreadGroup>...
-  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
-  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
-  mirror::ObjectArray<mirror::Object>* groups_array =
-      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
-  const int32_t size = size_field->GetInt(groups_array_list);
-
-  // Copy the first 'size' elements out of the array into the result.
-  for (int32_t i = 0; i < size; ++i) {
-    child_thread_group_ids->push_back(gRegistry->Add(groups_array->Get(i)));
+void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  std::list<Thread*> all_threads_list;
+  {
+    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
+  }
+  for (Thread* t : all_threads_list) {
+    if (t == Dbg::GetDebugThread()) {
+      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
+      // query all threads, so it's easier if we just don't tell them about this thread.
+      continue;
+    }
+    if (t->IsStillStarting()) {
+      // This thread is being started (and has been registered in the thread list). However, it is
+      // not completely started yet so we must ignore it.
+      continue;
+    }
+    mirror::Object* peer = t->GetPeer();
+    if (peer == nullptr) {
+      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
+      // this thread yet.
+      // TODO: if we identified threads to the debugger by their Thread*
+      // rather than their peer's mirror::Object*, we could fix this.
+      // Doing so might help us report ZOMBIE threads too.
+      continue;
+    }
+    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
+      thread_ids->push_back(gRegistry->Add(peer));
+    }
   }
 }
 
@@ -2245,7 +2330,7 @@
       if (depth_ >= start_frame_) {
         JDWP::FrameId frame_id(GetFrameId());
         JDWP::JdwpLocation location;
-        SetLocation(&location, GetMethod(), GetDexPc());
+        SetJdwpLocation(&location, GetMethod(), GetDexPc());
         VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
         expandBufAdd8BE(buf_, frame_id);
         expandBufAddLocation(buf_, location);
@@ -2277,8 +2362,12 @@
 }
 
 JDWP::ObjectId Dbg::GetThreadSelfId() {
+  return GetThreadId(Thread::Current());
+}
+
+JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
   ScopedObjectAccessUnchecked soa(Thread::Current());
-  return gRegistry->Add(soa.Self()->GetPeer());
+  return gRegistry->Add(thread->GetPeer());
 }
 
 void Dbg::SuspendVM() {
@@ -2682,16 +2771,15 @@
   return visitor.error_;
 }
 
-JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) {
-  // If 'this_object' isn't already in the registry, we know that we're not looking for it, so
-  // there's no point adding it to the registry and burning through ids.
-  // When registering an event request with an instance filter, we've been given an existing object
-  // id so it must already be present in the registry when the event fires.
-  JDWP::ObjectId this_id = 0;
-  if (this_object != nullptr && gRegistry->Contains(this_object)) {
-    this_id = gRegistry->Add(this_object);
+static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(location != nullptr);
+  if (m == nullptr) {
+    memset(location, 0, sizeof(*location));
+  } else {
+    location->method = m;
+    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
   }
-  return this_id;
 }
 
 void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
@@ -2701,12 +2789,10 @@
   }
   DCHECK(m != nullptr);
   DCHECK_EQ(m->IsStatic(), this_object == nullptr);
-  JDWP::JdwpLocation location;
-  SetLocation(&location, m, dex_pc);
+  JDWP::EventLocation location;
+  SetEventLocation(&location, m, dex_pc);
 
-  // We need 'this' for InstanceOnly filters only.
-  JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object);
-  gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value);
+  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
 }
 
 void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
@@ -2716,14 +2802,10 @@
   }
   DCHECK(m != nullptr);
   DCHECK(f != nullptr);
-  JDWP::JdwpLocation location;
-  SetLocation(&location, m, dex_pc);
+  JDWP::EventLocation location;
+  SetEventLocation(&location, m, dex_pc);
 
-  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
-  JDWP::FieldId field_id = ToFieldId(f);
-  JDWP::ObjectId this_id = gRegistry->Add(this_object);
-
-  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false);
+  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
 }
 
 void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
@@ -2735,14 +2817,10 @@
   DCHECK(m != nullptr);
   DCHECK(f != nullptr);
   DCHECK(field_value != nullptr);
-  JDWP::JdwpLocation location;
-  SetLocation(&location, m, dex_pc);
+  JDWP::EventLocation location;
+  SetEventLocation(&location, m, dex_pc);
 
-  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
-  JDWP::FieldId field_id = ToFieldId(f);
-  JDWP::ObjectId this_id = gRegistry->Add(this_object);
-
-  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true);
+  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
 }
 
 void Dbg::PostException(const ThrowLocation& throw_location,
@@ -2751,33 +2829,20 @@
   if (!IsDebuggerActive()) {
     return;
   }
+  JDWP::EventLocation exception_throw_location;
+  SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
+  JDWP::EventLocation exception_catch_location;
+  SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
 
-  JDWP::JdwpLocation jdwp_throw_location;
-  SetLocation(&jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
-  JDWP::JdwpLocation catch_location;
-  SetLocation(&catch_location, catch_method, catch_dex_pc);
-
-  // We need 'this' for InstanceOnly filters only.
-  JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis());
-  JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
-  JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());
-
-  gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
-                            this_id);
+  gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
+                            throw_location.GetThis());
 }
 
 void Dbg::PostClassPrepare(mirror::Class* c) {
   if (!IsDebuggerActive()) {
     return;
   }
-
-  // OLD-TODO - we currently always send both "verified" and "prepared" since
-  // debuggers seem to like that.  There might be some advantage to honesty,
-  // since the class may not yet be verified.
-  int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
-  JDWP::JdwpTypeTag tag = GetTypeTag(c);
-  std::string temp;
-  gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(&temp), state);
+  gJdwpState->PostClassPrepare(c);
 }
 
 void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
@@ -3065,7 +3130,7 @@
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), dex_cache, class_loader,
+  verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
                                     &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method,
                                     m->GetAccessFlags(), false, true, false);
   // Note: we don't need to verify the method.
@@ -3194,7 +3259,7 @@
         self_suspend_ = true;
       } else {
         soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
-        jobject thread_peer = gRegistry->GetJObject(thread_id);
+        jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
         bool timed_out;
         Thread* suspended_thread;
         {
@@ -3625,7 +3690,7 @@
   }
 
   // Translate the method through the vtable, unless the debugger wants to suppress it.
-  Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
+  MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
   if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != nullptr) {
     mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
     if (actual_method != m.Get()) {
@@ -3857,9 +3922,7 @@
 
 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
   if (IsDebuggerActive()) {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
-    gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
+    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
   }
   Dbg::DdmSendThreadNotification(t, type);
 }
@@ -4337,7 +4400,7 @@
       recent_allocation_records_ = new AllocRecord[alloc_record_max_];
       CHECK(recent_allocation_records_ != nullptr);
     }
-    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
   } else {
     {
       ScopedObjectAccess soa(self);  // For type_cache_.Clear();
@@ -4353,7 +4416,7 @@
       type_cache_.Clear();
     }
     // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
-    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(false);
   }
 }
 
@@ -4389,10 +4452,7 @@
   size_t depth;
 };
 
-void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
-  Thread* self = Thread::Current();
-  CHECK(self != nullptr);
-
+void Dbg::RecordAllocation(Thread* self, mirror::Class* type, size_t byte_count) {
   MutexLock mu(self, *Locks::alloc_tracker_lock_);
   if (recent_allocation_records_ == nullptr) {
     // In the process of shutting down recording, bail.
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 3e16288..97985ec 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -43,6 +43,8 @@
 class Throwable;
 }  // namespace mirror
 class AllocRecord;
+class ObjectRegistry;
+class ScopedObjectAccessUnchecked;
 class Thread;
 class ThrowLocation;
 
@@ -250,6 +252,8 @@
    */
   static std::string GetClassName(JDWP::RefTypeId id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static std::string GetClassName(mirror::Class* klass)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
@@ -294,7 +298,24 @@
                                            JDWP::ObjectId* new_array)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static bool MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id)
+  //
+  // Event filtering.
+  //
+  static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
+                            const JDWP::EventLocation& event_location)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
+                         mirror::ArtField* event_field)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   //
@@ -381,7 +402,7 @@
   static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static std::string StringToUtf8(JDWP::ObjectId string_id)
+  static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -393,13 +414,19 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::thread_list_lock_);
   static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::thread_list_lock_);
-  static std::string GetThreadGroupName(JDWP::ObjectId thread_group_id);
-  static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId thread_group_id)
+  static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
+                                            JDWP::ExpandBuf* pReply)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
+                                              JDWP::ExpandBuf* pReply)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
+                                                JDWP::ExpandBuf* pReply)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static JDWP::ObjectId GetSystemThreadGroupId()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static JDWP::ObjectId GetMainThreadGroupId();
 
   static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
   static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
@@ -414,11 +441,9 @@
 
   // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
   // returns all threads.
-  static void GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids)
+  static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
       LOCKS_EXCLUDED(Locks::thread_list_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void GetChildThreadGroups(JDWP::ObjectId thread_group_id,
-                                   std::vector<JDWP::ObjectId>* child_thread_group_ids);
 
   static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
       LOCKS_EXCLUDED(Locks::thread_list_lock_);
@@ -427,8 +452,9 @@
       LOCKS_EXCLUDED(Locks::thread_list_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static JDWP::ObjectId GetThreadSelfId()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static JDWP::ObjectId GetThreadSelfId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static void SuspendVM()
       LOCKS_EXCLUDED(Locks::thread_list_lock_,
                      Locks::thread_suspend_count_lock_);
@@ -561,7 +587,7 @@
   /*
    * Recent allocation tracking support.
    */
-  static void RecordAllocation(mirror::Class* type, size_t byte_count)
+  static void RecordAllocation(Thread* self, mirror::Class* type, size_t byte_count)
       LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
@@ -598,6 +624,22 @@
   static void DdmSendHeapSegments(bool native)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  static ObjectRegistry* GetObjectRegistry() {
+    return gRegistry;
+  }
+
+  static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static void SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void PostThreadStartOrStop(Thread*, uint32_t)
@@ -608,9 +650,6 @@
                                 const JValue* return_value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static JDWP::ObjectId GetThisObjectIdForEvent(mirror::Object* this_object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -623,6 +662,8 @@
   static size_t alloc_record_head_ GUARDED_BY(Locks::alloc_tracker_lock_);
   static size_t alloc_record_count_ GUARDED_BY(Locks::alloc_tracker_lock_);
 
+  static ObjectRegistry* gRegistry;
+
   // Deoptimization requests to be processed each time the event list is updated. This is used when
   // registering and unregistering events so we do not deoptimize while holding the event list
   // lock.
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 8656bd3..6ef62c5 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -354,8 +354,7 @@
       proto_ids_(reinterpret_cast<const ProtoId*>(base + header_->proto_ids_off_)),
       class_defs_(reinterpret_cast<const ClassDef*>(base + header_->class_defs_off_)),
       find_class_def_misses_(0),
-      class_def_index_(nullptr),
-      build_class_def_index_mutex_("DexFile index creation mutex") {
+      class_def_index_(nullptr) {
   CHECK(begin_ != NULL) << GetLocation();
   CHECK_GT(size_, 0U) << GetLocation();
 }
@@ -444,20 +443,21 @@
   // up. This isn't done eagerly at construction as construction is not performed in multi-threaded
   // sections of tools like dex2oat. Building it lazily increases the chance of balancing out
   // which thread ends up paying the cost of constructing the index.
-  find_class_def_misses_++;
   const uint32_t kMaxFailedDexClassDefLookups = 100;
-  if (find_class_def_misses_ > kMaxFailedDexClassDefLookups) {
-    MutexLock mu(Thread::Current(), build_class_def_index_mutex_);
-    // Are we the first ones building the index?
-    if (class_def_index_.LoadSequentiallyConsistent() == nullptr) {
-      index = new Index(num_class_defs);
-      for (uint32_t i = 0; i < num_class_defs;  ++i) {
-        const ClassDef& class_def = GetClassDef(i);
-        const char* descriptor = GetClassDescriptor(class_def);
-        index->insert(std::make_pair(descriptor, &class_def));
-      }
-      class_def_index_.StoreSequentiallyConsistent(index);
+  uint32_t old_misses = find_class_def_misses_.FetchAndAddSequentiallyConsistent(1);
+  if (old_misses == kMaxFailedDexClassDefLookups) {
+    // Are we the ones moving the miss count past the max? Sanity check the index doesn't exist.
+    CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
+    // Build the index.
+    index = new Index(num_class_defs);
+    for (uint32_t i = 0; i < num_class_defs;  ++i) {
+      const ClassDef& class_def = GetClassDef(i);
+      const char* descriptor = GetClassDescriptor(class_def);
+      index->insert(std::make_pair(descriptor, &class_def));
     }
+    // Sanity check the index still doesn't exist, only 1 thread should build it.
+    CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr);
+    class_def_index_.StoreSequentiallyConsistent(index);
   }
   return nullptr;
 }
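
The dex_file.cc change above replaces the mutex-guarded index build with a pure atomic
hand-off: FetchAndAddSequentiallyConsistent returns the value before the increment, so
exactly one thread observes the threshold value and becomes the builder. The same trigger
expressed with std::atomic and invented names:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> g_misses{0};
    std::atomic<int*> g_index{nullptr};
    constexpr uint32_t kThreshold = 100;

    void RecordMiss() {
      uint32_t old_misses = g_misses.fetch_add(1);  // Returns the previous value.
      if (old_misses == kThreshold) {
        // Only the thread that crossed the threshold reaches this branch.
        int* index = new int[16]();                 // Stand-in for the real index build.
        g_index.store(index);
      }
    }
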
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 1b46a12..c160253 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -201,6 +201,24 @@
     uint32_t class_data_off_;  // file offset to class_data_item
     uint32_t static_values_off_;  // file offset to EncodedArray
 
+    // Returns the valid access flags, that is, Java modifier bits relevant to the ClassDef type
+    // (class or interface). These are all in the lower 16b and do not contain runtime flags.
+    uint32_t GetJavaAccessFlags() const {
+      // Make sure that none of our runtime-only flags are set.
+      COMPILE_ASSERT((kAccValidClassFlags & kAccJavaFlagsMask) == kAccValidClassFlags,
+                     valid_class_flags_not_subset_of_java_flags);
+      COMPILE_ASSERT((kAccValidInterfaceFlags & kAccJavaFlagsMask) == kAccValidInterfaceFlags,
+                     valid_interface_flags_not_subset_of_java_flags);
+
+      if ((access_flags_ & kAccInterface) != 0) {
+        // Interface.
+        return access_flags_ & kAccValidInterfaceFlags;
+      } else {
+        // Class.
+        return access_flags_ & kAccValidClassFlags;
+      }
+    }
+
    private:
     DISALLOW_COPY_AND_ASSIGN(ClassDef);
   };
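
The COMPILE_ASSERTs above pin a subset invariant: the valid class/interface flags must all lie inside the Java flags mask. The same check can be expressed with standard C++11 static_assert; a sketch with illustrative flag values (the real kAcc* constants live in ART's modifiers header):

  #include <cstdint>

  constexpr uint32_t kJavaFlagsMask   = 0x0000ffff;  // assumption: Java bits are the low 16
  constexpr uint32_t kValidClassFlags = 0x0000863f;  // illustrative subset, not ART's value

  static_assert((kValidClassFlags & kJavaFlagsMask) == kValidClassFlags,
                "valid class flags must be a subset of the Java flags");

  // Masking then keeps only the modifier bits meaningful for the type.
  inline uint32_t JavaAccessFlags(uint32_t access_flags) {
    return access_flags & kValidClassFlags;
  }
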
@@ -979,7 +997,6 @@
   };
   typedef std::unordered_map<const char*, const ClassDef*, UTF16HashCmp, UTF16HashCmp> Index;
   mutable Atomic<Index*> class_def_index_;
-  mutable Mutex build_class_def_index_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 };
 std::ostream& operator<<(std::ostream& os, const DexFile& dex_file);
 
@@ -1113,7 +1130,7 @@
       return last_idx_ + method_.method_idx_delta_;
     }
   }
-  uint32_t GetMemberAccessFlags() const {
+  uint32_t GetRawMemberAccessFlags() const {
     if (pos_ < EndOfInstanceFieldsPos()) {
       return field_.access_flags_;
     } else {
@@ -1121,18 +1138,30 @@
       return method_.access_flags_;
     }
   }
+  uint32_t GetFieldAccessFlags() const {
+    return GetRawMemberAccessFlags() & kAccValidFieldFlags;
+  }
+  uint32_t GetMethodAccessFlags() const {
+    return GetRawMemberAccessFlags() & kAccValidMethodFlags;
+  }
+  bool MemberIsNative() const {
+    return (GetRawMemberAccessFlags() & kAccNative) != 0;
+  }
+  bool MemberIsFinal() const {
+    return (GetRawMemberAccessFlags() & kAccFinal) != 0;
+  }
   InvokeType GetMethodInvokeType(const DexFile::ClassDef& class_def) const {
     if (HasNextDirectMethod()) {
-      if ((GetMemberAccessFlags() & kAccStatic) != 0) {
+      if ((GetRawMemberAccessFlags() & kAccStatic) != 0) {
         return kStatic;
       } else {
         return kDirect;
       }
     } else {
-      DCHECK_EQ(GetMemberAccessFlags() & kAccStatic, 0U);
+      DCHECK_EQ(GetRawMemberAccessFlags() & kAccStatic, 0U);
       if ((class_def.access_flags_ & kAccInterface) != 0) {
         return kInterface;
-      } else if ((GetMemberAccessFlags() & kAccConstructor) != 0) {
+      } else if ((GetRawMemberAccessFlags() & kAccConstructor) != 0) {
         return kSuper;
       } else {
         return kVirtual;
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 976cac9..9eba92f 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -456,9 +456,7 @@
     return false;
   }
 
-  uint32_t access_field_mask = kAccPublic | kAccPrivate | kAccProtected | kAccStatic |
-      kAccFinal | kAccVolatile | kAccTransient | kAccSynthetic | kAccEnum;
-  if (UNLIKELY((access_flags & ~access_field_mask) != 0)) {
+  if (UNLIKELY((access_flags & ~kAccJavaFlagsMask) != 0)) {
     ErrorStringPrintf("Bad class_data_item field access_flags %x", access_flags);
     return false;
   }
@@ -482,9 +480,8 @@
     return false;
   }
 
-  uint32_t access_method_mask = kAccPublic | kAccPrivate | kAccProtected | kAccStatic |
-      kAccFinal | kAccSynchronized | kAccBridge | kAccVarargs | kAccNative | kAccAbstract |
-      kAccStrict | kAccSynthetic | kAccConstructor | kAccDeclaredSynchronized;
+  constexpr uint32_t access_method_mask = kAccJavaFlagsMask | kAccConstructor |
+      kAccDeclaredSynchronized;
   if (UNLIKELY(((access_flags & ~access_method_mask) != 0) ||
                (is_synchronized && !allow_synchronized))) {
     ErrorStringPrintf("Bad class_data_item method access_flags %x", access_flags);
@@ -686,24 +683,26 @@
 bool DexFileVerifier::CheckIntraClassDataItem() {
   ClassDataItemIterator it(*dex_file_, ptr_);
 
+  // These calls use the raw access flags so that every bit stored in the dex file gets checked.
+
   for (; it.HasNextStaticField(); it.Next()) {
-    if (!CheckClassDataItemField(it.GetMemberIndex(), it.GetMemberAccessFlags(), true)) {
+    if (!CheckClassDataItemField(it.GetMemberIndex(), it.GetRawMemberAccessFlags(), true)) {
       return false;
     }
   }
   for (; it.HasNextInstanceField(); it.Next()) {
-    if (!CheckClassDataItemField(it.GetMemberIndex(), it.GetMemberAccessFlags(), false)) {
+    if (!CheckClassDataItemField(it.GetMemberIndex(), it.GetRawMemberAccessFlags(), false)) {
       return false;
     }
   }
   for (; it.HasNextDirectMethod(); it.Next()) {
-    if (!CheckClassDataItemMethod(it.GetMemberIndex(), it.GetMemberAccessFlags(),
+    if (!CheckClassDataItemMethod(it.GetMemberIndex(), it.GetRawMemberAccessFlags(),
         it.GetMethodCodeItemOffset(), true)) {
       return false;
     }
   }
   for (; it.HasNextVirtualMethod(); it.Next()) {
-    if (!CheckClassDataItemMethod(it.GetMemberIndex(), it.GetMemberAccessFlags(),
+    if (!CheckClassDataItemMethod(it.GetMemberIndex(), it.GetRawMemberAccessFlags(),
         it.GetMethodCodeItemOffset(), false)) {
       return false;
     }
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index b6810b0..b913220 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -190,7 +190,7 @@
   }
 
   // Reads an instruction out of the stream from the current address plus an offset.
-  const Instruction* RelativeAt(int32_t offset) const {
+  const Instruction* RelativeAt(int32_t offset) const WARN_UNUSED {
     return At(reinterpret_cast<const uint16_t*>(this) + offset);
   }
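
For context, WARN_UNUSED in ART's macros header expands (in the common GCC/Clang build) to the warn_unused_result attribute, so silently discarding the relocated pointer becomes a compiler warning. A minimal, self-contained sketch with a toy function:

  #define WARN_UNUSED __attribute__((warn_unused_result))

  WARN_UNUSED int ParseDigit(const char* s) { return s[0] - '0'; }

  void Use(const char* s) {
    ParseDigit(s);           // warning: ignoring return value declared warn_unused_result
    int v = ParseDigit(s);   // fine: the result is consumed
    (void)v;
  }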
 
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 64c9185..6a9976a 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -253,10 +253,10 @@
   V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
   V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
   V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
-  V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kUnknown, 0, kVerifyError) \
-  V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
-  V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
-  V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+  V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+  V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+  V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+  V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
   V(0xEF, UNUSED_EF, "unused-ef", k10x, false, kUnknown, 0, kVerifyError) \
   V(0xF0, UNUSED_F0, "unused-f0", k10x, false, kUnknown, 0, kVerifyError) \
   V(0xF1, UNUSED_F1, "unused-f1", k10x, false, kUnknown, 0, kVerifyError) \
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 529cd53..65a557b 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -202,12 +202,25 @@
     }
   }
 
-  // Either way, the program header is relative to the elf header
-  program_headers_start_ = Begin() + GetHeader().e_phoff;
+  if (program_header_only_) {
+    program_headers_start_ = Begin() + GetHeader().e_phoff;
+  } else {
+    if (!CheckAndSet(GetHeader().e_phoff, "program headers", &program_headers_start_, error_msg)) {
+      return false;
+    }
 
-  if (!program_header_only_) {
     // Setup section headers.
-    section_headers_start_ = Begin() + GetHeader().e_shoff;
+    if (!CheckAndSet(GetHeader().e_shoff, "section headers", &section_headers_start_, error_msg)) {
+      return false;
+    }
+
+    // Find shstrtab.
+    Elf32_Shdr* shstrtab_section_header = GetSectionNameStringSection();
+    if (shstrtab_section_header == nullptr) {
+      *error_msg = StringPrintf("Failed to find shstrtab section header in ELF file: '%s'",
+                                file_->GetPath().c_str());
+      return false;
+    }
 
     // Find .dynamic section info from program header
     dynamic_program_header_ = FindProgamHeaderByType(PT_DYNAMIC);
@@ -217,48 +230,84 @@
       return false;
     }
 
-    dynamic_section_start_
-        = reinterpret_cast<Elf32_Dyn*>(Begin() + GetDynamicProgramHeader().p_offset);
+    if (!CheckAndSet(GetDynamicProgramHeader().p_offset, "dynamic section",
+                     reinterpret_cast<byte**>(&dynamic_section_start_), error_msg)) {
+      return false;
+    }
 
     // Find other sections from section headers
     for (Elf32_Word i = 0; i < GetSectionHeaderNum(); i++) {
-      Elf32_Shdr& section_header = GetSectionHeader(i);
-      byte* section_addr = Begin() + section_header.sh_offset;
-      switch (section_header.sh_type) {
+      Elf32_Shdr* section_header = GetSectionHeader(i);
+      if (section_header == nullptr) {
+        *error_msg = StringPrintf("Failed to find section header for section %d in ELF file: '%s'",
+                                  i, file_->GetPath().c_str());
+        return false;
+      }
+      switch (section_header->sh_type) {
         case SHT_SYMTAB: {
-          symtab_section_start_ = reinterpret_cast<Elf32_Sym*>(section_addr);
+          if (!CheckAndSet(section_header->sh_offset, "symtab",
+                           reinterpret_cast<byte**>(&symtab_section_start_), error_msg)) {
+            return false;
+          }
           break;
         }
         case SHT_DYNSYM: {
-          dynsym_section_start_ = reinterpret_cast<Elf32_Sym*>(section_addr);
+          if (!CheckAndSet(section_header->sh_offset, "dynsym",
+                           reinterpret_cast<byte**>(&dynsym_section_start_), error_msg)) {
+            return false;
+          }
           break;
         }
         case SHT_STRTAB: {
           // TODO: base these off of sh_link from .symtab and .dynsym above
-          if ((section_header.sh_flags & SHF_ALLOC) != 0) {
-            dynstr_section_start_ = reinterpret_cast<char*>(section_addr);
+          if ((section_header->sh_flags & SHF_ALLOC) != 0) {
+            // Check that this is named ".dynstr" and ignore otherwise.
+            const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
+            if (header_name != nullptr && strncmp(".dynstr", header_name, 8) == 0) {
+              if (!CheckAndSet(section_header->sh_offset, "dynstr",
+                               reinterpret_cast<byte**>(&dynstr_section_start_), error_msg)) {
+                return false;
+              }
+            }
           } else {
-            strtab_section_start_ = reinterpret_cast<char*>(section_addr);
+            // Check that this is named ".strtab" and ignore otherwise.
+            const char* header_name = GetString(*shstrtab_section_header, section_header->sh_name);
+            if (header_name != nullptr && strncmp(".strtab", header_name, 8) == 0) {
+              if (!CheckAndSet(section_header->sh_offset, "strtab",
+                               reinterpret_cast<byte**>(&strtab_section_start_), error_msg)) {
+                return false;
+              }
+            }
           }
           break;
         }
         case SHT_DYNAMIC: {
-          if (reinterpret_cast<byte*>(dynamic_section_start_) != section_addr) {
+          if (reinterpret_cast<byte*>(dynamic_section_start_) !=
+              Begin() + section_header->sh_offset) {
             LOG(WARNING) << "Failed to find matching SHT_DYNAMIC for PT_DYNAMIC in "
                          << file_->GetPath() << ": " << std::hex
                          << reinterpret_cast<void*>(dynamic_section_start_)
-                         << " != " << reinterpret_cast<void*>(section_addr);
+                         << " != " << reinterpret_cast<void*>(Begin() + section_header->sh_offset);
             return false;
           }
           break;
         }
         case SHT_HASH: {
-          hash_section_start_ = reinterpret_cast<Elf32_Word*>(section_addr);
+          if (!CheckAndSet(section_header->sh_offset, "hash section",
+                           reinterpret_cast<byte**>(&hash_section_start_), error_msg)) {
+            return false;
+          }
           break;
         }
       }
     }
+
+    // Check for the existence of some sections.
+    if (!CheckSectionsExist(error_msg)) {
+      return false;
+    }
   }
+
   return true;
 }
 
@@ -272,6 +321,117 @@
   }
 }
 
+bool ElfFile::CheckAndSet(Elf32_Off offset, const char* label,
+                          byte** target, std::string* error_msg) {
+  if (Begin() + offset >= End()) {
+    *error_msg = StringPrintf("Offset %d is out of range for %s in ELF file: '%s'", offset, label,
+                              file_->GetPath().c_str());
+    return false;
+  }
+  *target = Begin() + offset;
+  return true;
+}
+
+bool ElfFile::CheckSectionsLinked(const byte* source, const byte* target) const {
+  // Only works in whole-program mode, as we need to iterate over the sections.
+  // Note that we normally can't search by type, as duplicates are allowed for most section types.
+  if (program_header_only_) {
+    return true;
+  }
+
+  Elf32_Shdr* source_section = nullptr;
+  Elf32_Word target_index = 0;
+  bool target_found = false;
+  for (Elf32_Word i = 0; i < GetSectionHeaderNum(); i++) {
+    Elf32_Shdr* section_header = GetSectionHeader(i);
+    if (section_header == nullptr) {
+      return false;  // Section header table runs past the end of the file.
+    }
+
+    if (Begin() + section_header->sh_offset == source) {
+      // Found the source.
+      source_section = section_header;
+      if (target_found) {
+        break;
+      }
+    } else if (Begin() + section_header->sh_offset == target) {
+      target_index = i;
+      target_found = true;
+      if (source_section != nullptr) {
+        break;
+      }
+    }
+  }
+
+  return target_found && source_section != nullptr && source_section->sh_link == target_index;
+}
+
+bool ElfFile::CheckSectionsExist(std::string* error_msg) const {
+  if (!program_header_only_) {
+    // If in full mode, need section headers.
+    if (section_headers_start_ == nullptr) {
+      *error_msg = StringPrintf("No section headers in ELF file: '%s'", file_->GetPath().c_str());
+      return false;
+    }
+  }
+
+  // This is redundant, but defensive.
+  if (dynamic_program_header_ == nullptr) {
+    *error_msg = StringPrintf("Failed to find PT_DYNAMIC program header in ELF file: '%s'",
+                              file_->GetPath().c_str());
+    return false;
+  }
+
+  // Need a dynamic section. This is redundant, but defensive.
+  if (dynamic_section_start_ == nullptr) {
+    *error_msg = StringPrintf("Failed to find dynamic section in ELF file: '%s'",
+                              file_->GetPath().c_str());
+    return false;
+  }
+
+  // Symtab validation. This is not really a hard failure, as we are currently not using the
+  // symtab internally, but it's nice to be defensive.
+  if (symtab_section_start_ != nullptr) {
+    // When there's a symtab, there should be a strtab.
+    if (strtab_section_start_ == nullptr) {
+      *error_msg = StringPrintf("No strtab for symtab in ELF file: '%s'", file_->GetPath().c_str());
+      return false;
+    }
+
+    // The symtab should link to the strtab.
+    if (!CheckSectionsLinked(reinterpret_cast<const byte*>(symtab_section_start_),
+                             reinterpret_cast<const byte*>(strtab_section_start_))) {
+      *error_msg = StringPrintf("Symtab is not linked to the strtab in ELF file: '%s'",
+                                file_->GetPath().c_str());
+      return false;
+    }
+  }
+
+  // We always need a dynstr & dynsym.
+  if (dynstr_section_start_ == nullptr) {
+    *error_msg = StringPrintf("No dynstr in ELF file: '%s'", file_->GetPath().c_str());
+    return false;
+  }
+  if (dynsym_section_start_ == nullptr) {
+    *error_msg = StringPrintf("No dynsym in ELF file: '%s'", file_->GetPath().c_str());
+    return false;
+  }
+
+  // Need a hash section for dynamic symbol lookup.
+  if (hash_section_start_ == nullptr) {
+    *error_msg = StringPrintf("Failed to find hash section in ELF file: '%s'",
+                              file_->GetPath().c_str());
+    return false;
+  }
+
+  // And the hash section should link to the dynsym.
+  if (!CheckSectionsLinked(reinterpret_cast<const byte*>(hash_section_start_),
+                           reinterpret_cast<const byte*>(dynsym_section_start_))) {
+    *error_msg = StringPrintf("Hash section is not linked to the dynsym in ELF file: '%s'",
+                              file_->GetPath().c_str());
+    return false;
+  }
+
+  return true;
+}
+
 bool ElfFile::SetMap(MemMap* map, std::string* error_msg) {
   if (map == nullptr) {
     // MemMap::Open should have already set an error.
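
A hedged sketch of the CheckAndSet idiom introduced above: translate an untrusted file offset into a pointer only after bounds-checking it, and report through error_msg instead of CHECK-crashing. Comparing the offset against the mapped size (rather than forming Begin() + offset first) also sidesteps pointer-overflow concerns; the function below is illustrative, not ART's signature:

  #include <cstddef>
  #include <cstdint>
  #include <string>

  bool CheckAndSetSketch(const uint8_t* begin, size_t size, uint32_t offset,
                         const char* label, const uint8_t** target,
                         std::string* error_msg) {
    if (offset >= size) {
      *error_msg = std::string("Offset out of range for ") + label;
      return false;  // caller propagates the failure instead of aborting
    }
    *target = begin + offset;
    return true;
  }
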
@@ -407,70 +567,68 @@
 
 
 Elf32_Ehdr& ElfFile::GetHeader() const {
-  CHECK(header_ != nullptr);
+  CHECK(header_ != nullptr);  // Header has been checked in SetMap. This is a sanity check.
   return *header_;
 }
 
 byte* ElfFile::GetProgramHeadersStart() const {
-  CHECK(program_headers_start_ != nullptr);
+  CHECK(program_headers_start_ != nullptr);  // Header has been set in Setup. This is a sanity
+                                             // check.
   return program_headers_start_;
 }
 
 byte* ElfFile::GetSectionHeadersStart() const {
-  CHECK(section_headers_start_ != nullptr);
+  CHECK(!program_header_only_);              // Only used in "full" mode.
+  CHECK(section_headers_start_ != nullptr);  // Is checked in CheckSectionsExist. Sanity check.
   return section_headers_start_;
 }
 
 Elf32_Phdr& ElfFile::GetDynamicProgramHeader() const {
-  CHECK(dynamic_program_header_ != nullptr);
+  CHECK(dynamic_program_header_ != nullptr);  // Is checked in CheckSectionsExist. Sanity check.
   return *dynamic_program_header_;
 }
 
 Elf32_Dyn* ElfFile::GetDynamicSectionStart() const {
-  CHECK(dynamic_section_start_ != nullptr);
+  CHECK(dynamic_section_start_ != nullptr);  // Is checked in CheckSectionsExist. Sanity check.
   return dynamic_section_start_;
 }
 
+static bool IsSymbolSectionType(Elf32_Word section_type) {
+  return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
+}
+
 Elf32_Sym* ElfFile::GetSymbolSectionStart(Elf32_Word section_type) const {
   CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
-  Elf32_Sym* symbol_section_start;
   switch (section_type) {
     case SHT_SYMTAB: {
-      symbol_section_start = symtab_section_start_;
+      return symtab_section_start_;
       break;
     }
     case SHT_DYNSYM: {
-      symbol_section_start = dynsym_section_start_;
+      return dynsym_section_start_;
       break;
     }
     default: {
       LOG(FATAL) << section_type;
-      symbol_section_start = nullptr;
+      return nullptr;
     }
   }
-  CHECK(symbol_section_start != nullptr);
-  return symbol_section_start;
 }
 
 const char* ElfFile::GetStringSectionStart(Elf32_Word section_type) const {
   CHECK(IsSymbolSectionType(section_type)) << file_->GetPath() << " " << section_type;
-  const char* string_section_start;
   switch (section_type) {
     case SHT_SYMTAB: {
-      string_section_start = strtab_section_start_;
-      break;
+      return strtab_section_start_;
     }
     case SHT_DYNSYM: {
-      string_section_start = dynstr_section_start_;
-      break;
+      return dynstr_section_start_;
     }
     default: {
       LOG(FATAL) << section_type;
-      string_section_start = nullptr;
+      return nullptr;
     }
   }
-  CHECK(string_section_start != nullptr);
-  return string_section_start;
 }
 
 const char* ElfFile::GetString(Elf32_Word section_type, Elf32_Word i) const {
@@ -479,12 +637,16 @@
     return nullptr;
   }
   const char* string_section_start = GetStringSectionStart(section_type);
-  const char* string = string_section_start + i;
-  return string;
+  if (string_section_start == nullptr) {
+    return nullptr;
+  }
+  return string_section_start + i;
 }
 
+// WARNING: The following methods do not check for an error condition (non-existent hash section).
+//          It is the caller's job to do this.
+
 Elf32_Word* ElfFile::GetHashSectionStart() const {
-  CHECK(hash_section_start_ != nullptr);
   return hash_section_start_;
 }
 
@@ -496,14 +658,22 @@
   return GetHashSectionStart()[1];
 }
 
-Elf32_Word ElfFile::GetHashBucket(size_t i) const {
-  CHECK_LT(i, GetHashBucketNum());
+Elf32_Word ElfFile::GetHashBucket(size_t i, bool* ok) const {
+  if (i >= GetHashBucketNum()) {
+    *ok = false;
+    return 0;
+  }
+  *ok = true;
   // 0 is nbucket, 1 is nchain
   return GetHashSectionStart()[2 + i];
 }
 
-Elf32_Word ElfFile::GetHashChain(size_t i) const {
-  CHECK_LT(i, GetHashChainNum());
+Elf32_Word ElfFile::GetHashChain(size_t i, bool* ok) const {
+  if (i >= GetHashChainNum()) {
+    *ok = false;
+    return 0;
+  }
+  *ok = true;
   // 0 is nbucket, 1 is nchain, & chains are after buckets
   return GetHashSectionStart()[2 + GetHashBucketNum() + i];
 }
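
The accessors above index the SysV .hash section, whose layout is word[0] = nbucket, word[1] = nchain, then nbucket bucket words followed by nchain chain words. A sketch of bounds-checked accessors over that layout (note that chains are bounded by nchain, not nbucket, which is what the fix above enforces):

  #include <cstddef>
  #include <cstdint>

  uint32_t HashBucket(const uint32_t* hash, size_t i, bool* ok) {
    const uint32_t nbucket = hash[0];
    *ok = (i < nbucket);
    return *ok ? hash[2 + i] : 0;
  }

  uint32_t HashChain(const uint32_t* hash, size_t i, bool* ok) {
    const uint32_t nbucket = hash[0];
    const uint32_t nchain = hash[1];
    *ok = (i < nchain);  // chain entries run from 0 to nchain - 1
    return *ok ? hash[2 + nbucket + i] : 0;
  }
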
@@ -512,18 +682,20 @@
   return GetHeader().e_phnum;
 }
 
-Elf32_Phdr& ElfFile::GetProgramHeader(Elf32_Word i) const {
-  CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath();
+Elf32_Phdr* ElfFile::GetProgramHeader(Elf32_Word i) const {
+  CHECK_LT(i, GetProgramHeaderNum()) << file_->GetPath();  // Sanity check for caller.
   byte* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
-  CHECK_LT(program_header, End()) << file_->GetPath();
-  return *reinterpret_cast<Elf32_Phdr*>(program_header);
+  if (program_header >= End()) {
+    return nullptr;  // Failure condition.
+  }
+  return reinterpret_cast<Elf32_Phdr*>(program_header);
 }
 
 Elf32_Phdr* ElfFile::FindProgamHeaderByType(Elf32_Word type) const {
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
-    Elf32_Phdr& program_header = GetProgramHeader(i);
-    if (program_header.p_type == type) {
-      return &program_header;
+    Elf32_Phdr* program_header = GetProgramHeader(i);
+    if (program_header == nullptr) {
+      return nullptr;  // Failure condition.
+    }
+    if (program_header->p_type == type) {
+      return program_header;
     }
   }
   return nullptr;
@@ -533,14 +705,18 @@
   return GetHeader().e_shnum;
 }
 
-Elf32_Shdr& ElfFile::GetSectionHeader(Elf32_Word i) const {
+Elf32_Shdr* ElfFile::GetSectionHeader(Elf32_Word i) const {
   // Can only access arbitrary sections when we have the whole file, not just program header.
   // Even if we Load(), it doesn't bring in all the sections.
   CHECK(!program_header_only_) << file_->GetPath();
-  CHECK_LT(i, GetSectionHeaderNum()) << file_->GetPath();
+  if (i >= GetSectionHeaderNum()) {
+    return nullptr;  // Failure condition.
+  }
   byte* section_header = GetSectionHeadersStart() + (i * GetHeader().e_shentsize);
-  CHECK_LT(section_header, End()) << file_->GetPath();
-  return *reinterpret_cast<Elf32_Shdr*>(section_header);
+  if (section_header >= End()) {
+    return nullptr;  // Failure condition.
+  }
+  return reinterpret_cast<Elf32_Shdr*>(section_header);
 }
 
 Elf32_Shdr* ElfFile::FindSectionByType(Elf32_Word type) const {
@@ -548,9 +724,9 @@
   // We could change this to switch on known types if they were detected during loading.
   CHECK(!program_header_only_) << file_->GetPath();
   for (Elf32_Word i = 0; i < GetSectionHeaderNum(); i++) {
-    Elf32_Shdr& section_header = GetSectionHeader(i);
-    if (section_header.sh_type == type) {
-      return &section_header;
+    Elf32_Shdr* section_header = GetSectionHeader(i);
+    if (section_header->sh_type == type) {
+      return section_header;
     }
   }
   return nullptr;
@@ -570,11 +746,15 @@
   return h;
 }
 
-Elf32_Shdr& ElfFile::GetSectionNameStringSection() const {
+Elf32_Shdr* ElfFile::GetSectionNameStringSection() const {
   return GetSectionHeader(GetHeader().e_shstrndx);
 }
 
 const byte* ElfFile::FindDynamicSymbolAddress(const std::string& symbol_name) const {
+  // Check that we have a hash section.
+  if (GetHashSectionStart() == nullptr) {
+    return nullptr;  // Failure condition.
+  }
   const Elf32_Sym* sym = FindDynamicSymbol(symbol_name);
   if (sym != nullptr) {
     return base_address_ + sym->st_value;
@@ -583,6 +763,7 @@
   }
 }
 
+// WARNING: Only called from FindDynamicSymbolAddress. Elides check for hash section.
 const Elf32_Sym* ElfFile::FindDynamicSymbol(const std::string& symbol_name) const {
   if (GetHashBucketNum() == 0) {
     // No dynamic symbols at all.
@@ -590,22 +771,28 @@
   }
   Elf32_Word hash = elfhash(symbol_name.c_str());
   Elf32_Word bucket_index = hash % GetHashBucketNum();
-  Elf32_Word symbol_and_chain_index = GetHashBucket(bucket_index);
+  bool ok;
+  Elf32_Word symbol_and_chain_index = GetHashBucket(bucket_index, &ok);
+  if (!ok) {
+    return nullptr;
+  }
   while (symbol_and_chain_index != 0 /* STN_UNDEF */) {
-    Elf32_Sym& symbol = GetSymbol(SHT_DYNSYM, symbol_and_chain_index);
-    const char* name = GetString(SHT_DYNSYM, symbol.st_name);
-    if (symbol_name == name) {
-      return &symbol;
+    Elf32_Sym* symbol = GetSymbol(SHT_DYNSYM, symbol_and_chain_index);
+    if (symbol == nullptr) {
+      return nullptr;  // Failure condition.
     }
-    symbol_and_chain_index = GetHashChain(symbol_and_chain_index);
+    const char* name = GetString(SHT_DYNSYM, symbol->st_name);
+    if (symbol_name == name) {
+      return symbol;
+    }
+    symbol_and_chain_index = GetHashChain(symbol_and_chain_index, &ok);
+    if (!ok) {
+      return nullptr;
+    }
   }
   return nullptr;
 }
 
-bool ElfFile::IsSymbolSectionType(Elf32_Word section_type) {
-  return ((section_type == SHT_SYMTAB) || (section_type == SHT_DYNSYM));
-}
-
 Elf32_Word ElfFile::GetSymbolNum(Elf32_Shdr& section_header) const {
   CHECK(IsSymbolSectionType(section_header.sh_type))
       << file_->GetPath() << " " << section_header.sh_type;
@@ -613,9 +800,13 @@
   return section_header.sh_size / section_header.sh_entsize;
 }
 
-Elf32_Sym& ElfFile::GetSymbol(Elf32_Word section_type,
+Elf32_Sym* ElfFile::GetSymbol(Elf32_Word section_type,
                               Elf32_Word i) const {
-  return *(GetSymbolSectionStart(section_type) + i);
+  Elf32_Sym* sym_start = GetSymbolSectionStart(section_type);
+  if (sym_start == nullptr) {
+    return nullptr;
+  }
+  return sym_start + i;
 }
 
 ElfFile::SymbolTable** ElfFile::GetSymbolTable(Elf32_Word section_type) {
@@ -646,27 +837,37 @@
       DCHECK(build_map);
       *symbol_table = new SymbolTable;
       Elf32_Shdr* symbol_section = FindSectionByType(section_type);
-      CHECK(symbol_section != nullptr) << file_->GetPath();
-      Elf32_Shdr& string_section = GetSectionHeader(symbol_section->sh_link);
+      if (symbol_section == nullptr) {
+        return nullptr;  // Failure condition.
+      }
+      Elf32_Shdr* string_section = GetSectionHeader(symbol_section->sh_link);
+      if (string_section == nullptr) {
+        return nullptr;  // Failure condition.
+      }
       for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) {
-        Elf32_Sym& symbol = GetSymbol(section_type, i);
-        unsigned char type = ELF32_ST_TYPE(symbol.st_info);
+        Elf32_Sym* symbol = GetSymbol(section_type, i);
+        if (symbol == nullptr) {
+          return nullptr;  // Failure condition.
+        }
+        unsigned char type = ELF32_ST_TYPE(symbol->st_info);
         if (type == STT_NOTYPE) {
           continue;
         }
-        const char* name = GetString(string_section, symbol.st_name);
+        const char* name = GetString(*string_section, symbol->st_name);
         if (name == nullptr) {
           continue;
         }
         std::pair<SymbolTable::iterator, bool> result =
-            (*symbol_table)->insert(std::make_pair(name, &symbol));
+            (*symbol_table)->insert(std::make_pair(name, symbol));
         if (!result.second) {
           // If a duplicate, make sure it has the same logical value. Seen on x86.
-          CHECK_EQ(symbol.st_value, result.first->second->st_value);
-          CHECK_EQ(symbol.st_size, result.first->second->st_size);
-          CHECK_EQ(symbol.st_info, result.first->second->st_info);
-          CHECK_EQ(symbol.st_other, result.first->second->st_other);
-          CHECK_EQ(symbol.st_shndx, result.first->second->st_shndx);
+          if ((symbol->st_value != result.first->second->st_value) ||
+              (symbol->st_size != result.first->second->st_size) ||
+              (symbol->st_info != result.first->second->st_info) ||
+              (symbol->st_other != result.first->second->st_other) ||
+              (symbol->st_shndx != result.first->second->st_shndx)) {
+            return nullptr;  // Failure condition.
+          }
         }
       }
     }
@@ -680,16 +881,24 @@
 
   // Fall back to linear search
   Elf32_Shdr* symbol_section = FindSectionByType(section_type);
-  CHECK(symbol_section != nullptr) << file_->GetPath();
-  Elf32_Shdr& string_section = GetSectionHeader(symbol_section->sh_link);
+  if (symbol_section == nullptr) {
+    return nullptr;
+  }
+  Elf32_Shdr* string_section = GetSectionHeader(symbol_section->sh_link);
+  if (string_section == nullptr) {
+    return nullptr;
+  }
   for (uint32_t i = 0; i < GetSymbolNum(*symbol_section); i++) {
-    Elf32_Sym& symbol = GetSymbol(section_type, i);
-    const char* name = GetString(string_section, symbol.st_name);
+    Elf32_Sym* symbol = GetSymbol(section_type, i);
+    if (symbol == nullptr) {
+      return nullptr;  // Failure condition.
+    }
+    const char* name = GetString(*string_section, symbol->st_name);
     if (name == nullptr) {
       continue;
     }
     if (symbol_name == name) {
-      return &symbol;
+      return symbol;
     }
   }
   return nullptr;
@@ -708,14 +917,20 @@
 const char* ElfFile::GetString(Elf32_Shdr& string_section, Elf32_Word i) const {
   CHECK(!program_header_only_) << file_->GetPath();
   // TODO: remove this static_cast from enum when using -std=gnu++0x
-  CHECK_EQ(static_cast<Elf32_Word>(SHT_STRTAB), string_section.sh_type) << file_->GetPath();
-  CHECK_LT(i, string_section.sh_size) << file_->GetPath();
+  if (static_cast<Elf32_Word>(SHT_STRTAB) != string_section.sh_type) {
+    return nullptr;  // Failure condition.
+  }
+  if (i >= string_section.sh_size) {
+    return nullptr;
+  }
   if (i == 0) {
     return nullptr;
   }
   byte* strings = Begin() + string_section.sh_offset;
   byte* string = strings + i;
-  CHECK_LT(string, End()) << file_->GetPath();
+  if (string >= End()) {
+    return nullptr;
+  }
   return reinterpret_cast<const char*>(string);
 }
 
@@ -785,15 +1000,15 @@
   Elf32_Addr min_vaddr = 0xFFFFFFFFu;
   Elf32_Addr max_vaddr = 0x00000000u;
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
-    Elf32_Phdr& program_header = GetProgramHeader(i);
-    if (program_header.p_type != PT_LOAD) {
+    Elf32_Phdr* program_header = GetProgramHeader(i);
+    if (program_header == nullptr || program_header->p_type != PT_LOAD) {
       continue;
     }
-    Elf32_Addr begin_vaddr = program_header.p_vaddr;
+    Elf32_Addr begin_vaddr = program_header->p_vaddr;
     if (begin_vaddr < min_vaddr) {
        min_vaddr = begin_vaddr;
     }
-    Elf32_Addr end_vaddr = program_header.p_vaddr + program_header.p_memsz;
+    Elf32_Addr end_vaddr = program_header->p_vaddr + program_header->p_memsz;
     if (end_vaddr > max_vaddr) {
       max_vaddr = end_vaddr;
     }
@@ -843,16 +1058,21 @@
 
   bool reserved = false;
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
-    Elf32_Phdr& program_header = GetProgramHeader(i);
+    Elf32_Phdr* program_header = GetProgramHeader(i);
+    if (program_header == nullptr) {
+      *error_msg = StringPrintf("No program header for entry %d in ELF file %s.",
+                                i, file_->GetPath().c_str());
+      return false;
+    }
 
     // Record .dynamic header information for later use
-    if (program_header.p_type == PT_DYNAMIC) {
-      dynamic_program_header_ = &program_header;
+    if (program_header->p_type == PT_DYNAMIC) {
+      dynamic_program_header_ = program_header;
       continue;
     }
 
     // Not something to load, move on.
-    if (program_header.p_type != PT_LOAD) {
+    if (program_header->p_type != PT_LOAD) {
       continue;
     }
 
@@ -874,8 +1094,8 @@
     }
     size_t file_length = static_cast<size_t>(temp_file_length);
     if (!reserved) {
-      byte* reserve_base = ((program_header.p_vaddr != 0) ?
-                            reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
+      byte* reserve_base = ((program_header->p_vaddr != 0) ?
+                            reinterpret_cast<byte*>(program_header->p_vaddr) : nullptr);
       std::string reservation_name("ElfFile reservation for ");
       reservation_name += file_->GetPath();
       std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
@@ -894,18 +1114,18 @@
       segments_.push_back(reserve.release());
     }
     // empty segment, nothing to map
-    if (program_header.p_memsz == 0) {
+    if (program_header->p_memsz == 0) {
       continue;
     }
-    byte* p_vaddr = base_address_ + program_header.p_vaddr;
+    byte* p_vaddr = base_address_ + program_header->p_vaddr;
     int prot = 0;
-    if (executable && ((program_header.p_flags & PF_X) != 0)) {
+    if (executable && ((program_header->p_flags & PF_X) != 0)) {
       prot |= PROT_EXEC;
     }
-    if ((program_header.p_flags & PF_W) != 0) {
+    if ((program_header->p_flags & PF_W) != 0) {
       prot |= PROT_WRITE;
     }
-    if ((program_header.p_flags & PF_R) != 0) {
+    if ((program_header->p_flags & PF_R) != 0) {
       prot |= PROT_READ;
     }
     int flags = 0;
@@ -915,17 +1135,17 @@
     } else {
       flags |= MAP_PRIVATE;
     }
-    if (file_length < (program_header.p_offset + program_header.p_memsz)) {
+    if (file_length < (program_header->p_offset + program_header->p_memsz)) {
       *error_msg = StringPrintf("File size of %zd bytes not large enough to contain ELF segment "
                                 "%d of %d bytes: '%s'", file_length, i,
-                                program_header.p_offset + program_header.p_memsz,
+                                program_header->p_offset + program_header->p_memsz,
                                 file_->GetPath().c_str());
       return false;
     }
     std::unique_ptr<MemMap> segment(MemMap::MapFileAtAddress(p_vaddr,
-                                                       program_header.p_memsz,
+                                                       program_header->p_memsz,
                                                        prot, flags, file_->Fd(),
-                                                       program_header.p_offset,
+                                                       program_header->p_offset,
                                                        true,  // implies MAP_FIXED
                                                        file_->GetPath().c_str(),
                                                        error_msg));
@@ -944,8 +1164,14 @@
   }
 
   // Now that we are done loading, .dynamic should be in memory to find .dynstr, .dynsym, .hash
-  dynamic_section_start_
-      = reinterpret_cast<Elf32_Dyn*>(base_address_ + GetDynamicProgramHeader().p_vaddr);
+  byte* dsptr = base_address_ + GetDynamicProgramHeader().p_vaddr;
+  if ((dsptr < Begin() || dsptr >= End()) && !ValidPointer(dsptr)) {
+    *error_msg = StringPrintf("dynamic section address invalid in ELF file %s",
+                              file_->GetPath().c_str());
+    return false;
+  }
+  dynamic_section_start_ = reinterpret_cast<Elf32_Dyn*>(dsptr);
+
   for (Elf32_Word i = 0; i < GetDynamicNum(); i++) {
     Elf32_Dyn& elf_dyn = GetDynamic(i);
     byte* d_ptr = base_address_ + elf_dyn.d_un.d_ptr;
@@ -989,6 +1215,11 @@
     }
   }
 
+  // Check for the existence of some sections.
+  if (!CheckSectionsExist(error_msg)) {
+    return false;
+  }
+
   // Use GDB JIT support to do stack backtrace, etc.
   if (executable) {
     GdbJITSupport();
@@ -1010,15 +1241,21 @@
 
 Elf32_Shdr* ElfFile::FindSectionByName(const std::string& name) const {
   CHECK(!program_header_only_);
-  Elf32_Shdr& shstrtab_sec = GetSectionNameStringSection();
+  Elf32_Shdr* shstrtab_sec = GetSectionNameStringSection();
+  if (shstrtab_sec == nullptr) {
+    return nullptr;
+  }
   for (uint32_t i = 0; i < GetSectionHeaderNum(); i++) {
-    Elf32_Shdr& shdr = GetSectionHeader(i);
-    const char* sec_name = GetString(shstrtab_sec, shdr.sh_name);
+    Elf32_Shdr* shdr = GetSectionHeader(i);
+    if (shdr == nullptr) {
+      return nullptr;
+    }
+    const char* sec_name = GetString(*shstrtab_sec, shdr->sh_name);
     if (sec_name == nullptr) {
       continue;
     }
     if (name == sec_name) {
-      return &shdr;
+      return shdr;
     }
   }
   return nullptr;
@@ -1337,7 +1574,7 @@
   }
 
  private:
-  explicit DebugTag(uint32_t index) : index_(index) {}
+  explicit DebugTag(uint32_t index) : index_(index), size_(0), tag_(0), has_child_(false) {}
   void AddAttribute(uint32_t type, uint32_t attr_size) {
     off_map_.insert(std::pair<uint32_t, uint32_t>(type, size_));
     size_map_.insert(std::pair<uint32_t, uint32_t>(type, attr_size));
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 1922911..916d693 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -67,35 +67,20 @@
   Elf32_Ehdr& GetHeader() const;
 
   Elf32_Word GetProgramHeaderNum() const;
-  Elf32_Phdr& GetProgramHeader(Elf32_Word) const;
-  Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type) const;
+  Elf32_Phdr* GetProgramHeader(Elf32_Word) const;
 
   Elf32_Word GetSectionHeaderNum() const;
-  Elf32_Shdr& GetSectionHeader(Elf32_Word) const;
+  Elf32_Shdr* GetSectionHeader(Elf32_Word) const;
   Elf32_Shdr* FindSectionByType(Elf32_Word type) const;
   Elf32_Shdr* FindSectionByName(const std::string& name) const;
 
-  Elf32_Shdr& GetSectionNameStringSection() const;
+  Elf32_Shdr* GetSectionNameStringSection() const;
 
   // Find .dynsym using .hash for more efficient lookup than FindSymbolAddress.
   const byte* FindDynamicSymbolAddress(const std::string& symbol_name) const;
-  const Elf32_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
 
-  static bool IsSymbolSectionType(Elf32_Word section_type);
   Elf32_Word GetSymbolNum(Elf32_Shdr&) const;
-  Elf32_Sym& GetSymbol(Elf32_Word section_type, Elf32_Word i) const;
-
-  // Find symbol in specified table, returning nullptr if it is not found.
-  //
-  // If build_map is true, builds a map to speed repeated access. The
-  // map does not included untyped symbol values (aka STT_NOTYPE)
-  // since they can contain duplicates. If build_map is false, the map
-  // will be used if it was already created. Typically build_map
-  // should be set unless only a small number of symbols will be
-  // looked up.
-  Elf32_Sym* FindSymbolByName(Elf32_Word section_type,
-                              const std::string& symbol_name,
-                              bool build_map);
+  Elf32_Sym* GetSymbol(Elf32_Word section_type, Elf32_Word i) const;
 
   // Find address of symbol in specified table, returning 0 if it is
   // not found. See FindSymbolByName for an explanation of build_map.
@@ -107,13 +92,8 @@
   // special 0 offset.
   const char* GetString(Elf32_Shdr&, Elf32_Word) const;
 
-  // Lookup a string by section type. Returns nullptr for special 0 offset.
-  const char* GetString(Elf32_Word section_type, Elf32_Word) const;
-
   Elf32_Word GetDynamicNum() const;
   Elf32_Dyn& GetDynamic(Elf32_Word) const;
-  Elf32_Dyn* FindDynamicByType(Elf32_Sword type) const;
-  Elf32_Word FindDynamicValueByType(Elf32_Sword type) const;
 
   Elf32_Word GetRelNum(Elf32_Shdr&) const;
   Elf32_Rel& GetRel(Elf32_Shdr&, Elf32_Word) const;
@@ -148,14 +128,45 @@
   Elf32_Word* GetHashSectionStart() const;
   Elf32_Word GetHashBucketNum() const;
   Elf32_Word GetHashChainNum() const;
-  Elf32_Word GetHashBucket(size_t i) const;
-  Elf32_Word GetHashChain(size_t i) const;
+  Elf32_Word GetHashBucket(size_t i, bool* ok) const;
+  Elf32_Word GetHashChain(size_t i, bool* ok) const;
 
   typedef std::map<std::string, Elf32_Sym*> SymbolTable;
   SymbolTable** GetSymbolTable(Elf32_Word section_type);
 
   bool ValidPointer(const byte* start) const;
 
+  const Elf32_Sym* FindDynamicSymbol(const std::string& symbol_name) const;
+
+  // Check that certain sections and their dependencies exist.
+  bool CheckSectionsExist(std::string* error_msg) const;
+
+  // Check that the link of the first section links to the second section.
+  bool CheckSectionsLinked(const byte* source, const byte* target) const;
+
+  // Check whether the offset is in range, and set to target to Begin() + offset if OK.
+  bool CheckAndSet(Elf32_Off offset, const char* label, byte** target, std::string* error_msg);
+
+  // Find symbol in specified table, returning nullptr if it is not found.
+  //
+  // If build_map is true, builds a map to speed repeated access. The
+  // map does not included untyped symbol values (aka STT_NOTYPE)
+  // since they can contain duplicates. If build_map is false, the map
+  // will be used if it was already created. Typically build_map
+  // should be set unless only a small number of symbols will be
+  // looked up.
+  Elf32_Sym* FindSymbolByName(Elf32_Word section_type,
+                              const std::string& symbol_name,
+                              bool build_map);
+
+  Elf32_Phdr* FindProgamHeaderByType(Elf32_Word type) const;
+
+  Elf32_Dyn* FindDynamicByType(Elf32_Sword type) const;
+  Elf32_Word FindDynamicValueByType(Elf32_Sword type) const;
+
+  // Lookup a string by section type. Returns nullptr for special 0 offset.
+  const char* GetString(Elf32_Word section_type, Elf32_Word) const;
+
   const File* const file_;
   const bool writable_;
   const bool program_header_only_;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 38842cb..4ef7d74 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -79,7 +79,7 @@
     // has changed and to null-check the return value in case the
     // initialization fails.
     *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     } else {
@@ -107,7 +107,7 @@
     // has changed and to null-check the return value in case the
     // initialization fails.
     *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
       DCHECK(self->IsExceptionPending());
       return nullptr;  // Failure
     }
@@ -324,7 +324,7 @@
     } else {
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(fields_class));
-      if (LIKELY(class_linker->EnsureInitialized(h_class, true, true))) {
+      if (LIKELY(class_linker->EnsureInitialized(self, h_class, true, true))) {
         // Otherwise let's ensure the class is initialized before resolving the field.
         return resolved_field;
       }
@@ -603,7 +603,7 @@
   }
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_class(hs.NewHandle(klass));
-  if (!class_linker->EnsureInitialized(h_class, true, true)) {
+  if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
     CHECK(self->IsExceptionPending());
     return nullptr;  // Failure - Indicate to caller to deliver exception
   }
@@ -640,18 +640,6 @@
   }
 }
 
-static inline void CheckSuspend(Thread* thread) {
-  for (;;) {
-    if (thread->ReadFlag(kCheckpointRequest)) {
-      thread->RunCheckpointFunction();
-    } else if (thread->ReadFlag(kSuspendRequest)) {
-      thread->FullSuspendCheck();
-    } else {
-      break;
-    }
-  }
-}
-
 template <typename INT_TYPE, typename FLOAT_TYPE>
 static inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
   const INT_TYPE kMaxInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::max());
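
The removed helper now lives on Thread (thread-inl.h), which is why the includes above shrink to thread-inl.h. A sketch of the polling loop it implements, with stand-in members (ReadFlag/RunCheckpointFunction/FullSuspendCheck are assumptions mirroring the deleted code, not the real Thread API):

  enum ThreadFlag { kSuspendRequest = 1, kCheckpointRequest = 2 };

  class ThreadSketch {
   public:
    void CheckSuspend() {
      for (;;) {
        if (ReadFlag(kCheckpointRequest)) {
          RunCheckpointFunction();  // service pending checkpoints first
        } else if (ReadFlag(kSuspendRequest)) {
          FullSuspendCheck();       // block until the suspension is lifted
        } else {
          break;                    // no request pending
        }
      }
    }

   private:
    bool ReadFlag(ThreadFlag flag) const { return false; }  // stand-in
    void RunCheckpointFunction() {}                          // stand-in
    void FullSuspendCheck() {}                               // stand-in
  };
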
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index d834d4d..a78c2c0 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -33,8 +33,10 @@
 
 namespace art {
 
-static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, mirror::ArtMethod* referrer,
-                                                      int32_t component_count, Thread* self,
+static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx,
+                                                      mirror::ArtMethod* referrer,
+                                                      int32_t component_count,
+                                                      Thread* self,
                                                       bool access_check)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (UNLIKELY(component_count < 0)) {
@@ -56,9 +58,10 @@
     } else {
       ThrowLocation throw_location = self->GetCurrentLocationForThrow();
       DCHECK(throw_location.GetMethod() == referrer);
-      self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;",
-                               "Found type %s; filled-new-array not implemented for anything but 'int'",
-                               PrettyDescriptor(klass).c_str());
+      self->ThrowNewExceptionF(
+          throw_location, "Ljava/lang/InternalError;",
+          "Found type %s; filled-new-array not implemented for anything but 'int'",
+          PrettyDescriptor(klass).c_str());
     }
     return nullptr;  // Failure
   }
@@ -92,8 +95,10 @@
 }
 
 // Helper function to allocate array for FILLED_NEW_ARRAY.
-mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror::ArtMethod* referrer,
-                                                      int32_t component_count, Thread* self,
+mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
+                                                      mirror::ArtMethod* referrer,
+                                                      int32_t component_count,
+                                                      Thread* self,
                                                       bool access_check,
                                                       gc::AllocatorType /* allocator_type */) {
   mirror::Class* klass = CheckFilledNewArrayAlloc(type_idx, referrer, component_count, self,
@@ -144,24 +149,19 @@
     // TODO: Use String::FromModifiedUTF...?
     ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
     if (s.get() != nullptr) {
-      jfieldID detail_message_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
-                                                   "detailMessage", "Ljava/lang/String;");
-      env->SetObjectField(exc.get(), detail_message_id, s.get());
+      env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
 
       // cause.
-      jfieldID cause_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
-                                          "cause", "Ljava/lang/Throwable;");
-      env->SetObjectField(exc.get(), cause_id, exc.get());
+      env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
 
       // suppressedExceptions.
-      jfieldID emptylist_id = env->GetStaticFieldID(WellKnownClasses::java_util_Collections,
-                                                    "EMPTY_LIST", "Ljava/util/List;");
       ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
-              WellKnownClasses::java_util_Collections, emptylist_id));
+          WellKnownClasses::java_util_Collections,
+          WellKnownClasses::java_util_Collections_EMPTY_LIST));
       CHECK(emptylist.get() != nullptr);
-      jfieldID suppressed_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
-                                               "suppressedExceptions", "Ljava/util/List;");
-      env->SetObjectField(exc.get(), suppressed_id, emptylist.get());
+      env->SetObjectField(exc.get(),
+                          WellKnownClasses::java_lang_Throwable_suppressedExceptions,
+                          emptylist.get());
 
       // stackState is set as result of fillInStackTrace. fillInStackTrace calls
       // nativeFillInStackTrace.
@@ -171,19 +171,17 @@
         stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
       }
       if (stack_state_val.get() != nullptr) {
-        jfieldID stackstateID = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
-            "stackState", "Ljava/lang/Object;");
-        env->SetObjectField(exc.get(), stackstateID, stack_state_val.get());
+        env->SetObjectField(exc.get(),
+                            WellKnownClasses::java_lang_Throwable_stackState,
+                            stack_state_val.get());
 
         // stackTrace.
-        jfieldID stack_trace_elem_id = env->GetStaticFieldID(
-            WellKnownClasses::libcore_util_EmptyArray, "STACK_TRACE_ELEMENT",
-            "[Ljava/lang/StackTraceElement;");
         ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
-                WellKnownClasses::libcore_util_EmptyArray, stack_trace_elem_id));
-        jfieldID stacktrace_id = env->GetFieldID(
-            WellKnownClasses::java_lang_Throwable, "stackTrace", "[Ljava/lang/StackTraceElement;");
-        env->SetObjectField(exc.get(), stacktrace_id, stack_trace_elem.get());
+            WellKnownClasses::libcore_util_EmptyArray,
+            WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
+        env->SetObjectField(exc.get(),
+                            WellKnownClasses::java_lang_Throwable_stackTrace,
+                            stack_trace_elem.get());
 
         // Throw the exception.
         ThrowLocation throw_location = self->GetCurrentLocationForThrow();
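
The hunks above replace per-throw GetFieldID lookups with IDs cached in WellKnownClasses. A minimal sketch of that caching pattern with illustrative names (not ART's actual WellKnownClasses API): resolve once at startup, since jfieldIDs stay valid for the lifetime of the class, then reuse on the hot path.

  #include <jni.h>

  namespace well_known {
  static jfieldID throwable_detail_message = nullptr;

  // Resolve once, e.g. from JNI_OnLoad.
  void Init(JNIEnv* env) {
    jclass throwable = env->FindClass("java/lang/Throwable");
    throwable_detail_message =
        env->GetFieldID(throwable, "detailMessage", "Ljava/lang/String;");
  }
  }  // namespace well_known

  void SetDetailMessage(JNIEnv* env, jobject exc, jstring msg) {
    // Hot path: no string-based lookup, just a field write.
    env->SetObjectField(exc, well_known::throwable_detail_message, msg);
  }
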
@@ -326,7 +324,8 @@
         }
       }
       CHECK_NE(throws_index, -1);
-      mirror::ObjectArray<mirror::Class>* declared_exceptions = proxy_class->GetThrows()->Get(throws_index);
+      mirror::ObjectArray<mirror::Class>* declared_exceptions =
+          proxy_class->GetThrows()->Get(throws_index);
       mirror::Class* exception_class = exception->GetClass();
       bool declares_exception = false;
       for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 44c89ad..08edecf 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -174,8 +174,6 @@
 void CheckReferenceResult(mirror::Object* o, Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
 JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
                                     jobject rcvr_jobj, jobject interface_art_method_jobj,
                                     std::vector<jvalue>& args)
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 64faf76..b617636 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -36,7 +36,8 @@
       self->PushShadowFrame(shadow_frame);
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
-      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true))) {
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true,
+                                                                            true))) {
         self->PopShadowFrame();
         DCHECK(self->IsExceptionPending());
         return;
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 23e1c36..7d5ccc2 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -14,11 +14,9 @@
  * limitations under the License.
  */
 
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/object-inl.h"
 #include "verifier/dex_gc_map.h"
 #include "stack.h"
+#include "thread-inl.h"
 
 namespace art {
 
@@ -71,7 +69,7 @@
 
 extern "C" void art_portable_test_suspend_from_code(Thread* self)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  CheckSuspend(self);
+  self->CheckSuspend();
   if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
     // Save out the shadow frame to the heap
     ShadowFrameCopyVisitor visitor(self);
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 9f75b0f..7f6144b 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -215,7 +215,7 @@
     if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
       // Ensure static method's class is initialized.
       Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
         DCHECK(Thread::Current()->IsExceptionPending());
         self->PopManagedStackFragment(fragment);
         return 0;
@@ -399,7 +399,7 @@
     // Ensure that the called method's class is initialized.
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
-    linker->EnsureInitialized(called_class, true, true);
+    linker->EnsureInitialized(self, called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromPortableCompiledCode();
       // TODO: remove this after we solve the link issue.
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 6537249..87f04bb 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -14,15 +14,8 @@
  * limitations under the License.
  */
 
-#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object.h"
 #include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
+#include "thread-inl.h"
 #include "verify_object-inl.h"
 
 namespace art {
@@ -56,7 +49,7 @@
     // In fast JNI mode we never transitioned out of runnable. Perform a suspend check if there
     // is a flag raised.
     DCHECK(Locks::mutator_lock_->IsSharedHeld(self));
-    CheckSuspend(self);
+    self->CheckSuspend();
   }
 }
 
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 118cd7f..ea75fb6 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -15,17 +15,15 @@
  */
 
 #include "callee_save_frame.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "thread.h"
-#include "thread_list.h"
+#include "thread-inl.h"
 
 namespace art {
 
-extern "C" void artTestSuspendFromCode(Thread* thread, StackReference<mirror::ArtMethod>* sp)
+extern "C" void artTestSuspendFromCode(Thread* self, StackReference<mirror::ArtMethod>* sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Called when suspend count check value is 0 and thread->suspend_count_ != 0
-  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
-  CheckSuspend(thread);
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  self->CheckSuspend();
 }
 
 }  // namespace art
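
The other signature change threaded through these files gives ClassLinker::EnsureInitialized an explicit Thread* self parameter. Presumably this lets hot paths that already hold the current thread skip a Thread::Current() TLS lookup; a minimal model of the before/after shape (toy types, assumed rationale):

    #include <cassert>

    struct Thread {
      static Thread* Current() { static thread_local Thread t; return &t; }
    };

    class ClassLinker {
     public:
      // Old shape: the linker re-derived the current thread on every call.
      bool EnsureInitializedOld(bool can_init_fields, bool can_init_parents) {
        return Init(Thread::Current(), can_init_fields, can_init_parents);
      }
      // New shape: callers that already hold `self` pass it straight
      // through, skipping the TLS lookup.
      bool EnsureInitialized(Thread* self, bool can_init_fields, bool can_init_parents) {
        return Init(self, can_init_fields, can_init_parents);
      }

     private:
      bool Init(Thread* self, bool, bool) { return self != nullptr; }
    };

    int main() {
      ClassLinker linker;
      assert(linker.EnsureInitialized(Thread::Current(), true, true));
    }
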
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index dfd2e11..1dbbb70 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -496,7 +496,7 @@
       // Ensure static method's class is initialized.
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
         DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(method);
         self->PopManagedStackFragment(fragment);
         return 0;
@@ -808,7 +808,7 @@
     // Ensure that the called method's class is initialized.
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
-    linker->EnsureInitialized(called_class, true, true);
+    linker->EnsureInitialized(soa.Self(), called_class, true, true);
     if (LIKELY(called_class->IsInitialized())) {
       code = called->GetEntryPointFromQuickCompiledCode();
     } else if (called_class->IsInitializing()) {
@@ -1488,7 +1488,7 @@
       // Initialize padding entries.
       size_t expected_slots = handle_scope_->NumberOfReferences();
       while (cur_entry_ < expected_slots) {
-        handle_scope_->GetHandle(cur_entry_++).Assign(nullptr);
+        handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
       }
       DCHECK_NE(cur_entry_, 0U);
     }
@@ -1509,7 +1509,7 @@
 
 uintptr_t BuildGenericJniFrameVisitor::FillJniCall::PushHandle(mirror::Object* ref) {
   uintptr_t tmp;
-  Handle<mirror::Object> h = handle_scope_->GetHandle(cur_entry_);
+  MutableHandle<mirror::Object> h = handle_scope_->GetMutableHandle(cur_entry_);
   h.Assign(ref);
   tmp = reinterpret_cast<uintptr_t>(h.ToJObject());
   cur_entry_++;
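
The GetHandle to GetMutableHandle change reflects a split of the handle types: a plain Handle is now a read-only view, and only MutableHandle exposes Assign. A compilable toy version of that split (raw T** slots instead of ART's StackReference-based handle scopes):

    #include <cassert>

    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T* Get() const { return *slot_; }

     protected:
      T** slot_;  // Points at a handle-scope slot that the GC may update.
    };

    template <typename T>
    class MutableHandle : public Handle<T> {
     public:
      using Handle<T>::Handle;
      // Only the mutable view may overwrite the slot.
      void Assign(T* ref) { *this->slot_ = ref; }
    };

    struct Object {};

    int main() {
      Object obj;
      Object* slot = &obj;
      MutableHandle<Object> h(&slot);
      h.Assign(nullptr);            // Allowed: mutable view.
      Handle<Object> read_only(h);  // Usable wherever a read-only view suffices.
      assert(read_only.Get() == nullptr);
    }
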
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 99633a3..6033a5f 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -45,7 +45,7 @@
     my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader);
     ASSERT_TRUE(my_klass_ != NULL);
     Handle<mirror::Class> klass(hs.NewHandle(my_klass_));
-    class_linker_->EnsureInitialized(klass, true, true);
+    class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
     my_klass_ = klass.Get();
 
     dex_ = my_klass_->GetDexCache()->GetDexFile();
diff --git a/runtime/field_helper.h b/runtime/field_helper.h
index 5eae55e..8097025 100644
--- a/runtime/field_helper.h
+++ b/runtime/field_helper.h
@@ -27,11 +27,6 @@
  public:
   explicit FieldHelper(Handle<mirror::ArtField> f) : field_(f) {}
 
-  void ChangeField(mirror::ArtField* new_f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(new_f != nullptr);
-    field_.Assign(new_f);
-  }
-
   mirror::ArtField* GetField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return field_.Get();
   }
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index ad22a2e..a7e5e74 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -569,7 +569,7 @@
 
 RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
   // Get the lowest address non-full run from the binary tree.
-  std::set<Run*>* const bt = &non_full_runs_[idx];
+  auto* const bt = &non_full_runs_[idx];
   if (!bt->empty()) {
     // If there's one, use it as the current run.
     auto it = bt->begin();
@@ -767,7 +767,7 @@
   }
   // Free the slot in the run.
   run->FreeSlot(ptr);
-  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
+  auto* non_full_runs = &non_full_runs_[idx];
   if (run->IsAllFree()) {
     // It has just become completely free. Free the pages of this run.
     std::set<Run*>::iterator pos = non_full_runs->find(run);
@@ -793,9 +793,8 @@
     // already in the non-full run set (i.e., it was full) insert it
     // into the non-full run set.
     if (run != current_runs_[idx]) {
-      std::unordered_set<Run*, hash_run, eq_run>* full_runs =
-          kIsDebugBuild ? &full_runs_[idx] : NULL;
-      std::set<Run*>::iterator pos = non_full_runs->find(run);
+      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
+      auto pos = non_full_runs->find(run);
       if (pos == non_full_runs->end()) {
         DCHECK(run_was_full);
         DCHECK(full_runs->find(run) != full_runs->end());
@@ -1266,9 +1265,8 @@
       }
       // Check if the run should be moved to non_full_runs_ or
       // free_page_runs_.
-      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
-      std::unordered_set<Run*, hash_run, eq_run>* full_runs =
-          kIsDebugBuild ? &full_runs_[idx] : NULL;
+      auto* non_full_runs = &non_full_runs_[idx];
+      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
       if (run->IsAllFree()) {
         // It has just become completely free. Free the pages of the
         // run.
@@ -2056,7 +2054,7 @@
     // in a run set.
     if (!is_current_run) {
       MutexLock mu(self, rosalloc->lock_);
-      std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
+      auto& non_full_runs = rosalloc->non_full_runs_[idx];
       // If it's all free, it must be a free page run rather than a run.
       CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
       if (!IsFull()) {
@@ -2066,7 +2064,7 @@
       } else {
         // If it's full, it must be in the full run set (debug build only).
         if (kIsDebugBuild) {
-          std::unordered_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
+          auto& full_runs = rosalloc->full_runs_[idx];
           CHECK(full_runs.find(this) != full_runs.end())
               << " A full run isn't in the full run set " << Dump();
         }
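
The auto here is not just brevity: the set types in rosalloc.h change to allocation-tracking aliases below, and auto keeps these call sites stable. The surrounding logic maintains the invariant that a run sits in exactly one place: the current-run slot, non_full_runs_[idx], the debug-only full_runs_[idx], or (when fully free) the free page runs. A toy sketch of the full/non-full transition on a free (simplified; the real code also special-cases current and thread-local runs):

    #include <set>
    #include <unordered_set>

    struct Run {
      int free_slots = 0;
      int capacity = 8;
      bool IsFull() const { return free_slots == 0; }
      bool IsAllFree() const { return free_slots == capacity; }
    };

    std::set<Run*> non_full_runs;
    std::unordered_set<Run*> full_runs;  // Debug-only mirror in the real code.

    // Frees one slot and moves the run between the sets to keep the invariant.
    void FreeSlot(Run* run) {
      const bool was_full = run->IsFull();
      ++run->free_slots;
      if (was_full) {
        full_runs.erase(run);
      } else {
        non_full_runs.erase(run);  // Erasing an absent key is a harmless no-op.
      }
      if (!run->IsAllFree()) {
        non_full_runs.insert(run);
      }  // else: the run would go back to the free page runs.
    }

    int main() {
      Run r;  // Starts full: zero free slots.
      full_runs.insert(&r);
      FreeSlot(&r);  // full_runs -> non_full_runs transition.
      return non_full_runs.count(&r) == 1 ? 0 : 1;
    }
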
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index b2a5a3c..2fbd97a 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -26,6 +26,7 @@
 #include <unordered_set>
 #include <vector>
 
+#include "base/allocator.h"
 #include "base/mutex.h"
 #include "base/logging.h"
 #include "globals.h"
@@ -53,7 +54,7 @@
       size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
       size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
       DCHECK_GE(byte_size, static_cast<size_t>(0));
-      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
+      DCHECK_ALIGNED(byte_size, kPageSize);
       return byte_size;
     }
     void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
@@ -403,6 +404,7 @@
 
   // We use thread-local runs for the size Brackets whose indexes
   // are less than this index. We use shared (current) runs for the rest.
+
   static const size_t kNumThreadLocalSizeBrackets = 11;
 
  private:
@@ -423,12 +425,13 @@
 
   // The run sets that hold the runs whose slots are not all
   // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
-  std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
+  AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
   // The run sets that hold the runs whose slots are all full. This is
   // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
-  std::unordered_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
+  std::unordered_set<Run*, hash_run, eq_run, TrackingAllocator<Run*, kAllocatorTagRosAlloc>>
+      full_runs_[kNumOfSizeBrackets];
   // The set of free pages.
-  std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
+  AllocationTrackingSet<FreePageRun*, kAllocatorTagRosAlloc> free_page_runs_ GUARDED_BY(lock_);
   // The dedicated full run, it is always full and shared by all threads when revoking happens.
   // This is an optimization since enables us to avoid a null check for revoked runs.
   static Run* dedicated_full_run_;
@@ -460,7 +463,8 @@
   // The table that indicates the size of free page runs. These sizes
   // are stored here to avoid storing in the free page header and
   // release backing pages.
-  std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
+  std::vector<size_t, TrackingAllocator<size_t, kAllocatorTagRosAlloc>> free_page_run_size_map_
+      GUARDED_BY(lock_);
   // The global lock. Used to guard the page map, the free page set,
   // and the footprint.
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
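
AllocationTrackingSet and TrackingAllocator come from the newly included base/allocator.h and let RosAlloc's internal bookkeeping containers report their own memory use under a tag. A minimal self-contained sketch of how such a tagged allocator plugs into standard containers (not ART's implementation; note the explicit rebind, which allocator_traits cannot synthesize because of the non-type kTag parameter):

    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <set>

    enum AllocatorTag { kAllocatorTagRosAlloc, kAllocatorTagCount };

    // Per-tag byte counters (ART keeps similar counters behind base/allocator.h).
    static std::atomic<size_t> g_tracked_bytes[kAllocatorTagCount];

    template <typename T, AllocatorTag kTag>
    class TrackingAllocator {
     public:
      using value_type = T;
      template <typename U>
      struct rebind { using other = TrackingAllocator<U, kTag>; };

      TrackingAllocator() = default;
      template <typename U>
      TrackingAllocator(const TrackingAllocator<U, kTag>&) {}

      T* allocate(size_t n) {
        g_tracked_bytes[kTag] += n * sizeof(T);
        return static_cast<T*>(::operator new(n * sizeof(T)));
      }
      void deallocate(T* p, size_t n) {
        g_tracked_bytes[kTag] -= n * sizeof(T);
        ::operator delete(p);
      }
    };

    template <typename T, typename U, AllocatorTag kTag>
    bool operator==(const TrackingAllocator<T, kTag>&, const TrackingAllocator<U, kTag>&) {
      return true;
    }
    template <typename T, typename U, AllocatorTag kTag>
    bool operator!=(const TrackingAllocator<T, kTag>& a, const TrackingAllocator<U, kTag>& b) {
      return !(a == b);
    }

    // Mirrors the AllocationTrackingSet alias used for non_full_runs_ above.
    template <typename T, AllocatorTag kTag>
    using AllocationTrackingSet = std::set<T, std::less<T>, TrackingAllocator<T, kTag>>;

    int main() {
      AllocationTrackingSet<int, kAllocatorTagRosAlloc> s;
      s.insert(42);  // Allocates one tree node; the counter observes it.
      assert(g_tracked_bytes[kAllocatorTagRosAlloc].load() > 0);
    }
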
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 4044852..b3bed64 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -547,8 +547,11 @@
 }
 
 void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 95530be..930499a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -374,7 +374,8 @@
     }
     space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
     if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
-                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+                 (kIsDebugBuild && large_object_space != nullptr &&
+                     !large_object_space->Contains(obj)))) {
       LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
       LOG(ERROR) << "Attempting see if it's a bad root";
       mark_sweep_->VerifyRoots();
@@ -481,7 +482,7 @@
   // See if the root is on any space bitmap.
   if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    if (!large_object_space->Contains(root)) {
+    if (large_object_space != nullptr && !large_object_space->Contains(root)) {
       LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
       if (visitor != NULL) {
         LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
@@ -1074,20 +1075,22 @@
   }
   // Handle the large object space.
   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
-  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
-  if (swap_bitmaps) {
-    std::swap(large_live_objects, large_mark_objects);
-  }
-  for (size_t i = 0; i < count; ++i) {
-    Object* obj = objects[i];
-    // Handle large objects.
-    if (kUseThreadLocalAllocationStack && obj == nullptr) {
-      continue;
+  if (large_object_space != nullptr) {
+    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
+    if (swap_bitmaps) {
+      std::swap(large_live_objects, large_mark_objects);
     }
-    if (!large_mark_objects->Test(obj)) {
-      ++freed_los.objects;
-      freed_los.bytes += large_object_space->Free(self, obj);
+    for (size_t i = 0; i < count; ++i) {
+      Object* obj = objects[i];
+      // Handle large objects.
+      if (kUseThreadLocalAllocationStack && obj == nullptr) {
+        continue;
+      }
+      if (!large_mark_objects->Test(obj)) {
+        ++freed_los.objects;
+        freed_los.bytes += large_object_space->Free(self, obj);
+      }
     }
   }
   {
@@ -1125,8 +1128,11 @@
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8fb33ce..c8fa869 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -365,23 +365,23 @@
   }
 
   CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
-  if (is_large_object_space_immune_) {
+  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
+  if (is_large_object_space_immune_ && los != nullptr) {
     TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
     DCHECK(collect_from_space_only_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
     // be newly added to the live set above in MarkAllocStackAsLive().
-    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+    los->CopyLiveToMarked();
 
     // When the large object space is immune, we need to scan the
     // large object space as roots as they contain references to their
     // classes (primitive array classes) that could move though they
     // don't contain any other references.
-    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
     SemiSpaceScanObjectVisitor visitor(this);
-    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
-                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
+    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
+                                        reinterpret_cast<uintptr_t>(los->End()),
                                         visitor);
   }
   // Recursively process the mark stack.
@@ -655,8 +655,11 @@
 
 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
   DCHECK(!is_large_object_space_immune_);
-  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
@@ -751,6 +754,7 @@
   from_space_ = nullptr;
   CHECK(mark_stack_->IsEmpty());
   mark_stack_->Reset();
+  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
   if (generational_) {
     // Decide whether to do a whole heap collection or a bump pointer
     // only space collection at the next collection by updating
@@ -762,7 +766,7 @@
       bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
       bool bytes_promoted_threshold_exceeded =
           bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
-      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
       uint64_t last_los_bytes_allocated =
           large_object_bytes_allocated_at_last_whole_heap_collection_;
       bool large_object_bytes_threshold_exceeded =
@@ -775,7 +779,7 @@
       // Reset the counters.
       bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
       large_object_bytes_allocated_at_last_whole_heap_collection_ =
-          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+          los != nullptr ? los->GetBytesAllocated() : 0U;
       collect_from_space_only_ = true;
     }
   }
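
With the large object space now optional (see the heap.cc changes below), the collectors consistently use one of two idioms: guard the whole operation behind a null check, or fold the missing space into a neutral value, as with the 0U fallback for GetBytesAllocated above. A compact illustration with toy types:

    #include <cstdint>

    struct LargeObjectSpace {
      uint64_t GetBytesAllocated() const { return bytes_allocated_; }
      uint64_t Sweep(bool /*swap_bitmaps*/) { return 0; }
      uint64_t bytes_allocated_ = 0;
    };

    // Idiom 1: guard the whole operation when there is no LOS.
    void SweepLargeObjects(LargeObjectSpace* los, bool swap_bitmaps) {
      if (los != nullptr) {
        los->Sweep(swap_bitmaps);
      }
    }

    // Idiom 2: fold the missing space into a neutral value.
    uint64_t CurrentLosBytesAllocated(const LargeObjectSpace* los) {
      return los != nullptr ? los->GetBytesAllocated() : 0U;
    }

    int main() {
      SweepLargeObjects(nullptr, true);  // Safe no-op with the LOS disabled.
      return static_cast<int>(CurrentLosBytesAllocated(nullptr));
    }
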
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 5a58446..4ed6abc 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -16,7 +16,7 @@
 
 #include "gc/heap.h"
 #include "gc/space/large_object_space.h"
-#include "gc/space/space.h"
+#include "gc/space/space-inl.h"
 #include "sticky_mark_sweep.h"
 #include "thread-inl.h"
 
@@ -32,7 +32,6 @@
 
 void StickyMarkSweep::BindBitmaps() {
   PartialMarkSweep::BindBitmaps();
-
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
   // know what was allocated since the last GC. A side-effect of binding the allocation space mark
@@ -44,7 +43,10 @@
       space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
     }
   }
-  GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+  for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+    CHECK(space->IsLargeObjectSpace());
+    space->AsLargeObjectSpace()->CopyLiveToMarked();
+  }
 }
 
 void StickyMarkSweep::MarkReachableObjects() {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index d1fb600..c971449 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -140,7 +140,7 @@
   }
   if (kInstrumented) {
     if (Dbg::IsAllocTrackingEnabled()) {
-      Dbg::RecordAllocation(klass, bytes_allocated);
+      Dbg::RecordAllocation(self, klass, bytes_allocated);
     }
   } else {
     DCHECK(!Dbg::IsAllocTrackingEnabled());
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2048160..b744a62 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -83,8 +83,6 @@
 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
 // threads (lower pauses, use less memory bandwidth).
 static constexpr double kStickyGcThroughputAdjustment = 1.0;
-// Whether or not we use the free list large object space.
-static constexpr bool kUseFreeListSpaceForLOS = false;
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 // How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -102,8 +100,9 @@
            double target_utilization, double foreground_heap_growth_multiplier,
            size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
            const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
-           CollectorType background_collector_type, size_t parallel_gc_threads,
-           size_t conc_gc_threads, bool low_memory_mode,
+           CollectorType background_collector_type,
+           space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
+           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
            size_t long_pause_log_threshold, size_t long_gc_log_threshold,
            bool ignore_max_footprint, bool use_tlab,
            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
@@ -130,7 +129,7 @@
       ignore_max_footprint_(ignore_max_footprint),
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
       zygote_space_(nullptr),
-      large_object_threshold_(kDefaultLargeObjectThreshold),  // Starts out disabled.
+      large_object_threshold_(large_object_threshold),
       collector_type_running_(kCollectorTypeNone),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -333,13 +332,21 @@
   CHECK(non_moving_space_ != nullptr);
   CHECK(!non_moving_space_->CanMoveObjects());
   // Allocate the large object space.
-  if (kUseFreeListSpaceForLOS) {
-    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
+  if (large_object_space_type == space::kLargeObjectSpaceTypeFreeList) {
+    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
+                                                       capacity_);
+    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
+  } else if (large_object_space_type == space::kLargeObjectSpaceTypeMap) {
+    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
+    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
   } else {
-    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
+    // Disable the large object space by making the cutoff excessively large.
+    large_object_threshold_ = std::numeric_limits<size_t>::max();
+    large_object_space_ = nullptr;
   }
-  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
-  AddSpace(large_object_space_);
+  if (large_object_space_ != nullptr) {
+    AddSpace(large_object_space_);
+  }
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
   CHECK(!continuous_spaces_.empty());
   // Relies on the spaces being sorted.
@@ -419,7 +426,7 @@
     }
   }
   if (running_on_valgrind_) {
-    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
   }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() exiting";
@@ -707,7 +714,8 @@
   CHECK(space1 != nullptr);
   CHECK(space2 != nullptr);
   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
-                 large_object_space_->GetLiveBitmap(), stack);
+                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
+                 stack);
 }
 
 void Heap::DeleteThreadPool() {
@@ -997,7 +1005,10 @@
       total_alloc_space_size += malloc_space->Size();
     }
   }
-  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
+  total_alloc_space_allocated = GetBytesAllocated();
+  if (large_object_space_ != nullptr) {
+    total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
+  }
   if (bump_pointer_space_ != nullptr) {
     total_alloc_space_allocated -= bump_pointer_space_->Size();
   }
@@ -2013,6 +2024,7 @@
       } else if (bitmap2->HasAddress(obj)) {
         bitmap2->Set(obj);
       } else {
+        DCHECK(large_objects != nullptr);
         large_objects->Set(obj);
       }
     }
@@ -2092,6 +2104,7 @@
   // Back to back GCs can cause 0 ms of wait time in between GC invocations.
   if (LIKELY(ms_delta != 0)) {
     allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
+    ATRACE_INT("Allocation rate KB/s", allocation_rate_ / KB);
     VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
   }
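
Large object space selection is now a runtime choice: the kUseFreeListSpaceForLOS constant is gone, and a space::LargeObjectSpaceType argument picks a free-list space, a map space, or no LOS at all, with "disabled" implemented by raising large_object_threshold_ so that no allocation can ever qualify. A condensed sketch of that selection (the kLargeObjectSpaceTypeDisabled name and the factory bodies are assumed stand-ins):

    #include <cstddef>
    #include <limits>

    namespace space {
    enum LargeObjectSpaceType {
      kLargeObjectSpaceTypeDisabled,  // Name assumed; only FreeList/Map appear above.
      kLargeObjectSpaceTypeMap,
      kLargeObjectSpaceTypeFreeList,
    };
    struct LargeObjectSpace {};  // Stand-in for the real space classes.
    }  // namespace space

    // Sketch: choose the LOS implementation; "disabled" also disables the
    // size-based routing by making the cutoff unreachable.
    space::LargeObjectSpace* CreateLargeObjectSpace(space::LargeObjectSpaceType type,
                                                    size_t* large_object_threshold) {
      switch (type) {
        case space::kLargeObjectSpaceTypeFreeList:  // FreeListSpace::Create in ART.
        case space::kLargeObjectSpaceTypeMap:       // LargeObjectMapSpace::Create.
          return new space::LargeObjectSpace();
        default:
          *large_object_threshold = std::numeric_limits<size_t>::max();
          return nullptr;  // Callers must null-check, as the collectors now do.
      }
    }

    int main() {
      size_t threshold = 3 * 4096;  // kDefaultLargeObjectThreshold above.
      space::LargeObjectSpace* los =
          CreateLargeObjectSpace(space::kLargeObjectSpaceTypeDisabled, &threshold);
      return (los == nullptr && threshold == std::numeric_limits<size_t>::max()) ? 0 : 1;
    }
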
 
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9742277..faaea40 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -30,6 +30,7 @@
 #include "gc/collector/garbage_collector.h"
 #include "gc/collector/gc_type.h"
 #include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
 #include "globals.h"
 #include "gtest/gtest.h"
 #include "instruction_set.h"
@@ -129,9 +130,6 @@
  public:
   // If true, measure the total allocation time.
   static constexpr bool kMeasureAllocationTime = false;
-  // Primitive arrays larger than this size are put in the large object space.
-  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
-
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -143,7 +141,17 @@
   static constexpr size_t kDefaultTLABSize = 256 * KB;
   static constexpr double kDefaultTargetUtilization = 0.5;
   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
-
+  // Primitive arrays larger than this size are put in the large object space.
+  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+  // Whether or not we use the free list large object space by default. Only use it when
+  // USE_ART_LOW_4G_ALLOCATOR is set, since then MemMap::MapAnonymous must use the slow msync loop.
+#if USE_ART_LOW_4G_ALLOCATOR
+  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
+      space::kLargeObjectSpaceTypeFreeList;
+#else
+  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
+      space::kLargeObjectSpaceTypeMap;
+#endif
   // Used so that we don't overflow the allocation time atomic integer.
   static constexpr size_t kTimeAdjust = 1024;
 
@@ -162,6 +170,7 @@
                 const std::string& original_image_file_name,
                 InstructionSet image_instruction_set,
                 CollectorType foreground_collector_type, CollectorType background_collector_type,
+                space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
                 size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                 size_t long_pause_threshold, size_t long_gc_threshold,
                 bool ignore_max_footprint, bool use_tlab,
@@ -460,7 +469,7 @@
                                                               bool fail_ok) const;
   space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
 
-  void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpForSigQuit(std::ostream& os);
 
   // Do a pending heap transition or trim.
   void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index d3641d1..bfaa2bb 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -30,8 +30,13 @@
 
 ReferenceProcessor::ReferenceProcessor()
     : process_references_args_(nullptr, nullptr, nullptr),
-      preserving_references_(false), lock_("reference processor lock", kReferenceProcessorLock),
-      condition_("reference processor condition", lock_) {
+      preserving_references_(false),
+      condition_("reference processor condition", *Locks::reference_processor_lock_),
+      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
+      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
+      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
+      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
+      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
 }
 
 void ReferenceProcessor::EnableSlowPath() {
@@ -50,7 +55,7 @@
   if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
     return referent;
   }
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::reference_processor_lock_);
   while (SlowPathEnabled()) {
     mirror::HeapReference<mirror::Object>* const referent_addr =
         reference->GetReferentReferenceAddr();
@@ -93,12 +98,12 @@
 }
 
 void ReferenceProcessor::StartPreservingReferences(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::reference_processor_lock_);
   preserving_references_ = true;
 }
 
 void ReferenceProcessor::StopPreservingReferences(Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::reference_processor_lock_);
   preserving_references_ = false;
   // We are done preserving references, some people who are blocked may see a marked referent.
   condition_.Broadcast(self);
@@ -114,7 +119,7 @@
   TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
   Thread* self = Thread::Current();
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::reference_processor_lock_);
     process_references_args_.is_marked_callback_ = is_marked_callback;
     process_references_args_.mark_callback_ = mark_object_callback;
     process_references_args_.arg_ = arg;
@@ -127,7 +132,6 @@
     if (concurrent) {
       StartPreservingReferences(self);
     }
-
     soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                 &process_references_args_);
     process_mark_stack_callback(arg);
@@ -163,7 +167,7 @@
   DCHECK(finalizer_reference_queue_.IsEmpty());
   DCHECK(phantom_reference_queue_.IsEmpty());
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::reference_processor_lock_);
     // Need to always do this since the next GC may be concurrent. Doing this for only concurrent
     // could result in a stale is_marked_callback_ being called before the reference processing
     // starts since there is a small window of time where slow_path_enabled_ is enabled but the
@@ -225,5 +229,31 @@
   }
 }
 
+bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::reference_processor_lock_);
+  // Wait until we are done processing references.
+  while (SlowPathEnabled()) {
+    condition_.Wait(self);
+  }
+  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
+  // enqueued if we just finished processing references. Otherwise, we may be in the middle of the
+  // main GC phase. Since we are holding the reference processor lock, reference processing cannot
+  // begin. The GC could have just enqueued the reference on one of the internal
+  // GC queues, but holding the finalizer_reference_queue_ lock also prevents this
+  // race.
+  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
+  if (!reference->IsEnqueued()) {
+    CHECK(reference->IsFinalizerReferenceInstance());
+    if (Runtime::Current()->IsActiveTransaction()) {
+      reference->SetPendingNext<true>(reference);
+    } else {
+      reference->SetPendingNext<false>(reference);
+    }
+    return true;
+  }
+  return false;
+}
+
 }  // namespace gc
 }  // namespace art
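
MakeCircularListIfUnenqueued leans on the pending-next encoding used by the reference queues: a null pendingNext means "not enqueued", and queues are circular lists threaded through that field, so pointing a reference at itself marks it enqueued as a one-element list without touching any real queue. A toy model of the encoding (plain pointers; the real code goes through SetPendingNext with transaction support):

    #include <cassert>

    struct Reference {
      Reference* pending_next = nullptr;  // null <=> not enqueued.
      bool IsEnqueued() const { return pending_next != nullptr; }
    };

    // Returns true if it claimed the reference by making it a singleton
    // circular list, false if some queue already owns it.
    bool MakeCircularListIfUnenqueued(Reference* ref) {
      if (ref->IsEnqueued()) {
        return false;
      }
      ref->pending_next = ref;  // One-element circular list.
      return true;
    }

    int main() {
      Reference r;
      assert(MakeCircularListIfUnenqueued(&r));   // Claimed it.
      assert(!MakeCircularListIfUnenqueued(&r));  // Now counts as enqueued.
    }
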
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index 7274457..5eb095b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -28,6 +28,7 @@
 class TimingLogger;
 
 namespace mirror {
+class FinalizerReference;
 class Object;
 class Reference;
 }  // namespace mirror
@@ -48,20 +49,25 @@
                          ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      LOCKS_EXCLUDED(lock_);
+      LOCKS_EXCLUDED(Locks::reference_processor_lock_);
   // The slow path bool is contained in the reference class object; it can only be set once.
   // Only allow setting this with mutators suspended so that we can avoid using a lock in the
   // GetReferent fast path as an optimization.
   void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Decode the referent, may block if references are being processed.
   mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
   void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_);
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                               IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void UpdateRoots(IsMarkedCallback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  // Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
+  bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::reference_processor_lock_,
+                     Locks::reference_queue_finalizer_references_lock_);
 
  private:
   class ProcessReferencesArgs {
@@ -78,23 +84,21 @@
   };
   bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Called by ProcessReferences.
-  void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_)
+  void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // If we are preserving references it means that some dead objects may become live; we use start
   // and stop preserving to block mutators using GetReferent from getting access to these
   // referents.
-  void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
-  void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+  void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
+  void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_);
   // Process args, used by the GetReferent to return referents which are already marked.
-  ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
+  ProcessReferencesArgs process_references_args_ GUARDED_BY(Locks::reference_processor_lock_);
   // Boolean for whether or not we are preserving references (either soft references or finalizers).
   // If this is true, then we cannot return a referent (see comment in GetReferent).
-  bool preserving_references_ GUARDED_BY(lock_);
-  // Lock that guards the reference processing.
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  bool preserving_references_ GUARDED_BY(Locks::reference_processor_lock_);
   // Condition that people wait on if they attempt to get the referent of a reference while
   // processing is in progress.
-  ConditionVariable condition_ GUARDED_BY(lock_);
+  ConditionVariable condition_ GUARDED_BY(Locks::reference_processor_lock_);
   // Reference queues used by the GC.
   ReferenceQueue soft_reference_queue_;
   ReferenceQueue weak_reference_queue_;
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index c3931e8..4003524 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -25,13 +25,12 @@
 namespace art {
 namespace gc {
 
-ReferenceQueue::ReferenceQueue()
-    : lock_("reference queue lock"), list_(nullptr) {
+ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
 }
 
 void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) {
   DCHECK(ref != NULL);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *lock_);
   if (!ref->IsEnqueued()) {
     EnqueuePendingReference(ref);
   }
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index cd814bb..dbf4abc 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -44,7 +44,7 @@
 // java.lang.ref.Reference objects.
 class ReferenceQueue {
  public:
-  explicit ReferenceQueue();
+  explicit ReferenceQueue(Mutex* lock);
   // Enqueue a reference if it is not already enqueued. Thread safe to call from multiple threads
   // since it uses a lock to avoid a race between checking for the references presence and adding
   // it.
@@ -90,7 +90,7 @@
  private:
   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
   // calling AtomicEnqueueIfNotEnqueued.
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  Mutex* lock_;
   // The actual reference list. Only a root for the mark compact GC since it will be null for other
   // GC types.
   mirror::Reference* list_;
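
Handing ReferenceQueue a Mutex* instead of letting it own one is what allows all the queues to share the newly declared global Locks::reference_queue_*_lock_ mutexes, whose fixed lock levels make the LOCKS_EXCLUDED/GUARDED_BY annotations in reference_processor.h checkable. A reduced sketch of the injection (std::mutex standing in for ART's leveled Mutex):

    #include <mutex>

    // Stand-in for one of ART's globally declared, statically ordered locks.
    static std::mutex g_finalizer_queue_lock;

    class ReferenceQueue {
     public:
      explicit ReferenceQueue(std::mutex* lock) : lock_(lock) {}

      void AtomicEnqueueIfNotEnqueued(void* ref) {
        std::lock_guard<std::mutex> guard(*lock_);  // Shared lock, not owned.
        if (list_ == nullptr) {
          list_ = ref;  // Toy stand-in for the real circular-list enqueue.
        }
      }

     private:
      std::mutex* const lock_;  // Injected; several queues may share one lock.
      void* list_ = nullptr;
    };

    int main() {
      ReferenceQueue finalizer_queue(&g_finalizer_queue_lock);
      int obj;
      finalizer_queue.AtomicEnqueueIfNotEnqueued(&obj);
    }
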
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 41c34c9..353d00c 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -125,7 +125,7 @@
   }
   // We should clean up so we are more likely to have room for the image.
   if (Runtime::Current()->IsZygote()) {
-    LOG(INFO) << "Pruning dalvik-cache since we are relocating an image and will need to recompile";
+    LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
     PruneDexCache(image_isa);
   }
 
@@ -177,7 +177,8 @@
                                    bool* has_system,
                                    std::string* cache_filename,
                                    bool* dalvik_cache_exists,
-                                   bool* has_cache) {
+                                   bool* has_cache,
+                                   bool* is_global_cache) {
   *has_system = false;
   *has_cache = false;
   // image_location = /system/framework/boot.art
@@ -192,7 +193,7 @@
   *dalvik_cache_exists = false;
   std::string dalvik_cache;
   GetDalvikCache(GetInstructionSetString(image_isa), true, &dalvik_cache,
-                 &have_android_data, dalvik_cache_exists);
+                 &have_android_data, dalvik_cache_exists, is_global_cache);
 
   if (have_android_data && *dalvik_cache_exists) {
     // Always set output location even if it does not exist,
@@ -285,8 +286,9 @@
   std::string cache_filename;
   bool has_cache = false;
   bool dalvik_cache_exists = false;
+  bool is_global_cache = false;
   if (FindImageFilename(image_location, image_isa, &system_filename, &has_system,
-                        &cache_filename, &dalvik_cache_exists, &has_cache)) {
+                        &cache_filename, &dalvik_cache_exists, &has_cache, &is_global_cache)) {
     if (Runtime::Current()->ShouldRelocate()) {
       if (has_system && has_cache) {
         std::unique_ptr<ImageHeader> sys_hdr(new ImageHeader);
@@ -344,6 +346,21 @@
       && hdr_a.GetOatChecksum() == hdr_b.GetOatChecksum();
 }
 
+static bool ImageCreationAllowed(bool is_global_cache, std::string* error_msg) {
+  // Anyone can write into a "local" cache.
+  if (!is_global_cache) {
+    return true;
+  }
+
+  // Only the zygote is allowed to create the global boot image.
+  if (Runtime::Current()->IsZygote()) {
+    return true;
+  }
+
+  *error_msg = "Only the zygote can create the global boot image.";
+  return false;
+}
+
 ImageSpace* ImageSpace::Create(const char* image_location,
                                const InstructionSet image_isa,
                                std::string* error_msg) {
@@ -352,9 +369,10 @@
   std::string cache_filename;
   bool has_cache = false;
   bool dalvik_cache_exists = false;
+  bool is_global_cache = true;
   const bool found_image = FindImageFilename(image_location, image_isa, &system_filename,
                                              &has_system, &cache_filename, &dalvik_cache_exists,
-                                             &has_cache);
+                                             &has_cache, &is_global_cache);
 
   ImageSpace* space;
   bool relocate = Runtime::Current()->ShouldRelocate();
@@ -377,18 +395,27 @@
           relocated_version_used = true;
         } else {
           // We cannot have a relocated version, Relocate the system one and use it.
-          if (can_compile && RelocateImage(image_location, cache_filename.c_str(), image_isa,
-                                           error_msg)) {
+
+          std::string reason;
+          bool success;
+
+          // Check whether we are allowed to relocate.
+          if (!can_compile) {
+            reason = "Image dex2oat disabled by -Xnoimage-dex2oat.";
+            success = false;
+          } else if (!ImageCreationAllowed(is_global_cache, &reason)) {
+            // Whether we can write to the cache.
+            success = false;
+          } else {
+            // Try to relocate.
+            success = RelocateImage(image_location, cache_filename.c_str(), image_isa, &reason);
+          }
+
+          if (success) {
             relocated_version_used = true;
             image_filename = &cache_filename;
           } else {
-            std::string reason;
-            if (can_compile) {
-              reason = StringPrintf(": %s", error_msg->c_str());
-            } else {
-              reason = " because image dex2oat is disabled.";
-            }
-            *error_msg = StringPrintf("Unable to relocate image '%s' from '%s' to '%s'%s",
+            *error_msg = StringPrintf("Unable to relocate image '%s' from '%s' to '%s': %s",
                                       image_location, system_filename.c_str(),
                                       cache_filename.c_str(), reason.c_str());
             return nullptr;
@@ -460,6 +487,8 @@
   } else if (!dalvik_cache_exists) {
     *error_msg = StringPrintf("No place to put generated image.");
     return nullptr;
+  } else if (!ImageCreationAllowed(is_global_cache, error_msg)) {
+    return nullptr;
   } else if (!GenerateImage(cache_filename, image_isa, error_msg)) {
     *error_msg = StringPrintf("Failed to generate image '%s': %s",
                               cache_filename.c_str(), error_msg->c_str());
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 28ebca6..2586ece 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -110,7 +110,8 @@
                                 bool* has_system,
                                 std::string* data_location,
                                 bool* dalvik_cache_exists,
-                                bool* has_data);
+                                bool* has_data,
+                                bool* is_global_cache);
 
  private:
   // Tries to initialize an ImageSpace from the given image path,
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index d5a03c6..dad5855 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -120,7 +120,7 @@
   mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
   large_objects_.push_back(obj);
   mem_maps_.Put(obj, mem_map);
-  size_t allocation_size = mem_map->Size();
+  const size_t allocation_size = mem_map->BaseSize();
   DCHECK(bytes_allocated != nullptr);
   begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
   byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
@@ -145,8 +145,9 @@
     Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
     LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
   }
-  DCHECK_GE(num_bytes_allocated_, found->second->Size());
-  size_t allocation_size = found->second->Size();
+  const size_t map_size = found->second->BaseSize();
+  DCHECK_GE(num_bytes_allocated_, map_size);
+  size_t allocation_size = map_size;
   num_bytes_allocated_ -= allocation_size;
   --num_objects_allocated_;
   delete found->second;
@@ -158,7 +159,7 @@
   MutexLock mu(Thread::Current(), lock_);
   auto found = mem_maps_.find(obj);
   CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
-  return found->second->Size();
+  return found->second->BaseSize();
 }
 
 size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
@@ -192,6 +193,96 @@
   }
 }
 
+// Keeps track of allocation sizes + whether or not the previous allocation is free.
+// Used to coalesce free blocks and find the best fit block for an allocation.
+class AllocationInfo {
+ public:
+  AllocationInfo() : prev_free_(0), alloc_size_(0) {
+  }
+  // Return the number of pages that the allocation info covers.
+  size_t AlignSize() const {
+    return alloc_size_ & ~kFlagFree;
+  }
+  // Returns the allocation size in bytes.
+  size_t ByteSize() const {
+    return AlignSize() * FreeListSpace::kAlignment;
+  }
+  // Updates the allocation size and whether or not it is free.
+  void SetByteSize(size_t size, bool free) {
+    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
+    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0U);
+  }
+  bool IsFree() const {
+    return (alloc_size_ & kFlagFree) != 0;
+  }
+  // Finds and returns the next non-free allocation info after this one.
+  AllocationInfo* GetNextInfo() {
+    return this + AlignSize();
+  }
+  const AllocationInfo* GetNextInfo() const {
+    return this + AlignSize();
+  }
+  // Returns the previous free allocation info by using the prev_free_ member to figure out
+  // where it is. This is only used for coalescing so we only need to be able to do it if the
+  // previous allocation info is free.
+  AllocationInfo* GetPrevFreeInfo() {
+    DCHECK_NE(prev_free_, 0U);
+    return this - prev_free_;
+  }
+  // Returns the address of the object associated with this allocation info.
+  mirror::Object* GetObjectAddress() {
+    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
+  }
+  // Returns how many kAlignment units of free space there are before this block.
+  size_t GetPrevFree() const {
+    return prev_free_;
+  }
+  // Returns how many free bytes there are before this block.
+  size_t GetPrevFreeBytes() const {
+    return GetPrevFree() * FreeListSpace::kAlignment;
+  }
+  // Update the size of the free block prior to the allocation.
+  void SetPrevFreeBytes(size_t bytes) {
+    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
+    prev_free_ = bytes / FreeListSpace::kAlignment;
+  }
+
+ private:
+  // Used to implement best fit object allocation. Each allocation has an AllocationInfo which
+  // contains the size of the previous free block preceding it. Implemented in such a way that we
+  // can also find the iterator for any allocation info pointer.
+  static constexpr uint32_t kFlagFree = 0x8000000;
+  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
+  // allocation before us is not free.
+  // These variables are undefined in the middle of allocations / free blocks.
+  uint32_t prev_free_;
+  // Allocation size of this object in kAlignment as the unit.
+  uint32_t alloc_size_;
+};
+
+size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
+  DCHECK_GE(info, allocation_info_);
+  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
+  return info - allocation_info_;
+}
+
+AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
+  return &allocation_info_[GetSlotIndexForAddress(address)];
+}
+
+const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
+  return &allocation_info_[GetSlotIndexForAddress(address)];
+}
+
+inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
+                                                      const AllocationInfo* b) const {
+  if (a->GetPrevFree() < b->GetPrevFree()) return true;
+  if (a->GetPrevFree() > b->GetPrevFree()) return false;
+  if (a->AlignSize() < b->AlignSize()) return true;
+  if (a->AlignSize() > b->AlignSize()) return false;
+  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
+}
+
 FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
   CHECK_EQ(size % kAlignment, 0U);
   std::string error_msg;
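
The replacement for AllocationHeader is a side table: one AllocationInfo per kAlignment slot, kept in its own anonymous map, so no metadata precedes the object (which is why usable_size no longer subtracts a header size). Address and info convert both ways by index arithmetic; a compilable sketch of that mapping (names and the kAlignment value are assumptions):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct AllocationInfo { uint32_t prev_free = 0; uint32_t alloc_size = 0; };

    constexpr size_t kAlignment = 4096;  // kPageSize in the real FreeListSpace.

    class SideTable {
     public:
      SideTable(uintptr_t space_begin, size_t capacity)
          : begin_(space_begin), infos_(capacity / kAlignment) {}

      // address -> slot: each kAlignment unit of the space owns one info.
      AllocationInfo* InfoForAddress(uintptr_t addr) {
        return &infos_[(addr - begin_) / kAlignment];
      }
      // slot -> address: invert the same arithmetic.
      uintptr_t AddressForInfo(const AllocationInfo* info) {
        return begin_ + (info - infos_.data()) * kAlignment;
      }

     private:
      uintptr_t begin_;
      std::vector<AllocationInfo> infos_;
    };

    int main() {
      SideTable table(0x100000, 64 * kAlignment);
      uintptr_t obj = 0x100000 + 5 * kAlignment;
      assert(table.AddressForInfo(table.InfoForAddress(obj)) == obj);
    }
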
@@ -205,112 +296,113 @@
     : LargeObjectSpace(name, begin, end),
       mem_map_(mem_map),
       lock_("free list space lock", kAllocSpaceLock) {
-  free_end_ = end - begin;
+  const size_t space_capacity = end - begin;
+  free_end_ = space_capacity;
+  CHECK_ALIGNED(space_capacity, kAlignment);
+  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
+  std::string error_msg;
+  allocation_info_map_.reset(MemMap::MapAnonymous("large object free list space allocation info map",
+                                                  nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
+                                                  false, &error_msg));
+  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map"
+      << error_msg;
+  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
 }
 
 FreeListSpace::~FreeListSpace() {}
 
 void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
   MutexLock mu(Thread::Current(), lock_);
-  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
-  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
-  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
-    cur_header = cur_header->GetNextNonFree();
-    size_t alloc_size = cur_header->AllocationSize();
-    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
-    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
-    callback(byte_start, byte_end, alloc_size, arg);
-    callback(NULL, NULL, 0, arg);
-    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
+  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  AllocationInfo* cur_info = &allocation_info_[0];
+  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
+  while (cur_info < end_info) {
+    if (!cur_info->IsFree()) {
+      size_t alloc_size = cur_info->ByteSize();
+      byte* byte_start = reinterpret_cast<byte*>(GetAddressForAllocationInfo(cur_info));
+      byte* byte_end = byte_start + alloc_size;
+      callback(byte_start, byte_end, alloc_size, arg);
+      callback(nullptr, nullptr, 0, arg);
+    }
+    cur_info = cur_info->GetNextInfo();
   }
+  CHECK_EQ(cur_info, end_info);
 }
 
-void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
-  CHECK(!header->IsFree());
-  CHECK_GT(header->GetPrevFree(), size_t(0));
-  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
-  CHECK(found != free_blocks_.end());
-  CHECK_EQ(*found, header);
-  free_blocks_.erase(found);
-}
-
-FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
-  DCHECK(Contains(obj));
-  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
-      sizeof(AllocationHeader));
-}
-
-FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
-  // We know that there has to be at least one object after us or else we would have
-  // coalesced with the free end region. May be worth investigating a better way to do this
-  // as it may be expensive for large allocations.
-  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
-    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
-    if (!cur->IsFree()) return cur;
-  }
+void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
+  CHECK_GT(info->GetPrevFree(), 0U);
+  auto it = free_blocks_.lower_bound(info);
+  CHECK(it != free_blocks_.end());
+  CHECK_EQ(*it, info);
+  free_blocks_.erase(it);
 }
 
 size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
   MutexLock mu(self, lock_);
-  DCHECK(Contains(obj));
-  AllocationHeader* header = GetAllocationHeader(obj);
-  CHECK(IsAligned<kAlignment>(header));
-  size_t allocation_size = header->AllocationSize();
-  DCHECK_GT(allocation_size, size_t(0));
-  DCHECK(IsAligned<kAlignment>(allocation_size));
+  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
+                        << reinterpret_cast<void*>(End());
+  DCHECK_ALIGNED(obj, kAlignment);
+  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
+  DCHECK(!info->IsFree());
+  const size_t allocation_size = info->ByteSize();
+  DCHECK_GT(allocation_size, 0U);
+  DCHECK_ALIGNED(allocation_size, kAlignment);
+  info->SetByteSize(allocation_size, true);  // Mark as free.
   // Look at the next chunk.
-  AllocationHeader* next_header = header->GetNextAllocationHeader();
+  AllocationInfo* next_info = info->GetNextInfo();
   // Calculate the start of the end free block.
   uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
-  size_t header_prev_free = header->GetPrevFree();
+  size_t prev_free_bytes = info->GetPrevFreeBytes();
   size_t new_free_size = allocation_size;
-  if (header_prev_free) {
-    new_free_size += header_prev_free;
-    RemoveFreePrev(header);
+  if (prev_free_bytes != 0) {
+    // Coalesce with previous free chunk.
+    new_free_size += prev_free_bytes;
+    RemoveFreePrev(info);
+    info = info->GetPrevFreeInfo();
+    // The previous allocation info must not be free since we are supposed to always coalesce.
+    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
   }
-  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
+  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
+  if (next_addr >= free_end_start) {
     // Easy case, the next chunk is the end free region.
-    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
+    CHECK_EQ(next_addr, free_end_start);
     free_end_ += new_free_size;
   } else {
-    AllocationHeader* new_free_header;
-    DCHECK(IsAligned<kAlignment>(next_header));
-    if (next_header->IsFree()) {
-      // Find the next chunk by reading each page until we hit one with non-zero chunk.
-      AllocationHeader* next_next_header = next_header->GetNextNonFree();
-      DCHECK(IsAligned<kAlignment>(next_next_header));
-      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
-      RemoveFreePrev(next_next_header);
-      new_free_header = next_next_header;
-      new_free_size += next_next_header->GetPrevFree();
+    AllocationInfo* new_free_info;
+    if (next_info->IsFree()) {
+      AllocationInfo* next_next_info = next_info->GetNextInfo();
+      // Next next info can't be free since we always coalesce.
+      DCHECK(!next_next_info->IsFree());
+      DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
+      new_free_info = next_next_info;
+      new_free_size += next_next_info->GetPrevFreeBytes();
+      RemoveFreePrev(next_next_info);
     } else {
-      new_free_header = next_header;
+      new_free_info = next_info;
     }
-    new_free_header->prev_free_ = new_free_size;
-    free_blocks_.insert(new_free_header);
+    new_free_info->SetPrevFreeBytes(new_free_size);
+    free_blocks_.insert(new_free_info);
+    info->SetByteSize(new_free_size, true);
+    DCHECK_EQ(info->GetNextInfo(), new_free_info);
   }
   --num_objects_allocated_;
   DCHECK_LE(allocation_size, num_bytes_allocated_);
   num_bytes_allocated_ -= allocation_size;
-  madvise(header, allocation_size, MADV_DONTNEED);
+  madvise(obj, allocation_size, MADV_DONTNEED);
   if (kIsDebugBuild) {
     // Can't disallow reads since we use them to find next chunks during coalescing.
-    mprotect(header, allocation_size, PROT_READ);
+    mprotect(obj, allocation_size, PROT_READ);
   }
   return allocation_size;
 }
 
-bool FreeListSpace::Contains(const mirror::Object* obj) const {
-  return mem_map_->HasAddress(obj);
-}
-
 size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
-  AllocationHeader* header = GetAllocationHeader(obj);
   DCHECK(Contains(obj));
-  DCHECK(!header->IsFree());
-  size_t alloc_size = header->AllocationSize();
+  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
+  DCHECK(!info->IsFree());
+  size_t alloc_size = info->ByteSize();
   if (usable_size != nullptr) {
-    *usable_size = alloc_size - sizeof(AllocationHeader);
+    *usable_size = alloc_size;
   }
   return alloc_size;
 }
@@ -318,56 +410,56 @@
 mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                      size_t* usable_size) {
   MutexLock mu(self, lock_);
-  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
-  AllocationHeader temp;
-  temp.SetPrevFree(allocation_size);
-  temp.SetAllocationSize(0);
-  AllocationHeader* new_header;
+  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
+  AllocationInfo temp_info;
+  temp_info.SetPrevFreeBytes(allocation_size);
+  temp_info.SetByteSize(0, false);
+  AllocationInfo* new_info;
   // Find the smallest chunk at least num_bytes in size.
-  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
-  if (found != free_blocks_.end()) {
-    AllocationHeader* header = *found;
-    free_blocks_.erase(found);
-
-    // Fit our object in the previous free header space.
-    new_header = header->GetPrevFreeAllocationHeader();
-
-    // Remove the newly allocated block from the header and update the prev_free_.
-    header->prev_free_ -= allocation_size;
-    if (header->prev_free_ > 0) {
+  auto it = free_blocks_.lower_bound(&temp_info);
+  if (it != free_blocks_.end()) {
+    AllocationInfo* info = *it;
+    free_blocks_.erase(it);
+    // Fit our object in the previous allocation info's free space.
+    new_info = info->GetPrevFreeInfo();
+    // Remove the newly allocated block from the info and update the prev_free_.
+    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
+    if (info->GetPrevFreeBytes() > 0) {
+      AllocationInfo* new_free = info - info->GetPrevFree();
+      new_free->SetPrevFreeBytes(0);
+      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
       // If there is remaining space, insert back into the free set.
-      free_blocks_.insert(header);
+      free_blocks_.insert(info);
     }
   } else {
     // Try to steal some memory from the free space at the end of the space.
     if (LIKELY(free_end_ >= allocation_size)) {
       // Fit our object at the start of the end free block.
-      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
+      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
       free_end_ -= allocation_size;
     } else {
       return nullptr;
     }
   }
-
   DCHECK(bytes_allocated != nullptr);
   *bytes_allocated = allocation_size;
   if (usable_size != nullptr) {
-    *usable_size = allocation_size - sizeof(AllocationHeader);
+    *usable_size = allocation_size;
   }
   // Need to do these inside of the lock.
   ++num_objects_allocated_;
   ++total_objects_allocated_;
   num_bytes_allocated_ += allocation_size;
   total_bytes_allocated_ += allocation_size;
-
+  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
   // We always put our object at the start of the free block; there cannot be another free block
   // before it.
   if (kIsDebugBuild) {
-    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
+    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
   }
-  new_header->SetPrevFree(0);
-  new_header->SetAllocationSize(allocation_size);
-  return new_header->GetObjectAddress();
+  new_info->SetPrevFreeBytes(0);
+  new_info->SetByteSize(allocation_size, false);
+  return obj;
 }
 
 void FreeListSpace::Dump(std::ostream& os) const {
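
Alloc() above keeps the best-fit lookup logarithmic by storing free runs in a std::set ordered primarily on the recorded free byte count, then probing with a stack-allocated dummy key (temp_info). A reduced sketch of that pattern with hypothetical names; note that the patch's real SortByPrevFree also tie-breaks on the entry's own byte size before falling back to the address:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <set>

    struct FreeRun {
      size_t prev_free;   // Free bytes preceding this entry: the best-fit key.
      uintptr_t address;  // Tie-breaker so distinct runs never compare equal.
    };

    struct SortByPrevFree {
      bool operator()(const FreeRun* a, const FreeRun* b) const {
        if (a->prev_free != b->prev_free) return a->prev_free < b->prev_free;
        return a->address < b->address;
      }
    };

    int main() {
      FreeRun runs[] = {{4096, 0x1000}, {8192, 0x9000}, {16384, 0x20000}};
      std::set<FreeRun*, SortByPrevFree> free_runs = {&runs[0], &runs[1], &runs[2]};

      // Best fit for a 6 KiB request: build a dummy key, as Alloc() does with
      // temp_info, and take the first run with at least that many free bytes.
      FreeRun key{6 * 1024, 0};
      auto it = free_runs.lower_bound(&key);
      assert(it != free_runs.end() && (*it)->prev_free == 8192);
      return 0;
    }
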
@@ -376,21 +468,20 @@
      << " begin: " << reinterpret_cast<void*>(Begin())
      << " end: " << reinterpret_cast<void*>(End()) << "\n";
   uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
-  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
-  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
-    byte* free_start = reinterpret_cast<byte*>(cur_header);
-    cur_header = cur_header->GetNextNonFree();
-    byte* free_end = reinterpret_cast<byte*>(cur_header);
-    if (free_start != free_end) {
-      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
-         << " of length " << free_end - free_start << " bytes\n";
+  const AllocationInfo* cur_info =
+      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
+  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
+  while (cur_info < end_info) {
+    size_t size = cur_info->ByteSize();
+    uintptr_t address = GetAddressForAllocationInfo(cur_info);
+    if (cur_info->IsFree()) {
+      os << "Free block at address: " << reinterpret_cast<const void*>(address)
+         << " of length " << size << " bytes\n";
+    } else {
+      os << "Large object at address: " << reinterpret_cast<const void*>(address)
+         << " of length " << size << " bytes\n";
     }
-    size_t alloc_size = cur_header->AllocationSize();
-    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
-    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
-    os << "Large object at address: " << reinterpret_cast<const void*>(free_start)
-       << " of length " << byte_end - byte_start << " bytes\n";
-    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
+    cur_info = cur_info->GetNextInfo();
   }
   if (free_end_) {
     os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 09a0919..a63c5c0 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -29,13 +29,20 @@
 namespace gc {
 namespace space {
 
+class AllocationInfo;
+
+enum LargeObjectSpaceType {
+  kLargeObjectSpaceTypeDisabled,
+  kLargeObjectSpaceTypeMap,
+  kLargeObjectSpaceTypeFreeList,
+};
+
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
   SpaceType GetType() const OVERRIDE {
     return kSpaceTypeLargeObjectSpace;
   }
-
   void SwapBitmaps();
   void CopyLiveToMarked();
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
@@ -44,57 +51,53 @@
   uint64_t GetBytesAllocated() OVERRIDE {
     return num_bytes_allocated_;
   }
-
   uint64_t GetObjectsAllocated() OVERRIDE {
     return num_objects_allocated_;
   }
-
   uint64_t GetTotalBytesAllocated() const {
     return total_bytes_allocated_;
   }
-
   uint64_t GetTotalObjectsAllocated() const {
     return total_objects_allocated_;
   }
-
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
-
   // LargeObjectSpaces don't have thread local state.
   void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
   }
   void RevokeAllThreadLocalBuffers() OVERRIDE {
   }
-
   bool IsAllocSpace() const OVERRIDE {
     return true;
   }
-
   AllocSpace* AsAllocSpace() OVERRIDE {
     return this;
   }
-
   collector::ObjectBytePair Sweep(bool swap_bitmaps);
-
   virtual bool CanMoveObjects() const OVERRIDE {
     return false;
   }
-
   // Current address at which the space begins, which may vary as the space is filled.
   byte* Begin() const {
     return begin_;
   }
-
   // Current address at which the space ends, which may vary as the space is filled.
   byte* End() const {
     return end_;
   }
-
+  // Current size of the space.
+  size_t Size() const {
+    return End() - Begin();
+  }
+  // Return true if we contain the specified address.
+  bool Contains(const mirror::Object* obj) const {
+    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    return Begin() <= byte_obj && byte_obj < End();
+  }
   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
   explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
-
   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
 
   // Approximate number of bytes which have been allocated into the space.
@@ -102,7 +105,6 @@
   uint64_t num_objects_allocated_;
   uint64_t total_bytes_allocated_;
   uint64_t total_objects_allocated_;
-
   // Begin and end, may change as more large objects are allocated.
   byte* begin_;
   byte* end_;
@@ -119,7 +121,6 @@
   // Creates a large object space. Allocations into the large object space use memory maps instead
   // of malloc.
   static LargeObjectMapSpace* Create(const std::string& name);
-
   // Return the storage space required by obj.
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
@@ -145,126 +146,52 @@
 // A continuous large object space with a free-list to handle holes.
 class FreeListSpace FINAL : public LargeObjectSpace {
  public:
+  static constexpr size_t kAlignment = kPageSize;
+
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
-
   size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                         size_t* usable_size) OVERRIDE;
   size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
-  bool Contains(const mirror::Object* obj) const OVERRIDE;
   void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
-
-  // Address at which the space begins.
-  byte* Begin() const {
-    return begin_;
-  }
-
-  // Address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
-    return end_;
-  }
-
-  // Current size of space
-  size_t Size() const {
-    return End() - Begin();
-  }
-
   void Dump(std::ostream& os) const;
 
  protected:
-  static const size_t kAlignment = kPageSize;
-
-  class AllocationHeader {
-   public:
-    // Returns the allocation size, includes the header.
-    size_t AllocationSize() const {
-      return alloc_size_;
-    }
-
-    // Updates the allocation size in the header, the allocation size includes the header itself.
-    void SetAllocationSize(size_t size) {
-      DCHECK(IsAligned<kPageSize>(size));
-      alloc_size_ = size;
-    }
-
-    bool IsFree() const {
-      return AllocationSize() == 0;
-    }
-
-    // Returns the previous free allocation header by using the prev_free_ member to figure out
-    // where it is. If prev free is 0 then we just return ourself.
-    AllocationHeader* GetPrevFreeAllocationHeader() {
-      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) - prev_free_);
-    }
-
-    // Returns the address of the object associated with this allocation header.
-    mirror::Object* GetObjectAddress() {
-      return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
-    }
-
-    // Returns the next allocation header after the object associated with this allocation header.
-    AllocationHeader* GetNextAllocationHeader() {
-      DCHECK_NE(alloc_size_, 0U);
-      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) + alloc_size_);
-    }
-
-    // Returns how many free bytes there is before the block.
-    size_t GetPrevFree() const {
-      return prev_free_;
-    }
-
-    // Update the size of the free block prior to the allocation.
-    void SetPrevFree(size_t prev_free) {
-      DCHECK(IsAligned<kPageSize>(prev_free));
-      prev_free_ = prev_free;
-    }
-
-    // Finds and returns the next non free allocation header after ourself.
-    // TODO: Optimize, currently O(n) for n free following pages.
-    AllocationHeader* GetNextNonFree();
-
-    // Used to implement best fit object allocation. Each allocation has an AllocationHeader which
-    // contains the size of the previous free block preceding it. Implemented in such a way that we
-    // can also find the iterator for any allocation header pointer.
-    class SortByPrevFree {
-     public:
-      bool operator()(const AllocationHeader* a, const AllocationHeader* b) const {
-        if (a->GetPrevFree() < b->GetPrevFree()) return true;
-        if (a->GetPrevFree() > b->GetPrevFree()) return false;
-        if (a->AllocationSize() < b->AllocationSize()) return true;
-        if (a->AllocationSize() > b->AllocationSize()) return false;
-        return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
-      }
-    };
-
-   private:
-    // Contains the size of the previous free block, if 0 then the memory preceding us is an
-    // allocation.
-    size_t prev_free_;
-
-    // Allocation size of this object, 0 means that the allocation header is free memory.
-    size_t alloc_size_;
-
-    friend class FreeListSpace;
-  };
-
   FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
-
+  size_t GetSlotIndexForAddress(uintptr_t address) const {
+    DCHECK(Contains(reinterpret_cast<mirror::Object*>(address)));
+    return (address - reinterpret_cast<uintptr_t>(Begin())) / kAlignment;
+  }
+  size_t GetSlotIndexForAllocationInfo(const AllocationInfo* info) const;
+  AllocationInfo* GetAllocationInfoForAddress(uintptr_t address);
+  const AllocationInfo* GetAllocationInfoForAddress(uintptr_t address) const;
+  uintptr_t GetAllocationAddressForSlot(size_t slot) const {
+    return reinterpret_cast<uintptr_t>(Begin()) + slot * kAlignment;
+  }
+  uintptr_t GetAddressForAllocationInfo(const AllocationInfo* info) const {
+    return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info));
+  }
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
-  void RemoveFreePrev(AllocationHeader* header) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  // Finds the allocation header corresponding to obj.
-  AllocationHeader* GetAllocationHeader(const mirror::Object* obj);
-
-  typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
-                   TrackingAllocator<AllocationHeader*, kAllocatorTagLOSFreeList>> FreeBlocks;
+  class SortByPrevFree {
+   public:
+    bool operator()(const AllocationInfo* a, const AllocationInfo* b) const;
+  };
+  typedef std::set<AllocationInfo*, SortByPrevFree,
+                   TrackingAllocator<AllocationInfo*, kAllocatorTagLOSFreeList>> FreeBlocks;
 
   // There is no footer for any allocations at the end of the space, so we keep track of how much
   // free space there is at the end manually.
   std::unique_ptr<MemMap> mem_map_;
+  // Side table for allocation info, one per page.
+  std::unique_ptr<MemMap> allocation_info_map_;
+  AllocationInfo* allocation_info_;
+
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Free bytes at the end of the space.
   size_t free_end_ GUARDED_BY(lock_);
   FreeBlocks free_blocks_ GUARDED_BY(lock_);
 };
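
The accessors declared above replace the old intrusive AllocationHeader with pure index arithmetic over a side table holding one AllocationInfo per kAlignment-sized slot; this is also why AllocationSize() and Alloc() now report usable_size as the full allocation_size, with no per-allocation header overhead. A simplified sketch of the round-trip mapping (constants and types below are hypothetical):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kBegin = 0x10000000;  // Space begin, assumed aligned.
    constexpr size_t kAlignment = 4096;       // One side-table entry per page.

    struct AllocationInfo {};       // Stand-in for the patch's AllocationInfo.
    AllocationInfo g_table[1024];   // Plays the role of allocation_info_.

    size_t SlotForAddress(uintptr_t address) {
      return (address - kBegin) / kAlignment;
    }
    AllocationInfo* InfoForAddress(uintptr_t address) {
      return &g_table[SlotForAddress(address)];
    }
    uintptr_t AddressForInfo(const AllocationInfo* info) {
      return kBegin + static_cast<size_t>(info - g_table) * kAlignment;
    }

    int main() {
      uintptr_t obj = kBegin + 37 * kAlignment;
      assert(AddressForInfo(InfoForAddress(obj)) == obj);  // Exact round-trip.
      return 0;
    }
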
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index f733584..c5d8abc 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -80,6 +80,8 @@
         ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
       }
     }
+    // Test that dump doesn't crash.
+    los->Dump(LOG(INFO));
 
     size_t bytes_allocated = 0;
     // Checks that the coalescing works.
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 0291155..7211bb4 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -181,7 +181,7 @@
   // Succeeds, fits without adjusting the footprint limit.
   size_t ptr1_bytes_allocated, ptr1_usable_size;
   StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::Object> ptr1(
+  MutableHandle<mirror::Object> ptr1(
       hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
   EXPECT_TRUE(ptr1.Get() != nullptr);
   EXPECT_LE(1U * MB, ptr1_bytes_allocated);
@@ -194,7 +194,7 @@
 
   // Succeeds, adjusts the footprint.
   size_t ptr3_bytes_allocated, ptr3_usable_size;
-  Handle<mirror::Object> ptr3(
+  MutableHandle<mirror::Object> ptr3(
       hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
   EXPECT_TRUE(ptr3.Get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
@@ -284,7 +284,7 @@
   // Succeeds, fits without adjusting the footprint limit.
   size_t ptr1_bytes_allocated, ptr1_usable_size;
   StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::Object> ptr1(
+  MutableHandle<mirror::Object> ptr1(
       hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
   EXPECT_TRUE(ptr1.Get() != nullptr);
   EXPECT_LE(1U * MB, ptr1_bytes_allocated);
@@ -297,7 +297,7 @@
 
   // Succeeds, adjusts the footprint.
   size_t ptr3_bytes_allocated, ptr3_usable_size;
-  Handle<mirror::Object> ptr3(
+  MutableHandle<mirror::Object> ptr3(
       hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
   EXPECT_TRUE(ptr3.Get() != nullptr);
   EXPECT_LE(8U * MB, ptr3_bytes_allocated);
diff --git a/runtime/handle.h b/runtime/handle.h
index 06938e5..addb663 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -30,23 +30,23 @@
 
 // Handles are memory locations that contain GC roots. As the mirror::Object*s within a handle are
 // GC visible then the GC may move the references within them, something that couldn't be done with
-// a wrap pointer. Handles are generally allocated within HandleScopes. ConstHandle is a super-class
-// of Handle and doesn't support assignment operations.
+// a raw pointer. Handles are generally allocated within HandleScopes. Handle is a super-class
+// of MutableHandle and doesn't support assignment operations.
 template<class T>
-class ConstHandle {
+class Handle {
  public:
-  ConstHandle() : reference_(nullptr) {
+  Handle() : reference_(nullptr) {
   }
 
-  ALWAYS_INLINE ConstHandle(const ConstHandle<T>& handle) : reference_(handle.reference_) {
+  ALWAYS_INLINE Handle(const Handle<T>& handle) : reference_(handle.reference_) {
   }
 
-  ALWAYS_INLINE ConstHandle<T>& operator=(const ConstHandle<T>& handle) {
+  ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) {
     reference_ = handle.reference_;
     return *this;
   }
 
-  ALWAYS_INLINE explicit ConstHandle(StackReference<T>* reference) : reference_(reference) {
+  ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
   }
 
   ALWAYS_INLINE T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -73,11 +73,11 @@
   StackReference<T>* reference_;
 
   template<typename S>
-  explicit ConstHandle(StackReference<S>* reference)
+  explicit Handle(StackReference<S>* reference)
       : reference_(reinterpret_cast<StackReference<T>*>(reference)) {
   }
   template<typename S>
-  explicit ConstHandle(const ConstHandle<S>& handle)
+  explicit Handle(const Handle<S>& handle)
       : reference_(reinterpret_cast<StackReference<T>*>(handle.reference_)) {
   }
 
@@ -91,7 +91,7 @@
 
  private:
   friend class BuildGenericJniFrameVisitor;
-  template<class S> friend class ConstHandle;
+  template<class S> friend class Handle;
   friend class HandleScope;
   template<class S> friend class HandleWrapper;
   template<size_t kNumReferences> friend class StackHandleScope;
@@ -99,42 +99,43 @@
 
 // Handles that support assignment.
 template<class T>
-class Handle : public ConstHandle<T> {
+class MutableHandle : public Handle<T> {
  public:
-  Handle() {
+  MutableHandle() {
   }
 
-  ALWAYS_INLINE Handle(const Handle<T>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstHandle<T>(handle.reference_) {
+  ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Handle<T>(handle.reference_) {
   }
 
-  ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle)
+  ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    ConstHandle<T>::operator=(handle);
+    Handle<T>::operator=(handle);
     return *this;
   }
 
-  ALWAYS_INLINE explicit Handle(StackReference<T>* reference)
+  ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstHandle<T>(reference) {
+      : Handle<T>(reference) {
   }
 
   ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    StackReference<T>* ref = ConstHandle<T>::GetReference();
+    StackReference<T>* ref = Handle<T>::GetReference();
     T* const old = ref->AsMirrorPtr();
     ref->Assign(reference);
     return old;
   }
 
   template<typename S>
-  explicit Handle(const Handle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstHandle<T>(handle) {
+  explicit MutableHandle(const MutableHandle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Handle<T>(handle) {
   }
 
  protected:
   template<typename S>
-  explicit Handle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstHandle<T>(reference) {
+  explicit MutableHandle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Handle<T>(reference) {
   }
 
  private:
@@ -146,9 +147,9 @@
 
 // A special case of Handle that only holds references to null.
 template<class T>
-class NullHandle : public ConstHandle<T> {
+class NullHandle : public Handle<T> {
  public:
-  NullHandle() : ConstHandle<T>(&null_ref_) {
+  NullHandle() : Handle<T>(&null_ref_) {
   }
 
  private:
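
The rename above inverts the old naming while keeping the shape: the base class (now Handle) is read-only, and only the subclass (now MutableHandle) exposes Assign(). A toy model of that split, illustrative only and not ART's real StackReference-based implementation:

    #include <cassert>

    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T* Get() const { return *slot_; }  // Read access only.

     protected:
      T** slot_;  // Stands in for StackReference<T>*.
    };

    template <typename T>
    class MutableHandle : public Handle<T> {
     public:
      using Handle<T>::Handle;
      T* Assign(T* obj) {  // Only mutable handles may reseat the reference.
        T* old = *this->slot_;
        *this->slot_ = obj;
        return old;
      }
    };

    int main() {
      int a = 1, b = 2;
      int* slot = &a;
      MutableHandle<int> mh(&slot);
      Handle<int> h = mh;   // Mutable handles convert to read-only ones.
      assert(h.Get() == &a);
      mh.Assign(&b);        // h observes the update through the shared slot.
      assert(h.Get() == &b);
      return 0;
    }
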
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 42ef779..99059f9 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -89,6 +89,12 @@
     return Handle<mirror::Object>(&references_[i]);
   }
 
+  MutableHandle<mirror::Object> GetMutableHandle(size_t i)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+    DCHECK_LT(i, number_of_references_);
+    return MutableHandle<mirror::Object>(&references_[i]);
+  }
+
   void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       ALWAYS_INLINE {
     DCHECK_LT(i, number_of_references_);
@@ -139,14 +145,14 @@
 // A wrapper which wraps around Object** and restores the pointer in the destructor.
 // TODO: Add more functionality.
 template<class T>
-class HandleWrapper : public Handle<T> {
+class HandleWrapper : public MutableHandle<T> {
  public:
-  HandleWrapper(T** obj, const Handle<T>& handle)
-     : Handle<T>(handle), obj_(obj) {
+  HandleWrapper(T** obj, const MutableHandle<T>& handle)
+     : MutableHandle<T>(handle), obj_(obj) {
   }
 
   ~HandleWrapper() {
-    *obj_ = Handle<T>::Get();
+    *obj_ = MutableHandle<T>::Get();
   }
 
  private:
@@ -169,10 +175,10 @@
     return references_storage_[i].AsMirrorPtr();
   }
 
-  Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  MutableHandle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       ALWAYS_INLINE {
     DCHECK_LT(i, number_of_references_);
-    return Handle<mirror::Object>(&references_storage_[i]);
+    return MutableHandle<mirror::Object>(&references_storage_[i]);
   }
 
   void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -182,9 +188,9 @@
   }
 
   template<class T>
-  Handle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetReference(pos_, object);
-    Handle<T> h(GetHandle(pos_));
+    MutableHandle<T> h(GetHandle(pos_));
     pos_++;
     return h;
   }
@@ -192,7 +198,7 @@
   template<class T>
   HandleWrapper<T> NewHandleWrapper(T** object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     SetReference(pos_, *object);
-    Handle<T> h(GetHandle(pos_));
+    MutableHandle<T> h(GetHandle(pos_));
     pos_++;
     return HandleWrapper<T>(object, h);
   }
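
HandleWrapper above keeps its RAII contract after being rebased onto MutableHandle: the caller's raw pointer is captured, the GC-visible slot may be rewritten while the wrapper lives, and the destructor writes the possibly-moved value back. A standalone sketch of the same pattern with hypothetical names:

    #include <cassert>

    template <typename T>
    class PointerRestorer {
     public:
      PointerRestorer(T** obj, T** slot) : obj_(obj), slot_(slot) {}
      ~PointerRestorer() { *obj_ = *slot_; }  // Restore on scope exit.

     private:
      T** obj_;   // The caller's raw pointer.
      T** slot_;  // The handle's slot, which a moving GC may rewrite.
    };

    int main() {
      int a = 1, b = 2;
      int* caller_ptr = &a;
      int* handle_slot = caller_ptr;
      {
        PointerRestorer<int> wrapper(&caller_ptr, &handle_slot);
        handle_slot = &b;  // Simulates the GC moving the object.
      }
      assert(caller_ptr == &b);  // Caller sees the moved location.
      return 0;
    }
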
diff --git a/runtime/image.cc b/runtime/image.cc
index 93ec27d..478b486 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
 namespace art {
 
 const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const byte ImageHeader::kImageVersion[] = { '0', '0', '8', '\0' };
+const byte ImageHeader::kImageVersion[] = { '0', '0', '9', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 0f45b9e..a2e88a6 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -597,10 +597,13 @@
   thread->ResetQuickAllocEntryPointsForThread();
 }
 
-void Instrumentation::SetEntrypointsInstrumented(bool instrumented) {
+void Instrumentation::SetEntrypointsInstrumented(bool instrumented, bool suspended) {
   Runtime* runtime = Runtime::Current();
   ThreadList* tl = runtime->GetThreadList();
-  if (runtime->IsStarted()) {
+  if (suspended) {
+    Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+  }
+  if (runtime->IsStarted() && !suspended) {
     tl->SuspendAll();
   }
   {
@@ -608,30 +611,30 @@
     SetQuickAllocEntryPointsInstrumented(instrumented);
     ResetQuickAllocEntryPoints();
   }
-  if (runtime->IsStarted()) {
+  if (runtime->IsStarted() && !suspended) {
     tl->ResumeAll();
   }
 }
 
-void Instrumentation::InstrumentQuickAllocEntryPoints() {
+void Instrumentation::InstrumentQuickAllocEntryPoints(bool suspended) {
   // TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
   //       should be guarded by a lock.
   DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
   const bool enable_instrumentation =
       quick_alloc_entry_points_instrumentation_counter_.FetchAndAddSequentiallyConsistent(1) == 0;
   if (enable_instrumentation) {
-    SetEntrypointsInstrumented(true);
+    SetEntrypointsInstrumented(true, suspended);
   }
 }
 
-void Instrumentation::UninstrumentQuickAllocEntryPoints() {
+void Instrumentation::UninstrumentQuickAllocEntryPoints(bool suspended) {
   // TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racey and this code
   //       should be guarded by a lock.
   DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
   const bool disable_instrumentation =
       quick_alloc_entry_points_instrumentation_counter_.FetchAndSubSequentiallyConsistent(1) == 1;
   if (disable_instrumentation) {
-    SetEntrypointsInstrumented(false);
+    SetEntrypointsInstrumented(false, suspended);
   }
 }
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d05cee5..3c1c756 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -182,10 +182,10 @@
     return interpreter_handler_table_;
   }
 
-  void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                                                        Locks::runtime_shutdown_lock_);
-  void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::thread_list_lock_,
-                                                          Locks::runtime_shutdown_lock_);
+  void InstrumentQuickAllocEntryPoints(bool suspended)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
+  void UninstrumentQuickAllocEntryPoints(bool suspended)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_);
   void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
 
   // Update the code of a method respecting any installed stubs.
@@ -350,7 +350,7 @@
 
   // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
   // exclusive access to mutator lock which you can't get if the runtime isn't started.
-  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;
+  void SetEntrypointsInstrumented(bool instrumented, bool suspended) NO_THREAD_SAFETY_ANALYSIS;
 
   void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
                             mirror::ArtMethod* method, uint32_t dex_pc) const
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 47a7f0d..7e685e8 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -462,7 +462,7 @@
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-    if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
+    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
       CHECK(self->IsExceptionPending());
       self->PopShadowFrame();
       return;
@@ -537,7 +537,7 @@
       StackHandleScope<1> hs(self);
       HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
       if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
-          h_declaring_class, true, true))) {
+          self, h_declaring_class, true, true))) {
         DCHECK(self->IsExceptionPending());
         self->PopShadowFrame();
         return;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 5724e35..9f08013 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -455,7 +455,7 @@
   Thread* const self_;
   StackHandleScope<1> handle_scope_;
   Handle<mirror::Throwable>* exception_;
-  Handle<mirror::ArtMethod> catch_method_;
+  MutableHandle<mirror::ArtMethod> catch_method_;
   uint32_t catch_dex_pc_;
   bool clear_exception_;
 
@@ -780,8 +780,8 @@
 }
 
 // Helper function to deal with class loading in an unstarted runtime.
-static void UnstartedRuntimeFindClass(Thread* self, ConstHandle<mirror::String> className,
-                                      ConstHandle<mirror::ClassLoader> class_loader, JValue* result,
+static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> className,
+                                      Handle<mirror::ClassLoader> class_loader, JValue* result,
                                       const std::string& method_name, bool initialize_class,
                                       bool abort_if_not_found)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -800,7 +800,7 @@
   if (found != nullptr && initialize_class) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(found));
-    if (!class_linker->EnsureInitialized(h_class, true, true)) {
+    if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
       CHECK(self->IsExceptionPending());
       return;
     }
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 5a1d01e..9358632 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -192,7 +192,7 @@
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
-    if (UNLIKELY(!class_linker->EnsureInitialized(h_class, true, true))) {
+    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
       DCHECK(self->IsExceptionPending());
       return nullptr;
     }
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 755e1ed..5c8a6c6 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -249,9 +249,7 @@
       // perform the memory barrier now.
       QuasiAtomic::ThreadFenceForConstructor();
     }
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -268,9 +266,7 @@
   HANDLE_INSTRUCTION_START(RETURN_VOID_BARRIER) {
     QuasiAtomic::ThreadFenceForConstructor();
     JValue result;
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -288,9 +284,7 @@
     JValue result;
     result.SetJ(0);
     result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -307,9 +301,7 @@
   HANDLE_INSTRUCTION_START(RETURN_WIDE) {
     JValue result;
     result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
     if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
       instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -325,9 +317,7 @@
 
   HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
     JValue result;
-    if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
-    }
+    self->AllowThreadSuspension();
     const uint8_t vreg_index = inst->VRegA_11x(inst_data);
     Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
     if (do_assignability_check && obj_result != NULL) {
@@ -632,7 +622,7 @@
     int8_t offset = inst->VRegA_10t(inst_data);
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -644,7 +634,7 @@
     int16_t offset = inst->VRegA_20t();
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -656,7 +646,7 @@
     int32_t offset = inst->VRegA_30t();
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -668,7 +658,7 @@
     int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -680,7 +670,7 @@
     int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
     if (IsBackwardBranch(offset)) {
       if (UNLIKELY(self->TestAllFlags())) {
-        CheckSuspend(self);
+        self->CheckSuspend();
         UPDATE_HANDLER_TABLE();
       }
     }
@@ -773,7 +763,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -789,7 +779,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -805,7 +795,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -821,7 +811,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -837,7 +827,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -853,7 +843,7 @@
       int16_t offset = inst->VRegC_22t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -869,7 +859,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -885,7 +875,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -901,7 +891,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -917,7 +907,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -933,7 +923,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -949,7 +939,7 @@
       int16_t offset = inst->VRegB_21t();
       if (IsBackwardBranch(offset)) {
         if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
+          self->CheckSuspend();
           UPDATE_HANDLER_TABLE();
         }
       }
@@ -2399,7 +2389,7 @@
   exception_pending_label: {
     CHECK(self->IsExceptionPending());
     if (UNLIKELY(self->TestAllFlags())) {
-      CheckSuspend(self);
+      self->CheckSuspend();
       UPDATE_HANDLER_TABLE();
     }
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 6054a25..c6cef6a 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -22,9 +22,7 @@
 #define HANDLE_PENDING_EXCEPTION()                                                              \
   do {                                                                                          \
     DCHECK(self->IsExceptionPending());                                                         \
-    if (UNLIKELY(self->TestAllFlags())) {                                                       \
-      CheckSuspend(self);                                                                       \
-    }                                                                                           \
+    self->AllowThreadSuspension();                                                              \
     uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame,           \
                                                                   inst->GetDexPc(insns),        \
                                                                   instrumentation);             \
@@ -175,9 +173,7 @@
           // perform the memory barrier now.
           QuasiAtomic::ThreadFenceForConstructor();
         }
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -191,9 +187,7 @@
       case Instruction::RETURN_VOID_BARRIER: {
         QuasiAtomic::ThreadFenceForConstructor();
         JValue result;
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -208,9 +202,7 @@
         JValue result;
         result.SetJ(0);
         result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -224,9 +216,7 @@
       case Instruction::RETURN_WIDE: {
         JValue result;
         result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
           instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
                                            shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -239,9 +229,7 @@
       }
       case Instruction::RETURN_OBJECT: {
         JValue result;
-        if (UNLIKELY(self->TestAllFlags())) {
-          CheckSuspend(self);
-        }
+        self->AllowThreadSuspension();
         const size_t ref_idx = inst->VRegA_11x(inst_data);
         Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
         if (do_assignability_check && obj_result != NULL) {
@@ -545,9 +533,7 @@
         PREAMBLE();
         int8_t offset = inst->VRegA_10t(inst_data);
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -556,9 +542,7 @@
         PREAMBLE();
         int16_t offset = inst->VRegA_20t();
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -567,9 +551,7 @@
         PREAMBLE();
         int32_t offset = inst->VRegA_30t();
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -578,9 +560,7 @@
         PREAMBLE();
         int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -589,9 +569,7 @@
         PREAMBLE();
         int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
         if (IsBackwardBranch(offset)) {
-          if (UNLIKELY(self->TestAllFlags())) {
-            CheckSuspend(self);
-          }
+          self->AllowThreadSuspension();
         }
         inst = inst->RelativeAt(offset);
         break;
@@ -682,9 +660,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) == shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -697,9 +673,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) != shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -712,9 +686,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) < shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -727,9 +699,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -742,9 +712,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) > shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -757,9 +725,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <= shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
           int16_t offset = inst->VRegC_22t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -772,9 +738,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -787,9 +751,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -802,9 +764,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -817,9 +777,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -832,9 +790,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
@@ -847,9 +803,7 @@
         if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
           int16_t offset = inst->VRegB_21t();
           if (IsBackwardBranch(offset)) {
-            if (UNLIKELY(self->TestAllFlags())) {
-              CheckSuspend(self);
-            }
+            self->AllowThreadSuspension();
           }
           inst = inst->RelativeAt(offset);
         } else {
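
The interpreter changes above collapse the repeated `if (UNLIKELY(self->TestAllFlags())) { CheckSuspend(self); }` pair into a single self->AllowThreadSuspension() call. The helper's body is not part of these hunks, so the following is an assumption: presumably it is a thin Thread member wrapping exactly that test-then-check sequence, along these lines:

    #include <atomic>

    struct ToyThread {
      std::atomic<unsigned> flags{0};  // Stands in for the thread's state flags.
      bool TestAllFlags() const { return flags.load(std::memory_order_relaxed) != 0; }
      void CheckSuspend() { /* Block until resumed, run checkpoints, etc. */ }

      // One call site instead of two: honor a pending suspension request only
      // when some flag is actually set.
      void AllowThreadSuspension() {
        if (TestAllFlags()) {
          CheckSuspend();
        }
      }
    };

    int main() {
      ToyThread self;
      self.AllowThreadSuspension();  // No flags set: falls straight through.
      return 0;
    }
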
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 0ac5b88..1444d97 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -36,9 +36,6 @@
 
 namespace art {
 
-static const size_t kPinTableInitial = 16;  // Arbitrary.
-static const size_t kPinTableMax = 1024;  // Arbitrary sanity check.
-
 static size_t gGlobalsInitial = 512;  // Arbitrary.
 static size_t gGlobalsMax = 51200;  // Arbitrary sanity check. (Must fit in 16 bits.)
 
@@ -365,8 +362,6 @@
       force_copy_(options->force_copy_),
       tracing_enabled_(!options->jni_trace_.empty() || VLOG_IS_ON(third_party_jni)),
       trace_(options->jni_trace_),
-      pins_lock_("JNI pin table lock", kPinTableLock),
-      pin_table_("pin table", kPinTableInitial, kPinTableMax),
       globals_lock_("JNI global reference table lock"),
       globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
       libraries_(new Libraries),
@@ -523,10 +518,6 @@
   }
   Thread* self = Thread::Current();
   {
-    MutexLock mu(self, pins_lock_);
-    os << "; pins=" << pin_table_.Size();
-  }
-  {
     ReaderMutexLock mu(self, globals_lock_);
     os << "; globals=" << globals_.Capacity();
   }
@@ -568,16 +559,6 @@
   return weak_globals_.Get(ref);
 }
 
-void JavaVMExt::PinPrimitiveArray(Thread* self, mirror::Array* array) {
-  MutexLock mu(self, pins_lock_);
-  pin_table_.Add(array);
-}
-
-void JavaVMExt::UnpinPrimitiveArray(Thread* self, mirror::Array* array) {
-  MutexLock mu(self, pins_lock_);
-  pin_table_.Remove(array);
-}
-
 void JavaVMExt::DumpReferenceTables(std::ostream& os) {
   Thread* self = Thread::Current();
   {
@@ -588,10 +569,6 @@
     MutexLock mu(self, weak_globals_lock_);
     weak_globals_.Dump(os);
   }
-  {
-    MutexLock mu(self, pins_lock_);
-    pin_table_.Dump(os);
-  }
 }
 
 bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
@@ -779,10 +756,6 @@
     ReaderMutexLock mu(self, globals_lock_);
     globals_.VisitRoots(callback, arg, 0, kRootJNIGlobal);
   }
-  {
-    MutexLock mu(self, pins_lock_);
-    pin_table_.VisitRoots(callback, arg, 0, kRootVMInternal);
-  }
   // The weak_globals table is visited by the GC itself (because it mutates the table).
 }
 
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index da0b8e3..2957ba3 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -95,7 +95,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_, pins_lock_);
+      LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_);
 
   void DumpReferenceTables(std::ostream& os)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -127,14 +127,6 @@
   mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void PinPrimitiveArray(Thread* self, mirror::Array* array)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(pins_lock_);
-
-  void UnpinPrimitiveArray(Thread* self, mirror::Array* array)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      LOCKS_EXCLUDED(pins_lock_);
-
   const JNIInvokeInterface* GetUncheckedFunctions() const {
     return unchecked_functions_;
   }
@@ -154,10 +146,6 @@
   // Extra diagnostics.
   const std::string trace_;
 
-  // Used to hold references to pinned primitive arrays.
-  Mutex pins_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  ReferenceTable pin_table_ GUARDED_BY(pins_lock_);
-
   // JNI global references.
   ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index b5b6298..0c9451c 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -36,8 +36,13 @@
 class Thread;
 
 namespace mirror {
+  class ArtField;
   class ArtMethod;
+  class Class;
+  class Object;
+  class Throwable;
 }  // namespace mirror
+class Thread;
 
 namespace JDWP {
 
@@ -65,6 +70,11 @@
 static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expandBufAdd8BE(pReply, id); }
 static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); }
 
+struct EventLocation {
+  mirror::ArtMethod* method;
+  uint32_t dex_pc;
+};
+
 /*
  * Holds a JDWP "location".
  */
@@ -178,7 +188,7 @@
    * The VM has finished initializing.  Only called when the debugger is
    * connected at the time initialization completes.
    */
-  bool PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool PostVMStart() LOCKS_EXCLUDED(event_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * A location of interest has been reached.  This is used for breakpoints,
@@ -192,8 +202,9 @@
    *
    * "returnValue" is non-null for MethodExit events only.
    */
-  bool PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, int eventFlags,
+  bool PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
                          const JValue* returnValue)
+     LOCKS_EXCLUDED(event_list_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -203,8 +214,9 @@
    * "fieldValue" is non-null for field modification events only.
    * "is_modification" is true for field modification, false for field access.
    */
-  bool PostFieldEvent(const JdwpLocation* pLoc, RefTypeId typeId, FieldId fieldId,
-                      ObjectId thisPtr, const JValue* fieldValue, bool is_modification)
+  bool PostFieldEvent(const EventLocation* pLoc, mirror::ArtField* field, mirror::Object* thisPtr,
+                      const JValue* fieldValue, bool is_modification)
+      LOCKS_EXCLUDED(event_list_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -212,21 +224,23 @@
    *
    * Pass in a zeroed-out "*pCatchLoc" if the exception wasn't caught.
    */
-  bool PostException(const JdwpLocation* pThrowLoc, ObjectId excepId, RefTypeId excepClassId,
-                     const JdwpLocation* pCatchLoc, ObjectId thisPtr)
+  bool PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
+                     const EventLocation* pCatchLoc, mirror::Object* thisPtr)
+      LOCKS_EXCLUDED(event_list_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * A thread has started or stopped.
    */
-  bool PostThreadChange(ObjectId threadId, bool start)
+  bool PostThreadChange(Thread* thread, bool start)
+      LOCKS_EXCLUDED(event_list_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Class has been prepared.
    */
-  bool PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature,
-                        int status)
+  bool PostClassPrepare(mirror::Class* klass)
+      LOCKS_EXCLUDED(event_list_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
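
The new EventLocation above lets the event paths carry an in-runtime location (a raw mirror::ArtMethod* plus dex pc) and defer id materialization to the JDWP wire boundary, instead of passing pre-registered ObjectId/RefTypeId values everywhere. An illustrative sketch of that split; the registry below is a stand-in, not ART's ObjectRegistry:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct Method {};  // Stands in for mirror::ArtMethod.

    struct EventLocation {   // In-runtime form: GC-visible raw pointer.
      Method* method;
      uint32_t dex_pc;
    };

    struct JdwpLocation {    // Wire form: opaque protocol ids only.
      uint64_t method_id;
      uint32_t dex_pc;
    };

    // Toy registry assigning stable ids at the protocol boundary.
    uint64_t RegisterMethod(Method* m) {
      static std::unordered_map<Method*, uint64_t> ids;
      static uint64_t next_id = 1;
      auto it = ids.emplace(m, next_id);
      if (it.second) ++next_id;
      return it.first->second;
    }

    JdwpLocation ToWire(const EventLocation& loc) {
      return JdwpLocation{RegisterMethod(loc.method), loc.dex_pc};
    }

    int main() {
      Method m;
      EventLocation loc{&m, 42};
      JdwpLocation wire = ToWire(loc);
      assert(wire.method_id == RegisterMethod(&m) && wire.dex_pc == 42);
      return 0;
    }
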
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index fc39cc4..d61660b 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -27,6 +27,9 @@
 #include "jdwp/jdwp_constants.h"
 #include "jdwp/jdwp_expand_buf.h"
 #include "jdwp/jdwp_priv.h"
+#include "jdwp/object_registry.h"
+#include "mirror/art_field-inl.h"
+#include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 
 /*
@@ -107,18 +110,17 @@
  * The rest will be zeroed.
  */
 struct ModBasket {
-  ModBasket() : pLoc(NULL), threadId(0), classId(0), excepClassId(0),
-                caught(false), fieldTypeID(0), fieldId(0), thisPtr(0) { }
+  ModBasket() : pLoc(nullptr), thread(nullptr), locationClass(nullptr), exceptionClass(nullptr),
+                caught(false), field(nullptr), thisPtr(nullptr) { }
 
-  const JdwpLocation* pLoc;           /* LocationOnly */
-  std::string         className;      /* ClassMatch/ClassExclude */
-  ObjectId            threadId;       /* ThreadOnly */
-  RefTypeId           classId;        /* ClassOnly */
-  RefTypeId           excepClassId;   /* ExceptionOnly */
-  bool                caught;         /* ExceptionOnly */
-  RefTypeId           fieldTypeID;    /* FieldOnly */
-  FieldId             fieldId;        /* FieldOnly */
-  ObjectId            thisPtr;        /* InstanceOnly */
+  const EventLocation*  pLoc;             /* LocationOnly */
+  std::string           className;        /* ClassMatch/ClassExclude */
+  Thread*               thread;           /* ThreadOnly */
+  mirror::Class*        locationClass;    /* ClassOnly */
+  mirror::Class*        exceptionClass;   /* ExceptionOnly */
+  bool                  caught;           /* ExceptionOnly */
+  mirror::ArtField*     field;            /* FieldOnly */
+  mirror::Object*       thisPtr;          /* InstanceOnly */
   /* nothing for StepOnly -- handled differently */
 };
 
@@ -295,9 +297,6 @@
 /*
  * Remove the event with the given ID from the list.
  *
- * Failure to find the event isn't really an error, but it is a little
- * weird.  (It looks like Eclipse will try to be extra careful and will
- * explicitly remove one-off single-step events.)
  */
 void JdwpState::UnregisterEventById(uint32_t requestId) {
   bool found = false;
@@ -317,7 +316,11 @@
   if (found) {
     Dbg::ManageDeoptimization();
   } else {
-    LOG(WARNING) << StringPrintf("Odd: no match when removing event reqId=0x%04x", requestId);
+    // Failure to find the event isn't really an error. For instance, it looks like Eclipse will
+    // try to be extra careful and will explicitly remove one-off single-step events (using a
+    // 'count' event modifier of 1). So the event may have already been removed as part of the
+    // event notification (see JdwpState::CleanupMatchList).
+    VLOG(jdwp) << StringPrintf("No match when removing event reqId=0x%04x", requestId);
   }
 }
 
@@ -463,12 +466,12 @@
       CHECK(false);  // should not be getting these
       break;
     case MK_THREAD_ONLY:
-      if (pMod->threadOnly.threadId != basket.threadId) {
+      if (!Dbg::MatchThread(pMod->threadOnly.threadId, basket.thread)) {
         return false;
       }
       break;
     case MK_CLASS_ONLY:
-      if (!Dbg::MatchType(basket.classId, pMod->classOnly.refTypeId)) {
+      if (!Dbg::MatchType(basket.locationClass, pMod->classOnly.refTypeId)) {
         return false;
       }
       break;
@@ -483,33 +486,32 @@
       }
       break;
     case MK_LOCATION_ONLY:
-      if (pMod->locationOnly.loc != *basket.pLoc) {
+      if (!Dbg::MatchLocation(pMod->locationOnly.loc, *basket.pLoc)) {
         return false;
       }
       break;
     case MK_EXCEPTION_ONLY:
-      if (pMod->exceptionOnly.refTypeId != 0 && !Dbg::MatchType(basket.excepClassId, pMod->exceptionOnly.refTypeId)) {
+      if (pMod->exceptionOnly.refTypeId != 0 &&
+          !Dbg::MatchType(basket.exceptionClass, pMod->exceptionOnly.refTypeId)) {
         return false;
       }
-      if ((basket.caught && !pMod->exceptionOnly.caught) || (!basket.caught && !pMod->exceptionOnly.uncaught)) {
+      if ((basket.caught && !pMod->exceptionOnly.caught) ||
+          (!basket.caught && !pMod->exceptionOnly.uncaught)) {
         return false;
       }
       break;
     case MK_FIELD_ONLY:
-      if (pMod->fieldOnly.fieldId != basket.fieldId) {
-        return false;
-      }
-      if (!Dbg::MatchType(basket.fieldTypeID, pMod->fieldOnly.refTypeId)) {
+      if (!Dbg::MatchField(pMod->fieldOnly.refTypeId, pMod->fieldOnly.fieldId, basket.field)) {
         return false;
       }
       break;
     case MK_STEP:
-      if (pMod->step.threadId != basket.threadId) {
+      if (!Dbg::MatchThread(pMod->step.threadId, basket.thread)) {
         return false;
       }
       break;
     case MK_INSTANCE_ONLY:
-      if (pMod->instanceOnly.objectId != basket.thisPtr) {
+      if (!Dbg::MatchInstance(pMod->instanceOnly.objectId, basket.thisPtr)) {
         return false;
       }
       break;
@@ -773,7 +775,7 @@
 }
 
 static void LogMatchingEventsAndThread(JdwpEvent** match_list, size_t match_count,
-                                       const ModBasket& basket)
+                                       ObjectId thread_id)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   for (size_t i = 0; i < match_count; ++i) {
     JdwpEvent* pEvent = match_list[i];
@@ -781,11 +783,19 @@
                << StringPrintf(" (requestId=%#" PRIx32 ")", pEvent->requestId);
   }
   std::string thread_name;
-  JdwpError error = Dbg::GetThreadName(basket.threadId, &thread_name);
+  JdwpError error = Dbg::GetThreadName(thread_id, &thread_name);
   if (error != JDWP::ERR_NONE) {
     thread_name = "<unknown>";
   }
-  VLOG(jdwp) << StringPrintf("  thread=%#" PRIx64, basket.threadId) << " " << thread_name;
+  VLOG(jdwp) << StringPrintf("  thread=%#" PRIx64, thread_id) << " " << thread_name;
+}
+
+static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location,
+                                             JDWP::JdwpLocation* jdwp_location)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(event_location != nullptr);
+  DCHECK(jdwp_location != nullptr);
+  Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc);
 }
 
 /*
@@ -809,14 +819,18 @@
  *  - Single-step to a line with a breakpoint.  Should get a single
  *    event message with both events in it.
  */
-bool JdwpState::PostLocationEvent(const JdwpLocation* pLoc, ObjectId thisPtr, int eventFlags,
-                                  const JValue* returnValue) {
+bool JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr,
+                                  int eventFlags, const JValue* returnValue) {
+  DCHECK(pLoc != nullptr);
+  DCHECK(pLoc->method != nullptr);
+  DCHECK_EQ(pLoc->method->IsStatic(), thisPtr == nullptr);
+
   ModBasket basket;
   basket.pLoc = pLoc;
-  basket.classId = pLoc->class_id;
+  basket.locationClass = pLoc->method->GetDeclaringClass();
   basket.thisPtr = thisPtr;
-  basket.threadId = Dbg::GetThreadSelfId();
-  basket.className = Dbg::GetClassName(pLoc->class_id);
+  basket.thread = Thread::Current();
+  basket.className = Dbg::GetClassName(basket.locationClass);
 
   /*
    * On rare occasions we may need to execute interpreted code in the VM
@@ -824,7 +838,7 @@
    * while doing so.  (I don't think we currently do this at all, so
    * this is mostly paranoia.)
    */
-  if (basket.threadId == debug_thread_id_) {
+  if (basket.thread == GetDebugThread()) {
     VLOG(jdwp) << "Ignoring location event in JDWP thread";
     return false;
   }
@@ -846,29 +860,36 @@
   size_t match_count = 0;
   ExpandBuf* pReq = NULL;
   JdwpSuspendPolicy suspend_policy = SP_NONE;
+  JdwpEvent** match_list = nullptr;
+  ObjectId thread_id = 0;
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    JdwpEvent** match_list = AllocMatchList(event_list_size_);
-    if ((eventFlags & Dbg::kBreakpoint) != 0) {
-      FindMatchingEvents(EK_BREAKPOINT, basket, match_list, &match_count);
-    }
-    if ((eventFlags & Dbg::kSingleStep) != 0) {
-      FindMatchingEvents(EK_SINGLE_STEP, basket, match_list, &match_count);
-    }
-    if ((eventFlags & Dbg::kMethodEntry) != 0) {
-      FindMatchingEvents(EK_METHOD_ENTRY, basket, match_list, &match_count);
-    }
-    if ((eventFlags & Dbg::kMethodExit) != 0) {
-      FindMatchingEvents(EK_METHOD_EXIT, basket, match_list, &match_count);
-      FindMatchingEvents(EK_METHOD_EXIT_WITH_RETURN_VALUE, basket, match_list, &match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      match_list = AllocMatchList(event_list_size_);
+      if ((eventFlags & Dbg::kBreakpoint) != 0) {
+        FindMatchingEvents(EK_BREAKPOINT, basket, match_list, &match_count);
+      }
+      if ((eventFlags & Dbg::kSingleStep) != 0) {
+        FindMatchingEvents(EK_SINGLE_STEP, basket, match_list, &match_count);
+      }
+      if ((eventFlags & Dbg::kMethodEntry) != 0) {
+        FindMatchingEvents(EK_METHOD_ENTRY, basket, match_list, &match_count);
+      }
+      if ((eventFlags & Dbg::kMethodExit) != 0) {
+        FindMatchingEvents(EK_METHOD_EXIT, basket, match_list, &match_count);
+        FindMatchingEvents(EK_METHOD_EXIT_WITH_RETURN_VALUE, basket, match_list, &match_count);
+      }
     }
     if (match_count != 0) {
       suspend_policy = scanSuspendPolicy(match_list, match_count);
 
+      thread_id = Dbg::GetThreadId(basket.thread);
+      JDWP::JdwpLocation jdwp_location;
+      SetJdwpLocationFromEventLocation(pLoc, &jdwp_location);
+
       if (VLOG_IS_ON(jdwp)) {
-        LogMatchingEventsAndThread(match_list, match_count, basket);
-        VLOG(jdwp) << "  location=" << *pLoc;
-        VLOG(jdwp) << StringPrintf("  this=%#" PRIx64, basket.thisPtr);
+        LogMatchingEventsAndThread(match_list, match_count, thread_id);
+        VLOG(jdwp) << "  location=" << jdwp_location;
         VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
       }
 
@@ -879,79 +900,81 @@
       for (size_t i = 0; i < match_count; i++) {
         expandBufAdd1(pReq, match_list[i]->eventKind);
         expandBufAdd4BE(pReq, match_list[i]->requestId);
-        expandBufAdd8BE(pReq, basket.threadId);
-        expandBufAddLocation(pReq, *pLoc);
+        expandBufAdd8BE(pReq, thread_id);
+        expandBufAddLocation(pReq, jdwp_location);
         if (match_list[i]->eventKind == EK_METHOD_EXIT_WITH_RETURN_VALUE) {
-          Dbg::OutputMethodReturnValue(pLoc->method_id, returnValue, pReq);
+          Dbg::OutputMethodReturnValue(jdwp_location.method_id, returnValue, pReq);
         }
       }
     }
 
-    CleanupMatchList(match_list, match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      CleanupMatchList(match_list, match_count);
+    }
   }
 
   Dbg::ManageDeoptimization();
 
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
+  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
   return match_count != 0;
 }
 
-bool JdwpState::PostFieldEvent(const JdwpLocation* pLoc, RefTypeId typeId, FieldId fieldId,
-                               ObjectId thisPtr, const JValue* fieldValue, bool is_modification) {
+bool JdwpState::PostFieldEvent(const EventLocation* pLoc, mirror::ArtField* field,
+                               mirror::Object* this_object, const JValue* fieldValue,
+                               bool is_modification) {
+  DCHECK(pLoc != nullptr);
+  DCHECK(field != nullptr);
+  DCHECK_EQ(fieldValue != nullptr, is_modification);
+  DCHECK_EQ(field->IsStatic(), this_object == nullptr);
+
   ModBasket basket;
   basket.pLoc = pLoc;
-  basket.classId = pLoc->class_id;
-  basket.thisPtr = thisPtr;
-  basket.threadId = Dbg::GetThreadSelfId();
-  basket.className = Dbg::GetClassName(pLoc->class_id);
-  basket.fieldTypeID = typeId;
-  basket.fieldId = fieldId;
-
-  DCHECK_EQ(fieldValue != nullptr, is_modification);
+  basket.locationClass = pLoc->method->GetDeclaringClass();
+  basket.thisPtr = this_object;
+  basket.thread = Thread::Current();
+  basket.className = Dbg::GetClassName(basket.locationClass);
+  basket.field = field;
 
   if (InvokeInProgress()) {
     VLOG(jdwp) << "Not posting field event during invoke";
     return false;
   }
 
-  // Get field's reference type tag.
-  JDWP::JdwpTypeTag type_tag;
-  uint32_t class_status;  // unused here.
-  JdwpError error = Dbg::GetClassInfo(typeId, &type_tag, &class_status, NULL);
-  if (error != ERR_NONE) {
-    return false;
-  }
-
-  // Get instance type tag.
-  uint8_t tag;
-  error = Dbg::GetObjectTag(thisPtr, &tag);
-  if (error != ERR_NONE) {
-    return false;
-  }
-
   size_t match_count = 0;
   ExpandBuf* pReq = NULL;
   JdwpSuspendPolicy suspend_policy = SP_NONE;
+  JdwpEvent** match_list = nullptr;
+  ObjectId thread_id = 0;
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    JdwpEvent** match_list = AllocMatchList(event_list_size_);
-
-    if (is_modification) {
-      FindMatchingEvents(EK_FIELD_MODIFICATION, basket, match_list, &match_count);
-    } else {
-      FindMatchingEvents(EK_FIELD_ACCESS, basket, match_list, &match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      match_list = AllocMatchList(event_list_size_);
+      if (is_modification) {
+        FindMatchingEvents(EK_FIELD_MODIFICATION, basket, match_list, &match_count);
+      } else {
+        FindMatchingEvents(EK_FIELD_ACCESS, basket, match_list, &match_count);
+      }
     }
     if (match_count != 0) {
       suspend_policy = scanSuspendPolicy(match_list, match_count);
 
+      thread_id = Dbg::GetThreadId(basket.thread);
+      ObjectRegistry* registry = Dbg::GetObjectRegistry();
+      ObjectId instance_id = registry->Add(basket.thisPtr);
+      RefTypeId field_type_id = registry->AddRefType(field->GetDeclaringClass());
+      FieldId field_id = Dbg::ToFieldId(field);
+      JDWP::JdwpLocation jdwp_location;
+      SetJdwpLocationFromEventLocation(pLoc, &jdwp_location);
+
       if (VLOG_IS_ON(jdwp)) {
-        LogMatchingEventsAndThread(match_list, match_count, basket);
-        VLOG(jdwp) << "  location=" << *pLoc;
-        VLOG(jdwp) << StringPrintf("  this=%#" PRIx64, basket.thisPtr);
-        VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, basket.fieldTypeID) << " "
-                   << Dbg::GetClassName(basket.fieldTypeID);
-        VLOG(jdwp) << StringPrintf("  field=%#" PRIx32, basket.fieldId) << " "
-                   << Dbg::GetFieldName(basket.fieldId);
+        LogMatchingEventsAndThread(match_list, match_count, thread_id);
+        VLOG(jdwp) << "  location=" << jdwp_location;
+        VLOG(jdwp) << StringPrintf("  this=%#" PRIx64, instance_id);
+        VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, field_type_id) << " "
+                   << Dbg::GetClassName(field_type_id);
+        VLOG(jdwp) << StringPrintf("  field=%#" PRIx32, field_id) << " "
+                   << Dbg::GetFieldName(field_id);
         VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
       }
 
@@ -959,28 +982,41 @@
       expandBufAdd1(pReq, suspend_policy);
       expandBufAdd4BE(pReq, match_count);
 
+      // Get field's reference type tag.
+      JDWP::JdwpTypeTag type_tag = Dbg::GetTypeTag(field->GetDeclaringClass());
+
+      // Get instance type tag.
+      uint8_t tag;
+      {
+        ScopedObjectAccessUnchecked soa(Thread::Current());
+        tag = Dbg::TagFromObject(soa, basket.thisPtr);
+      }
+
       for (size_t i = 0; i < match_count; i++) {
         expandBufAdd1(pReq, match_list[i]->eventKind);
         expandBufAdd4BE(pReq, match_list[i]->requestId);
-        expandBufAdd8BE(pReq, basket.threadId);
-        expandBufAddLocation(pReq, *pLoc);
+        expandBufAdd8BE(pReq, thread_id);
+        expandBufAddLocation(pReq, jdwp_location);
         expandBufAdd1(pReq, type_tag);
-        expandBufAddRefTypeId(pReq, typeId);
-        expandBufAddFieldId(pReq, fieldId);
+        expandBufAddRefTypeId(pReq, field_type_id);
+        expandBufAddFieldId(pReq, field_id);
         expandBufAdd1(pReq, tag);
-        expandBufAddObjectId(pReq, thisPtr);
+        expandBufAddObjectId(pReq, instance_id);
         if (is_modification) {
-          Dbg::OutputFieldValue(fieldId, fieldValue, pReq);
+          Dbg::OutputFieldValue(field_id, fieldValue, pReq);
         }
       }
     }
 
-    CleanupMatchList(match_list, match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      CleanupMatchList(match_list, match_count);
+    }
   }
 
   Dbg::ManageDeoptimization();
 
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
+  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
   return match_count != 0;
 }
 
@@ -990,8 +1026,8 @@
  * Valid mods:
  *  Count, ThreadOnly
  */
-bool JdwpState::PostThreadChange(ObjectId threadId, bool start) {
-  CHECK_EQ(threadId, Dbg::GetThreadSelfId());
+bool JdwpState::PostThreadChange(Thread* thread, bool start) {
+  CHECK_EQ(thread, Thread::Current());
 
   /*
    * I don't think this can happen.
@@ -1002,27 +1038,32 @@
   }
 
   ModBasket basket;
-  basket.threadId = threadId;
+  basket.thread = thread;
 
   ExpandBuf* pReq = NULL;
   JdwpSuspendPolicy suspend_policy = SP_NONE;
+  JdwpEvent** match_list = nullptr;
   size_t match_count = 0;
+  ObjectId thread_id = 0;
   {
-    // Don't allow the list to be updated while we scan it.
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    JdwpEvent** match_list = AllocMatchList(event_list_size_);
-
-    if (start) {
-      FindMatchingEvents(EK_THREAD_START, basket, match_list, &match_count);
-    } else {
-      FindMatchingEvents(EK_THREAD_DEATH, basket, match_list, &match_count);
+    {
+      // Don't allow the list to be updated while we scan it.
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      match_list = AllocMatchList(event_list_size_);
+      if (start) {
+        FindMatchingEvents(EK_THREAD_START, basket, match_list, &match_count);
+      } else {
+        FindMatchingEvents(EK_THREAD_DEATH, basket, match_list, &match_count);
+      }
     }
 
     if (match_count != 0) {
       suspend_policy = scanSuspendPolicy(match_list, match_count);
 
+      thread_id = Dbg::GetThreadId(basket.thread);
+
       if (VLOG_IS_ON(jdwp)) {
-        LogMatchingEventsAndThread(match_list, match_count, basket);
+        LogMatchingEventsAndThread(match_list, match_count, thread_id);
         VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
       }
 
@@ -1033,16 +1074,19 @@
       for (size_t i = 0; i < match_count; i++) {
         expandBufAdd1(pReq, match_list[i]->eventKind);
         expandBufAdd4BE(pReq, match_list[i]->requestId);
-        expandBufAdd8BE(pReq, basket.threadId);
+        expandBufAdd8BE(pReq, thread_id);
       }
     }
 
-    CleanupMatchList(match_list, match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      CleanupMatchList(match_list, match_count);
+    }
   }
 
   Dbg::ManageDeoptimization();
 
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
+  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
 
   return match_count != 0;
 }
@@ -1076,17 +1120,21 @@
  * because there's a pretty good chance that we're not going to send it
  * up the debugger.
  */
-bool JdwpState::PostException(const JdwpLocation* pThrowLoc,
-                              ObjectId exceptionId, RefTypeId exceptionClassId,
-                              const JdwpLocation* pCatchLoc, ObjectId thisPtr) {
-  ModBasket basket;
+bool JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
+                              const EventLocation* pCatchLoc, mirror::Object* thisPtr) {
+  DCHECK(exception_object != nullptr);
+  DCHECK(pThrowLoc != nullptr);
+  DCHECK(pCatchLoc != nullptr);
+  DCHECK(pThrowLoc->method != nullptr);
+  DCHECK_EQ(pThrowLoc->method->IsStatic(), thisPtr == nullptr);
 
+  ModBasket basket;
   basket.pLoc = pThrowLoc;
-  basket.classId = pThrowLoc->class_id;
-  basket.threadId = Dbg::GetThreadSelfId();
-  basket.className = Dbg::GetClassName(basket.classId);
-  basket.excepClassId = exceptionClassId;
-  basket.caught = (pCatchLoc->class_id != 0);
+  basket.locationClass = pThrowLoc->method->GetDeclaringClass();
+  basket.thread = Thread::Current();
+  basket.className = Dbg::GetClassName(basket.locationClass);
+  basket.exceptionClass = exception_object->GetClass();
+  basket.caught = (pCatchLoc->method != nullptr);
   basket.thisPtr = thisPtr;
 
   /* don't try to post an exception caused by the debugger */
@@ -1098,24 +1146,37 @@
   size_t match_count = 0;
   ExpandBuf* pReq = NULL;
   JdwpSuspendPolicy suspend_policy = SP_NONE;
+  JdwpEvent** match_list = nullptr;
+  ObjectId thread_id = 0;
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    JdwpEvent** match_list = AllocMatchList(event_list_size_);
-    FindMatchingEvents(EK_EXCEPTION, basket, match_list, &match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      match_list = AllocMatchList(event_list_size_);
+      FindMatchingEvents(EK_EXCEPTION, basket, match_list, &match_count);
+    }
     if (match_count != 0) {
       suspend_policy = scanSuspendPolicy(match_list, match_count);
 
+      thread_id = Dbg::GetThreadId(basket.thread);
+      ObjectRegistry* registry = Dbg::GetObjectRegistry();
+      ObjectId exceptionId = registry->Add(exception_object);
+      JDWP::JdwpLocation jdwp_throw_location;
+      JDWP::JdwpLocation jdwp_catch_location;
+      SetJdwpLocationFromEventLocation(pThrowLoc, &jdwp_throw_location);
+      SetJdwpLocationFromEventLocation(pCatchLoc, &jdwp_catch_location);
+
       if (VLOG_IS_ON(jdwp)) {
-        LogMatchingEventsAndThread(match_list, match_count, basket);
-        VLOG(jdwp) << "  throwLocation=" << *pThrowLoc;
-        if (pCatchLoc->class_id == 0) {
+        std::string exceptionClassName(PrettyDescriptor(exception_object->GetClass()));
+
+        LogMatchingEventsAndThread(match_list, match_count, thread_id);
+        VLOG(jdwp) << "  throwLocation=" << jdwp_throw_location;
+        if (jdwp_catch_location.class_id == 0) {
           VLOG(jdwp) << "  catchLocation=uncaught";
         } else {
-          VLOG(jdwp) << "  catchLocation=" << *pCatchLoc;
+          VLOG(jdwp) << "  catchLocation=" << jdwp_catch_location;
         }
-        VLOG(jdwp) << StringPrintf("  this=%#" PRIx64, basket.thisPtr);
-        VLOG(jdwp) << StringPrintf("  exceptionClass=%#" PRIx64, basket.excepClassId) << " "
-                   << Dbg::GetClassName(basket.excepClassId);
+        VLOG(jdwp) << StringPrintf("  exception=%#" PRIx64, exceptionId) << " "
+                   << exceptionClassName;
         VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
       }
 
@@ -1126,21 +1187,23 @@
       for (size_t i = 0; i < match_count; i++) {
         expandBufAdd1(pReq, match_list[i]->eventKind);
         expandBufAdd4BE(pReq, match_list[i]->requestId);
-        expandBufAdd8BE(pReq, basket.threadId);
-
-        expandBufAddLocation(pReq, *pThrowLoc);
+        expandBufAdd8BE(pReq, thread_id);
+        expandBufAddLocation(pReq, jdwp_throw_location);
         expandBufAdd1(pReq, JT_OBJECT);
         expandBufAdd8BE(pReq, exceptionId);
-        expandBufAddLocation(pReq, *pCatchLoc);
+        expandBufAddLocation(pReq, jdwp_catch_location);
       }
     }
 
-    CleanupMatchList(match_list, match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      CleanupMatchList(match_list, match_count);
+    }
   }
 
   Dbg::ManageDeoptimization();
 
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
+  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
 
   return match_count != 0;
 }
@@ -1151,13 +1214,13 @@
  * Valid mods:
  *  Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude
  */
-bool JdwpState::PostClassPrepare(JdwpTypeTag tag, RefTypeId refTypeId, const std::string& signature,
-                                 int status) {
-  ModBasket basket;
+bool JdwpState::PostClassPrepare(mirror::Class* klass) {
+  DCHECK(klass != nullptr);
 
-  basket.classId = refTypeId;
-  basket.threadId = Dbg::GetThreadSelfId();
-  basket.className = Dbg::GetClassName(basket.classId);
+  ModBasket basket;
+  basket.locationClass = klass;
+  basket.thread = Thread::Current();
+  basket.className = Dbg::GetClassName(basket.locationClass);
 
   /* suppress class prep caused by debugger */
   if (InvokeInProgress()) {
@@ -1167,28 +1230,44 @@
 
   ExpandBuf* pReq = NULL;
   JdwpSuspendPolicy suspend_policy = SP_NONE;
+  JdwpEvent** match_list = nullptr;
   size_t match_count = 0;
+  ObjectId thread_id = 0;
   {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    JdwpEvent** match_list = AllocMatchList(event_list_size_);
-    FindMatchingEvents(EK_CLASS_PREPARE, basket, match_list, &match_count);
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      match_list = AllocMatchList(event_list_size_);
+      FindMatchingEvents(EK_CLASS_PREPARE, basket, match_list, &match_count);
+    }
     if (match_count != 0) {
       suspend_policy = scanSuspendPolicy(match_list, match_count);
 
+      thread_id = Dbg::GetThreadId(basket.thread);
+      ObjectRegistry* registry = Dbg::GetObjectRegistry();
+      RefTypeId class_id = registry->AddRefType(basket.locationClass);
+
+      // OLD-TODO - we currently always send both "verified" and "prepared" since
+      // debuggers seem to like that.  There might be some advantage to honesty,
+      // since the class may not yet be verified.
+      int status = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
+      JDWP::JdwpTypeTag tag = Dbg::GetTypeTag(basket.locationClass);
+      std::string temp;
+      std::string signature(basket.locationClass->GetDescriptor(&temp));
+
       if (VLOG_IS_ON(jdwp)) {
-        LogMatchingEventsAndThread(match_list, match_count, basket);
-        VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, basket.classId)<< " " << signature;
+        LogMatchingEventsAndThread(match_list, match_count, thread_id);
+        VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, class_id) << " " << signature;
         VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
       }
 
-      if (basket.threadId == debug_thread_id_) {
+      if (thread_id == debug_thread_id_) {
         /*
          * JDWP says that, for a class prep in the debugger thread, we
-         * should set threadId to null and if any threads were supposed
+         * should set thread to null and if any threads were supposed
          * to be suspended then we suspend all other threads.
          */
         VLOG(jdwp) << "  NOTE: class prepare in debugger thread!";
-        basket.threadId = 0;
+        thread_id = 0;
         if (suspend_policy == SP_EVENT_THREAD) {
           suspend_policy = SP_ALL;
         }
@@ -1201,20 +1280,23 @@
       for (size_t i = 0; i < match_count; i++) {
         expandBufAdd1(pReq, match_list[i]->eventKind);
         expandBufAdd4BE(pReq, match_list[i]->requestId);
-        expandBufAdd8BE(pReq, basket.threadId);
-
+        expandBufAdd8BE(pReq, thread_id);
         expandBufAdd1(pReq, tag);
-        expandBufAdd8BE(pReq, refTypeId);
+        expandBufAdd8BE(pReq, class_id);
         expandBufAddUtf8String(pReq, signature);
         expandBufAdd4BE(pReq, status);
       }
     }
-    CleanupMatchList(match_list, match_count);
+
+    {
+      MutexLock mu(Thread::Current(), event_list_lock_);
+      CleanupMatchList(match_list, match_count);
+    }
   }
 
   Dbg::ManageDeoptimization();
 
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, basket.threadId);
+  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
 
   return match_count != 0;
 }
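
Every Post* function above now follows the same three-phase shape, which is what the added LOCKS_EXCLUDED(event_list_lock_) annotations in jdwp.h document: hold event_list_lock_ only while scanning for matches, drop it while ids are allocated and the reply packet is built (those paths may take other locks), then retake it to clean up the match list. Schematically (`kind`, `basket` and `BuildReply` stand in for the event-specific details):

    // Shape of the new locking discipline; a sketch, not literal code.
    JdwpEvent** match_list = nullptr;
    size_t match_count = 0;
    {
      MutexLock mu(Thread::Current(), event_list_lock_);  // phase 1: scan
      match_list = AllocMatchList(event_list_size_);
      FindMatchingEvents(kind, basket, match_list, &match_count);
    }
    if (match_count != 0) {
      // Phase 2, lock free: ObjectRegistry::Add and friends may take locks
      // that must never nest inside event_list_lock_.
      BuildReply(match_list, match_count);
    }
    {
      MutexLock mu(Thread::Current(), event_list_lock_);  // phase 3: cleanup
      CleanupMatchList(match_list, match_count);
    }
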
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 35095f9..e0a83f6 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -151,7 +151,12 @@
     /* show detailed debug output */
     if (resultTag == JT_STRING && exceptObjId == 0) {
       if (resultValue != 0) {
-        VLOG(jdwp) << "      string '" << Dbg::StringToUtf8(resultValue) << "'";
+        if (VLOG_IS_ON(jdwp)) {
+          std::string result_string;
+          JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
+          CHECK_EQ(error, JDWP::ERR_NONE);
+          VLOG(jdwp) << "      string '" << result_string << "'";
+        }
       } else {
         VLOG(jdwp) << "      string (null)";
       }
@@ -220,7 +225,7 @@
 static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   std::vector<ObjectId> thread_ids;
-  Dbg::GetThreads(0, &thread_ids);
+  Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
 
   expandBufAdd4BE(pReply, thread_ids.size());
   for (uint32_t i = 0; i < thread_ids.size(); ++i) {
@@ -919,7 +924,11 @@
 static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectId stringObject = request->ReadObjectId();
-  std::string str(Dbg::StringToUtf8(stringObject));
+  std::string str;
+  JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
+  if (error != JDWP::ERR_NONE) {
+    return error;
+  }
 
   VLOG(jdwp) << StringPrintf("    --> %s", PrintableString(str.c_str()).c_str());
 
@@ -1141,10 +1150,7 @@
 static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectId thread_group_id = request->ReadThreadGroupId();
-
-  expandBufAddUtf8String(pReply, Dbg::GetThreadGroupName(thread_group_id));
-
-  return ERR_NONE;
+  return Dbg::GetThreadGroupName(thread_group_id, pReply);
 }
 
 /*
@@ -1154,11 +1160,7 @@
 static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectId thread_group_id = request->ReadThreadGroupId();
-
-  ObjectId parentGroup = Dbg::GetThreadGroupParent(thread_group_id);
-  expandBufAddObjectId(pReply, parentGroup);
-
-  return ERR_NONE;
+  return Dbg::GetThreadGroupParent(thread_group_id, pReply);
 }
 
 /*
@@ -1168,22 +1170,7 @@
 static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   ObjectId thread_group_id = request->ReadThreadGroupId();
-
-  std::vector<ObjectId> thread_ids;
-  Dbg::GetThreads(thread_group_id, &thread_ids);
-  expandBufAdd4BE(pReply, thread_ids.size());
-  for (uint32_t i = 0; i < thread_ids.size(); ++i) {
-    expandBufAddObjectId(pReply, thread_ids[i]);
-  }
-
-  std::vector<ObjectId> child_thread_groups_ids;
-  Dbg::GetChildThreadGroups(thread_group_id, &child_thread_groups_ids);
-  expandBufAdd4BE(pReply, child_thread_groups_ids.size());
-  for (uint32_t i = 0; i < child_thread_groups_ids.size(); ++i) {
-    expandBufAddObjectId(pReply, child_thread_groups_ids[i]);
-  }
-
-  return ERR_NONE;
+  return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
 }
 
 /*
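
The handler changes share one idiom: helpers that used to return values directly now return a JdwpError and write their result through an out-parameter or straight into the reply buffer, so a stale ObjectId comes back as a proper JDWP error instead of crashing the runtime. The resulting handler shape, with a hypothetical command as the example:

    // Sketch of the error-propagating handler pattern used above.
    static JdwpError ExampleStringHandler(JdwpState*, Request* request,
                                          ExpandBuf* pReply)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      ObjectId string_id = request->ReadObjectId();
      std::string value;
      JDWP::JdwpError error = Dbg::StringToUtf8(string_id, &value);
      if (error != JDWP::ERR_NONE) {
        return error;  // e.g. the object was collected or never existed
      }
      expandBufAddUtf8String(pReply, value);
      return ERR_NONE;
    }
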
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 0e46d5c..faddff1 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -63,7 +63,8 @@
 
   JDWP::ObjectId Add(mirror::Object* o)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::thread_list_lock_);
-  JDWP::RefTypeId AddRefType(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  JDWP::RefTypeId AddRefType(mirror::Class* c)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::thread_list_lock_);
 
   template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -80,10 +81,14 @@
 
   void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void DisableCollection(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void EnableCollection(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DisableCollection(JDWP::ObjectId id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
 
-  bool IsCollected(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void EnableCollection(JDWP::ObjectId id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
+  bool IsCollected(JDWP::ObjectId id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
 
   void DisposeObject(JDWP::ObjectId id, uint32_t reference_count)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -94,13 +99,24 @@
 
  private:
   JDWP::ObjectId InternalAdd(mirror::Object* o)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::thread_list_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(lock_, Locks::thread_list_lock_);
+
   mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void Demote(ObjectRegistryEntry& entry) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, lock_);
-  void Promote(ObjectRegistryEntry& entry) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(lock_);
+
+  void Demote(ObjectRegistryEntry& entry)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  void Promote(ObjectRegistryEntry& entry)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
   bool Contains(mirror::Object* o, ObjectRegistryEntry** out_entry)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+
   bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
                       ObjectRegistryEntry** out_entry)
       EXCLUSIVE_LOCKS_REQUIRED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
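
The object_registry.h hunk is purely annotation work: LOCKS_EXCLUDED names locks the function acquires itself (so callers must not already hold them), while EXCLUSIVE_LOCKS_REQUIRED names locks the caller must hold on entry, and Clang's thread-safety analysis checks both at compile time. The contract in miniature, on a hypothetical class using ART's mutex macros:

    // Sketch of the annotation contract tightened above.
    class Registry {
     public:
      // Takes lock_ internally; calling with lock_ held would deadlock,
      // and the analysis now rejects such callers.
      void Insert(int value) LOCKS_EXCLUDED(lock_);

     private:
      // May only run under lock_; Insert() calls it with the lock held.
      void InsertLocked(int value) EXCLUSIVE_LOCKS_REQUIRED(lock_);

      Mutex lock_;
    };
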
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 3e9ae09..bf979c1 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -108,7 +108,7 @@
   }
   StackHandleScope<1> hs(self);
   Handle<mirror::Class> h_klass(hs.NewHandle(klass));
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true)) {
+  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
     return nullptr;
   }
   return h_klass.Get();
@@ -1695,7 +1695,6 @@
     ScopedObjectAccess soa(env);
     mirror::String* s = soa.Decode<mirror::String*>(java_string);
     mirror::CharArray* chars = s->GetCharArray();
-    soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
     gc::Heap* heap = Runtime::Current()->GetHeap();
     if (heap->IsMovableObject(chars)) {
       if (is_copy != nullptr) {
@@ -1724,7 +1723,6 @@
     if (chars != (s_chars->GetData() + s->GetOffset())) {
       delete[] chars;
     }
-    soa.Vm()->UnpinPrimitiveArray(soa.Self(), s->GetCharArray());
   }
 
   static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* is_copy) {
@@ -1733,7 +1731,6 @@
     mirror::String* s = soa.Decode<mirror::String*>(java_string);
     mirror::CharArray* chars = s->GetCharArray();
     int32_t offset = s->GetOffset();
-    soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
     gc::Heap* heap = Runtime::Current()->GetHeap();
     if (heap->IsMovableObject(chars)) {
       StackHandleScope<1> hs(soa.Self());
@@ -1749,8 +1746,6 @@
   static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
     CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
     ScopedObjectAccess soa(env);
-    soa.Vm()->UnpinPrimitiveArray(soa.Self(),
-                                  soa.Decode<mirror::String*>(java_string)->GetCharArray());
     gc::Heap* heap = Runtime::Current()->GetHeap();
     mirror::String* s = soa.Decode<mirror::String*>(java_string);
     mirror::CharArray* s_chars = s->GetCharArray();
@@ -1906,7 +1901,6 @@
       // Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
       array = soa.Decode<mirror::Array*>(java_array);
     }
-    soa.Vm()->PinPrimitiveArray(soa.Self(), array);
     if (is_copy != nullptr) {
       *is_copy = JNI_FALSE;
     }
@@ -2331,7 +2325,6 @@
     if (UNLIKELY(array == nullptr)) {
       return nullptr;
     }
-    soa.Vm()->PinPrimitiveArray(soa.Self(), array);
     // Only make a copy if necessary.
     if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
       if (is_copy != nullptr) {
@@ -2394,7 +2387,6 @@
         // Non copy to a movable object must means that we had disabled the moving GC.
         heap->DecrementDisableMovingGC(soa.Self());
       }
-      soa.Vm()->UnpinPrimitiveArray(soa.Self(), array);
     }
   }
 
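
Dropping PinPrimitiveArray/UnpinPrimitiveArray leaves the movable-object check as the only mechanism: movable backing storage is copied out (or, for the *Critical entry points, the moving GC is temporarily disabled), and non-movable storage is exposed directly, so there is no longer a pin table to keep in sync. A reduced sketch of the copy-vs-direct decision (GetCharsSketch is illustrative only, not a function in this patch):

    // Sketch: the decision that replaces array pinning.
    static const jchar* GetCharsSketch(mirror::CharArray* chars,
                                       jboolean* is_copy) {
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap->IsMovableObject(chars)) {
        // The collector may relocate this array: hand out a stable copy.
        if (is_copy != nullptr) {
          *is_copy = JNI_TRUE;
        }
        jchar* copy = new jchar[chars->GetLength()];
        memcpy(copy, chars->GetData(), chars->GetLength() * sizeof(jchar));
        return copy;
      }
      // Non-movable spaces never relocate, so the data can be exposed as-is.
      if (is_copy != nullptr) {
        *is_copy = JNI_FALSE;
      }
      return chars->GetData();
    }
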
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 9af835f..143f4bc 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -26,7 +26,9 @@
 
 namespace art {
 
-inline bool MethodHelper::HasSameNameAndSignature(MethodHelper* other) {
+template <template <class T> class HandleKind>
+template <template <class T2> class HandleKind2>
+inline bool MethodHelperT<HandleKind>::HasSameNameAndSignature(MethodHelperT<HandleKind2>* other) {
   const DexFile* dex_file = method_->GetDexFile();
   const DexFile::MethodId& mid = dex_file->GetMethodId(GetMethod()->GetDexMethodIndex());
   if (method_->GetDexCache() == other->method_->GetDexCache()) {
@@ -43,7 +45,9 @@
   return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid);
 }
 
-inline mirror::Class* MethodHelper::GetClassFromTypeIdx(uint16_t type_idx, bool resolve) {
+template <template <class T> class HandleKind>
+inline mirror::Class* MethodHelperT<HandleKind>::GetClassFromTypeIdx(uint16_t type_idx,
+                                                                     bool resolve) {
   mirror::ArtMethod* method = GetMethod();
   mirror::Class* type = method->GetDexCacheResolvedType(type_idx);
   if (type == nullptr && resolve) {
@@ -53,7 +57,8 @@
   return type;
 }
 
-inline mirror::Class* MethodHelper::GetReturnType(bool resolve) {
+template <template <class T> class HandleKind>
+inline mirror::Class* MethodHelperT<HandleKind>::GetReturnType(bool resolve) {
   mirror::ArtMethod* method = GetMethod();
   const DexFile* dex_file = method->GetDexFile();
   const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
@@ -62,7 +67,8 @@
   return GetClassFromTypeIdx(return_type_idx, resolve);
 }
 
-inline mirror::String* MethodHelper::ResolveString(uint32_t string_idx) {
+template <template <class T> class HandleKind>
+inline mirror::String* MethodHelperT<HandleKind>::ResolveString(uint32_t string_idx) {
   mirror::ArtMethod* method = GetMethod();
   mirror::String* s = method->GetDexCacheStrings()->Get(string_idx);
   if (UNLIKELY(s == nullptr)) {
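
The doubled template headers above are just the out-of-line spelling for a member template of a class template: the first parameter list binds the enclosing class's parameter, the second the member's own. Stripped of the ART specifics:

    // Out-of-line definition of a member template inside a class template.
    template <template <class T> class Outer>
    struct S {
      template <template <class T2> class Inner>
      bool SameAs(S<Inner>* other);
    };

    template <template <class T> class Outer>   // class's parameter list first
    template <template <class T2> class Inner>  // then the member's
    bool S<Outer>::SameAs(S<Inner>* other) {
      return other != nullptr;  // placeholder body
    }
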
diff --git a/runtime/method_helper.cc b/runtime/method_helper.cc
index d6f83a8..79c2b91 100644
--- a/runtime/method_helper.cc
+++ b/runtime/method_helper.cc
@@ -25,7 +25,8 @@
 
 namespace art {
 
-mirror::String* MethodHelper::GetNameAsString(Thread* self) {
+template <template <class T> class HandleKind>
+mirror::String* MethodHelperT<HandleKind>::GetNameAsString(Thread* self) {
   const DexFile* dex_file = method_->GetDexFile();
   mirror::ArtMethod* method = method_->GetInterfaceMethodIfProxy();
   uint32_t dex_method_idx = method->GetDexMethodIndex();
@@ -36,7 +37,10 @@
                                                              dex_cache);
 }
 
-bool MethodHelper::HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) {
+template <template <class T> class HandleKind>
+template <template <class T2> class HandleKind2>
+bool MethodHelperT<HandleKind>::HasSameSignatureWithDifferentClassLoaders(
+    MethodHelperT<HandleKind2>* other) {
   if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
     return false;
   }
@@ -62,7 +66,8 @@
   return true;
 }
 
-uint32_t MethodHelper::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
+template <template <class T> class HandleKind>
+uint32_t MethodHelperT<HandleKind>::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtMethod* method = GetMethod();
   const DexFile* dexfile = method->GetDexFile();
@@ -102,8 +107,9 @@
   return DexFile::kDexNoIndex;
 }
 
-uint32_t MethodHelper::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
-                                                        uint32_t name_and_signature_idx)
+template <template <class T> class HandleKind>
+uint32_t MethodHelperT<HandleKind>::FindDexMethodIndexInOtherDexFile(
+    const DexFile& other_dexfile, uint32_t name_and_signature_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::ArtMethod* method = GetMethod();
   const DexFile* dexfile = method->GetDexFile();
@@ -133,4 +139,38 @@
   return DexFile::kDexNoIndex;
 }
 
+// Instantiate methods.
+template mirror::String* MethodHelperT<Handle>::GetNameAsString(Thread* self);
+
+template mirror::String* MethodHelperT<MutableHandle>::GetNameAsString(Thread* self);
+
+template
+uint32_t MethodHelperT<Handle>::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile);
+template
+uint32_t MethodHelperT<MutableHandle>::FindDexMethodIndexInOtherDexFile(
+    const DexFile& other_dexfile);
+
+template
+uint32_t MethodHelperT<Handle>::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
+                                                                 uint32_t name_and_signature_idx);
+template
+uint32_t MethodHelperT<MutableHandle>::FindDexMethodIndexInOtherDexFile(
+    const DexFile& other_dexfile, uint32_t name_and_signature_idx);
+
+template
+bool MethodHelperT<Handle>::HasSameSignatureWithDifferentClassLoaders<Handle>(
+    MethodHelperT<Handle>* other);
+
+template
+bool MethodHelperT<Handle>::HasSameSignatureWithDifferentClassLoaders<MutableHandle>(
+    MethodHelperT<MutableHandle>* other);
+
+template
+bool MethodHelperT<MutableHandle>::HasSameSignatureWithDifferentClassLoaders<Handle>(
+    MethodHelperT<Handle>* other);
+
+template
+bool MethodHelperT<MutableHandle>::HasSameSignatureWithDifferentClassLoaders<MutableHandle>(
+    MethodHelperT<MutableHandle>* other);
+
 }  // namespace art
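
Because those member definitions live in the .cc file rather than the header, every (HandleKind, HandleKind2) combination that other translation units use must be instantiated explicitly, which is what the `template ...` block above enumerates; a missing line shows up as an unresolved symbol at link time, not a compile error. The mechanism, reduced to a toy:

    // Toy model of the explicit-instantiation block above.
    template <class T> struct Handle { T* ptr; };
    template <class T> struct MutableHandle { T* ptr; };

    template <template <class T> class HandleKind>
    struct HelperT {
      int Size();  // defined in the .cc, not the header
    };

    template <template <class T> class HandleKind>
    int HelperT<HandleKind>::Size() {
      return static_cast<int>(sizeof(HandleKind<int>));
    }

    // Without these two lines, callers in other files fail to link.
    template int HelperT<Handle>::Size();
    template int HelperT<MutableHandle>::Size();
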
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index 8150456..fe364d3 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -24,17 +24,11 @@
 
 namespace art {
 
-class MethodHelper {
+template <template <class T> class HandleKind>
+class MethodHelperT {
  public:
-  explicit MethodHelper(Handle<mirror::ArtMethod> m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : method_(m), shorty_(nullptr), shorty_len_(0) {
-    SetMethod(m.Get());
-  }
-
-  void ChangeMethod(mirror::ArtMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(new_m != nullptr);
-    SetMethod(new_m);
-    shorty_ = nullptr;
+  explicit MethodHelperT(HandleKind<mirror::ArtMethod> m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : method_(m), shorty_(nullptr), shorty_len_(0) {
   }
 
   mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -110,10 +104,12 @@
     return GetParamPrimitiveType(param) == Primitive::kPrimNot;
   }
 
-  ALWAYS_INLINE bool HasSameNameAndSignature(MethodHelper* other)
+  template <template <class T> class HandleKind2>
+  ALWAYS_INLINE bool HasSameNameAndSignature(MethodHelperT<HandleKind2>* other)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool HasSameSignatureWithDifferentClassLoaders(MethodHelper* other)
+  template <template <class T> class HandleKind2>
+  bool HasSameSignatureWithDifferentClassLoaders(MethodHelperT<HandleKind2>* other)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true)
@@ -130,6 +126,33 @@
                                             uint32_t name_and_signature_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+ protected:
+  HandleKind<mirror::ArtMethod> method_;
+
+  const char* shorty_;
+  uint32_t shorty_len_;
+
+ private:
+  template <template <class T2> class HandleKind2> friend class MethodHelperT;
+
+  DISALLOW_COPY_AND_ASSIGN(MethodHelperT);
+};
+
+class MethodHelper : public MethodHelperT<Handle> {
+  using MethodHelperT<Handle>::MethodHelperT;
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodHelper);
+};
+
+class MutableMethodHelper : public MethodHelperT<MutableHandle> {
+  using MethodHelperT<MutableHandle>::MethodHelperT;
+ public:
+  void ChangeMethod(mirror::ArtMethod* new_m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(new_m != nullptr);
+    SetMethod(new_m);
+    shorty_ = nullptr;
+  }
+
  private:
   // Set the method_ field, for proxy methods looking up the interface method via the resolved
   // methods table.
@@ -137,11 +160,7 @@
     method_.Assign(method);
   }
 
-  Handle<mirror::ArtMethod> method_;
-  const char* shorty_;
-  uint32_t shorty_len_;
-
-  DISALLOW_COPY_AND_ASSIGN(MethodHelper);
+  DISALLOW_COPY_AND_ASSIGN(MutableMethodHelper);
 };
 
 }  // namespace art
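
One subtlety in the wrappers above: the constructor-inheriting using-declarations sit in an effectively private section, which is harmless, since C++11 gives an inherited constructor the same access it has in the base class regardless of where the using-declaration appears. A toy demonstrating that rule:

    // Sketch: inherited constructors keep their base-class access.
    struct Base {
      explicit Base(int v) : v_(v) {}
     protected:
      int v_;
    };

    class Derived : public Base {
      using Base::Base;  // private section, but Base(int) stays public
    };

    Derived d(42);  // compiles: the inherited constructor is public
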
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index f54af85..4535f6c 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -94,7 +94,7 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   mirror::Class* element_class_ptr = element_class.Get();
   StackHandleScope<1> hs(self);
-  Handle<mirror::Class> array_class(
+  MutableHandle<mirror::Class> array_class(
       hs.NewHandle(class_linker->FindArrayClass(self, &element_class_ptr)));
   if (UNLIKELY(array_class.Get() == nullptr)) {
     CHECK(self->IsExceptionPending());
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index ae17070..8447616 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -285,14 +285,17 @@
 }
 
 inline StackMap ArtMethod::GetStackMap(uint32_t native_pc_offset) {
+  return GetOptimizedCodeInfo().GetStackMapForNativePcOffset(native_pc_offset);
+}
+
+inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
   DCHECK(IsOptimized());
   const void* code_pointer = GetQuickOatCodePointer();
   DCHECK(code_pointer != nullptr);
   uint32_t offset =
       reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
   const void* data = reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
-  CodeInfo code_info(data);
-  return code_info.GetStackMapForNativePcOffset(native_pc_offset);
+  return CodeInfo(data);
 }
 
 inline void ArtMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 131f5d6..159d04d 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -143,7 +143,7 @@
     } else {
       StackHandleScope<2> hs(Thread::Current());
       MethodHelper mh(hs.NewHandle(this));
-      MethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
+      MutableMethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
       IfTable* iftable = GetDeclaringClass()->GetIfTable();
       for (size_t i = 0; i < iftable->Count() && result == NULL; i++) {
         Class* interface = iftable->GetInterface(i);
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d37aa57..de6ec05 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -155,7 +155,9 @@
     // Temporary solution for detecting if a method has been optimized: the compiler
     // does not create a GC map. Instead, the vmap table contains the stack map
     // (as in stack_map.h).
-    return (GetEntryPointFromQuickCompiledCode() != nullptr) && (GetNativeGcMap() == nullptr);
+    return (GetEntryPointFromQuickCompiledCode() != nullptr)
+        && (GetQuickOatCodePointer() != nullptr)
+        && (GetNativeGcMap() == nullptr);
   }
 
   bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -349,6 +351,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   StackMap GetStackMap(uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 726e928..3f67468 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -741,6 +741,15 @@
   klass->SetDexTypeIndex(DexFile::kDexNoIndex16);  // Default to no valid type index.
 }
 
+inline void Class::SetAccessFlags(uint32_t new_access_flags) {
+  // Called inside a transaction when setting pre-verified flag during boot image compilation.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
+  } else {
+    SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
+  }
+}
+
 }  // namespace mirror
 }  // namespace art
 
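
SetAccessFlags moves out of line because it must now be transaction-aware: SetPreverified runs inside a transaction during boot-image compilation, and SetField32's boolean template parameter (which decides whether the old value is recorded for rollback) must be a compile-time constant, hence the runtime branch into two instantiations rather than a passed-through bool. The underlying idiom as a toy; RecordForRollback is hypothetical:

    #include <cstdint>

    void RecordForRollback(uint32_t* field, uint32_t old_value);  // hypothetical undo log

    // Toy model of the compile-time transaction flag.
    template <bool kTransactionActive>
    void SetWord(uint32_t* field, uint32_t value) {
      if (kTransactionActive) {
        RecordForRollback(field, *field);  // only in the <true> instantiation
      }
      *field = value;  // the <false> instantiation is a plain store
    }

    void SetWordChecked(uint32_t* field, uint32_t value, bool in_transaction) {
      // A runtime bool cannot be a template argument, so branch to the two
      // separately compiled instantiations.
      if (in_transaction) {
        SetWord<true>(field, value);
      } else {
        SetWord<false>(field, value);
      }
    }
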
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index e7d8163..0ee8fa8 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -760,7 +760,7 @@
   return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_;
 }
 
-mirror::Class* Class::GetDirectInterface(Thread* self, ConstHandle<mirror::Class> klass,
+mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
                                          uint32_t idx) {
   DCHECK(klass.Get() != nullptr);
   DCHECK(!klass->IsPrimitive());
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index cf9501a..4a8d6dc 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -65,7 +65,6 @@
 namespace art {
 
 struct ClassOffsets;
-template<class T> class ConstHandle;
 template<class T> class Handle;
 class Signature;
 class StringPiece;
@@ -98,6 +98,12 @@
 
   // Class Status
   //
+  // kStatusRetired: a temporary class that is used until class linking time,
+  // by which point its (vtable) size has been figured out and it has been
+  // cloned to one with the right size, which is the one used from then on.
+  // The old one is retired and will be gc'ed once all refs to the class
+  // point to the newly cloned version.
+  //
   // kStatusNotReady: If a Class cannot be found in the class table by
   // FindClass, it allocates an new one with AllocClass in the
   // kStatusNotReady and calls LoadClass. Note if it does find a
@@ -133,7 +139,7 @@
   //
   // TODO: Explain the other states
   enum Status {
-    kStatusRetired = -2,
+    kStatusRetired = -2,  // Retired, should not be used. Use the newly cloned one instead.
     kStatusError = -1,
     kStatusNotReady = 0,
     kStatusIdx = 1,  // Loaded, DEX idx in super_class_type_idx_ and interfaces_type_idx_.
@@ -219,10 +225,7 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // Not called within a transaction.
-    SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
-  }
+  void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the class is an interface.
   bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -263,6 +266,16 @@
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
+  // Returns true if the class can avoid access checks.
+  bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return (GetAccessFlags() & kAccPreverified) != 0;
+  }
+
+  void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+    SetAccessFlags(flags | kAccPreverified);
+  }
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0;
@@ -977,7 +990,7 @@
 
   uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static mirror::Class* GetDirectInterface(Thread* self, ConstHandle<mirror::Class> klass,
+  static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
                                            uint32_t idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index aa181ee..1290a3d 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -161,7 +161,7 @@
   ScopedObjectAccess soa(Thread::Current());
   Class* c = class_linker_->FindSystemClass(soa.Self(), "[I");
   StackHandleScope<1> hs(soa.Self());
-  Handle<Array> a(
+  MutableHandle<Array> a(
       hs.NewHandle(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
                                       Runtime::Current()->GetHeap()->GetCurrentAllocator())));
   EXPECT_TRUE(c == a->GetClass());
@@ -184,7 +184,7 @@
   ScopedObjectAccess soa(Thread::Current());
   Class* c = class_linker_->FindSystemClass(soa.Self(), "[B");
   StackHandleScope<1> hs(soa.Self());
-  Handle<Array> a(
+  MutableHandle<Array> a(
       hs.NewHandle(Array::Alloc<true>(soa.Self(), c, 1, c->GetComponentSize(),
                                       Runtime::Current()->GetHeap()->GetCurrentAllocator(), true)));
   EXPECT_TRUE(c == a->GetClass());
@@ -287,7 +287,7 @@
 
   StackHandleScope<2> hs(soa.Self());
   Handle<Class> c(hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "I")));
-  Handle<IntArray> dims(hs.NewHandle(IntArray::Alloc(soa.Self(), 1)));
+  MutableHandle<IntArray> dims(hs.NewHandle(IntArray::Alloc(soa.Self(), 1)));
   dims->Set<false>(0, 1);
   Array* multi = Array::CreateMultiArray(soa.Self(), c, dims);
   EXPECT_TRUE(multi->GetClass() == class_linker_->FindSystemClass(soa.Self(), "[I"));
@@ -485,8 +485,8 @@
   ArtMethod* m4_2 = klass2->GetVirtualMethod(3);
   EXPECT_STREQ(m4_2->GetName(), "m4");
 
-  MethodHelper mh(hs.NewHandle(m1_1));
-  MethodHelper mh2(hs.NewHandle(m1_2));
+  MutableMethodHelper mh(hs.NewHandle(m1_1));
+  MutableMethodHelper mh2(hs.NewHandle(m1_2));
   EXPECT_TRUE(mh.HasSameNameAndSignature(&mh2));
   EXPECT_TRUE(mh2.HasSameNameAndSignature(&mh));
 
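
The test churn is the visible cost of the Handle split: a plain Handle is now a read-only view, and anything that re-points a handle after creation (or calls a helper that does, like MutableMethodHelper::ChangeMethod) must be declared MutableHandle, since only MutableHandle has Assign(). In outline, assuming the usual `soa`/`klass` test setup:

    // Sketch: only MutableHandle can be re-pointed after creation.
    StackHandleScope<2> hs(soa.Self());
    Handle<mirror::Class> c(hs.NewHandle(klass));        // fixed referent
    MutableHandle<mirror::IntArray> a(
        hs.NewHandle(mirror::IntArray::Alloc(soa.Self(), 1)));
    a.Assign(mirror::IntArray::Alloc(soa.Self(), 2));    // fine: mutable
    // c.Assign(other_klass);  // would not compile: Handle has no Assign()
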
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index e599b03..23c18f8 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -19,48 +19,77 @@
 
 #include <stdint.h>
 
-static const uint32_t kAccPublic = 0x0001;  // class, field, method, ic
-static const uint32_t kAccPrivate = 0x0002;  // field, method, ic
-static const uint32_t kAccProtected = 0x0004;  // field, method, ic
-static const uint32_t kAccStatic = 0x0008;  // field, method, ic
-static const uint32_t kAccFinal = 0x0010;  // class, field, method, ic
-static const uint32_t kAccSynchronized = 0x0020;  // method (only allowed on natives)
-static const uint32_t kAccSuper = 0x0020;  // class (not used in dex)
-static const uint32_t kAccVolatile = 0x0040;  // field
-static const uint32_t kAccBridge = 0x0040;  // method (1.5)
-static const uint32_t kAccTransient = 0x0080;  // field
-static const uint32_t kAccVarargs = 0x0080;  // method (1.5)
-static const uint32_t kAccNative = 0x0100;  // method
-static const uint32_t kAccInterface = 0x0200;  // class, ic
-static const uint32_t kAccAbstract = 0x0400;  // class, method, ic
-static const uint32_t kAccStrict = 0x0800;  // method
-static const uint32_t kAccSynthetic = 0x1000;  // class, field, method, ic
-static const uint32_t kAccAnnotation = 0x2000;  // class, ic (1.5)
-static const uint32_t kAccEnum = 0x4000;  // class, field, ic (1.5)
+static constexpr uint32_t kAccPublic =       0x0001;  // class, field, method, ic
+static constexpr uint32_t kAccPrivate =      0x0002;  // field, method, ic
+static constexpr uint32_t kAccProtected =    0x0004;  // field, method, ic
+static constexpr uint32_t kAccStatic =       0x0008;  // field, method, ic
+static constexpr uint32_t kAccFinal =        0x0010;  // class, field, method, ic
+static constexpr uint32_t kAccSynchronized = 0x0020;  // method (only allowed on natives)
+static constexpr uint32_t kAccSuper =        0x0020;  // class (not used in dex)
+static constexpr uint32_t kAccVolatile =     0x0040;  // field
+static constexpr uint32_t kAccBridge =       0x0040;  // method (1.5)
+static constexpr uint32_t kAccTransient =    0x0080;  // field
+static constexpr uint32_t kAccVarargs =      0x0080;  // method (1.5)
+static constexpr uint32_t kAccNative =       0x0100;  // method
+static constexpr uint32_t kAccInterface =    0x0200;  // class, ic
+static constexpr uint32_t kAccAbstract =     0x0400;  // class, method, ic
+static constexpr uint32_t kAccStrict =       0x0800;  // method
+static constexpr uint32_t kAccSynthetic =    0x1000;  // class, field, method, ic
+static constexpr uint32_t kAccAnnotation =   0x2000;  // class, ic (1.5)
+static constexpr uint32_t kAccEnum =         0x4000;  // class, field, ic (1.5)
 
-static const uint32_t kAccMiranda = 0x8000;  // method
+static constexpr uint32_t kAccJavaFlagsMask = 0xffff;  // bits set from Java sources (low 16)
 
-static const uint32_t kAccJavaFlagsMask = 0xffff;  // bits set from Java sources (low 16)
-
-static const uint32_t kAccConstructor = 0x00010000;  // method (dex only) <init> and <clinit>
-static const uint32_t kAccDeclaredSynchronized = 0x00020000;  // method (dex only)
-static const uint32_t kAccClassIsProxy = 0x00040000;  // class (dex only)
-static const uint32_t kAccPreverified = 0x00080000;  // method (dex only)
-static const uint32_t kAccFastNative = 0x0080000;  // method (dex only)
-static const uint32_t kAccPortableCompiled = 0x0100000;  // method (dex only)
+static constexpr uint32_t kAccConstructor =          0x00010000;  // method (dex only) <(cl)init>
+static constexpr uint32_t kAccDeclaredSynchronized = 0x00020000;  // method (dex only)
+static constexpr uint32_t kAccClassIsProxy =         0x00040000;  // class  (dex only)
+static constexpr uint32_t kAccPreverified =          0x00080000;  // class (runtime),
+                                                                  // method (dex only)
+static constexpr uint32_t kAccFastNative =           0x00080000;  // method (dex only)
+static constexpr uint32_t kAccPortableCompiled =     0x00100000;  // method (dex only)
+static constexpr uint32_t kAccMiranda =              0x00200000;  // method (dex only)
 
 // Special runtime-only flags.
 // Note: if only kAccClassIsReference is set, we have a soft reference.
-static const uint32_t kAccClassIsFinalizable        = 0x80000000;  // class/ancestor overrides finalize()
-static const uint32_t kAccClassIsReference          = 0x08000000;  // class is a soft/weak/phantom ref
-static const uint32_t kAccClassIsWeakReference      = 0x04000000;  // class is a weak reference
-static const uint32_t kAccClassIsFinalizerReference = 0x02000000;  // class is a finalizer reference
-static const uint32_t kAccClassIsPhantomReference   = 0x01000000;  // class is a phantom reference
 
-static const uint32_t kAccReferenceFlagsMask = (kAccClassIsReference
-                                                | kAccClassIsWeakReference
-                                                | kAccClassIsFinalizerReference
-                                                | kAccClassIsPhantomReference);
+// class/ancestor overrides finalize()
+static constexpr uint32_t kAccClassIsFinalizable        = 0x80000000;
+// class is a soft/weak/phantom ref
+static constexpr uint32_t kAccClassIsReference          = 0x08000000;
+// class is a weak reference
+static constexpr uint32_t kAccClassIsWeakReference      = 0x04000000;
+// class is a finalizer reference
+static constexpr uint32_t kAccClassIsFinalizerReference = 0x02000000;
+// class is a phantom reference
+static constexpr uint32_t kAccClassIsPhantomReference   = 0x01000000;
+
+static constexpr uint32_t kAccReferenceFlagsMask = (kAccClassIsReference
+                                                  | kAccClassIsWeakReference
+                                                  | kAccClassIsFinalizerReference
+                                                  | kAccClassIsPhantomReference);
+
+// Valid (meaningful) bits for a field.
+static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
+    kAccStatic | kAccFinal | kAccVolatile | kAccTransient | kAccSynthetic | kAccEnum;
+
+// Valid (meaningful) bits for a method.
+static constexpr uint32_t kAccValidMethodFlags = kAccPublic | kAccPrivate | kAccProtected |
+    kAccStatic | kAccFinal | kAccSynchronized | kAccBridge | kAccVarargs | kAccNative |
+    kAccAbstract | kAccStrict | kAccSynthetic | kAccMiranda | kAccConstructor |
+    kAccDeclaredSynchronized;
+
+// Valid (meaningful) bits for a class (not interface).
+// Note 1. These are positive bits. Other bits may have to be zero.
+// Note 2. Inner classes can expose more access flags to Java programs. That is handled by libcore.
+static constexpr uint32_t kAccValidClassFlags = kAccPublic | kAccFinal | kAccSuper |
+    kAccAbstract | kAccSynthetic | kAccEnum;
+
+// Valid (meaningful) bits for an interface.
+// Note 1. Annotations are interfaces.
+// Note 2. These are positive bits. Other bits may have to be zero.
+// Note 3. Inner classes can expose more access flags to Java programs. That is handled by libcore.
+static constexpr uint32_t kAccValidInterfaceFlags = kAccPublic | kAccInterface |
+    kAccAbstract | kAccSynthetic | kAccAnnotation;
 
 #endif  // ART_RUNTIME_MODIFIERS_H_
 
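The validity masks above reduce access-flag checking to one bitwise test: a flag word is well-formed exactly when it sets no bits outside the mask. A standalone sketch of that test, reproducing only a few of the constants for brevity (the subset mask is illustrative, not the full kAccValidFieldFlags):

#include <cassert>
#include <cstdint>

// Illustrative subset of the constants above; values match modifiers.h.
static constexpr uint32_t kAccPublic   = 0x0001;
static constexpr uint32_t kAccStatic   = 0x0008;
static constexpr uint32_t kAccFinal    = 0x0010;
static constexpr uint32_t kAccVolatile = 0x0040;
static constexpr uint32_t kValidFlagsSubset =
    kAccPublic | kAccStatic | kAccFinal | kAccVolatile;

// Valid when no bit outside the mask is set.
static bool AreFlagsValid(uint32_t access_flags) {
  return (access_flags & ~kValidFlagsSubset) == 0;
}

int main() {
  assert(AreFlagsValid(kAccPublic | kAccFinal));
  assert(!AreFlagsValid(0x00040000));  // kAccClassIsProxy: never a field bit.
  return 0;
}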
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index af24368..704e041 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -58,7 +58,7 @@
 static const size_t kMaxHandles = 1000000;  // Use arbitrary large amount for now.
 static void FillHeap(Thread* self, ClassLinker* class_linker,
                      std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
-                     std::vector<Handle<mirror::Object>>* handles)
+                     std::vector<MutableHandle<mirror::Object>>* handles)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
 
@@ -73,7 +73,7 @@
   // Start allocating with 128K
   size_t length = 128 * KB / 4;
   while (length > 10) {
-    Handle<mirror::Object> h((*hsp)->NewHandle<mirror::Object>(
+    MutableHandle<mirror::Object> h((*hsp)->NewHandle<mirror::Object>(
         mirror::ObjectArray<mirror::Object>::Alloc(self, ca.Get(), length / 4)));
     if (self->IsExceptionPending() || h.Get() == nullptr) {
       self->ClearException();
@@ -92,7 +92,7 @@
 
   // Allocate simple objects till it fails.
   while (!self->IsExceptionPending()) {
-    Handle<mirror::Object> h = (*hsp)->NewHandle<mirror::Object>(c->AllocObject(self));
+    MutableHandle<mirror::Object> h = (*hsp)->NewHandle<mirror::Object>(c->AllocObject(self));
     if (!self->IsExceptionPending() && h.Get() != nullptr) {
       handles->push_back(h);
     }
@@ -307,7 +307,7 @@
 
   // Fill the heap.
   std::unique_ptr<StackHandleScope<kMaxHandles>> hsp;
-  std::vector<Handle<mirror::Object>> handles;
+  std::vector<MutableHandle<mirror::Object>> handles;
   {
     Thread* self = Thread::Current();
     ScopedObjectAccess soa(self);
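The Handle-to-MutableHandle changes here (and in proxy_test.cc and thread.cc below) reflect a split in the handle hierarchy: a plain Handle is a read-only view of a GC-visible reference, while MutableHandle additionally supports Assign(). A rough standalone analogue of the distinction; the class definitions are illustrative, not ART's:

#include <cassert>

template <typename T>
class Handle {
 public:
  explicit Handle(T* ref) : ref_(ref) {}
  T* Get() const { return ref_; }

 protected:
  T* ref_;
};

template <typename T>
class MutableHandle : public Handle<T> {
 public:
  using Handle<T>::Handle;
  // Only mutable handles may be retargeted after construction.
  void Assign(T* ref) { this->ref_ = ref; }
};

int main() {
  int a = 1, b = 2;
  MutableHandle<int> mutable_handle(&a);
  mutable_handle.Assign(&b);     // Fine: MutableHandle.
  Handle<int> read_only(&a);     // Has no Assign(); retargeting won't compile.
  assert(*mutable_handle.Get() == 2 && *read_only.Get() == 1);
  return 0;
}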
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 14d6cd9..003815e 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -189,8 +189,8 @@
       StackHandleScope<1> hs(soa.Self());
       Handle<mirror::ClassLoader> class_loader(
           hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
-      mirror::Class* result = class_linker->DefineClass(descriptor.c_str(), class_loader, *dex_file,
-                                                        *dex_class_def);
+      mirror::Class* result = class_linker->DefineClass(soa.Self(), descriptor.c_str(),
+                                                        class_loader, *dex_file, *dex_class_def);
       if (result != nullptr) {
         VLOG(class_linker) << "DexFile_defineClassNative returning " << result;
         return soa.AddLocalReference<jclass>(result);
@@ -504,7 +504,9 @@
   std::string cache_dir;
   bool have_android_data = false;
   bool dalvik_cache_exists = false;
-  GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists);
+  bool is_global_cache = false;
+  GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists,
+                 &is_global_cache);
   std::string cache_filename;  // was cache_location
   bool have_cache_filename = false;
   if (dalvik_cache_exists) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index ceff206..d8a537f 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -60,11 +60,11 @@
 }
 
 static void VMDebug_startAllocCounting(JNIEnv*, jclass) {
-  Runtime::Current()->SetStatsEnabled(true);
+  Runtime::Current()->SetStatsEnabled(true, false);
 }
 
 static void VMDebug_stopAllocCounting(JNIEnv*, jclass) {
-  Runtime::Current()->SetStatsEnabled(false);
+  Runtime::Current()->SetStatsEnabled(false, false);
 }
 
 static jint VMDebug_getAllocCount(JNIEnv*, jclass, jint kind) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index db0a5c5..e1d9fc7 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -238,7 +238,7 @@
 }
 
 // Based on ClassLinker::ResolveType.
-static void PreloadDexCachesResolveType(mirror::DexCache* dex_cache, uint32_t type_idx)
+static void PreloadDexCachesResolveType(Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
   if (klass != NULL) {
@@ -250,7 +250,7 @@
   if (class_name[1] == '\0') {
     klass = linker->FindPrimitiveClass(class_name[0]);
   } else {
-    klass = linker->LookupClass(class_name, NULL);
+    klass = linker->LookupClass(self, class_name, NULL);
   }
   if (klass == NULL) {
     return;
@@ -427,7 +427,6 @@
 
   Runtime* runtime = Runtime::Current();
   ClassLinker* linker = runtime->GetClassLinker();
-  Thread* self = ThreadForEnv(env);
 
   // We use a std::map to avoid heap allocating StringObjects to lookup in gDvm.literalStrings
   StringTable strings;
@@ -440,7 +439,7 @@
   for (size_t i = 0; i< boot_class_path.size(); i++) {
     const DexFile* dex_file = boot_class_path[i];
     CHECK(dex_file != NULL);
-    StackHandleScope<1> hs(self);
+    StackHandleScope<1> hs(soa.Self());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file)));
 
     if (kPreloadDexCachesStrings) {
@@ -451,7 +450,7 @@
 
     if (kPreloadDexCachesTypes) {
       for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
-        PreloadDexCachesResolveType(dex_cache.Get(), i);
+        PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), i);
       }
     }
 
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 124bdf5..b11cbdf 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -78,7 +78,7 @@
     return nullptr;
   }
   if (initialize) {
-    class_linker->EnsureInitialized(c, true, true);
+    class_linker->EnsureInitialized(soa.Self(), c, true, true);
   }
   return soa.AddLocalReference<jclass>(c.Get());
 }
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index fefddae..f6a46bd 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -24,23 +24,31 @@
 
 namespace art {
 
-static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader, jstring javaName) {
+static jclass VMClassLoader_findLoadedClass(JNIEnv* env, jclass, jobject javaLoader,
+                                            jstring javaName) {
   ScopedFastNativeObjectAccess soa(env);
   mirror::ClassLoader* loader = soa.Decode<mirror::ClassLoader*>(javaLoader);
   ScopedUtfChars name(env, javaName);
-  if (name.c_str() == NULL) {
-    return NULL;
+  if (name.c_str() == nullptr) {
+    return nullptr;
   }
-
+  ClassLinker* cl = Runtime::Current()->GetClassLinker();
   std::string descriptor(DotToDescriptor(name.c_str()));
-  mirror::Class* c = Runtime::Current()->GetClassLinker()->LookupClass(descriptor.c_str(), loader);
-  if (c != NULL && c->IsResolved()) {
+  mirror::Class* c = cl->LookupClass(soa.Self(), descriptor.c_str(), loader);
+  if (c != nullptr && c->IsResolved()) {
     return soa.AddLocalReference<jclass>(c);
-  } else {
-    // Class wasn't resolved so it may be erroneous or not yet ready, force the caller to go into
-    // the regular loadClass code.
-    return NULL;
   }
+  if (loader != nullptr) {
+    // Try the common case.
+    StackHandleScope<1> hs(soa.Self());
+    c = cl->FindClassInPathClassLoader(soa, soa.Self(), descriptor.c_str(), hs.NewHandle(loader));
+    if (c != nullptr) {
+      return soa.AddLocalReference<jclass>(c);
+    }
+  }
+  // Class wasn't resolved so it may be erroneous or not yet ready, force the caller to go into
+  // the regular loadClass code.
+  return nullptr;
 }
 
 static jint VMClassLoader_getBootClassPathSize(JNIEnv*, jclass) {
@@ -60,13 +68,15 @@
  * with '/'); if it's not we'd need to make it absolute as part of forming
  * the URL string.
  */
-static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName, jint index) {
+static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName,
+                                                      jint index) {
   ScopedUtfChars name(env, javaName);
   if (name.c_str() == nullptr) {
     return nullptr;
   }
 
-  const std::vector<const DexFile*>& path = Runtime::Current()->GetClassLinker()->GetBootClassPath();
+  const std::vector<const DexFile*>& path =
+      Runtime::Current()->GetClassLinker()->GetBootClassPath();
   if (index < 0 || size_t(index) >= path.size()) {
     return nullptr;
   }
diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc
new file mode 100644
index 0000000..ad48ec0
--- /dev/null
+++ b/runtime/native/java_lang_ref_FinalizerReference.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "jni_internal.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "scoped_fast_native_object_access.h"
+
+namespace art {
+
+static jboolean FinalizerReference_makeCircularListIfUnenqueued(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  mirror::FinalizerReference* const ref = soa.Decode<mirror::FinalizerReference*>(javaThis);
+  return Runtime::Current()->GetHeap()->GetReferenceProcessor()->MakeCircularListIfUnenqueued(ref);
+}
+
+static JNINativeMethod gMethods[] = {
+  NATIVE_METHOD(FinalizerReference, makeCircularListIfUnenqueued, "!()Z"),
+};
+
+void register_java_lang_ref_FinalizerReference(JNIEnv* env) {
+  REGISTER_NATIVE_METHODS("java/lang/ref/FinalizerReference");
+}
+
+}  // namespace art
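NATIVE_METHOD and REGISTER_NATIVE_METHODS wrap plain JNI registration: a JNINativeMethod table handed to RegisterNatives, where the leading '!' in the "!()Z" signature is ART's marker for a fast-native method. A hedged sketch of the underlying mechanism, using a hypothetical class and method and assuming AOSP's jni.h (const char* table fields):

#include <jni.h>

// Hypothetical native implementation of com.example.MyClass.isReady().
static jboolean MyClass_isReady(JNIEnv*, jobject) {
  return JNI_TRUE;
}

static const JNINativeMethod gMyMethods[] = {
  // Java name, JNI signature, implementation.
  {"isReady", "()Z", reinterpret_cast<void*>(MyClass_isReady)},
};

// Roughly what the registration macros expand to for a given class name.
void register_com_example_MyClass(JNIEnv* env) {
  jclass klass = env->FindClass("com/example/MyClass");
  env->RegisterNatives(klass, gMyMethods,
                       sizeof(gMyMethods) / sizeof(gMyMethods[0]));
}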
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 34cb93a..0542aeb 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -48,7 +48,7 @@
     return nullptr;
   }
 
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(c, true, true)) {
+  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), c, true, true)) {
     DCHECK(soa.Self()->IsExceptionPending());
     return nullptr;
   }
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 3903ffc..ad88109 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -97,7 +97,8 @@
     StackHandleScope<2> hs(soa.Self());
     HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(f));
     Handle<mirror::Class> h_klass(hs.NewHandle((*f)->GetDeclaringClass()));
-    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_klass, true, true))) {
+    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), h_klass, true,
+                                                                          true))) {
       DCHECK(soa.Self()->IsExceptionPending());
       *class_or_rcvr = nullptr;
       return false;
diff --git a/runtime/oat.h b/runtime/oat.h
index 6d5fefe..6a32e3e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -152,7 +152,7 @@
 enum OatClassType {
   kOatClassAllCompiled = 0,   // OatClass is followed by an OatMethodOffsets for each method.
   kOatClassSomeCompiled = 1,  // A bitmap of which OatMethodOffsets are present follows the OatClass.
-  kOatClassNoneCompiled = 2,  // All methods are interpretted so no OatMethodOffsets are necessary.
+  kOatClassNoneCompiled = 2,  // All methods are interpreted so no OatMethodOffsets are necessary.
   kOatClassMax = 3,
 };
 
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 97ca6b2..9570bb5 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -21,6 +21,39 @@
 
 namespace art {
 
+inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
+  const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  if (code == nullptr) {
+    return nullptr;
+  }
+  // Return a pointer to the packed struct before the code.
+  return reinterpret_cast<const OatQuickMethodHeader*>(code) - 1;
+}
+
+inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const {
+  const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+  if (method_header == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const byte*>(method_header) - begin_;
+}
+
+inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
+  const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  if (code == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+}
+
+inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
+  const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+  if (method_header == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const byte*>(&method_header->code_size_) - begin_;
+}
+
 inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
   const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
   if (code == nullptr) {
@@ -50,11 +83,27 @@
   return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
 }
 
+inline uint32_t OatFile::OatMethod::GetMappingTableOffsetOffset() const {
+  const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+  if (method_header == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const byte*>(&method_header->mapping_table_offset_) - begin_;
+}
+
 inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
   const uint8_t* vmap_table = GetVmapTable();
   return static_cast<uint32_t>(vmap_table != nullptr ? vmap_table - begin_ : 0u);
 }
 
+inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
+  const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader();
+  if (method_header == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const byte*>(&method_header->vmap_table_offset_) - begin_;
+}
+
 inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
   const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
   if (code == nullptr) {
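All of the new accessors above lean on a single layout invariant: the OatQuickMethodHeader is emitted immediately before the method's code, so stepping back one header from the code pointer (equivalently, indexing [-1]) lands on it. A standalone sketch of the pattern; the header fields are illustrative, and there is no padding between the members as laid out here:

#include <cstdint>
#include <cstdio>

struct MethodHeader {  // stand-in for OatQuickMethodHeader
  uint32_t mapping_table_offset_;
  uint32_t vmap_table_offset_;
  uint32_t code_size_;
};

struct Emitted {       // header first, code bytes directly after
  MethodHeader header;
  uint8_t code[16];
};

static uint32_t GetCodeSize(const void* code) {
  if (code == nullptr) {
    return 0u;
  }
  // Step back one header-sized slot from the code pointer.
  return reinterpret_cast<const MethodHeader*>(code)[-1].code_size_;
}

int main() {
  Emitted e = {{0u, 0u, 16u}, {}};
  printf("code size: %u\n", GetCodeSize(e.code));  // prints 16
  return 0;
}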
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index cf1c6e1..a896f3e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -19,6 +19,7 @@
 #include <dlfcn.h>
 #include <sstream>
 #include <string.h>
+#include <unistd.h>
 
 #include "base/bit_vector.h"
 #include "base/stl_util.h"
@@ -91,6 +92,10 @@
       return nullptr;
     }
     ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg));
+
+    // It would be nice to unlink here. But we might have opened the file created by the
+    // ScopedLock, which we had better not delete to avoid races. TODO: Investigate how to fix
+    // the API to allow removal when we know the ELF must be borked.
   }
   return ret.release();
 }
@@ -449,8 +454,12 @@
                        dex_file_location_checksum_, error_msg);
 }
 
+uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
+  return oat_class_offsets_pointer_[class_def_index];
+}
+
 OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
-  uint32_t oat_class_offset = oat_class_offsets_pointer_[class_def_index];
+  uint32_t oat_class_offset = GetOatClassOffset(class_def_index);
 
   const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
   CHECK_LT(oat_class_pointer, oat_file_->End()) << oat_file_->GetLocation();
@@ -526,49 +535,54 @@
     }
 }
 
-const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+uint32_t OatFile::OatClass::GetOatMethodOffsetsOffset(uint32_t method_index) const {
+  const OatMethodOffsets* oat_method_offsets = GetOatMethodOffsets(method_index);
+  if (oat_method_offsets == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const uint8_t*>(oat_method_offsets) - oat_file_->Begin();
+}
+
+const OatMethodOffsets* OatFile::OatClass::GetOatMethodOffsets(uint32_t method_index) const {
   // NOTE: We don't keep the number of methods and cannot do a bounds check for method_index.
-  if (methods_pointer_ == NULL) {
+  if (methods_pointer_ == nullptr) {
     CHECK_EQ(kOatClassNoneCompiled, type_);
-    return OatMethod(NULL, 0, 0);
+    return nullptr;
   }
   size_t methods_pointer_index;
-  if (bitmap_ == NULL) {
+  if (bitmap_ == nullptr) {
     CHECK_EQ(kOatClassAllCompiled, type_);
     methods_pointer_index = method_index;
   } else {
     CHECK_EQ(kOatClassSomeCompiled, type_);
     if (!BitVector::IsBitSet(bitmap_, method_index)) {
-      return OatMethod(NULL, 0, 0);
+      return nullptr;
     }
     size_t num_set_bits = BitVector::NumSetBits(bitmap_, method_index);
     methods_pointer_index = num_set_bits;
   }
   const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
-  if (oat_file_->IsExecutable()
-      || (Runtime::Current() == nullptr)
-      || Runtime::Current()->IsCompiler()) {
+  return &oat_method_offsets;
+}
+
+const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+  const OatMethodOffsets* oat_method_offsets = GetOatMethodOffsets(method_index);
+  if (oat_method_offsets == nullptr) {
+    return OatMethod(nullptr, 0, 0);
+  }
+  if (oat_file_->IsExecutable() ||
+      Runtime::Current() == nullptr ||        // This case applies for oatdump.
+      Runtime::Current()->IsCompiler()) {
     return OatMethod(
         oat_file_->Begin(),
-        oat_method_offsets.code_offset_,
-        oat_method_offsets.gc_map_offset_);
+        oat_method_offsets->code_offset_,
+        oat_method_offsets->gc_map_offset_);
   } else {
     // We aren't allowed to use the compiled code. We just force it down the interpreted version.
     return OatMethod(oat_file_->Begin(), 0, 0);
   }
 }
 
-
-uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
-  uintptr_t code = reinterpret_cast<uintptr_t>(GetQuickCode());
-  if (code == 0) {
-    return 0;
-  }
-  // TODO: make this Thumb2 specific
-  code &= ~0x1;
-  return reinterpret_cast<uint32_t*>(code)[-1];
-}
-
 void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
   CHECK(method != NULL);
   method->SetEntryPointFromPortableCompiledCode(GetPortableCode());
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 2fd4f4c..b9d5702 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -114,13 +114,22 @@
       }
     }
 
+    // Returns 0.
     uint32_t GetPortableCodeSize() const {
       // TODO: With Quick, we store the size before the code. With Portable, the code is in a .o
       // file we don't manage ourselves. ELF symbols do have a concept of size, so we could capture
       // that and store it somewhere, such as the OatMethod.
       return 0;
     }
+
+    // Returns size of quick code.
     uint32_t GetQuickCodeSize() const;
+    uint32_t GetQuickCodeSizeOffset() const;
+
+    // Returns OatQuickMethodHeader for debugging. Most callers should
+    // use more specific methods such as GetQuickCodeSize.
+    const OatQuickMethodHeader* GetOatQuickMethodHeader() const;
+    uint32_t GetOatQuickMethodHeaderOffset() const;
 
     const uint8_t* GetNativeGcMap() const {
       return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
@@ -129,10 +138,14 @@
     size_t GetFrameSizeInBytes() const;
     uint32_t GetCoreSpillMask() const;
     uint32_t GetFpSpillMask() const;
-    uint32_t GetMappingTableOffset() const;
-    uint32_t GetVmapTableOffset() const;
+
     const uint8_t* GetMappingTable() const;
+    uint32_t GetMappingTableOffset() const;
+    uint32_t GetMappingTableOffsetOffset() const;
+
     const uint8_t* GetVmapTable() const;
+    uint32_t GetVmapTableOffset() const;
+    uint32_t GetVmapTableOffsetOffset() const;
 
     // Create an OatMethod with offsets relative to the given base address
     OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
@@ -176,11 +189,21 @@
     }
 
     // Get the OatMethod entry based on its index into the class
-    // defintion. direct methods come first, followed by virtual
-    // methods. note that runtime created methods such as miranda
+    // definition. Direct methods come first, followed by virtual
+    // methods. Note that runtime created methods such as miranda
     // methods are not included.
     const OatMethod GetOatMethod(uint32_t method_index) const;
 
+    // Return a pointer to the OatMethodOffsets for the requested
+    // method_index, or nullptr if none is present. Note that most
+    // callers should use GetOatMethod.
+    const OatMethodOffsets* GetOatMethodOffsets(uint32_t method_index) const;
+
+    // Return the offset from the start of the OatFile to the
+    // OatMethodOffsets for the requested method_index, or 0 if none
+    // is present. Note that most callers should use GetOatMethod.
+    uint32_t GetOatMethodOffsetsOffset(uint32_t method_index) const;
+
     // A representation of an invalid OatClass, used when an OatClass can't be found.
     // See ClassLinker::FindOatClass.
     static OatClass Invalid() {
@@ -239,6 +262,9 @@
     // Returns the OatClass for the class specified by the given DexFile class_def_index.
     OatClass GetOatClass(uint16_t class_def_index) const;
 
+    // Returns the offset to the OatClass information. Most callers should use GetOatClass.
+    uint32_t GetOatClassOffset(uint16_t class_def_index) const;
+
     ~OatDexFile();
 
    private:
diff --git a/runtime/object_lock.cc b/runtime/object_lock.cc
index a2668ec..f7accc0 100644
--- a/runtime/object_lock.cc
+++ b/runtime/object_lock.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 template <typename T>
-ObjectLock<T>::ObjectLock(Thread* self, ConstHandle<T> object) : self_(self), obj_(object) {
+ObjectLock<T>::ObjectLock(Thread* self, Handle<T> object) : self_(self), obj_(object) {
   CHECK(object.Get() != nullptr);
   obj_->MonitorEnter(self_);
 }
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index 38690bc..acddc03 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -28,7 +28,7 @@
 template <typename T>
 class ObjectLock {
  public:
-  ObjectLock(Thread* self, ConstHandle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -40,7 +40,7 @@
 
  private:
   Thread* const self_;
-  ConstHandle<T> const obj_;
+  Handle<T> const obj_;
 
   DISALLOW_COPY_AND_ASSIGN(ObjectLock);
 };
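ObjectLock, now taking a plain Handle, is a scoped monitor guard: MonitorEnter in the constructor, MonitorExit in the destructor. A minimal standalone analogue of that pattern, with std::mutex standing in for the object's monitor:

#include <mutex>

class ScopedObjectLock {
 public:
  explicit ScopedObjectLock(std::mutex& monitor) : monitor_(monitor) {
    monitor_.lock();    // ObjectLock ctor: obj_->MonitorEnter(self_)
  }
  ~ScopedObjectLock() {
    monitor_.unlock();  // ~ObjectLock: obj_->MonitorExit(self_)
  }
  ScopedObjectLock(const ScopedObjectLock&) = delete;
  ScopedObjectLock& operator=(const ScopedObjectLock&) = delete;

 private:
  std::mutex& monitor_;
};

int main() {
  std::mutex monitor;
  {
    ScopedObjectLock lock(monitor);  // held for the scope, released on exit
  }
  return 0;
}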
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 37e08a5..2bd994d 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -63,6 +63,8 @@
     heap_min_free_(gc::Heap::kDefaultMinFree),
     heap_max_free_(gc::Heap::kDefaultMaxFree),
     heap_non_moving_space_capacity_(gc::Heap::kDefaultNonMovingSpaceCapacity),
+    large_object_space_type_(gc::Heap::kDefaultLargeObjectSpaceType),
+    large_object_threshold_(gc::Heap::kDefaultLargeObjectThreshold),
     heap_target_utilization_(gc::Heap::kDefaultTargetUtilization),
     foreground_heap_growth_multiplier_(gc::Heap::kDefaultHeapGrowthMultiplier),
     parallel_gc_threads_(1),
@@ -78,7 +80,7 @@
     gc::kCollectorTypeCMS),
 #error "ART default GC type must be set"
 #endif
-    background_collector_type_(gc::kCollectorTypeHomogeneousSpaceCompact),
+    background_collector_type_(gc::kCollectorTypeNone),
                                                     // If background_collector_type_ is
                                                     // kCollectorTypeNone, it defaults to the
                                                     // collector_type_ after parsing options. If
@@ -452,6 +454,32 @@
       if (!ParseXGcOption(option)) {
         return false;
       }
+    } else if (StartsWith(option, "-XX:LargeObjectSpace=")) {
+      std::string substring;
+      if (!ParseStringAfterChar(option, '=', &substring)) {
+        return false;
+      }
+      if (substring == "disabled") {
+        large_object_space_type_ = gc::space::kLargeObjectSpaceTypeDisabled;
+      } else if (substring == "freelist") {
+        large_object_space_type_ = gc::space::kLargeObjectSpaceTypeFreeList;
+      } else if (substring == "map") {
+        large_object_space_type_ = gc::space::kLargeObjectSpaceTypeMap;
+      } else {
+        Usage("Unknown -XX:LargeObjectSpace= option %s\n", substring.c_str());
+        return false;
+      }
+    } else if (StartsWith(option, "-XX:LargeObjectThreshold=")) {
+      std::string substring;
+      if (!ParseStringAfterChar(option, '=', &substring)) {
+        return false;
+      }
+      size_t size = ParseMemoryOption(substring.c_str(), 1);
+      if (size == 0) {
+        Usage("Failed to parse memory option %s\n", option.c_str());
+        return false;
+      }
+      large_object_threshold_ = size;
     } else if (StartsWith(option, "-XX:BackgroundGC=")) {
       std::string substring;
       if (!ParseStringAfterChar(option, '=', &substring)) {
@@ -668,6 +696,12 @@
       return false;
     }
   }
+  // If not set, the background collector type defaults to homogeneous space
+  // compaction when not in low-memory mode, and to semispace otherwise.
+  if (background_collector_type_ == gc::kCollectorTypeNone) {
+    background_collector_type_ = low_memory_mode_ ?
+        gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+  }
 
   // If a reference to the dalvik core.jar snuck in, replace it with
   // the art specific version. This can happen with on device
@@ -757,7 +791,6 @@
   UsageMessage(stream, "  -Xstacktracefile:<filename>\n");
   UsageMessage(stream, "  -Xgc:[no]preverify\n");
   UsageMessage(stream, "  -Xgc:[no]postverify\n");
-  UsageMessage(stream, "  -XX:+DisableExplicitGC\n");
   UsageMessage(stream, "  -XX:HeapGrowthLimit=N\n");
   UsageMessage(stream, "  -XX:HeapMinFree=N\n");
   UsageMessage(stream, "  -XX:HeapMaxFree=N\n");
@@ -774,6 +807,7 @@
   UsageMessage(stream, "  -Xgc:[no]postverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]presweepingverify\n");
   UsageMessage(stream, "  -Ximage:filename\n");
+  UsageMessage(stream, "  -XX:+DisableExplicitGC\n");
   UsageMessage(stream, "  -XX:ParallelGCThreads=integervalue\n");
   UsageMessage(stream, "  -XX:ConcGCThreads=integervalue\n");
   UsageMessage(stream, "  -XX:MaxSpinsBeforeThinLockInflation=integervalue\n");
@@ -783,6 +817,8 @@
   UsageMessage(stream, "  -XX:IgnoreMaxFootprint\n");
   UsageMessage(stream, "  -XX:UseTLAB\n");
   UsageMessage(stream, "  -XX:BackgroundGC=none\n");
+  UsageMessage(stream, "  -XX:LargeObjectSpace={disabled,map,freelist}\n");
+  UsageMessage(stream, "  -XX:LargeObjectThreshold=N\n");
   UsageMessage(stream, "  -Xmethod-trace\n");
   UsageMessage(stream, "  -Xmethod-trace-file:filename");
   UsageMessage(stream, "  -Xmethod-trace-file-size:integervalue\n");
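For reference, the two new options parsed above would be supplied like this (an illustrative invocation; the threshold goes through ParseMemoryOption, so the usual memory suffixes such as 16k apply):

  dalvikvm -XX:LargeObjectSpace=freelist -XX:LargeObjectThreshold=16k -cp classes.dex Main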
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 3839e19..26a2f31 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -24,6 +24,7 @@
 
 #include "globals.h"
 #include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
 #include "instruction_set.h"
 #include "profiler_options.h"
 
@@ -72,6 +73,8 @@
   size_t heap_min_free_;
   size_t heap_max_free_;
   size_t heap_non_moving_space_capacity_;
+  gc::space::LargeObjectSpaceType large_object_space_type_;
+  size_t large_object_threshold_;
   double heap_target_utilization_;
   double foreground_heap_growth_multiplier_;
   unsigned int parallel_gc_threads_;
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index a6a2475..cde4177 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -119,12 +119,12 @@
 }
 
 // A closure that is called by the thread checkpoint code.
-class SampleCheckpoint : public Closure {
+class SampleCheckpoint FINAL : public Closure {
  public:
   explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
     profiler_(profiler) {}
 
-  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
+  void Run(Thread* thread) OVERRIDE {
     Thread* self = Thread::Current();
     if (thread == nullptr) {
       LOG(ERROR) << "Checkpoint with nullptr thread";
@@ -192,6 +192,7 @@
       VLOG(profiler) << "Delaying profile start for " << delay_secs << " secs";
       MutexLock mu(self, profiler->wait_lock_);
       profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
+      // We were either signaled by Stop or timed out; in either case, ignore the timed-out result.
 
       // Expand the backoff by its coefficient, but don't go beyond the max.
       backoff = std::min(backoff * profiler->options_.GetBackoffCoefficient(), kMaxBackoffSecs);
@@ -238,17 +239,13 @@
       // is done with a timeout so that we can detect problems with the checkpoint
       // running code.  We should never see this.
       const uint32_t kWaitTimeoutMs = 10000;
-      const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;
 
-      uint64_t waitstart_us = MicroTime();
       // Wait for all threads to pass the barrier.
-      profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
-      uint64_t waitend_us = MicroTime();
-      uint64_t waitdiff_us = waitend_us - waitstart_us;
+      bool timed_out = profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
 
       // We should never get a timeout.  If we do, it suggests a problem with the checkpoint
       // code.  Crash the process in this case.
-      CHECK_LT(waitdiff_us, kWaitTimeoutUs);
+      CHECK(!timed_out);
 
       // Update the current time.
       now_us = MicroTime();
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index d977ce9..1eded62 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -183,7 +183,8 @@
   ASSERT_TRUE(throwsFieldClass.Get() != nullptr);
 
   // Test "Class[] interfaces" field.
-  FieldHelper fh(hs.NewHandle(static_fields->Get(0)));
+  MutableHandle<mirror::ArtField> fhandle = hs.NewHandle(static_fields->Get(0));
+  FieldHelper fh(fhandle);
   EXPECT_EQ("interfaces", std::string(fh.GetField()->GetName()));
   EXPECT_EQ("[Ljava/lang/Class;", std::string(fh.GetField()->GetTypeDescriptor()));
   EXPECT_EQ(interfacesFieldClass.Get(), fh.GetType());
@@ -191,12 +192,13 @@
   EXPECT_FALSE(fh.GetField()->IsPrimitiveType());
 
   // Test "Class[][] throws" field.
-  fh.ChangeField(static_fields->Get(1));
-  EXPECT_EQ("throws", std::string(fh.GetField()->GetName()));
-  EXPECT_EQ("[[Ljava/lang/Class;", std::string(fh.GetField()->GetTypeDescriptor()));
-  EXPECT_EQ(throwsFieldClass.Get(), fh.GetType());
-  EXPECT_EQ("L$Proxy1234;", std::string(fh.GetDeclaringClassDescriptor()));
-  EXPECT_FALSE(fh.GetField()->IsPrimitiveType());
+  fhandle.Assign(static_fields->Get(1));
+  FieldHelper fh2(fhandle);
+  EXPECT_EQ("throws", std::string(fh2.GetField()->GetName()));
+  EXPECT_EQ("[[Ljava/lang/Class;", std::string(fh2.GetField()->GetTypeDescriptor()));
+  EXPECT_EQ(throwsFieldClass.Get(), fh2.GetType());
+  EXPECT_EQ("L$Proxy1234;", std::string(fh2.GetDeclaringClassDescriptor()));
+  EXPECT_FALSE(fh2.GetField()->IsPrimitiveType());
 }
 
 }  // namespace art
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 1ec488e..43d21de 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -211,7 +211,7 @@
     Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
     Handle<mirror::ArtMethod> h_method(hs.NewHandle(m));
-    verifier::MethodVerifier verifier(h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
+    verifier::MethodVerifier verifier(self_, h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
                                       &m->GetClassDef(), code_item, m->GetDexMethodIndex(),
                                       h_method, m->GetAccessFlags(), false, true, true);
     verifier.Verify();
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 7da450c..9fe296a 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -560,7 +560,7 @@
   if (UNLIKELY(!declaring_class->IsInitialized())) {
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(h_class, true, true)) {
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), h_class, true, true)) {
       return nullptr;
     }
     declaring_class = h_class.Get();
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 9d10daa..75211e0 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -117,7 +117,7 @@
       // Ensure class is initialized before allocating object
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(c));
-      bool initialized = class_linker_->EnsureInitialized(h_class, true, true);
+      bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
       CHECK(initialized);
       *receiver = c->AllocObject(self);
     }
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 474b72d..8386cc0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -72,6 +72,7 @@
 #include "reflection.h"
 #include "ScopedLocalRef.h"
 #include "scoped_thread_state_change.h"
+#include "sigchain.h"
 #include "signal_catcher.h"
 #include "signal_set.h"
 #include "handle_scope-inl.h"
@@ -199,7 +200,7 @@
 }
 
 struct AbortState {
-  void Dump(std::ostream& os) NO_THREAD_SAFETY_ANALYSIS {
+  void Dump(std::ostream& os) {
     if (gAborting > 1) {
       os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
       return;
@@ -229,7 +230,9 @@
     DumpAllThreads(os, self);
   }
 
-  void DumpThread(std::ostream& os, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // No thread-safety analysis, as we explicitly test for holding the mutator lock.
+  void DumpThread(std::ostream& os, Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+    DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
     self->Dump(os);
     if (self->IsExceptionPending()) {
       ThrowLocation throw_location;
@@ -240,7 +243,7 @@
     }
   }
 
-  void DumpAllThreads(std::ostream& os, Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+  void DumpAllThreads(std::ostream& os, Thread* self) {
     Runtime* runtime = Runtime::Current();
     if (runtime != nullptr) {
       ThreadList* thread_list = runtime->GetThreadList();
@@ -254,7 +257,7 @@
               << "\n";
         }
         os << "All threads:\n";
-        thread_list->DumpLocked(os);
+        thread_list->Dump(os);
       }
     }
   }
@@ -343,7 +346,7 @@
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> class_loader_class(
       hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
-  CHECK(cl->EnsureInitialized(class_loader_class, true, true));
+  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
 
   mirror::ArtMethod* getSystemClassLoader =
       class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
@@ -359,7 +362,7 @@
 
   Handle<mirror::Class> thread_class(
       hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
-  CHECK(cl->EnsureInitialized(thread_class, true, true));
+  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));
 
   mirror::ArtField* contextClassLoader =
       thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
@@ -404,7 +407,7 @@
     ScopedObjectAccess soa(Thread::Current());
     StackHandleScope<1> hs(soa.Self());
     auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
-    class_linker_->EnsureInitialized(klass, true, true);
+    class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
   }
 
   // InitNativeMethods needs to be after started_ so that the classes
@@ -561,13 +564,15 @@
   std::string cache_filename_unused;
   bool dalvik_cache_exists_unused;
   bool has_cache_unused;
+  bool is_global_cache_unused;
   bool found_image = gc::space::ImageSpace::FindImageFilename(image_location.c_str(),
                                                               kRuntimeISA,
                                                               &system_filename,
                                                               &has_system,
                                                               &cache_filename_unused,
                                                               &dalvik_cache_exists_unused,
-                                                              &has_cache_unused);
+                                                              &has_cache_unused,
+                                                              &is_global_cache_unused);
   *failures = 0;
   if (!found_image || !has_system) {
     return false;
@@ -695,6 +700,8 @@
                        options->image_isa_,
                        options->collector_type_,
                        options->background_collector_type_,
+                       options->large_object_space_type_,
+                       options->large_object_threshold_,
                        options->parallel_gc_threads_,
                        options->conc_gc_threads_,
                        options->low_memory_mode_,
@@ -732,6 +739,11 @@
       break;
   }
 
+  // Always initialize the signal chain so that any calls to sigaction get
+  // correctly routed to the next in the chain regardless of whether we
+  // have claimed the signal or not.
+  InitializeSignalChain();
+
   if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
     fault_manager.Init();
 
@@ -955,6 +967,7 @@
   REGISTER(register_java_lang_System);
   REGISTER(register_java_lang_Thread);
   REGISTER(register_java_lang_VMClassLoader);
+  REGISTER(register_java_lang_ref_FinalizerReference);
   REGISTER(register_java_lang_ref_Reference);
   REGISTER(register_java_lang_reflect_Array);
   REGISTER(register_java_lang_reflect_Constructor);
@@ -993,14 +1006,14 @@
   }
 }
 
-void Runtime::SetStatsEnabled(bool new_state) {
+void Runtime::SetStatsEnabled(bool new_state, bool suspended) {
   if (new_state == true) {
     GetStats()->Clear(~0);
     // TODO: wouldn't it make more sense to clear _all_ threads' stats?
     Thread::Current()->GetStats()->Clear(~0);
-    GetInstrumentation()->InstrumentQuickAllocEntryPoints();
+    GetInstrumentation()->InstrumentQuickAllocEntryPoints(suspended);
   } else {
-    GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
+    GetInstrumentation()->UninstrumentQuickAllocEntryPoints(suspended);
   }
   stats_enabled_ = new_state;
 }
@@ -1145,6 +1158,7 @@
       callee_save_methods_[i].VisitRoot(callback, arg, 0, kRootVMInternal);
     }
   }
+  verifier::MethodVerifier::VisitStaticRoots(callback, arg);
   {
     MutexLock mu(Thread::Current(), method_verifier_lock_);
     for (verifier::MethodVerifier* verifier : method_verifiers_) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9df1453..f9c017b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -197,8 +197,7 @@
   // Detaches the current native thread from the runtime.
   void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
 
-  void DumpForSigQuit(std::ostream& os)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DumpForSigQuit(std::ostream& os);
   void DumpLockHolders(std::ostream& os);
 
   ~Runtime();
@@ -391,7 +390,7 @@
 
   void ResetStats(int kinds);
 
-  void SetStatsEnabled(bool new_state);
+  void SetStatsEnabled(bool new_state, bool suspended);
 
   enum class NativeBridgeAction {  // private
     kUnload,
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index c13776d..336340e 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -28,6 +28,7 @@
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
 #include "gc/heap.h"
+#include "instruction_set.h"
 #include "os.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
@@ -42,20 +43,21 @@
 #if defined(__linux__)
   // Show the original command line, and the current command line too if it's changed.
   // On Android, /proc/self/cmdline will have been rewritten to something like "system_server".
+  // Note: The string "Cmd line:" is chosen to match the format used by debuggerd.
   std::string current_cmd_line;
   if (ReadFileToString("/proc/self/cmdline", &current_cmd_line)) {
-    current_cmd_line.resize(current_cmd_line.size() - 1);  // Lose the trailing '\0'.
+    current_cmd_line.resize(current_cmd_line.find_last_not_of('\0') + 1);  // trim trailing '\0's
     std::replace(current_cmd_line.begin(), current_cmd_line.end(), '\0', ' ');
 
-    os << "Cmdline: " << current_cmd_line;
+    os << "Cmd line: " << current_cmd_line << "\n";
     const char* stashed_cmd_line = GetCmdLine();
-    if (stashed_cmd_line != NULL && current_cmd_line != stashed_cmd_line) {
-      os << "Original command line: " << stashed_cmd_line;
+    if (stashed_cmd_line != NULL && current_cmd_line != stashed_cmd_line
+            && strcmp(stashed_cmd_line, "<unset>") != 0) {
+      os << "Original command line: " << stashed_cmd_line << "\n";
     }
   }
-  os << "\n";
 #else
-  os << "Cmdline: " << GetCmdLine() << "\n";
+  os << "Cmd line: " << GetCmdLine() << "\n";
 #endif
 }
 
@@ -116,23 +118,15 @@
 
 void SignalCatcher::HandleSigQuit() {
   Runtime* runtime = Runtime::Current();
-  ThreadList* thread_list = runtime->GetThreadList();
-
-  // Grab exclusively the mutator lock, set state to Runnable without checking for a pending
-  // suspend request as we're going to suspend soon anyway. We set the state to Runnable to avoid
-  // giving away the mutator lock.
-  thread_list->SuspendAll();
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertExclusiveHeld(self);
-  const char* old_cause = self->StartAssertNoThreadSuspension("Handling SIGQUIT");
-  ThreadState old_state = self->SetStateUnsafe(kRunnable);
-
   std::ostringstream os;
   os << "\n"
       << "----- pid " << getpid() << " at " << GetIsoDate() << " -----\n";
 
   DumpCmdLine(os);
 
+  // Note: The string "ABI:" is chosen to match the format used by debuggerd.
+  os << "ABI: " << GetInstructionSetString(runtime->GetInstructionSet()) << "\n";
+
   os << "Build type: " << (kIsDebugBuild ? "debug" : "optimized") << "\n";
 
   runtime->DumpForSigQuit(os);
@@ -144,14 +138,6 @@
     }
   }
   os << "----- end " << getpid() << " -----\n";
-  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
-  self->EndAssertNoThreadSuspension(old_cause);
-  thread_list->ResumeAll();
-  // Run the checkpoints after resuming the threads to prevent deadlocks if the checkpoint function
-  // acquires the mutator lock.
-  if (self->ReadFlag(kCheckpointRequest)) {
-    self->RunCheckpointFunction();
-  }
   Output(os.str());
 }
 
diff --git a/runtime/stack.h b/runtime/stack.h
index 8e5da35..44e36c4 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -604,8 +604,8 @@
    *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
    *     |      .                        |
    *     |      .                        |
-   *     | V[max_num_special_temps + 1] |
-   *     | V[max_num_special_temps + 0] |
+   *     | V[max_num_special_temps + 1]  |
+   *     | V[max_num_special_temps + 0]  |
    *     +-------------------------------+
    *     | OUT[outs-1]                   |
    *     | OUT[outs-2]                   |
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index bd399e7..6698634 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -23,6 +23,7 @@
 
 #include "base/casts.h"
 #include "base/mutex-inl.h"
+#include "entrypoints/entrypoint_utils-inl.h"
 #include "gc/heap.h"
 #include "jni_env_ext.h"
 
@@ -45,6 +46,26 @@
   }
 }
 
+inline void Thread::AllowThreadSuspension() {
+  DCHECK_EQ(Thread::Current(), this);
+  if (UNLIKELY(TestAllFlags())) {
+    CheckSuspend();
+  }
+}
+
+inline void Thread::CheckSuspend() {
+  DCHECK_EQ(Thread::Current(), this);
+  for (;;) {
+    if (ReadFlag(kCheckpointRequest)) {
+      RunCheckpointFunction();
+    } else if (ReadFlag(kSuspendRequest)) {
+      FullSuspendCheck();
+    } else {
+      break;
+    }
+  }
+}
+
 inline ThreadState Thread::SetState(ThreadState new_state) {
   // Cannot use this code to change into Runnable as changing to Runnable should fail if
   // old_state_and_flags.suspend_request is true.
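AllowThreadSuspension keeps the common case cheap: a single flag read per call, with checkpoint and suspend handling only on the slow path; CheckSuspend loops because servicing one request can leave another pending. A standalone analogue of that fast-path/slow-path split (the flag word and work loop are illustrative):

#include <atomic>
#include <cstdint>
#include <vector>

// Stand-in for the thread's packed state flags; zero means nothing pending.
static std::atomic<uint32_t> g_flags{0};

static void HandleSuspendOrCheckpoint() {
  // Slow path: would loop over checkpoint/suspend requests until none remain.
  g_flags.store(0, std::memory_order_relaxed);
}

static void ProcessAll(const std::vector<int>& work) {
  for (int item : work) {
    if (g_flags.load(std::memory_order_relaxed) != 0) {  // TestAllFlags()
      HandleSuspendOrCheckpoint();                       // CheckSuspend()
    }
    (void)item;  // ... real unit of work here ...
  }
}

int main() {
  ProcessAll({1, 2, 3});
  return 0;
}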
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6e3e9c1..650b0f9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -446,7 +446,7 @@
 
   ScopedObjectAccess soa(self);
   StackHandleScope<1> hs(self);
-  Handle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
+  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
   if (peer_thread_name.Get() == nullptr) {
     // The Thread constructor should have set the Thread.name to a
     // non-null value. However, because we can run without code
@@ -592,7 +592,7 @@
     }
   }
   std::ostringstream ss;
-  Runtime::Current()->GetThreadList()->DumpLocked(ss);
+  Runtime::Current()->GetThreadList()->Dump(ss);
   LOG(FATAL) << ss.str();
 }
 
@@ -961,7 +961,7 @@
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr));
+      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
     }
     DumpJavaStack(os);
   } else {
@@ -1602,7 +1602,8 @@
   ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
 }
 
-void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
+void Thread::ThrowNewException(const ThrowLocation& throw_location,
+                               const char* exception_class_descriptor,
                                const char* msg) {
   // Callers should either clear or call ThrowNewWrappedException.
   AssertNoPendingExceptionForNewException(msg);
@@ -1638,7 +1639,8 @@
     return;
   }
 
-  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class, true, true))) {
+  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
+                                                             true))) {
     DCHECK(IsExceptionPending());
     return;
   }
diff --git a/runtime/thread.h b/runtime/thread.h
index aca4069..d96b50b 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -146,6 +146,12 @@
 
   static Thread* Current();
 
+  // On a runnable thread, check for pending thread suspension request and handle if pending.
+  void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Process pending thread suspension request and handle if pending.
+  void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                    mirror::Object* thread_peer)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
@@ -1029,7 +1035,11 @@
       deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
       pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
       thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
-      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
+      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
+      nested_signal_state(nullptr) {
+        for (size_t i = 0; i < kLockLevelCount; ++i) {
+          held_mutexes[i] = nullptr;
+        }
     }
 
     // The biased card table, see CardTable for details.
@@ -1162,7 +1172,6 @@
   friend class Runtime;  // For CreatePeer.
   friend class QuickExceptionHandler;  // For dumping the stack.
   friend class ScopedThreadStateChange;
-  friend class SignalCatcher;  // For SetStateUnsafe.
   friend class StubTest;  // For accessing entrypoints.
   friend class ThreadList;  // For ~Thread and Destroy.
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index afb98ca..ec5b775 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -88,10 +88,7 @@
 }
 
 void ThreadList::DumpForSigQuit(std::ostream& os) {
-  {
-    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
-    DumpLocked(os);
-  }
+  Dump(os);
   DumpUnattachedThreads(os);
 }
 
@@ -133,12 +130,54 @@
   closedir(d);
 }
 
-void ThreadList::DumpLocked(std::ostream& os) {
-  os << "DALVIK THREADS (" << list_.size() << "):\n";
-  for (const auto& thread : list_) {
-    thread->Dump(os);
-    os << "\n";
+// A closure used by ThreadList::Dump.
+class DumpCheckpoint FINAL : public Closure {
+ public:
+  explicit DumpCheckpoint(std::ostream* os) : os_(os), barrier_(0) {}
+
+  void Run(Thread* thread) OVERRIDE {
+    // Note: thread and self may not be equal if thread was already suspended at the point of the
+    // request.
+    Thread* self = Thread::Current();
+    std::ostringstream local_os;
+    {
+      ScopedObjectAccess soa(self);
+      thread->Dump(local_os);
+    }
+    local_os << "\n";
+    {
+      // Use the logging lock to ensure serialization when writing to the common ostream.
+      MutexLock mu(self, *Locks::logging_lock_);
+      *os_ << local_os.str();
+    }
+    barrier_.Pass(self);
   }
+
+  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
+    Thread* self = Thread::Current();
+    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+    const uint32_t kWaitTimeoutMs = 10000;
+    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kWaitTimeoutMs);
+    if (timed_out) {
+      LOG(kIsDebugBuild ? FATAL : ERROR) << "Unexpected timeout during dump checkpoint.";
+    }
+  }
+
+ private:
+  // The common stream that will accumulate all the dumps.
+  std::ostream* const os_;
+  // The barrier to be passed through and for the requestor to wait upon.
+  Barrier barrier_;
+};
+
+void ThreadList::Dump(std::ostream& os) {
+  {
+    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+    os << "DALVIK THREADS (" << list_.size() << "):\n";
+  }
+  DumpCheckpoint checkpoint(&os);
+  size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
+  checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
 }
 
 void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
@@ -155,12 +194,12 @@
 
 #if HAVE_TIMED_RWLOCK
 // Attempt to rectify locks so that we dump thread list with required locks before exiting.
-static void UnsafeLogFatalForThreadSuspendAllTimeout() NO_THREAD_SAFETY_ANALYSIS __attribute__((noreturn));
+static void UnsafeLogFatalForThreadSuspendAllTimeout() __attribute__((noreturn));
 static void UnsafeLogFatalForThreadSuspendAllTimeout() {
   Runtime* runtime = Runtime::Current();
   std::ostringstream ss;
   ss << "Thread suspend timeout\n";
-  runtime->GetThreadList()->DumpLocked(ss);
+  runtime->GetThreadList()->Dump(ss);
   LOG(FATAL) << ss.str();
   exit(0);
 }
@@ -266,12 +305,10 @@
 // threads.  Returns the number of successful requests.
 size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
   Thread* self = Thread::Current();
-  if (kIsDebugBuild) {
-    Locks::mutator_lock_->AssertNotExclusiveHeld(self);
-    Locks::thread_list_lock_->AssertNotHeld(self);
-    Locks::thread_suspend_count_lock_->AssertNotHeld(self);
-    CHECK_NE(self->GetState(), kRunnable);
-  }
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+  Locks::thread_list_lock_->AssertNotHeld(self);
+  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+  CHECK_NE(self->GetState(), kRunnable);
 
   size_t count = 0;
   {
@@ -691,11 +728,14 @@
       Thread::resume_cond_->Wait(self);
       if (self->GetSuspendCount() != 0) {
         // The condition was signaled but we're still suspended. This
-        // can happen if the debugger lets go while a SIGQUIT thread
+        // can happen when we suspend then resume all threads to
+        // update instrumentation or compute monitor info. This can
+        // also happen if the debugger lets go while a SIGQUIT thread
         // dump event is pending (assuming SignalCatcher was resumed for
         // just long enough to try to grab the thread-suspend lock).
-        LOG(WARNING) << *self << " still suspended after undo "
-                   << "(suspend count=" << self->GetSuspendCount() << ")";
+        VLOG(jdwp) << *self << " still suspended after undo "
+                   << "(suspend count=" << self->GetSuspendCount() << ", "
+                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
       }
     }
     CHECK_EQ(self->GetSuspendCount(), 0);
@@ -835,14 +875,21 @@
     // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
     // Note: deliberately not using MutexLock that could hold a stale self pointer.
     Locks::thread_list_lock_->ExclusiveLock(self);
-    CHECK(Contains(self));
-    Locks::thread_suspend_count_lock_->ExclusiveLock(self);
-    bool removed = false;
-    if (!self->IsSuspended()) {
-      list_.remove(self);
-      removed = true;
+    bool removed = true;
+    if (!Contains(self)) {
+      std::ostringstream os;
+      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
+      LOG(ERROR) << "Request to unregister unattached thread\n" << os.str();
+    } else {
+      Locks::thread_suspend_count_lock_->ExclusiveLock(self);
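+      // Unregistering races with suspension: if a suspend request is pending we must
+      // not delete this thread yet, so leave it on the list and retry the sequence.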
+      if (!self->IsSuspended()) {
+        list_.remove(self);
+      } else {
+        // We failed to remove the thread due to a suspend request; loop and try again.
+        removed = false;
+      }
+      Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
     }
-    Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
     Locks::thread_list_lock_->ExclusiveUnlock(self);
     if (removed) {
       delete self;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index bb4f775..9f47f9f 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -39,11 +39,11 @@
   ~ThreadList();
 
   void DumpForSigQuit(std::ostream& os)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DumpLocked(std::ostream& os)  // For thread suspend timeout dumps.
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_);
+  // For thread suspend timeout dumps.
+  void Dump(std::ostream& os)
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
   pid_t GetLockOwner();  // For SignalCatcher.
 
   // Thread suspension support.
@@ -93,7 +93,8 @@
                      Locks::thread_suspend_count_lock_);
 
   size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
-      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);
+      LOCKS_EXCLUDED(Locks::thread_list_lock_,
+                     Locks::thread_suspend_count_lock_);
 
   // Suspends all threads
   void SuspendAllForDebugger()
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 6dcc5fe..b32e042 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -373,11 +373,9 @@
 
       // Enable count of allocs if specified in the flags.
       if ((flags & kTraceCountAllocs) != 0) {
-        runtime->SetStatsEnabled(true);
+        runtime->SetStatsEnabled(true, true);
       }
 
-
-
       if (sampling_enabled) {
         CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, NULL, &RunSamplingThread,
                                             reinterpret_cast<void*>(interval_us)),
@@ -492,7 +490,7 @@
   size_t final_offset = cur_offset_.LoadRelaxed();
 
   if ((flags_ & kTraceCountAllocs) != 0) {
-    Runtime::Current()->SetStatsEnabled(false);
+    Runtime::Current()->SetStatsEnabled(false, true);
   }
 
   std::set<mirror::ArtMethod*> visited_methods;
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 691aec4..432a2fe 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -110,7 +110,7 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(h_klass->IsInitialized());
 
   // Lookup fields.
@@ -205,7 +205,7 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(h_klass->IsInitialized());
 
   // Allocate an InstanceFieldTest object.
@@ -305,7 +305,7 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LStaticArrayFieldsTest;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(h_klass->IsInitialized());
 
   // Lookup fields.
@@ -419,12 +419,12 @@
   Handle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$EmptyStatic;", class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   Transaction transaction;
   Runtime::Current()->EnterTransactionMode(&transaction);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   Runtime::Current()->ExitTransactionMode();
   ASSERT_FALSE(soa.Self()->IsExceptionPending());
 }
@@ -440,12 +440,12 @@
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LTransaction$StaticFieldClass;",
                                             class_loader)));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   Transaction transaction;
   Runtime::Current()->EnterTransactionMode(&transaction);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   Runtime::Current()->ExitTransactionMode();
   ASSERT_FALSE(soa.Self()->IsExceptionPending());
 }
@@ -460,33 +460,33 @@
 
   // Load and verify java.lang.ExceptionInInitializerError and java.lang.InternalError which will
   // be thrown by class initialization due to native call.
-  Handle<mirror::Class> h_klass(
+  MutableHandle<mirror::Class> h_klass(
       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(),
                                                   "Ljava/lang/ExceptionInInitializerError;")));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
   h_klass.Assign(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/InternalError;"));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   // Load and verify Transaction$NativeSupport used in class initialization.
   h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$NativeSupport;",
                                              class_loader));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   h_klass.Assign(class_linker_->FindClass(soa.Self(), "LTransaction$BlacklistedClass;",
                                              class_loader));
   ASSERT_TRUE(h_klass.Get() != nullptr);
-  class_linker_->VerifyClass(h_klass);
+  class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
   Transaction transaction;
   Runtime::Current()->EnterTransactionMode(&transaction);
-  class_linker_->EnsureInitialized(h_klass, true, true);
+  class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   Runtime::Current()->ExitTransactionMode();
   ASSERT_TRUE(soa.Self()->IsExceptionPending());
 }
diff --git a/runtime/utils.cc b/runtime/utils.cc
index d15a09a..9157f6c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -823,7 +823,8 @@
 }
 
 enum ClassNameType { kName, kDescriptor };
-static bool IsValidClassName(const char* s, ClassNameType type, char separator) {
+template<ClassNameType kType, char kSeparator>
+static bool IsValidClassName(const char* s) {
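+  // kType and kSeparator are template parameters, so the comparisons against them
+  // below are resolved at compile time; each public wrapper instantiates its own
+  // specialized copy. Behavior is unchanged.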
   int arrayCount = 0;
   while (*s == '[') {
     arrayCount++;
@@ -835,7 +836,8 @@
     return false;
   }
 
-  if (arrayCount != 0) {
+  ClassNameType type = kType;
+  if (type != kDescriptor && arrayCount != 0) {
     /*
      * If we're looking at an array of some sort, then it doesn't
      * matter if what is being asked for is a class name; the
@@ -903,7 +905,7 @@
       return (type == kDescriptor) && !sepOrFirst && (s[1] == '\0');
     case '/':
     case '.':
-      if (c != separator) {
+      if (c != kSeparator) {
         // The wrong separator character.
         return false;
       }
@@ -925,15 +927,15 @@
 }
 
 bool IsValidBinaryClassName(const char* s) {
-  return IsValidClassName(s, kName, '.');
+  return IsValidClassName<kName, '.'>(s);
 }
 
 bool IsValidJniClassName(const char* s) {
-  return IsValidClassName(s, kName, '/');
+  return IsValidClassName<kName, '/'>(s);
 }
 
 bool IsValidDescriptor(const char* s) {
-  return IsValidClassName(s, kDescriptor, '/');
+  return IsValidClassName<kDescriptor, '/'>(s);
 }
 
 void Split(const std::string& s, char separator, std::vector<std::string>& result) {
@@ -1230,13 +1232,14 @@
 }
 
 void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
-                    bool* have_android_data, bool* dalvik_cache_exists) {
+                    bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
   CHECK(subdir != nullptr);
   std::string error_msg;
   const char* android_data = GetAndroidDataSafe(&error_msg);
   if (android_data == nullptr) {
     *have_android_data = false;
     *dalvik_cache_exists = false;
+    *is_global_cache = false;
     return;
   } else {
     *have_android_data = true;
@@ -1244,7 +1247,8 @@
   const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
   *dalvik_cache = dalvik_cache_root + subdir;
   *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
-  if (create_if_absent && !*dalvik_cache_exists && strcmp(android_data, "/data") != 0) {
+  *is_global_cache = strcmp(android_data, "/data") == 0;
+  if (create_if_absent && !*dalvik_cache_exists && !*is_global_cache) {
     // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
     *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
                             (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
diff --git a/runtime/utils.h b/runtime/utils.h
index 50462b1..9ec6db1 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -449,8 +449,9 @@
 // Return true if we found the dalvik cache and stored it in the dalvik_cache argument.
 // have_android_data will be set to true if we have an ANDROID_DATA that exists,
 // dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
+// The flag is_global_cache tells whether this cache is /data/dalvik-cache.
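+// Callers can branch on it rather than re-deriving it by comparing ANDROID_DATA
+// against "/data" themselves.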
 void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
-                    bool* have_android_data, bool* dalvik_cache_exists);
+                    bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache);
 
 // Returns the absolute dalvik-cache path for a DexFile or OatFile. The path returned will be
 // rooted at cache_location.
diff --git a/runtime/verifier/instruction_flags.cc b/runtime/verifier/instruction_flags.cc
index f76c226..ca3c687 100644
--- a/runtime/verifier/instruction_flags.cc
+++ b/runtime/verifier/instruction_flags.cc
@@ -22,13 +22,14 @@
 namespace verifier {
 
 std::string InstructionFlags::ToString() const {
-  char encoding[7];
+  char encoding[8];
   if (!IsOpcode()) {
-    strncpy(encoding, "XXXXXX", sizeof(encoding));
+    strncpy(encoding, "XXXXXXX", sizeof(encoding));
   } else {
-    strncpy(encoding, "------", sizeof(encoding));
+    strncpy(encoding, "-------", sizeof(encoding));
     if (IsVisited())               encoding[kVisited] = 'V';
     if (IsChanged())               encoding[kChanged] = 'C';
+    if (IsOpcode())                encoding[kOpcode] = 'O';
     if (IsInTry())                 encoding[kInTry] = 'T';
     if (IsBranchTarget())          encoding[kBranchTarget] = 'B';
     if (IsCompileTimeInfoPoint())  encoding[kCompileTimeInfoPoint] = 'G';
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index f8abca0..36a6e55 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -20,24 +20,23 @@
 #include <stdint.h>
 #include <string>
 
-#include "base/logging.h"
+#include "base/macros.h"
 
 namespace art {
 namespace verifier {
 
-class InstructionFlags {
+class InstructionFlags FINAL {
  public:
-  InstructionFlags() : length_(0), flags_(0) {}
+  InstructionFlags() : flags_(0) {}
 
-  void SetLengthInCodeUnits(size_t length) {
-    DCHECK_LT(length, 65536u);
-    length_ = length;
+  void SetIsOpcode() {
+    flags_ |= 1 << kOpcode;
   }
-  size_t GetLengthInCodeUnits() {
-    return length_;
+  void ClearIsOpcode() {
+    flags_ &= ~(1 << kOpcode);
   }
   bool IsOpcode() const {
-    return length_ != 0;
+    return (flags_ & (1 << kOpcode)) != 0;
   }
 
   void SetInTry() {
@@ -117,21 +116,22 @@
     // Register type information flowing into the instruction changed and so the instruction must be
     // reprocessed.
     kChanged = 1,
+    // The item at this location is an opcode.
+    kOpcode = 2,
     // Instruction is contained within a try region.
-    kInTry = 2,
+    kInTry = 3,
     // Instruction is the target of a branch (ie the start of a basic block).
-    kBranchTarget = 3,
+    kBranchTarget = 4,
     // Location of interest to the compiler for GC maps and verifier based method sharpening.
-    kCompileTimeInfoPoint = 4,
+    kCompileTimeInfoPoint = 5,
     // A return instruction.
-    kReturn = 5,
+    kReturn = 6,
   };
-
-  // Size of instruction in code units.
-  uint16_t length_;
   uint8_t flags_;
 };
 
+COMPILE_ASSERT(sizeof(InstructionFlags) == sizeof(uint8_t), err);
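+// The verifier keeps one InstructionFlags per code unit of a method, so packing the
+// flags into a single byte (instruction lengths are now recomputed on demand via
+// Instruction::SizeInCodeUnits) directly shrinks per-method verification memory.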
+
 }  // namespace verifier
 }  // namespace art
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9cde8da..f28d488 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -38,6 +38,7 @@
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "reg_type-inl.h"
 #include "register_line-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
@@ -87,7 +88,8 @@
   }
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyClass(mirror::Class* klass,
+MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
+                                                        mirror::Class* klass,
                                                         bool allow_soft_failures,
                                                         std::string* error) {
   if (klass->IsVerified()) {
@@ -99,13 +101,13 @@
   const DexFile::ClassDef* class_def = klass->GetClassDef();
   mirror::Class* super = klass->GetSuperClass();
   std::string temp;
-  if (super == NULL && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
+  if (super == nullptr && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
     early_failure = true;
     failure_message = " that has no super class";
-  } else if (super != NULL && super->IsFinal()) {
+  } else if (super != nullptr && super->IsFinal()) {
     early_failure = true;
     failure_message = " that attempts to sub-class final class " + PrettyDescriptor(super);
-  } else if (class_def == NULL) {
+  } else if (class_def == nullptr) {
     early_failure = true;
     failure_message = " that isn't present in dex file " + dex_file.GetLocation();
   }
@@ -117,21 +119,22 @@
     }
     return kHardFailure;
   }
-  StackHandleScope<2> hs(Thread::Current());
+  StackHandleScope<2> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
-  return VerifyClass(&dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
+  return VerifyClass(self, &dex_file, dex_cache, class_loader, class_def, allow_soft_failures, error);
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
-                                                        ConstHandle<mirror::DexCache> dex_cache,
-                                                        ConstHandle<mirror::ClassLoader> class_loader,
+MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self,
+                                                        const DexFile* dex_file,
+                                                        Handle<mirror::DexCache> dex_cache,
+                                                        Handle<mirror::ClassLoader> class_loader,
                                                         const DexFile::ClassDef* class_def,
                                                         bool allow_soft_failures,
                                                         std::string* error) {
   DCHECK(class_def != nullptr);
   const byte* class_data = dex_file->GetClassData(*class_def);
-  if (class_data == NULL) {
+  if (class_data == nullptr) {
     // empty class, probably a marker interface
     return kNoFailure;
   }
@@ -139,12 +142,12 @@
   while (it.HasNextStaticField() || it.HasNextInstanceField()) {
     it.Next();
   }
-  Thread* self = Thread::Current();
   size_t error_count = 0;
   bool hard_fail = false;
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
   int64_t previous_direct_method_idx = -1;
   while (it.HasNextDirectMethod()) {
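+    // Verifying a class walks every method; allowing suspension between methods keeps
+    // a large class from holding up GC or a suspend-all request.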
+    self->AllowThreadSuspension();
     uint32_t method_idx = it.GetMemberIndex();
     if (method_idx == previous_direct_method_idx) {
       // smali can create dex files with two encoded_methods sharing the same method_idx
@@ -157,21 +160,22 @@
     mirror::ArtMethod* method =
         linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
                               NullHandle<mirror::ArtMethod>(), type);
-    if (method == NULL) {
+    if (method == nullptr) {
       DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
       self->ClearException();
     }
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
-    MethodVerifier::FailureKind result = VerifyMethod(method_idx,
+    MethodVerifier::FailureKind result = VerifyMethod(self,
+                                                      method_idx,
                                                       dex_file,
                                                       dex_cache,
                                                       class_loader,
                                                       class_def,
                                                       it.GetMethodCodeItem(),
                                                       h_method,
-                                                      it.GetMemberAccessFlags(),
+                                                      it.GetMethodAccessFlags(),
                                                       allow_soft_failures,
                                                       false);
     if (result != kNoFailure) {
@@ -191,6 +195,7 @@
   }
   int64_t previous_virtual_method_idx = -1;
   while (it.HasNextVirtualMethod()) {
+    self->AllowThreadSuspension();
     uint32_t method_idx = it.GetMemberIndex();
     if (method_idx == previous_virtual_method_idx) {
       // smali can create dex files with two encoded_methods sharing the same method_idx
@@ -203,21 +208,22 @@
     mirror::ArtMethod* method =
         linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
                               NullHandle<mirror::ArtMethod>(), type);
-    if (method == NULL) {
+    if (method == nullptr) {
       DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
       self->ClearException();
     }
     StackHandleScope<1> hs(self);
     Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
-    MethodVerifier::FailureKind result = VerifyMethod(method_idx,
+    MethodVerifier::FailureKind result = VerifyMethod(self,
+                                                      method_idx,
                                                       dex_file,
                                                       dex_cache,
                                                       class_loader,
                                                       class_def,
                                                       it.GetMethodCodeItem(),
                                                       h_method,
-                                                      it.GetMemberAccessFlags(),
+                                                      it.GetMethodAccessFlags(),
                                                       allow_soft_failures,
                                                       false);
     if (result != kNoFailure) {
@@ -242,20 +248,20 @@
   }
 }
 
-MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
+MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t method_idx,
                                                          const DexFile* dex_file,
-                                                         ConstHandle<mirror::DexCache> dex_cache,
-                                                         ConstHandle<mirror::ClassLoader> class_loader,
+                                                         Handle<mirror::DexCache> dex_cache,
+                                                         Handle<mirror::ClassLoader> class_loader,
                                                          const DexFile::ClassDef* class_def,
                                                          const DexFile::CodeItem* code_item,
-                                                         ConstHandle<mirror::ArtMethod> method,
+                                                         Handle<mirror::ArtMethod> method,
                                                          uint32_t method_access_flags,
                                                          bool allow_soft_failures,
                                                          bool need_precise_constants) {
   MethodVerifier::FailureKind result = kNoFailure;
   uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
 
-  MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
+  MethodVerifier verifier(self, dex_file, dex_cache, class_loader, class_def, code_item,
                           method_idx, method, method_access_flags, true, allow_soft_failures,
                           need_precise_constants);
   if (verifier.Verify()) {
@@ -291,30 +297,41 @@
   return result;
 }
 
-void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
+MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t dex_method_idx,
                                          const DexFile* dex_file,
-                                         ConstHandle<mirror::DexCache> dex_cache,
-                                         ConstHandle<mirror::ClassLoader> class_loader,
+                                         Handle<mirror::DexCache> dex_cache,
+                                         Handle<mirror::ClassLoader> class_loader,
                                          const DexFile::ClassDef* class_def,
                                          const DexFile::CodeItem* code_item,
-                                         ConstHandle<mirror::ArtMethod> method,
+                                         Handle<mirror::ArtMethod> method,
                                          uint32_t method_access_flags) {
-  MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
-                          dex_method_idx, method, method_access_flags, true, true, true);
-  verifier.Verify();
-  verifier.DumpFailures(os);
-  os << verifier.info_messages_.str();
-  verifier.Dump(os);
+  MethodVerifier* verifier = new MethodVerifier(self, dex_file, dex_cache, class_loader,
+                                                class_def, code_item, dex_method_idx, method,
+                                                method_access_flags, true, true, true, true);
+  verifier->Verify();
+  verifier->DumpFailures(os);
+  os << verifier->info_messages_.str();
+  // Only dump and return the verifier if there were no hard failures. Otherwise it
+  // may not be fully initialized, and querying any of its info is dangerous/can abort.
+  if (verifier->have_pending_hard_failure_) {
+    delete verifier;
+    return nullptr;
+  } else {
+    verifier->Dump(os);
+    return verifier;
+  }
 }
 
-MethodVerifier::MethodVerifier(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
-                               ConstHandle<mirror::ClassLoader> class_loader,
+MethodVerifier::MethodVerifier(Thread* self,
+                               const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
+                               Handle<mirror::ClassLoader> class_loader,
                                const DexFile::ClassDef* class_def,
                                const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
-                               ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags,
+                               Handle<mirror::ArtMethod> method, uint32_t method_access_flags,
                                bool can_load_classes, bool allow_soft_failures,
-                               bool need_precise_constants)
-    : reg_types_(can_load_classes),
+                               bool need_precise_constants, bool verify_to_dump)
+    : self_(self),
+      reg_types_(can_load_classes),
       work_insn_idx_(-1),
       dex_method_idx_(dex_method_idx),
       mirror_method_(method),
@@ -325,7 +342,7 @@
       class_loader_(class_loader),
       class_def_(class_def),
       code_item_(code_item),
-      declaring_class_(NULL),
+      declaring_class_(nullptr),
       interesting_dex_pc_(-1),
       monitor_enter_dex_pcs_(nullptr),
       have_pending_hard_failure_(false),
@@ -336,7 +353,8 @@
       allow_soft_failures_(allow_soft_failures),
       need_precise_constants_(need_precise_constants),
       has_check_casts_(false),
-      has_virtual_or_interface_invokes_(false) {
+      has_virtual_or_interface_invokes_(false),
+      verify_to_dump_(verify_to_dump) {
   Runtime::Current()->AddMethodVerifier(this);
   DCHECK(class_def != nullptr);
 }
@@ -348,11 +366,12 @@
 
 void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
                                       std::vector<uint32_t>* monitor_enter_dex_pcs) {
-  StackHandleScope<3> hs(Thread::Current());
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+  MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
                           m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
                           false, true, false);
   verifier.interesting_dex_pc_ = dex_pc;
@@ -361,8 +380,8 @@
 }
 
 void MethodVerifier::FindLocksAtDexPc() {
-  CHECK(monitor_enter_dex_pcs_ != NULL);
-  CHECK(code_item_ != NULL);  // This only makes sense for methods with code.
+  CHECK(monitor_enter_dex_pcs_ != nullptr);
+  CHECK(code_item_ != nullptr);  // This only makes sense for methods with code.
 
   // Strictly speaking, we ought to be able to get away with doing a subset of the full method
   // verification. In practice, the phase we want relies on data structures set up by all the
@@ -373,18 +392,19 @@
 
 mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
                                                            uint32_t dex_pc) {
-  StackHandleScope<3> hs(Thread::Current());
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+  MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
                           m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
                           true, true, false);
   return verifier.FindAccessedFieldAtDexPc(dex_pc);
 }
 
 mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
-  CHECK(code_item_ != NULL);  // This only makes sense for methods with code.
+  CHECK(code_item_ != nullptr);  // This only makes sense for methods with code.
 
   // Strictly speaking, we ought to be able to get away with doing a subset of the full method
   // verification. In practice, the phase we want relies on data structures set up by all the
@@ -395,7 +415,7 @@
     return nullptr;
   }
   RegisterLine* register_line = reg_table_.GetLine(dex_pc);
-  if (register_line == NULL) {
+  if (register_line == nullptr) {
     return nullptr;
   }
   const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
@@ -404,18 +424,19 @@
 
 mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
                                                             uint32_t dex_pc) {
-  StackHandleScope<3> hs(Thread::Current());
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   Handle<mirror::ArtMethod> method(hs.NewHandle(m));
-  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+  MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
                           m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
                           true, true, false);
   return verifier.FindInvokedMethodAtDexPc(dex_pc);
 }
 
 mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
-  CHECK(code_item_ != NULL);  // This only makes sense for methods with code.
+  CHECK(code_item_ != nullptr);  // This only makes sense for methods with code.
 
   // Strictly speaking, we ought to be able to get away with doing a subset of the full method
   // verification. In practice, the phase we want relies on data structures set up by all the
@@ -423,11 +444,11 @@
   // got what we wanted.
   bool success = Verify();
   if (!success) {
-    return NULL;
+    return nullptr;
   }
   RegisterLine* register_line = reg_table_.GetLine(dex_pc);
-  if (register_line == NULL) {
-    return NULL;
+  if (register_line == nullptr) {
+    return nullptr;
   }
   const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
   const bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
@@ -436,7 +457,7 @@
 
 bool MethodVerifier::Verify() {
   // If there aren't any instructions, make sure that's expected, then exit successfully.
-  if (code_item_ == NULL) {
+  if (code_item_ == nullptr) {
     if ((method_access_flags_ & (kAccNative | kAccAbstract)) == 0) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "zero-length code in concrete non-native method";
       return false;
@@ -569,9 +590,9 @@
         break;
     }
     size_t inst_size = inst->SizeInCodeUnits();
-    insn_flags_[dex_pc].SetLengthInCodeUnits(inst_size);
+    insn_flags_[dex_pc].SetIsOpcode();
     dex_pc += inst_size;
-    inst = inst->Next();
+    inst = inst->RelativeAt(inst_size);
   }
 
   if (dex_pc != insns_size) {
@@ -607,9 +628,13 @@
           << "'try' block starts inside an instruction (" << start << ")";
       return false;
     }
-    for (uint32_t dex_pc = start; dex_pc < end;
-        dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits()) {
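+    // insn_flags_ no longer caches instruction lengths, so walk the instruction
+    // stream itself to find each opcode boundary within the try block.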
+    uint32_t dex_pc = start;
+    const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
+    while (dex_pc < end) {
       insn_flags_[dex_pc].SetInTry();
+      size_t insn_size = inst->SizeInCodeUnits();
+      dex_pc += insn_size;
+      inst = inst->RelativeAt(insn_size);
     }
   }
   // Iterate over each of the handlers to verify target addresses.
@@ -632,9 +657,9 @@
         mirror::Class* exception_type = linker->ResolveType(*dex_file_,
                                                             iterator.GetHandlerTypeIndex(),
                                                             dex_cache_, class_loader_);
-        if (exception_type == NULL) {
-          DCHECK(Thread::Current()->IsExceptionPending());
-          Thread::Current()->ClearException();
+        if (exception_type == nullptr) {
+          DCHECK(self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
     }
@@ -759,14 +784,14 @@
       result = false;
       break;
   }
-  if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsCompiler()) {
+  if (inst->GetVerifyIsRuntimeOnly() && Runtime::Current()->IsCompiler() && !verify_to_dump_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "opcode only expected at runtime " << inst->Name();
     result = false;
   }
   return result;
 }
 
-bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckRegisterIndex(uint32_t idx) {
   if (idx >= code_item_->registers_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
                                       << code_item_->registers_size_ << ")";
@@ -775,7 +800,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckWideRegisterIndex(uint32_t idx) {
   if (idx + 1 >= code_item_->registers_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
                                       << "+1 >= " << code_item_->registers_size_ << ")";
@@ -784,7 +809,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckFieldIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().field_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
                                       << dex_file_->GetHeader().field_ids_size_ << ")";
@@ -793,7 +818,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckMethodIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().method_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
                                       << dex_file_->GetHeader().method_ids_size_ << ")";
@@ -802,7 +827,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckNewInstance(uint32_t idx) {
+inline bool MethodVerifier::CheckNewInstance(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().type_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
                                       << dex_file_->GetHeader().type_ids_size_ << ")";
@@ -817,7 +842,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckStringIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckStringIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().string_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
                                       << dex_file_->GetHeader().string_ids_size_ << ")";
@@ -826,7 +851,7 @@
   return true;
 }
 
-bool MethodVerifier::CheckTypeIndex(uint32_t idx) {
+inline bool MethodVerifier::CheckTypeIndex(uint32_t idx) {
   if (idx >= dex_file_->GetHeader().type_ids_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx << " (max "
                                       << dex_file_->GetHeader().type_ids_size_ << ")";
@@ -1129,7 +1154,7 @@
 }
 
 void MethodVerifier::Dump(std::ostream& os) {
-  if (code_item_ == NULL) {
+  if (code_item_ == nullptr) {
     os << "Native method\n";
     return;
   }
@@ -1144,10 +1169,10 @@
   std::ostream indent_os(&indent_filter);
   const Instruction* inst = Instruction::At(code_item_->insns_);
   for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_;
-      dex_pc += insn_flags_[dex_pc].GetLengthInCodeUnits()) {
+      dex_pc += inst->SizeInCodeUnits()) {
     RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
-    if (reg_line != NULL) {
-      indent_os << reg_line->Dump() << "\n";
+    if (reg_line != nullptr) {
+      indent_os << reg_line->Dump(this) << "\n";
     }
     indent_os << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
     const bool kDumpHexOfInstruction = false;
@@ -1189,10 +1214,10 @@
     // called.
     const RegType& declaring_class = GetDeclaringClass();
     if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
-      reg_line->SetRegisterType(arg_start + cur_arg,
+      reg_line->SetRegisterType(this, arg_start + cur_arg,
                                 reg_types_.UninitializedThisArgument(declaring_class));
     } else {
-      reg_line->SetRegisterType(arg_start + cur_arg, declaring_class);
+      reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class);
     }
     cur_arg++;
   }
@@ -1203,7 +1228,7 @@
 
   for (; iterator.HasNext(); iterator.Next()) {
     const char* descriptor = iterator.GetDescriptor();
-    if (descriptor == NULL) {
+    if (descriptor == nullptr) {
       LOG(FATAL) << "Null descriptor";
     }
     if (cur_arg >= expected_args) {
@@ -1224,26 +1249,26 @@
             DCHECK(HasFailures());
             return false;
           }
-          reg_line->SetRegisterType(arg_start + cur_arg, reg_type);
+          reg_line->SetRegisterType(this, arg_start + cur_arg, reg_type);
         }
         break;
       case 'Z':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Boolean());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Boolean());
         break;
       case 'C':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Char());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Char());
         break;
       case 'B':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Byte());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Byte());
         break;
       case 'I':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Integer());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Integer());
         break;
       case 'S':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Short());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Short());
         break;
       case 'F':
-        reg_line->SetRegisterType(arg_start + cur_arg, reg_types_.Float());
+        reg_line->SetRegisterType(this, arg_start + cur_arg, reg_types_.Float());
         break;
       case 'J':
       case 'D': {
@@ -1253,9 +1278,16 @@
           return false;
         }
 
-        const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
-        const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
-        reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
+        const RegType* lo_half;
+        const RegType* hi_half;
+        if (descriptor[0] == 'J') {
+          lo_half = &reg_types_.LongLo();
+          hi_half = &reg_types_.LongHi();
+        } else {
+          lo_half = &reg_types_.DoubleLo();
+          hi_half = &reg_types_.DoubleHi();
+        }
+        reg_line->SetRegisterTypeWide(this, arg_start + cur_arg, *lo_half, *hi_half);
         cur_arg++;
         break;
       }
@@ -1317,6 +1349,7 @@
 
   /* Continue until no instructions are marked "changed". */
   while (true) {
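+    // Code-flow analysis of a big method can stay in this loop for a long time; this
+    // suspension point keeps GC and suspend-all requests from stalling behind it.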
+    self_->AllowThreadSuspension();
     // Find the first marked one. Use "start_guess" as a way to find one quickly.
     uint32_t insn_idx = start_guess;
     for (; insn_idx < insns_size; insn_idx++) {
@@ -1348,14 +1381,14 @@
        * a full table) and make sure it actually matches.
        */
       RegisterLine* register_line = reg_table_.GetLine(insn_idx);
-      if (register_line != NULL) {
+      if (register_line != nullptr) {
         if (work_line_->CompareLine(register_line) != 0) {
           Dump(std::cout);
           std::cout << info_messages_.str();
           LOG(FATAL) << "work_line diverged in " << PrettyMethod(dex_method_idx_, *dex_file_)
                      << "@" << reinterpret_cast<void*>(work_insn_idx_) << "\n"
-                     << " work_line=" << *work_line_ << "\n"
-                     << "  expected=" << *register_line;
+                     << " work_line=" << work_line_->Dump(this) << "\n"
+                     << "  expected=" << register_line->Dump(this);
         }
       }
     }
@@ -1381,7 +1414,8 @@
      */
     int dead_start = -1;
     uint32_t insn_idx = 0;
-    for (; insn_idx < insns_size; insn_idx += insn_flags_[insn_idx].GetLengthInCodeUnits()) {
+    for (; insn_idx < insns_size;
+         insn_idx += Instruction::At(code_item_->insns_ + insn_idx)->SizeInCodeUnits()) {
       /*
        * Switch-statement data doesn't get "visited" by scanner. It
        * may or may not be preceded by a padding NOP (for alignment).
@@ -1423,7 +1457,7 @@
   // We want the state _before_ the instruction, for the case where the dex pc we're
   // interested in is itself a monitor-enter instruction (which is a likely place
   // for a thread to be suspended).
-  if (monitor_enter_dex_pcs_ != NULL && work_insn_idx_ == interesting_dex_pc_) {
+  if (monitor_enter_dex_pcs_ != nullptr && work_insn_idx_ == interesting_dex_pc_) {
     monitor_enter_dex_pcs_->clear();  // The new work line is more accurate than the previous one.
     for (size_t i = 0; i < work_line_->GetMonitorEnterCount(); ++i) {
       monitor_enter_dex_pcs_->push_back(work_line_->GetMonitorEnterDexPc(i));
@@ -1457,7 +1491,7 @@
   if (gDebugVerify) {
     // Generate processing back trace to debug verifier
     LogVerifyInfo() << "Processing " << inst->DumpString(dex_file_) << "\n"
-                    << *work_line_.get() << "\n";
+                    << work_line_->Dump(this) << "\n";
   }
 
   /*
@@ -1493,31 +1527,31 @@
       break;
 
     case Instruction::MOVE:
-      work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
+      work_line_->CopyRegister1(this, inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategory1nr);
       break;
     case Instruction::MOVE_FROM16:
-      work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
+      work_line_->CopyRegister1(this, inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategory1nr);
       break;
     case Instruction::MOVE_16:
-      work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
+      work_line_->CopyRegister1(this, inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategory1nr);
       break;
     case Instruction::MOVE_WIDE:
-      work_line_->CopyRegister2(inst->VRegA_12x(), inst->VRegB_12x());
+      work_line_->CopyRegister2(this, inst->VRegA_12x(), inst->VRegB_12x());
       break;
     case Instruction::MOVE_WIDE_FROM16:
-      work_line_->CopyRegister2(inst->VRegA_22x(), inst->VRegB_22x());
+      work_line_->CopyRegister2(this, inst->VRegA_22x(), inst->VRegB_22x());
       break;
     case Instruction::MOVE_WIDE_16:
-      work_line_->CopyRegister2(inst->VRegA_32x(), inst->VRegB_32x());
+      work_line_->CopyRegister2(this, inst->VRegA_32x(), inst->VRegB_32x());
       break;
     case Instruction::MOVE_OBJECT:
-      work_line_->CopyRegister1(inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
+      work_line_->CopyRegister1(this, inst->VRegA_12x(), inst->VRegB_12x(), kTypeCategoryRef);
       break;
     case Instruction::MOVE_OBJECT_FROM16:
-      work_line_->CopyRegister1(inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
+      work_line_->CopyRegister1(this, inst->VRegA_22x(), inst->VRegB_22x(), kTypeCategoryRef);
       break;
     case Instruction::MOVE_OBJECT_16:
-      work_line_->CopyRegister1(inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
+      work_line_->CopyRegister1(this, inst->VRegA_32x(), inst->VRegB_32x(), kTypeCategoryRef);
       break;
 
     /*
@@ -1532,13 +1566,13 @@
      * easier to read in some cases.)
      */
     case Instruction::MOVE_RESULT:
-      work_line_->CopyResultRegister1(inst->VRegA_11x(), false);
+      work_line_->CopyResultRegister1(this, inst->VRegA_11x(), false);
       break;
     case Instruction::MOVE_RESULT_WIDE:
-      work_line_->CopyResultRegister2(inst->VRegA_11x());
+      work_line_->CopyResultRegister2(this, inst->VRegA_11x());
       break;
     case Instruction::MOVE_RESULT_OBJECT:
-      work_line_->CopyResultRegister1(inst->VRegA_11x(), true);
+      work_line_->CopyResultRegister1(this, inst->VRegA_11x(), true);
       break;
 
     case Instruction::MOVE_EXCEPTION: {
@@ -1547,18 +1581,18 @@
        * that as part of extracting the exception type from the catch block list.
        */
       const RegType& res_type = GetCaughtExceptionType();
-      work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
+      work_line_->SetRegisterType(this, inst->VRegA_11x(), res_type);
       break;
     }
     case Instruction::RETURN_VOID:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         if (!GetMethodReturnType().IsConflict()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected";
         }
       }
       break;
     case Instruction::RETURN:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         /* check the method signature */
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory1Types()) {
@@ -1568,14 +1602,14 @@
           // Compilers may generate synthetic functions that write byte values into boolean fields.
           // Also, it may use integer values for boolean, byte, short, and character return types.
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& src_type = work_line_->GetRegisterType(vregA);
+          const RegType& src_type = work_line_->GetRegisterType(this, vregA);
           bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
                           ((return_type.IsBoolean() || return_type.IsByte() ||
                            return_type.IsShort() || return_type.IsChar()) &&
                            src_type.IsInteger()));
           /* check the register contents */
           bool success =
-              work_line_->VerifyRegisterType(vregA, use_src ? src_type : return_type);
+              work_line_->VerifyRegisterType(this, vregA, use_src ? src_type : return_type);
           if (!success) {
             AppendToLastFailMessage(StringPrintf(" return-1nr on invalid register v%d", vregA));
           }
@@ -1583,7 +1617,7 @@
       }
       break;
     case Instruction::RETURN_WIDE:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         /* check the method signature */
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsCategory2Types()) {
@@ -1591,7 +1625,7 @@
         } else {
           /* check the register contents */
           const uint32_t vregA = inst->VRegA_11x();
-          bool success = work_line_->VerifyRegisterType(vregA, return_type);
+          bool success = work_line_->VerifyRegisterType(this, vregA, return_type);
           if (!success) {
             AppendToLastFailMessage(StringPrintf(" return-wide on invalid register v%d", vregA));
           }
@@ -1599,7 +1633,7 @@
       }
       break;
     case Instruction::RETURN_OBJECT:
-      if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
+      if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) {
         const RegType& return_type = GetMethodReturnType();
         if (!return_type.IsReferenceTypes()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
@@ -1608,7 +1642,7 @@
           DCHECK(!return_type.IsZero());
           DCHECK(!return_type.IsUninitializedReference());
           const uint32_t vregA = inst->VRegA_11x();
-          const RegType& reg_type = work_line_->GetRegisterType(vregA);
+          const RegType& reg_type = work_line_->GetRegisterType(this, vregA);
           // Disallow returning uninitialized values and verify that the reference in vAA is an
           // instance of the "return_type"
           if (reg_type.IsUninitializedTypes()) {
@@ -1630,25 +1664,25 @@
       /* could be boolean, int, float, or a null reference */
     case Instruction::CONST_4: {
       int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
-      work_line_->SetRegisterType(inst->VRegA_11n(),
+      work_line_->SetRegisterType(this, inst->VRegA_11n(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_16: {
       int16_t val = static_cast<int16_t>(inst->VRegB_21s());
-      work_line_->SetRegisterType(inst->VRegA_21s(),
+      work_line_->SetRegisterType(this, inst->VRegA_21s(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST: {
       int32_t val = inst->VRegB_31i();
-      work_line_->SetRegisterType(inst->VRegA_31i(),
+      work_line_->SetRegisterType(this, inst->VRegA_31i(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
     case Instruction::CONST_HIGH16: {
       int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
-      work_line_->SetRegisterType(inst->VRegA_21h(),
+      work_line_->SetRegisterType(this, inst->VRegA_21h(),
                                   DetermineCat1Constant(val, need_precise_constants_));
       break;
     }
@@ -1657,48 +1691,48 @@
       int64_t val = static_cast<int16_t>(inst->VRegB_21s());
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_21s(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_32: {
       int64_t val = static_cast<int32_t>(inst->VRegB_31i());
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_31i(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE: {
       int64_t val = inst->VRegB_51l();
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_51l(), lo, hi);
       break;
     }
     case Instruction::CONST_WIDE_HIGH16: {
       int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
       const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
       const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
-      work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
+      work_line_->SetRegisterTypeWide(this, inst->VRegA_21h(), lo, hi);
       break;
     }
     case Instruction::CONST_STRING:
-      work_line_->SetRegisterType(inst->VRegA_21c(), reg_types_.JavaLangString());
+      work_line_->SetRegisterType(this, inst->VRegA_21c(), reg_types_.JavaLangString());
       break;
     case Instruction::CONST_STRING_JUMBO:
-      work_line_->SetRegisterType(inst->VRegA_31c(), reg_types_.JavaLangString());
+      work_line_->SetRegisterType(this, inst->VRegA_31c(), reg_types_.JavaLangString());
       break;
     case Instruction::CONST_CLASS: {
       // Get type from instruction; if unresolved then we need an access check
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
       // Register holds class, ie its type is class, on error it will hold Conflict.
-      work_line_->SetRegisterType(inst->VRegA_21c(),
+      work_line_->SetRegisterType(this, inst->VRegA_21c(),
                                   res_type.IsConflict() ? res_type
-                                                        : reg_types_.JavaLangClass(true));
+                                                        : reg_types_.JavaLangClass());
       break;
     }
     case Instruction::MONITOR_ENTER:
-      work_line_->PushMonitor(inst->VRegA_11x(), work_insn_idx_);
+      work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
       break;
     case Instruction::MONITOR_EXIT:
       /*
@@ -1722,7 +1756,7 @@
        * "live" so we still need to check it.
        */
       opcode_flags &= ~Instruction::kThrow;
-      work_line_->PopMonitor(inst->VRegA_11x());
+      work_line_->PopMonitor(this, inst->VRegA_11x());
       break;
 
     case Instruction::CHECK_CAST:
@@ -1749,13 +1783,13 @@
 
         DCHECK_NE(failures_.size(), 0U);
         if (!is_checkcast) {
-          work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
+          work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
         }
         break;  // bad class
       }
       // TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
       uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
-      const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+      const RegType& orig_type = work_line_->GetRegisterType(this, orig_type_reg);
       if (!res_type.IsNonZeroReferenceTypes()) {
         if (is_checkcast) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1770,20 +1804,20 @@
         }
       } else {
         if (is_checkcast) {
-          work_line_->SetRegisterType(inst->VRegA_21c(), res_type);
+          work_line_->SetRegisterType(this, inst->VRegA_21c(), res_type);
         } else {
-          work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
+          work_line_->SetRegisterType(this, inst->VRegA_22c(), reg_types_.Boolean());
         }
       }
       break;
     }
     case Instruction::ARRAY_LENGTH: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+      const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegB_12x());
       if (res_type.IsReferenceTypes()) {
         if (!res_type.IsArrayTypes() && !res_type.IsZero()) {  // i.e. not an array or null
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
         } else {
-          work_line_->SetRegisterType(inst->VRegA_12x(), reg_types_.Integer());
+          work_line_->SetRegisterType(this, inst->VRegA_12x(), reg_types_.Integer());
         }
       } else {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
@@ -1806,9 +1840,9 @@
       const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
       // Any registers holding previous allocations from this address that have not yet been
       // initialized must be marked invalid.
-      work_line_->MarkUninitRefsAsInvalid(uninit_type);
+      work_line_->MarkUninitRefsAsInvalid(this, uninit_type);
       // add the new uninitialized reference to the register state
-      work_line_->SetRegisterType(inst->VRegA_21c(), uninit_type);
+      work_line_->SetRegisterType(this, inst->VRegA_21c(), uninit_type);
       break;
     }
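
MarkUninitRefsAsInvalid sweeps the whole register line and poisons every register still holding an allocation from this address; a sketch with the newly threaded verifier parameter (accessor names assumed, close to but not verbatim the ART body):

  void RegisterLine::MarkUninitRefsAsInvalid(MethodVerifier* verifier,
                                             const RegType& uninit_type) {
    for (uint32_t i = 0; i < num_regs_; i++) {
      if (GetRegisterType(verifier, i).Equals(uninit_type)) {
        // A stale allocation from this address can no longer be initialized.
        line_[i] = verifier->GetRegTypeCache()->Conflict().GetId();
        ClearAllRegToLockDepths(i);
      }
    }
  }
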
     case Instruction::NEW_ARRAY:
@@ -1824,39 +1858,39 @@
       break;
     case Instruction::CMPL_FLOAT:
     case Instruction::CMPG_FLOAT:
-      if (!work_line_->VerifyRegisterType(inst->VRegB_23x(), reg_types_.Float())) {
+      if (!work_line_->VerifyRegisterType(this, inst->VRegB_23x(), reg_types_.Float())) {
         break;
       }
-      if (!work_line_->VerifyRegisterType(inst->VRegC_23x(), reg_types_.Float())) {
+      if (!work_line_->VerifyRegisterType(this, inst->VRegC_23x(), reg_types_.Float())) {
         break;
       }
-      work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::CMPL_DOUBLE:
     case Instruction::CMPG_DOUBLE:
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.DoubleLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.DoubleLo(),
                                               reg_types_.DoubleHi())) {
         break;
       }
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.DoubleLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegC_23x(), reg_types_.DoubleLo(),
                                               reg_types_.DoubleHi())) {
         break;
       }
-      work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::CMP_LONG:
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegB_23x(), reg_types_.LongLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegB_23x(), reg_types_.LongLo(),
                                               reg_types_.LongHi())) {
         break;
       }
-      if (!work_line_->VerifyRegisterTypeWide(inst->VRegC_23x(), reg_types_.LongLo(),
+      if (!work_line_->VerifyRegisterTypeWide(this, inst->VRegC_23x(), reg_types_.LongLo(),
                                               reg_types_.LongHi())) {
         break;
       }
-      work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
+      work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Integer());
       break;
     case Instruction::THROW: {
-      const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+      const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegA_11x());
       if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
         Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
             << "thrown class " << res_type << " not instanceof Throwable";
@@ -1872,12 +1906,12 @@
     case Instruction::PACKED_SWITCH:
     case Instruction::SPARSE_SWITCH:
       /* verify that vAA is an integer, or can be converted to one */
-      work_line_->VerifyRegisterType(inst->VRegA_31t(), reg_types_.Integer());
+      work_line_->VerifyRegisterType(this, inst->VRegA_31t(), reg_types_.Integer());
       break;
 
     case Instruction::FILL_ARRAY_DATA: {
       /* Similar to the verification done for APUT */
-      const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+      const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegA_31t());
       /* array_type can be null if the reg type is Zero */
       if (!array_type.IsZero()) {
         if (!array_type.IsArrayTypes()) {
@@ -1911,8 +1945,8 @@
     }
     case Instruction::IF_EQ:
     case Instruction::IF_NE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      const RegType& reg_type1 = work_line_->GetRegisterType(this, inst->VRegA_22t());
+      const RegType& reg_type2 = work_line_->GetRegisterType(this, inst->VRegB_22t());
       bool mismatch = false;
       if (reg_type1.IsZero()) {  // zero then integral or reference expected
         mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1931,8 +1965,8 @@
     case Instruction::IF_GE:
     case Instruction::IF_GT:
     case Instruction::IF_LE: {
-      const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
-      const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+      const RegType& reg_type1 = work_line_->GetRegisterType(this, inst->VRegA_22t());
+      const RegType& reg_type2 = work_line_->GetRegisterType(this, inst->VRegB_22t());
       if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
                                           << reg_type2 << ") must be integral";
@@ -1941,7 +1975,7 @@
     }
     case Instruction::IF_EQZ:
     case Instruction::IF_NEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      const RegType& reg_type = work_line_->GetRegisterType(this, inst->VRegA_21t());
       if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-eqz/if-nez";
@@ -1987,7 +2021,7 @@
         // type is assignable to the original then allow optimization. This check is performed to
         // ensure that subsequent merges don't lose type information - such as becoming an
         // interface from a class that would lose information relevant to field checks.
-        const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+        const RegType& orig_type = work_line_->GetRegisterType(this, instance_of_inst->VRegB_22c());
         const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
 
         if (!orig_type.Equals(cast_type) &&
@@ -2003,7 +2037,7 @@
             branch_line.reset(update_line);
           }
           update_line->CopyFromLine(work_line_.get());
-          update_line->SetRegisterType(instance_of_inst->VRegB_22c(), cast_type);
+          update_line->SetRegisterType(this, instance_of_inst->VRegB_22c(), cast_type);
           if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
@@ -2017,17 +2051,17 @@
             switch (move_inst->Opcode()) {
               case Instruction::MOVE_OBJECT:
                 if (move_inst->VRegA_12x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(move_inst->VRegB_12x(), cast_type);
+                  update_line->SetRegisterType(this, move_inst->VRegB_12x(), cast_type);
                 }
                 break;
               case Instruction::MOVE_OBJECT_FROM16:
                 if (move_inst->VRegA_22x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(move_inst->VRegB_22x(), cast_type);
+                  update_line->SetRegisterType(this, move_inst->VRegB_22x(), cast_type);
                 }
                 break;
               case Instruction::MOVE_OBJECT_16:
                 if (move_inst->VRegA_32x() == instance_of_inst->VRegB_22c()) {
-                  update_line->SetRegisterType(move_inst->VRegB_32x(), cast_type);
+                  update_line->SetRegisterType(this, move_inst->VRegB_32x(), cast_type);
                 }
                 break;
               default:
@@ -2043,7 +2077,7 @@
     case Instruction::IF_GEZ:
     case Instruction::IF_GTZ:
     case Instruction::IF_LEZ: {
-      const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+      const RegType& reg_type = work_line_->GetRegisterType(this, inst->VRegA_21t());
       if (!reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
                                           << " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2194,8 +2228,7 @@
                                                               is_super);
       const RegType* return_type = nullptr;
       if (called_method != nullptr) {
-        Thread* self = Thread::Current();
-        StackHandleScope<1> hs(self);
+        StackHandleScope<1> hs(self_);
         Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
         MethodHelper mh(h_called_method);
         mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
@@ -2204,8 +2237,8 @@
                                               return_type_class,
                                               return_type_class->CannotBeAssignedFromOtherTypes());
         } else {
-          DCHECK(!can_load_classes_ || self->IsExceptionPending());
-          self->ClearException();
+          DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
       if (return_type == nullptr) {
@@ -2216,7 +2249,7 @@
         return_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
       }
       if (!return_type->IsLowHalf()) {
-        work_line_->SetResultRegisterType(*return_type);
+        work_line_->SetResultRegisterType(this, *return_type);
       } else {
         work_line_->SetResultRegisterTypeWide(*return_type, return_type->HighHalf(&reg_types_));
       }
@@ -2231,7 +2264,7 @@
       const char* return_type_descriptor;
       bool is_constructor;
       const RegType* return_type = nullptr;
-      if (called_method == NULL) {
+      if (called_method == nullptr) {
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         is_constructor = strcmp("<init>", dex_file_->StringDataByIdx(method_id.name_idx_)) == 0;
@@ -2240,8 +2273,7 @@
       } else {
         is_constructor = called_method->IsConstructor();
         return_type_descriptor = called_method->GetReturnTypeDescriptor();
-        Thread* self = Thread::Current();
-        StackHandleScope<1> hs(self);
+        StackHandleScope<1> hs(self_);
         Handle<mirror::ArtMethod> h_called_method(hs.NewHandle(called_method));
         MethodHelper mh(h_called_method);
         mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
@@ -2250,8 +2282,8 @@
                                               return_type_class,
                                               return_type_class->CannotBeAssignedFromOtherTypes());
         } else {
-          DCHECK(!can_load_classes_ || self->IsExceptionPending());
-          self->ClearException();
+          DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
       if (is_constructor) {
@@ -2262,7 +2294,7 @@
          * allowing the latter only if the "this" argument is the same as the "this" argument to
          * this method (which implies that we're in a constructor ourselves).
          */
-        const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+        const RegType& this_type = work_line_->GetInvocationThis(this, inst, is_range);
         if (this_type.IsConflict())  // failure.
           break;
 
@@ -2292,14 +2324,14 @@
          * Replace the uninitialized reference with an initialized one. We need to do this for all
          * registers that have the same object instance in them, not just the "this" register.
          */
-        work_line_->MarkRefsAsInitialized(this_type);
+        work_line_->MarkRefsAsInitialized(this, this_type);
       }
       if (return_type == nullptr) {
         return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
                                                  false);
       }
       if (!return_type->IsLowHalf()) {
-        work_line_->SetResultRegisterType(*return_type);
+        work_line_->SetResultRegisterType(this, *return_type);
       } else {
         work_line_->SetResultRegisterTypeWide(*return_type, return_type->HighHalf(&reg_types_));
       }
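
MarkRefsAsInitialized above is the counterpart of the new-instance handling earlier: once `<init>` runs on the uninitialized `this`, every register aliasing that allocation flips to the initialized type at once. A sketch (cache accessor assumed; the real body also sanity-checks that the input is an uninitialized type):

  void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier,
                                           const RegType& uninit_type) {
    const RegType& init_type = verifier->GetRegTypeCache()->FromUninitialized(uninit_type);
    for (uint32_t i = 0; i < num_regs_; i++) {
      if (GetRegisterType(verifier, i).Equals(uninit_type)) {
        line_[i] = init_type.GetId();  // all aliases become initialized together
      }
    }
  }
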
@@ -2314,7 +2346,7 @@
                                                                      is_range,
                                                                      false);
         const char* descriptor;
-        if (called_method == NULL) {
+        if (called_method == nullptr) {
           uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
           const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
           uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -2324,7 +2356,7 @@
         }
         const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
         if (!return_type.IsLowHalf()) {
-          work_line_->SetResultRegisterType(return_type);
+          work_line_->SetResultRegisterType(this, return_type);
         } else {
           work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
         }
@@ -2338,7 +2370,7 @@
                                                                 METHOD_INTERFACE,
                                                                 is_range,
                                                                 false);
-      if (abs_method != NULL) {
+      if (abs_method != nullptr) {
         mirror::Class* called_interface = abs_method->GetDeclaringClass();
         if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
           Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected interface class in invoke-interface '"
@@ -2349,7 +2381,7 @@
       /* Get the type of the "this" arg, which should be either a sub-interface of the called
        * interface or Object (see comments in RegType::JoinClass).
        */
-      const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+      const RegType& this_type = work_line_->GetInvocationThis(this, inst, is_range);
       if (this_type.IsZero()) {
         /* null pointer always passes (and always fails at runtime) */
       } else {
@@ -2371,7 +2403,7 @@
        * the type information is in the abstract method, so we're good.
        */
       const char* descriptor;
-      if (abs_method == NULL) {
+      if (abs_method == nullptr) {
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
@@ -2381,7 +2413,7 @@
       }
       const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
       if (!return_type.IsLowHalf()) {
-        work_line_->SetResultRegisterType(return_type);
+        work_line_->SetResultRegisterType(this, return_type);
       } else {
         work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
       }
@@ -2390,74 +2422,74 @@
     }
     case Instruction::NEG_INT:
     case Instruction::NOT_INT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Integer(), reg_types_.Integer());
       break;
     case Instruction::NEG_LONG:
     case Instruction::NOT_LONG:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                    reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::NEG_FLOAT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Float());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Float(), reg_types_.Float());
       break;
     case Instruction::NEG_DOUBLE:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                    reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::INT_TO_LONG:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                      reg_types_.Integer());
       break;
     case Instruction::INT_TO_FLOAT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Float(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Float(), reg_types_.Integer());
       break;
     case Instruction::INT_TO_DOUBLE:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                      reg_types_.Integer());
       break;
     case Instruction::LONG_TO_INT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Integer(),
                                        reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::LONG_TO_FLOAT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Float(),
                                        reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::LONG_TO_DOUBLE:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                    reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::FLOAT_TO_INT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Integer(), reg_types_.Float());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Integer(), reg_types_.Float());
       break;
     case Instruction::FLOAT_TO_LONG:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                      reg_types_.Float());
       break;
     case Instruction::FLOAT_TO_DOUBLE:
-      work_line_->CheckUnaryOpToWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckUnaryOpToWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                      reg_types_.Float());
       break;
     case Instruction::DOUBLE_TO_INT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Integer(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Integer(),
                                        reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::DOUBLE_TO_LONG:
-      work_line_->CheckUnaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckUnaryOpWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                    reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::DOUBLE_TO_FLOAT:
-      work_line_->CheckUnaryOpFromWide(inst, reg_types_.Float(),
+      work_line_->CheckUnaryOpFromWide(this, inst, reg_types_.Float(),
                                        reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
     case Instruction::INT_TO_BYTE:
-      work_line_->CheckUnaryOp(inst, reg_types_.Byte(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Byte(), reg_types_.Integer());
       break;
     case Instruction::INT_TO_CHAR:
-      work_line_->CheckUnaryOp(inst, reg_types_.Char(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Char(), reg_types_.Integer());
       break;
     case Instruction::INT_TO_SHORT:
-      work_line_->CheckUnaryOp(inst, reg_types_.Short(), reg_types_.Integer());
+      work_line_->CheckUnaryOp(this, inst, reg_types_.Short(), reg_types_.Integer());
       break;
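
Every conversion case above reduces to one pattern: verify the source register against the expected source type, then overwrite the destination with the result type. A sketch of the non-wide helper under the new signature (close to, but not verbatim, the ART implementation):

  void RegisterLine::CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst,
                                  const RegType& dst_type, const RegType& src_type) {
    // 12x format: vA = op vB.
    if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
      SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
    }
  }
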
 
     case Instruction::ADD_INT:
@@ -2468,13 +2500,13 @@
     case Instruction::SHL_INT:
     case Instruction::SHR_INT:
     case Instruction::USHR_INT:
-      work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
+      work_line_->CheckBinaryOp(this, inst, reg_types_.Integer(), reg_types_.Integer(),
                                 reg_types_.Integer(), false);
       break;
     case Instruction::AND_INT:
     case Instruction::OR_INT:
     case Instruction::XOR_INT:
-      work_line_->CheckBinaryOp(inst, reg_types_.Integer(), reg_types_.Integer(),
+      work_line_->CheckBinaryOp(this, inst, reg_types_.Integer(), reg_types_.Integer(),
                                 reg_types_.Integer(), true);
       break;
     case Instruction::ADD_LONG:
@@ -2485,7 +2517,7 @@
     case Instruction::AND_LONG:
     case Instruction::OR_LONG:
     case Instruction::XOR_LONG:
-      work_line_->CheckBinaryOpWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOpWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                     reg_types_.LongLo(), reg_types_.LongHi(),
                                     reg_types_.LongLo(), reg_types_.LongHi());
       break;
@@ -2493,7 +2525,7 @@
     case Instruction::SHR_LONG:
     case Instruction::USHR_LONG:
       /* shift distance is Int, making these different from other binary operations */
-      work_line_->CheckBinaryOpWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOpWideShift(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                          reg_types_.Integer());
       break;
     case Instruction::ADD_FLOAT:
@@ -2501,18 +2533,15 @@
     case Instruction::MUL_FLOAT:
     case Instruction::DIV_FLOAT:
     case Instruction::REM_FLOAT:
-      work_line_->CheckBinaryOp(inst,
-                                reg_types_.Float(),
-                                reg_types_.Float(),
-                                reg_types_.Float(),
-                                false);
+      work_line_->CheckBinaryOp(this, inst, reg_types_.Float(), reg_types_.Float(),
+                                reg_types_.Float(), false);
       break;
     case Instruction::ADD_DOUBLE:
     case Instruction::SUB_DOUBLE:
     case Instruction::MUL_DOUBLE:
     case Instruction::DIV_DOUBLE:
     case Instruction::REM_DOUBLE:
-      work_line_->CheckBinaryOpWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckBinaryOpWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                     reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                     reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
@@ -2523,27 +2552,18 @@
     case Instruction::SHL_INT_2ADDR:
     case Instruction::SHR_INT_2ADDR:
     case Instruction::USHR_INT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     false);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Integer(), reg_types_.Integer(),
+                                     reg_types_.Integer(), false);
       break;
     case Instruction::AND_INT_2ADDR:
     case Instruction::OR_INT_2ADDR:
     case Instruction::XOR_INT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     true);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Integer(), reg_types_.Integer(),
+                                     reg_types_.Integer(), true);
       break;
     case Instruction::DIV_INT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     reg_types_.Integer(),
-                                     false);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Integer(), reg_types_.Integer(),
+                                     reg_types_.Integer(), false);
       break;
     case Instruction::ADD_LONG_2ADDR:
     case Instruction::SUB_LONG_2ADDR:
@@ -2553,14 +2573,14 @@
     case Instruction::AND_LONG_2ADDR:
     case Instruction::OR_LONG_2ADDR:
     case Instruction::XOR_LONG_2ADDR:
-      work_line_->CheckBinaryOp2addrWide(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOp2addrWide(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                          reg_types_.LongLo(), reg_types_.LongHi(),
                                          reg_types_.LongLo(), reg_types_.LongHi());
       break;
     case Instruction::SHL_LONG_2ADDR:
     case Instruction::SHR_LONG_2ADDR:
     case Instruction::USHR_LONG_2ADDR:
-      work_line_->CheckBinaryOp2addrWideShift(inst, reg_types_.LongLo(), reg_types_.LongHi(),
+      work_line_->CheckBinaryOp2addrWideShift(this, inst, reg_types_.LongLo(), reg_types_.LongHi(),
                                               reg_types_.Integer());
       break;
     case Instruction::ADD_FLOAT_2ADDR:
@@ -2568,18 +2588,15 @@
     case Instruction::MUL_FLOAT_2ADDR:
     case Instruction::DIV_FLOAT_2ADDR:
     case Instruction::REM_FLOAT_2ADDR:
-      work_line_->CheckBinaryOp2addr(inst,
-                                     reg_types_.Float(),
-                                     reg_types_.Float(),
-                                     reg_types_.Float(),
-                                     false);
+      work_line_->CheckBinaryOp2addr(this, inst, reg_types_.Float(), reg_types_.Float(),
+                                     reg_types_.Float(), false);
       break;
     case Instruction::ADD_DOUBLE_2ADDR:
     case Instruction::SUB_DOUBLE_2ADDR:
     case Instruction::MUL_DOUBLE_2ADDR:
     case Instruction::DIV_DOUBLE_2ADDR:
     case Instruction::REM_DOUBLE_2ADDR:
-      work_line_->CheckBinaryOp2addrWide(inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
+      work_line_->CheckBinaryOp2addrWide(this, inst, reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                           reg_types_.DoubleLo(), reg_types_.DoubleHi(),
                                          reg_types_.DoubleLo(), reg_types_.DoubleHi());
       break;
@@ -2588,12 +2605,14 @@
     case Instruction::MUL_INT_LIT16:
     case Instruction::DIV_INT_LIT16:
     case Instruction::REM_INT_LIT16:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, true);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), false,
+                                 true);
       break;
     case Instruction::AND_INT_LIT16:
     case Instruction::OR_INT_LIT16:
     case Instruction::XOR_INT_LIT16:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, true);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), true,
+                                 true);
       break;
     case Instruction::ADD_INT_LIT8:
     case Instruction::RSUB_INT_LIT8:
@@ -2603,12 +2622,14 @@
     case Instruction::SHL_INT_LIT8:
     case Instruction::SHR_INT_LIT8:
     case Instruction::USHR_INT_LIT8:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), false, false);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), false,
+                                 false);
       break;
     case Instruction::AND_INT_LIT8:
     case Instruction::OR_INT_LIT8:
     case Instruction::XOR_INT_LIT8:
-      work_line_->CheckLiteralOp(inst, reg_types_.Integer(), reg_types_.Integer(), true, false);
+      work_line_->CheckLiteralOp(this, inst, reg_types_.Integer(), reg_types_.Integer(), true,
+                                 false);
       break;
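
The trailing boolean arguments threaded through the CheckLiteralOp calls above select lit16 vs. lit8 decoding and whether and/or/xor may preserve booleanness. A simplified sketch of that logic (the Instruction accessor names are real; the body is illustrative):

  void RegisterLine::CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
                                    const RegType& dst_type, const RegType& src_type,
                                    bool check_boolean_op, bool is_lit16) {
    const uint32_t vreg_a = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
    const uint32_t vreg_b = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
    if (!VerifyRegisterType(verifier, vreg_b, src_type)) {
      return;
    }
    if (check_boolean_op) {
      // and/or/xor with a 0 or 1 literal keeps a boolean source boolean.
      const int32_t lit = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
      if (GetRegisterType(verifier, vreg_b).IsBooleanTypes() && (lit == 0 || lit == 1)) {
        SetRegisterType(verifier, vreg_a, verifier->GetRegTypeCache()->Boolean());
        return;
      }
    }
    SetRegisterType(verifier, vreg_a, dst_type);
  }
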
 
     // Special instructions.
@@ -2654,11 +2675,11 @@
     case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
       bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
       mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
-      if (called_method != NULL) {
+      if (called_method != nullptr) {
         const char* descriptor = called_method->GetReturnTypeDescriptor();
         const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
         if (!return_type.IsLowHalf()) {
-          work_line_->SetResultRegisterType(return_type);
+          work_line_->SetResultRegisterType(this, return_type);
         } else {
           work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
         }
@@ -2720,7 +2741,7 @@
    * not expensive and it makes our debugging output cleaner.)
    */
   if (!just_set_result) {
-    work_line_->SetResultTypeToUnknown();
+    work_line_->SetResultTypeToUnknown(this);
   }
 
 
@@ -2749,7 +2770,7 @@
       return false;
     }
     /* update branch target, set "changed" if appropriate */
-    if (NULL != branch_line.get()) {
+    if (nullptr != branch_line.get()) {
       if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
         return false;
       }
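
UpdateRegisters merges the given line into the target instruction's stored line and re-queues the target when anything widened. The conceptual core, as a sketch (simplified signature of our own; the real method also copies outright into targets that have not been visited yet):

  bool MergeIntoTarget(MethodVerifier* verifier, RegisterLine* target_line,
                       const RegisterLine* merge_line, InstructionFlags* target_flags) {
    const bool changed = target_line->MergeRegisters(verifier, merge_line);
    if (changed) {
      target_flags->SetChanged();  // re-verify the target with the merged types
    }
    return changed;
  }
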
@@ -2825,9 +2846,8 @@
           }
         } else {
           // Clear exception.
-          Thread* self = Thread::Current();
-          DCHECK(self->IsExceptionPending());
-          self->ClearException();
+          DCHECK(self_->IsExceptionPending());
+          self_->ClearException();
         }
       }
       /*
@@ -2864,7 +2884,8 @@
    *        and this change should not be used in those cases.
    */
   if ((opcode_flags & Instruction::kContinue) != 0) {
-    uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
+    DCHECK_EQ(Instruction::At(code_item_->insns_ + work_insn_idx_), inst);
+    uint32_t next_insn_idx = work_insn_idx_ + inst->SizeInCodeUnits();
     if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
       return false;
@@ -2874,7 +2895,7 @@
     if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
       return false;
     }
-    if (NULL != fallthrough_line.get()) {
+    if (nullptr != fallthrough_line.get()) {
       // Make workline consistent with fallthrough computed from peephole optimization.
       work_line_->CopyFromLine(fallthrough_line.get());
     }
@@ -2883,17 +2904,17 @@
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
       Instruction::Code opcode = ret_inst->Opcode();
       if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
-        work_line_->MarkAllRegistersAsConflicts();
+        work_line_->MarkAllRegistersAsConflicts(this);
       } else {
         if (opcode == Instruction::RETURN_WIDE) {
-          work_line_->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x());
+          work_line_->MarkAllRegistersAsConflictsExceptWide(this, ret_inst->VRegA_11x());
         } else {
-          work_line_->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x());
+          work_line_->MarkAllRegistersAsConflictsExcept(this, ret_inst->VRegA_11x());
         }
       }
     }
     RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
-    if (next_line != NULL) {
+    if (next_line != nullptr) {
       // Merge registers into what we have for the next instruction, and set the "changed" flag if
       // needed. If the merge changes the state of the registers then the work line will be
       // updated.
@@ -2911,7 +2932,7 @@
 
   /* If we're returning from the method, make sure monitor stack is empty. */
   if ((opcode_flags & Instruction::kReturn) != 0) {
-    if (!work_line_->VerifyMonitorStackEmpty()) {
+    if (!work_line_->VerifyMonitorStackEmpty(this)) {
       return false;
     }
   }
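
VerifyMonitorStackEmpty under the new signature is a single check that reports through the verifier; roughly (illustrative):

  bool RegisterLine::VerifyMonitorStackEmpty(MethodVerifier* verifier) const {
    if (MonitorStackDepth() != 0) {
      verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
          << "expected empty monitor stack on return";
      return false;
    }
    return true;
  }
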
@@ -2923,7 +2944,8 @@
    * alone and let the caller sort it out.
    */
   if ((opcode_flags & Instruction::kContinue) != 0) {
-    *start_guess = work_insn_idx_ + insn_flags_[work_insn_idx_].GetLengthInCodeUnits();
+    DCHECK_EQ(Instruction::At(code_item_->insns_ + work_insn_idx_), inst);
+    *start_guess = work_insn_idx_ + inst->SizeInCodeUnits();
   } else if ((opcode_flags & Instruction::kBranch) != 0) {
     /* we're still okay if branch_target is zero */
     *start_guess = work_insn_idx_ + branch_target;
@@ -2939,7 +2961,7 @@
   const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
   const RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
-  const RegType& result = klass != NULL ?
+  const RegType& result = klass != nullptr ?
       reg_types_.FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
       reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   if (result.IsConflict()) {
@@ -2947,7 +2969,7 @@
         << "' in " << referrer;
     return result;
   }
-  if (klass == NULL && !result.IsUnresolvedTypes()) {
+  if (klass == nullptr && !result.IsUnresolvedTypes()) {
     dex_cache_->SetResolvedType(class_idx, result.GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
@@ -2962,7 +2984,7 @@
 }
 
 const RegType& MethodVerifier::GetCaughtExceptionType() {
-  const RegType* common_super = NULL;
+  const RegType* common_super = nullptr;
   if (code_item_->tries_size_ != 0) {
     const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
     uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2997,7 +3019,7 @@
       handlers_ptr = iterator.EndDataPointer();
     }
   }
-  if (common_super == NULL) {
+  if (common_super == nullptr) {
     /* no catch blocks, or no catches with classes we can find */
     Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unable to find exception handler";
     return reg_types_.Conflict();
@@ -3013,15 +3035,15 @@
     std::string append(" in attempt to access method ");
     append += dex_file_->GetMethodName(method_id);
     AppendToLastFailMessage(append);
-    return NULL;
+    return nullptr;
   }
   if (klass_type.IsUnresolvedTypes()) {
-    return NULL;  // Can't resolve Class so no more to do here
+    return nullptr;  // Can't resolve Class so no more to do here
   }
   mirror::Class* klass = klass_type.GetClass();
   const RegType& referrer = GetDeclaringClass();
   mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
-  if (res_method == NULL) {
+  if (res_method == nullptr) {
     const char* name = dex_file_->GetMethodName(method_id);
     const Signature signature = dex_file_->GetMethodSignature(method_id);
 
@@ -3032,7 +3054,7 @@
     } else {
       res_method = klass->FindVirtualMethod(name, signature);
     }
-    if (res_method != NULL) {
+    if (res_method != nullptr) {
       dex_cache_->SetResolvedMethod(dex_method_idx, res_method);
     } else {
       // If a virtual or interface method wasn't found with the expected type, look in
@@ -3041,11 +3063,11 @@
       if (method_type == METHOD_INTERFACE || method_type == METHOD_VIRTUAL) {
         res_method = klass->FindDirectMethod(name, signature);
       }
-      if (res_method == NULL) {
+      if (res_method == nullptr) {
         Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method "
                                      << PrettyDescriptor(klass) << "." << name
                                      << " " << signature;
-        return NULL;
+        return nullptr;
       }
     }
   }
@@ -3054,13 +3076,13 @@
   if (res_method->IsConstructor() && method_type != METHOD_DIRECT) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "rejecting non-direct call to constructor "
                                       << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   // Disallow any calls to class initializers.
   if (res_method->IsClassInitializer()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "rejecting call to class initializer "
                                       << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   // Check if access is allowed.
   if (!referrer.CanAccessMember(res_method->GetDeclaringClass(), res_method->GetAccessFlags())) {
@@ -3072,17 +3094,17 @@
   if (res_method->IsPrivate() && method_type == METHOD_VIRTUAL) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke-super/virtual can't be used on private method "
                                       << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   // Check that interface methods match interface classes.
   if (klass->IsInterface() && method_type != METHOD_INTERFACE) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "non-interface method " << PrettyMethod(res_method)
                                     << " is in an interface class " << PrettyClass(klass);
-    return NULL;
+    return nullptr;
   } else if (!klass->IsInterface() && method_type == METHOD_INTERFACE) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "interface method " << PrettyMethod(res_method)
                                     << " is in a non-interface class " << PrettyClass(klass);
-    return NULL;
+    return nullptr;
   }
   // See if the method type implied by the invoke instruction matches the access flags for the
   // target method.
@@ -3092,7 +3114,7 @@
       ) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "invoke type (" << method_type << ") does not match method "
                                        "type of " << PrettyMethod(res_method);
-    return NULL;
+    return nullptr;
   }
   return res_method;
 }
@@ -3126,7 +3148,7 @@
    * rigorous check here (which is okay since we have to do it at runtime).
    */
   if (method_type != METHOD_STATIC) {
-    const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+    const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst, is_range);
     if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
       CHECK(have_pending_hard_failure_);
       return nullptr;
@@ -3193,13 +3215,13 @@
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
         arg[sig_registers];
     if (reg_type.IsIntegralTypes()) {
-      const RegType& src_type = work_line_->GetRegisterType(get_reg);
+      const RegType& src_type = work_line_->GetRegisterType(this, get_reg);
       if (!src_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
             << " but expected " << reg_type;
         return res_method;
       }
-    } else if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
+    } else if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
       // Continue on soft failures. We need to find possible hard failures to avoid problems in the
       // compiler.
       if (have_pending_hard_failure_) {
@@ -3264,7 +3286,7 @@
   const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
 
   mirror::ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
-  if (res_method == NULL) {  // error or class is unresolved
+  if (res_method == nullptr) {  // error or class is unresolved
     // Check what we can statically.
     if (!have_pending_hard_failure_) {
       VerifyInvocationArgsUnresolvedMethod(inst, method_type, is_range);
@@ -3304,7 +3326,7 @@
                                                          RegisterLine* reg_line, bool is_range) {
   DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
          inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
-  const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+  const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range);
   if (!actual_arg_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
     return nullptr;
@@ -3324,27 +3346,27 @@
   CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
       << PrettyDescriptor(klass);
   mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
-  CHECK(!Thread::Current()->IsExceptionPending());
+  CHECK(!self_->IsExceptionPending());
   return res_method;
 }
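
GetQuickInvokedMethod works because a quickened invoke carries a vtable slot in vB where a normal invoke carries a dex method index; the elided part of the function reads it out essentially as (sketch; the accessors are the real Instruction ones):

  // For invoke-virtual-quick / invoke-virtual-range-quick, vB holds the
  // vtable index rather than a DexFile method index.
  inline uint32_t QuickVTableIndex(const Instruction* inst, bool is_range) {
    return is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  }
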
 
 mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
-                                                                     bool is_range) {
-  DCHECK(Runtime::Current()->IsStarted());
+                                                                bool is_range) {
+  DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
   mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
                                                         is_range);
-  if (res_method == NULL) {
+  if (res_method == nullptr) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
-    return NULL;
+    return nullptr;
   }
   CHECK(!res_method->IsDirect() && !res_method->IsStatic());
 
   // We use vAA as our expected arg count, rather than res_method->insSize, because we need to
   // match the call to the signature. Also, we might be calling through an abstract method
   // definition (which doesn't have register count values).
-  const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+  const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst, is_range);
   if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
-    return NULL;
+    return nullptr;
   }
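
GetInvocationThis, used throughout the invoke handling, pulls the first argument register of the invocation and insists it holds a reference, returning Conflict through the verifier on failure. A sketch (close to the ART implementation; the cache accessor is assumed):

  const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier,
                                                 const Instruction* inst, bool is_range) {
    const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
    if (args_count < 1) {
      verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
      return verifier->GetRegTypeCache()->Conflict();
    }
    // The 'this' argument is the first register of the invocation.
    const uint32_t this_reg = is_range ? inst->VRegC_3rc() : inst->VRegC_35c();
    const RegType& this_type = GetRegisterType(verifier, this_reg);
    if (!this_type.IsReferenceTypes()) {
      verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
          << "tried to get class from non-reference register v" << this_reg;
      return verifier->GetRegTypeCache()->Conflict();
    }
    return this_type;
  }
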
   const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
   /* caught by static verifier */
@@ -3352,7 +3374,7 @@
   if (expected_args > code_item_->outs_size_) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args
         << ") exceeds outsSize (" << code_item_->outs_size_ << ")";
-    return NULL;
+    return nullptr;
   }
 
   /*
@@ -3362,7 +3384,7 @@
    */
   if (actual_arg_type.IsUninitializedReference() && !res_method->IsConstructor()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
-    return NULL;
+    return nullptr;
   }
   if (!actual_arg_type.IsZero()) {
     mirror::Class* klass = res_method->GetDeclaringClass();
@@ -3374,7 +3396,7 @@
       Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS :
           VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type
           << "' not instance of '" << res_method_class << "'";
-      return NULL;
+      return nullptr;
     }
   }
   /*
@@ -3382,7 +3404,7 @@
    * have been verified, so we can't assume it's properly formed.
    */
   const DexFile::TypeList* params = res_method->GetParameterTypeList();
-  size_t params_size = params == NULL ? 0 : params->Size();
+  size_t params_size = params == nullptr ? 0 : params->Size();
   uint32_t arg[5];
   if (!is_range) {
     inst->GetVarArgs(arg);
@@ -3394,18 +3416,18 @@
                                        << "'. Expected " << expected_args
                                        << " arguments, processing argument " << actual_args
                                        << " (where longs/doubles count twice).";
-      return NULL;
+      return nullptr;
     }
     const char* descriptor =
         res_method->GetTypeDescriptorFromTypeIdx(params->GetTypeItem(param_index).type_idx_);
-    if (descriptor == NULL) {
+    if (descriptor == nullptr) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
                                         << " missing signature component";
-      return NULL;
+      return nullptr;
     }
     const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
-    if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
+    if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
       return res_method;
     }
     actual_args = reg_type.IsLongOrDoubleTypes() ? actual_args + 2 : actual_args + 1;
@@ -3413,7 +3435,7 @@
   if (actual_args != expected_args) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " << PrettyMethod(res_method)
               << " expected " << expected_args << " arguments, found " << actual_args;
-    return NULL;
+    return nullptr;
   } else {
     return res_method;
   }
@@ -3440,10 +3462,10 @@
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "new-array on non-array class " << res_type;
     } else if (!is_filled) {
       /* make sure "size" register is valid type */
-      work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
+      work_line_->VerifyRegisterType(this, inst->VRegB_22c(), reg_types_.Integer());
       /* set register type to array class */
       const RegType& precise_type = reg_types_.FromUninitialized(res_type);
-      work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
+      work_line_->SetRegisterType(this, inst->VRegA_22c(), precise_type);
     } else {
       // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
       // the list and fail. It's legal, if silly, for arg_count to be zero.
@@ -3455,34 +3477,35 @@
       }
       for (size_t ui = 0; ui < arg_count; ui++) {
         uint32_t get_reg = is_range ? inst->VRegC_3rc() + ui : arg[ui];
-        if (!work_line_->VerifyRegisterType(get_reg, expected_type)) {
-          work_line_->SetResultRegisterType(reg_types_.Conflict());
+        if (!work_line_->VerifyRegisterType(this, get_reg, expected_type)) {
+          work_line_->SetResultRegisterType(this, reg_types_.Conflict());
           return;
         }
       }
       // filled-array result goes into "result" register
       const RegType& precise_type = reg_types_.FromUninitialized(res_type);
-      work_line_->SetResultRegisterType(precise_type);
+      work_line_->SetResultRegisterType(this, precise_type);
     }
   }
 }
 
 void MethodVerifier::VerifyAGet(const Instruction* inst,
                                 const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+  const RegType& index_type = work_line_->GetRegisterType(this, inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array class; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type. TODO: have a proper notion of bottom here.
       if (!is_primitive || insn_type.IsCategory1Types()) {
         // Reference or category 1
-        work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Zero());
+        work_line_->SetRegisterType(this, inst->VRegA_23x(), reg_types_.Zero());
       } else {
         // Category 2
-        work_line_->SetRegisterTypeWide(inst->VRegA_23x(), reg_types_.FromCat2ConstLo(0, false),
+        work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(),
+                                        reg_types_.FromCat2ConstLo(0, false),
                                         reg_types_.FromCat2ConstHi(0, false));
       }
     } else if (!array_type.IsArrayTypes()) {
@@ -3506,9 +3529,9 @@
         // instruction, which can't differentiate object types and ints from floats, longs from
         // doubles.
         if (!component_type.IsLowHalf()) {
-          work_line_->SetRegisterType(inst->VRegA_23x(), component_type);
+          work_line_->SetRegisterType(this, inst->VRegA_23x(), component_type);
         } else {
-          work_line_->SetRegisterTypeWide(inst->VRegA_23x(), component_type,
+          work_line_->SetRegisterTypeWide(this, inst->VRegA_23x(), component_type,
                                           component_type.HighHalf(&reg_types_));
         }
       }
@@ -3521,7 +3544,7 @@
   // Primitive assignability rules are weaker than regular assignability rules.
   bool instruction_compatible;
   bool value_compatible;
-  const RegType& value_type = work_line_->GetRegisterType(vregA);
+  const RegType& value_type = work_line_->GetRegisterType(this, vregA);
   if (target_type.IsIntegralTypes()) {
     instruction_compatible = target_type.Equals(insn_type);
     value_compatible = value_type.IsIntegralTypes();
@@ -3533,7 +3556,7 @@
     // Additional register check: this is not checked statically (as part of VerifyInstructions),
     // as target_type depends on the resolved type of the field.
     if (instruction_compatible && work_line_->NumRegs() > vregA + 1) {
-      const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+      const RegType& value_type_hi = work_line_->GetRegisterType(this, vregA + 1);
       value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
     } else {
       value_compatible = false;
@@ -3543,7 +3566,7 @@
     // Additional register check: this is not checked statically (as part of VerifyInstructions),
     // as target_type depends on the resolved type of the field.
     if (instruction_compatible && work_line_->NumRegs() > vregA + 1) {
-      const RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+      const RegType& value_type_hi = work_line_->GetRegisterType(this, vregA + 1);
       value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
     } else {
       value_compatible = false;
@@ -3569,11 +3592,11 @@
 
 void MethodVerifier::VerifyAPut(const Instruction* inst,
                                 const RegType& insn_type, bool is_primitive) {
-  const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+  const RegType& index_type = work_line_->GetRegisterType(this, inst->VRegC_23x());
   if (!index_type.IsArrayIndexTypes()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
   } else {
-    const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+    const RegType& array_type = work_line_->GetRegisterType(this, inst->VRegB_23x());
     if (array_type.IsZero()) {
       // Null array type; this code path will fail at runtime. Infer a merge-able type from the
       // instruction type.
@@ -3592,7 +3615,7 @@
           // The instruction agrees with the type of the array; confirm the value to be stored does too.
           // Note: we use the instruction type (rather than the component type) for aput-object as
           // incompatible classes will be caught at runtime as an array store exception
-          work_line_->VerifyRegisterType(vregA, insn_type);
+          work_line_->VerifyRegisterType(this, vregA, insn_type);
         }
       }
     }
@@ -3607,29 +3630,29 @@
     AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
                                          dex_file_->GetFieldDeclaringClassDescriptor(field_id)));
-    return NULL;
+    return nullptr;
   }
   if (klass_type.IsUnresolvedTypes()) {
-    return NULL;  // Can't resolve Class so no more to do here, will do checking at runtime.
+    return nullptr;  // Can't resolve Class so no more to do here, will do checking at runtime.
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
                                                           class_loader_);
-  if (field == NULL) {
+  if (field == nullptr) {
     VLOG(verifier) << "Unable to resolve static field " << field_idx << " ("
               << dex_file_->GetFieldName(field_id) << ") in "
               << dex_file_->GetFieldDeclaringClassDescriptor(field_id);
-    DCHECK(Thread::Current()->IsExceptionPending());
-    Thread::Current()->ClearException();
-    return NULL;
+    DCHECK(self_->IsExceptionPending());
+    self_->ClearException();
+    return nullptr;
   } else if (!GetDeclaringClass().CanAccessMember(field->GetDeclaringClass(),
                                                   field->GetAccessFlags())) {
     Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot access static field " << PrettyField(field)
                                     << " from " << GetDeclaringClass();
-    return NULL;
+    return nullptr;
   } else if (!field->IsStatic()) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field) << " to be static";
-    return NULL;
+    return nullptr;
   }
   return field;
 }
@@ -3642,30 +3665,30 @@
     AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
                                          field_idx, dex_file_->GetFieldName(field_id),
                                          dex_file_->GetFieldDeclaringClassDescriptor(field_id)));
-    return NULL;
+    return nullptr;
   }
   if (klass_type.IsUnresolvedTypes()) {
-    return NULL;  // Can't resolve Class so no more to do here
+    return nullptr;  // Can't resolve Class so no more to do here
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
                                                           class_loader_);
-  if (field == NULL) {
+  if (field == nullptr) {
     VLOG(verifier) << "Unable to resolve instance field " << field_idx << " ("
               << dex_file_->GetFieldName(field_id) << ") in "
               << dex_file_->GetFieldDeclaringClassDescriptor(field_id);
-    DCHECK(Thread::Current()->IsExceptionPending());
-    Thread::Current()->ClearException();
-    return NULL;
+    DCHECK(self_->IsExceptionPending());
+    self_->ClearException();
+    return nullptr;
   } else if (!GetDeclaringClass().CanAccessMember(field->GetDeclaringClass(),
                                                   field->GetAccessFlags())) {
     Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot access instance field " << PrettyField(field)
                                     << " from " << GetDeclaringClass();
-    return NULL;
+    return nullptr;
   } else if (field->IsStatic()) {
     Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected field " << PrettyField(field)
                                     << " to not be static";
-    return NULL;
+    return nullptr;
   } else if (obj_type.IsZero()) {
     // Cannot infer and check type, however, access will cause null pointer exception
     return field;
@@ -3673,7 +3696,7 @@
     // Trying to read a field from something that isn't a reference
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance field access on object that has "
                                       << "non-reference type " << obj_type;
-    return NULL;
+    return nullptr;
   } else {
     mirror::Class* klass = field->GetDeclaringClass();
     const RegType& field_klass =
@@ -3687,14 +3710,14 @@
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "cannot access instance field " << PrettyField(field)
                                         << " of a not fully initialized object within the context"
                                         << " of " << PrettyMethod(dex_method_idx_, *dex_file_);
-      return NULL;
+      return nullptr;
     } else if (!field_klass.IsAssignableFrom(obj_type)) {
       // Trying to access C1.field1 using reference of type C2, which is neither C1 or a sub-class
       // of C1. For resolution to occur the declared class of the field must be compatible with
       // obj_type, we've discovered this wasn't so, so report the field didn't exist.
       Fail(VERIFY_ERROR_NO_FIELD) << "cannot access instance field " << PrettyField(field)
                                   << " from object of type " << obj_type;
-      return NULL;
+      return nullptr;
     } else {
       return field;
     }
@@ -3708,15 +3731,14 @@
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    const RegType& object_type = work_line_->GetRegisterType(this, inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
   const RegType* field_type = nullptr;
-  if (field != NULL) {
-    Thread* self = Thread::Current();
+  if (field != nullptr) {
     mirror::Class* field_type_class;
     {
-      StackHandleScope<1> hs(self);
+      StackHandleScope<1> hs(self_);
       HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
       field_type_class = FieldHelper(h_field).GetType(can_load_classes_);
     }
@@ -3724,8 +3746,8 @@
       field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                          field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
-      DCHECK(!can_load_classes_ || self->IsExceptionPending());
-      self->ClearException();
+      DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+      self_->ClearException();
     }
   }
   if (field_type == nullptr) {
@@ -3756,14 +3778,14 @@
                                         << " to be compatible with type '" << insn_type
                                         << "' but found type '" << *field_type
                                         << "' in Get-object";
-      work_line_->SetRegisterType(vregA, reg_types_.Conflict());
+      work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
       return;
     }
   }
   if (!field_type->IsLowHalf()) {
-    work_line_->SetRegisterType(vregA, *field_type);
+    work_line_->SetRegisterType(this, vregA, *field_type);
   } else {
-    work_line_->SetRegisterTypeWide(vregA, *field_type, field_type->HighHalf(&reg_types_));
+    work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
   }
 }
 
@@ -3774,11 +3796,11 @@
   if (is_static) {
     field = GetStaticField(field_idx);
   } else {
-    const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+    const RegType& object_type = work_line_->GetRegisterType(this, inst->VRegB_22c());
     field = GetInstanceField(object_type, field_idx);
   }
   const RegType* field_type = nullptr;
-  if (field != NULL) {
+  if (field != nullptr) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
                                       << " from other class " << GetDeclaringClass();
@@ -3786,7 +3808,7 @@
     }
     mirror::Class* field_type_class;
     {
-      StackHandleScope<1> hs(Thread::Current());
+      StackHandleScope<1> hs(self_);
       HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
       FieldHelper fh(h_field);
       field_type_class = fh.GetType(can_load_classes_);
@@ -3795,9 +3817,8 @@
       field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                          field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
-      Thread* self = Thread::Current();
-      DCHECK(!can_load_classes_ || self->IsExceptionPending());
-      self->ClearException();
+      DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+      self_->ClearException();
     }
   }
   if (field_type == nullptr) {
@@ -3817,7 +3838,7 @@
                                         << "' in put-object";
       return;
     }
-    work_line_->VerifyRegisterType(vregA, *field_type);
+    work_line_->VerifyRegisterType(this, vregA, *field_type);
   }
 }
 
@@ -3829,7 +3850,7 @@
          inst->Opcode() == Instruction::IPUT_QUICK ||
          inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
          inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
-  const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+  const RegType& object_type = reg_line->GetRegisterType(this, inst->VRegB_22c());
   if (!object_type.HasClass()) {
     VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
     return nullptr;
@@ -3846,15 +3867,15 @@
 
 void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
                                      bool is_primitive) {
-  DCHECK(Runtime::Current()->IsStarted());
+  DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
-  if (field == NULL) {
+  if (field == nullptr) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
     return;
   }
   mirror::Class* field_type_class;
   {
-    StackHandleScope<1> hs(Thread::Current());
+    StackHandleScope<1> hs(self_);
     HandleWrapper<mirror::ArtField> h_field(hs.NewHandleWrapper(&field));
     FieldHelper fh(h_field);
     field_type_class = fh.GetType(can_load_classes_);
@@ -3864,9 +3885,8 @@
     field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
                                        field_type_class->CannotBeAssignedFromOtherTypes());
   } else {
-    Thread* self = Thread::Current();
-    DCHECK(!can_load_classes_ || self->IsExceptionPending());
-    self->ClearException();
+    DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+    self_->ClearException();
     field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
                                             field->GetTypeDescriptor(), false);
   }
@@ -3893,29 +3913,29 @@
                                         << " to be compatible with type '" << insn_type
                                         << "' but found type '" << *field_type
                                         << "' in get-object";
-      work_line_->SetRegisterType(vregA, reg_types_.Conflict());
+      work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
       return;
     }
   }
   if (!field_type->IsLowHalf()) {
-    work_line_->SetRegisterType(vregA, *field_type);
+    work_line_->SetRegisterType(this, vregA, *field_type);
   } else {
-    work_line_->SetRegisterTypeWide(vregA, *field_type, field_type->HighHalf(&reg_types_));
+    work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(&reg_types_));
   }
 }
 
 void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
                                      bool is_primitive) {
-  DCHECK(Runtime::Current()->IsStarted());
+  DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
   mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
-  if (field == NULL) {
+  if (field == nullptr) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name();
     return;
   }
   const char* descriptor = field->GetTypeDescriptor();
   mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
   const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
-  if (field != NULL) {
+  if (field != nullptr) {
     if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
       Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
                                       << " from other class " << GetDeclaringClass();
@@ -3927,7 +3947,7 @@
     // Primitive field assignability rules are weaker than regular assignability rules
     bool instruction_compatible;
     bool value_compatible;
-    const RegType& value_type = work_line_->GetRegisterType(vregA);
+    const RegType& value_type = work_line_->GetRegisterType(this, vregA);
     if (field_type.IsIntegralTypes()) {
       instruction_compatible = insn_type.IsIntegralTypes();
       value_compatible = value_type.IsIntegralTypes();
@@ -3969,7 +3989,7 @@
                                         << "' in put-object";
       return;
     }
-    work_line_->VerifyRegisterType(vregA, field_type);
+    work_line_->VerifyRegisterType(this, vregA, field_type);
   }
 }
 
@@ -3995,7 +4015,7 @@
       target_line->CopyFromLine(merge_line);
     } else {
       // Verify that the monitor stack is empty on return.
-      if (!merge_line->VerifyMonitorStackEmpty()) {
+      if (!merge_line->VerifyMonitorStackEmpty(this)) {
         return false;
       }
       // For returns we only care about the operand to the return, all other registers are dead.
@@ -4003,33 +4023,33 @@
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
       Instruction::Code opcode = ret_inst->Opcode();
       if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
-        target_line->MarkAllRegistersAsConflicts();
+        target_line->MarkAllRegistersAsConflicts(this);
       } else {
         target_line->CopyFromLine(merge_line);
         if (opcode == Instruction::RETURN_WIDE) {
-          target_line->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x());
+          target_line->MarkAllRegistersAsConflictsExceptWide(this, ret_inst->VRegA_11x());
         } else {
-          target_line->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x());
+          target_line->MarkAllRegistersAsConflictsExcept(this, ret_inst->VRegA_11x());
         }
       }
     }
   } else {
     std::unique_ptr<RegisterLine> copy(gDebugVerify ?
                                  RegisterLine::Create(target_line->NumRegs(), this) :
-                                 NULL);
+                                 nullptr);
     if (gDebugVerify) {
       copy->CopyFromLine(target_line);
     }
-    changed = target_line->MergeRegisters(merge_line);
+    changed = target_line->MergeRegisters(this, merge_line);
     if (have_pending_hard_failure_) {
       return false;
     }
     if (gDebugVerify && changed) {
       LogVerifyInfo() << "Merging at [" << reinterpret_cast<void*>(work_insn_idx_) << "]"
                       << " to [" << reinterpret_cast<void*>(next_insn) << "]: " << "\n"
-                      << *copy.get() << "  MERGE\n"
-                      << *merge_line << "  ==\n"
-                      << *target_line << "\n";
+                      << copy->Dump(this) << "  MERGE\n"
+                      << merge_line->Dump(this) << "  ==\n"
+                      << target_line->Dump(this) << "\n";
     }
     if (update_merge_line && changed) {
       merge_line->CopyFromLine(target_line);
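
The hunk above drives the verifier's dataflow fixed point: the incoming register line is joined into the target line by MergeRegisters, which now takes the verifier explicitly and reports whether any register widened, so the successor instruction gets revisited only when something changed. Below is a minimal, self-contained sketch of that merge-and-report contract; the toy integer lattice and names are illustrative stand-ins, not ART code, and both lines are assumed to have the same number of registers.

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Toy lattice standing in for RegType: larger value = less precise; join = max.
    using ToyRegType = int;

    // Stand-in for RegisterLine::MergeRegisters: join the incoming line into the
    // target register-by-register and report whether anything widened, so the
    // caller knows the successor instruction must be revisited.
    bool MergeRegisters(std::vector<ToyRegType>* target,
                        const std::vector<ToyRegType>& incoming) {
      bool changed = false;
      for (size_t i = 0; i < target->size(); ++i) {
        ToyRegType joined = std::max((*target)[i], incoming[i]);  // RegType::Merge analogue
        if (joined != (*target)[i]) {
          (*target)[i] = joined;
          changed = true;
        }
      }
      return changed;
    }

    int main() {
      std::vector<ToyRegType> target = {0, 2, 1};
      std::vector<ToyRegType> incoming = {1, 1, 1};
      std::cout << MergeRegisters(&target, incoming) << "\n";  // 1: register 0 widened
    }
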
@@ -4048,8 +4068,7 @@
 const RegType& MethodVerifier::GetMethodReturnType() {
   if (return_type_ == nullptr) {
     if (mirror_method_.Get() != nullptr) {
-      Thread* self = Thread::Current();
-      StackHandleScope<1> hs(self);
+      StackHandleScope<1> hs(self_);
       mirror::Class* return_type_class =
           MethodHelper(hs.NewHandle(mirror_method_.Get())).GetReturnType(can_load_classes_);
       if (return_type_class != nullptr) {
@@ -4057,8 +4076,8 @@
                                              return_type_class,
                                              return_type_class->CannotBeAssignedFromOtherTypes());
       } else {
-        DCHECK(!can_load_classes_ || self->IsExceptionPending());
-        self->ClearException();
+        DCHECK(!can_load_classes_ || self_->IsExceptionPending());
+        self_->ClearException();
       }
     }
     if (return_type_ == nullptr) {
@@ -4073,7 +4092,7 @@
 }
 
 const RegType& MethodVerifier::GetDeclaringClass() {
-  if (declaring_class_ == NULL) {
+  if (declaring_class_ == nullptr) {
     const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     const char* descriptor
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
@@ -4093,16 +4112,19 @@
   DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
   std::vector<int32_t> result;
   for (size_t i = 0; i < line->NumRegs(); ++i) {
-    const RegType& type = line->GetRegisterType(i);
+    const RegType& type = line->GetRegisterType(this, i);
     if (type.IsConstant()) {
       result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
-      result.push_back(type.ConstantValue());
+      const ConstantType* const_val = down_cast<const ConstantType*>(&type);
+      result.push_back(const_val->ConstantValue());
     } else if (type.IsConstantLo()) {
       result.push_back(type.IsPreciseConstantLo() ? kConstant : kImpreciseConstant);
-      result.push_back(type.ConstantValueLo());
+      const ConstantType* const_val = down_cast<const ConstantType*>(&type);
+      result.push_back(const_val->ConstantValueLo());
     } else if (type.IsConstantHi()) {
       result.push_back(type.IsPreciseConstantHi() ? kConstant : kImpreciseConstant);
-      result.push_back(type.ConstantValueHi());
+      const ConstantType* const_val = down_cast<const ConstantType*>(&type);
+      result.push_back(const_val->ConstantValueHi());
     } else if (type.IsIntegralTypes()) {
       result.push_back(kIntVReg);
       result.push_back(0);
@@ -4169,6 +4191,10 @@
   verifier::RegTypeCache::ShutDown();
 }
 
+void MethodVerifier::VisitStaticRoots(RootCallback* callback, void* arg) {
+  RegTypeCache::VisitStaticRoots(callback, arg);
+}
+
 void MethodVerifier::VisitRoots(RootCallback* callback, void* arg) {
   reg_types_.VisitRoots(callback, arg);
 }
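
Two mechanical changes recur throughout the method_verifier.cc hunks above: calls to Thread::Current() are replaced by the cached self_ member, and RegisterLine/RegType helpers now take the owning MethodVerifier (this) as an explicit argument. Below is a minimal sketch of the caching half, assuming a TLS-backed Current(); the Thread and Verifier types here are illustrative stand-ins, not ART code.

    #include <cassert>
    #include <iostream>

    struct Thread {
      bool exception_pending = false;
      // Stand-in for Thread::Current(): a thread-local lookup on every call.
      static Thread* Current() {
        static thread_local Thread current;
        return &current;
      }
    };

    class Verifier {
     public:
      explicit Verifier(Thread* self) : self_(self) {}

      // Before the change, failure paths called Thread::Current() each time;
      // with the cached member there is one lookup per verifier instance, and
      // the verifying thread is explicit in the object's state.
      void ClearPendingException() {
        assert(self_->exception_pending);
        self_->exception_pending = false;
      }

     private:
      Thread* const self_;  // the thread we're verifying on
    };

    int main() {
      Thread* self = Thread::Current();  // looked up once, at the boundary
      Verifier verifier(self);
      self->exception_pending = true;
      verifier.ClearPendingException();
      std::cout << "pending: " << self->exception_pending << "\n";  // pending: 0
    }
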
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 45c0a03..87acb20 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -140,20 +140,24 @@
   };
 
   /* Verify a class. Returns "kNoFailure" on success. */
-  static FailureKind VerifyClass(mirror::Class* klass, bool allow_soft_failures, std::string* error)
+  static FailureKind VerifyClass(Thread* self, mirror::Class* klass, bool allow_soft_failures,
+                                 std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FailureKind VerifyClass(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
-                                 ConstHandle<mirror::ClassLoader> class_loader,
+  static FailureKind VerifyClass(Thread* self, const DexFile* dex_file,
+                                 Handle<mirror::DexCache> dex_cache,
+                                 Handle<mirror::ClassLoader> class_loader,
                                  const DexFile::ClassDef* class_def,
                                  bool allow_soft_failures, std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
-                                  ConstHandle<mirror::DexCache> dex_cache,
-                                  ConstHandle<mirror::ClassLoader> class_loader,
-                                  const DexFile::ClassDef* class_def,
-                                  const DexFile::CodeItem* code_item,
-                                  ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags)
+  static MethodVerifier* VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t method_idx,
+                                             const DexFile* dex_file,
+                                             Handle<mirror::DexCache> dex_cache,
+                                             Handle<mirror::ClassLoader> class_loader,
+                                             const DexFile::ClassDef* class_def,
+                                             const DexFile::CodeItem* code_item,
+                                             Handle<mirror::ArtMethod> method,
+                                             uint32_t method_access_flags)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   uint8_t EncodePcToReferenceMapData() const;
@@ -202,13 +206,15 @@
     return can_load_classes_;
   }
 
-  MethodVerifier(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
-                 ConstHandle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
+  MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
+                 Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
                  const DexFile::CodeItem* code_item, uint32_t method_idx,
-                 ConstHandle<mirror::ArtMethod> method,
+                 Handle<mirror::ArtMethod> method,
                  uint32_t access_flags, bool can_load_classes, bool allow_soft_failures,
-                 bool need_precise_constants)
-          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+                 bool need_precise_constants) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : MethodVerifier(self, dex_file, dex_cache, class_loader, class_def, code_item, method_idx,
+                       method, access_flags, can_load_classes, allow_soft_failures,
+                       need_precise_constants, false) {}
 
   ~MethodVerifier();
 
@@ -219,6 +225,8 @@
   // Describe VRegs at the given dex pc.
   std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
 
+  static void VisitStaticRoots(RootCallback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Accessors used by the compiler via CompilerCallback
@@ -236,6 +244,15 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
+  // Private constructor for dumping.
+  MethodVerifier(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
+                 Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
+                 const DexFile::CodeItem* code_item, uint32_t method_idx,
+                 Handle<mirror::ArtMethod> method, uint32_t access_flags,
+                 bool can_load_classes, bool allow_soft_failures, bool need_precise_constants,
+                 bool verify_to_dump)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Adds the given string to the beginning of the last failure message.
   void PrependToLastFailMessage(std::string);
 
@@ -253,14 +270,14 @@
    *  (3) Iterate through the method, checking type safety and looking
    *      for code flow problems.
    */
-  static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
-                                  ConstHandle<mirror::DexCache> dex_cache,
-                                  ConstHandle<mirror::ClassLoader> class_loader,
+  static FailureKind VerifyMethod(Thread* self, uint32_t method_idx, const DexFile* dex_file,
+                                  Handle<mirror::DexCache> dex_cache,
+                                  Handle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def_idx,
                                   const DexFile::CodeItem* code_item,
-                                  ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags,
+                                  Handle<mirror::ArtMethod> method, uint32_t method_access_flags,
                                   bool allow_soft_failures, bool need_precise_constants)
-          SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -504,7 +521,7 @@
                    bool is_primitive, bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Returns the access field of a quick field access (iget/iput-quick) or NULL
+  // Returns the accessed field of a quick field access (iget/iput-quick) or nullptr
   // if it cannot be found.
   mirror::ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -559,7 +576,7 @@
    * Widening conversions on integers and references are allowed, but
    * narrowing conversions are not.
    *
-   * Returns the resolved method on success, NULL on failure (with *failure
+   * Returns the resolved method on success, nullptr on failure (with *failure
    * set appropriately).
    */
   mirror::ArtMethod* VerifyInvocationArgs(const Instruction* inst,
@@ -625,6 +642,9 @@
   const RegType& DetermineCat1Constant(int32_t value, bool precise)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // The thread we're verifying on.
+  Thread* const self_;
+
   RegTypeCache reg_types_;
 
   PcToRegisterLineTable reg_table_;
@@ -641,14 +661,14 @@
 
   const uint32_t dex_method_idx_;  // The method we're working on.
   // Its object representation if known.
-  ConstHandle<mirror::ArtMethod> mirror_method_ GUARDED_BY(Locks::mutator_lock_);
+  Handle<mirror::ArtMethod> mirror_method_ GUARDED_BY(Locks::mutator_lock_);
   const uint32_t method_access_flags_;  // Method's access flags.
   const RegType* return_type_;  // Lazily computed return type of the method.
   const DexFile* const dex_file_;  // The dex file containing the method.
   // The dex_cache for the declaring class of the method.
-  ConstHandle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
+  Handle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
   // The class loader for the declaring class of the method.
-  ConstHandle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
+  Handle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
   const DexFile::ClassDef* const class_def_;  // The class def of the declaring class of the method.
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
   const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
@@ -657,7 +677,7 @@
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
   uint32_t interesting_dex_pc_;
   // The container into which FindLocksAtDexPc should write the registers containing held locks,
-  // NULL if we're not doing FindLocksAtDexPc.
+  // nullptr if we're not doing FindLocksAtDexPc.
   std::vector<uint32_t>* monitor_enter_dex_pcs_;
 
   // The types of any error that occurs.
@@ -699,6 +719,11 @@
   // Indicates the method being verified contains at least one invoke-virtual/range
   // or invoke-interface/range.
   bool has_virtual_or_interface_invokes_;
+
+  // Indicates whether we verify in order to dump verification info. In that case we accept
+  // quickened instructions even though we might be running as a compiler. Should only be set
+  // when running VerifyMethodAndDump.
+  const bool verify_to_dump_;
 };
 std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
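
The constructor change above uses a C++11 delegating constructor: the existing public constructor now forwards to a new private one carrying an extra verify_to_dump flag, hard-coded to false, so only the class itself (via VerifyMethodAndDump) can enable dump mode. A short sketch of the idiom, with hypothetical names:

    #include <iostream>

    class Checker {
     public:
      // Public surface is unchanged: outside callers can never enable dump mode.
      explicit Checker(int method_idx) : Checker(method_idx, /*verify_to_dump=*/false) {}

      int method_idx() const { return method_idx_; }
      bool verify_to_dump() const { return verify_to_dump_; }

     private:
      // Private constructor, reachable only from within the class (e.g. from a
      // static factory playing the role of VerifyMethodAndDump).
      Checker(int method_idx, bool verify_to_dump)
          : method_idx_(method_idx), verify_to_dump_(verify_to_dump) {}

      const int method_idx_;
      const bool verify_to_dump_;
    };

    int main() {
      Checker checker(42);
      std::cout << std::boolalpha << checker.verify_to_dump() << "\n";  // false
    }
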
 
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index a5895e6..770ca7e 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -32,11 +32,12 @@
   void VerifyClass(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ASSERT_TRUE(descriptor != NULL);
-    mirror::Class* klass = class_linker_->FindSystemClass(Thread::Current(), descriptor.c_str());
+    Thread* self = Thread::Current();
+    mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
 
     // Verify the class
     std::string error_msg;
-    ASSERT_TRUE(MethodVerifier::VerifyClass(klass, true, &error_msg) == MethodVerifier::kNoFailure)
+    ASSERT_TRUE(MethodVerifier::VerifyClass(self, klass, true, &error_msg) == MethodVerifier::kNoFailure)
         << error_msg;
   }
 
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
new file mode 100644
index 0000000..480ed40
--- /dev/null
+++ b/runtime/verifier/reg_type-inl.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VERIFIER_REG_TYPE_INL_H_
+#define ART_RUNTIME_VERIFIER_REG_TYPE_INL_H_
+
+#include "reg_type.h"
+
+#include "base/casts.h"
+#include "mirror/class.h"
+
+namespace art {
+namespace verifier {
+
+inline bool RegType::CanAccess(const RegType& other) const {
+  if (Equals(other)) {
+    return true;  // Trivial accessibility.
+  } else {
+    bool this_unresolved = IsUnresolvedTypes();
+    bool other_unresolved = other.IsUnresolvedTypes();
+    if (!this_unresolved && !other_unresolved) {
+      return GetClass()->CanAccess(other.GetClass());
+    } else if (!other_unresolved) {
+      return other.GetClass()->IsPublic();  // Be conservative, only allow if other is public.
+    } else {
+      return false;  // More complicated test not possible on unresolved types, be conservative.
+    }
+  }
+}
+
+inline bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+  if ((access_flags & kAccPublic) != 0) {
+    return true;
+  }
+  if (!IsUnresolvedTypes()) {
+    return GetClass()->CanAccessMember(klass, access_flags);
+  } else {
+    return false;  // More complicated test not possible on unresolved types, be conservative.
+  }
+}
+
+inline bool RegType::IsConstantBoolean() const {
+  if (!IsConstant()) {
+    return false;
+  } else {
+    const ConstantType* const_val = down_cast<const ConstantType*>(this);
+    return const_val->ConstantValue() >= 0 && const_val->ConstantValue() <= 1;
+  }
+}
+
+inline bool RegType::AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict) {
+  if (lhs.Equals(rhs)) {
+    return true;
+  } else {
+    if (lhs.IsBoolean()) {
+      return rhs.IsBooleanTypes();
+    } else if (lhs.IsByte()) {
+      return rhs.IsByteTypes();
+    } else if (lhs.IsShort()) {
+      return rhs.IsShortTypes();
+    } else if (lhs.IsChar()) {
+      return rhs.IsCharTypes();
+    } else if (lhs.IsInteger()) {
+      return rhs.IsIntegralTypes();
+    } else if (lhs.IsFloat()) {
+      return rhs.IsFloatTypes();
+    } else if (lhs.IsLongLo()) {
+      return rhs.IsLongTypes();
+    } else if (lhs.IsDoubleLo()) {
+      return rhs.IsDoubleTypes();
+    } else {
+      CHECK(lhs.IsReferenceTypes())
+          << "Unexpected register type in IsAssignableFrom: '"
+          << lhs << "' := '" << rhs << "'";
+      if (rhs.IsZero()) {
+        return true;  // All reference types can be assigned null.
+      } else if (!rhs.IsReferenceTypes()) {
+        return false;  // Expect rhs to be a reference type.
+      } else if (lhs.IsJavaLangObject()) {
+        return true;  // All reference types can be assigned to Object.
+      } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
+        // If we're not strict, allow assignment to any interface; see the comment in ClassJoin.
+        return true;
+      } else if (lhs.IsJavaLangObjectArray()) {
+        return rhs.IsObjectArrayTypes();  // All reference arrays may be assigned to Object[]
+      } else if (lhs.HasClass() && rhs.HasClass() &&
+                 lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) {
+        // We're assignable from the Class point-of-view.
+        return true;
+      } else {
+        // Unresolved types are only assignable for null and equality.
+        return false;
+      }
+    }
+  }
+}
+
+inline bool RegType::IsAssignableFrom(const RegType& src) const {
+  return AssignableFrom(*this, src, false);
+}
+
+inline bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+  return AssignableFrom(*this, src, true);
+}
+
+inline const DoubleHiType* DoubleHiType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const DoubleLoType* DoubleLoType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const LongHiType* LongHiType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const LongLoType* LongLoType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const FloatType* FloatType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const CharType* CharType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const ShortType* ShortType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const ByteType* ByteType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+
+inline const IntegerType* IntegerType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const BooleanType* BooleanType::GetInstance() {
+  DCHECK(BooleanType::instance_ != nullptr);
+  return BooleanType::instance_;
+}
+
+inline const ConflictType* ConflictType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+inline const UndefinedType* UndefinedType::GetInstance() {
+  DCHECK(instance_ != nullptr);
+  return instance_;
+}
+
+}  // namespace verifier
+}  // namespace art
+
+#endif  // ART_RUNTIME_VERIFIER_REG_TYPE_INL_H_
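
reg_type-inl.h follows ART's -inl.h convention: small, hot functions (the accessibility checks, the assignability lattice, the singleton getters) move out of reg_type.cc into inline definitions, while reg_type.h keeps only declarations and stays cheap to include; translation units that call the bodies include the -inl.h instead, as reg_type.cc does in the next hunk. A layout sketch with hypothetical file and type names (save the three pieces as separate files to compile):

    // --- widget.h: declarations only; cheap to include everywhere ---
    #ifndef WIDGET_H_
    #define WIDGET_H_
    class Widget {
     public:
      explicit Widget(int v) : v_(v) {}
      bool IsPositive() const;  // hot predicate; body lives in widget-inl.h
     private:
      int v_;
    };
    #endif  // WIDGET_H_

    // --- widget-inl.h: inline definitions for the hot paths ---
    #ifndef WIDGET_INL_H_
    #define WIDGET_INL_H_
    #include "widget.h"
    inline bool Widget::IsPositive() const { return v_ > 0; }
    #endif  // WIDGET_INL_H_

    // --- widget.cc: callers of the bodies include the -inl.h ---
    #include "widget-inl.h"
    #include <iostream>
    int main() {
      std::cout << Widget(3).IsPositive() << "\n";  // 1
    }
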
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 68c7849..41541b5 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -14,8 +14,7 @@
  * limitations under the License.
  */
 
-#include "reg_type.h"
-
+#include "reg_type-inl.h"
 
 #include "base/casts.h"
 #include "class_linker-inl.h"
@@ -33,41 +32,23 @@
 namespace art {
 namespace verifier {
 
-UndefinedType* UndefinedType::instance_ = NULL;
-ConflictType* ConflictType::instance_ = NULL;
-BooleanType* BooleanType::instance = NULL;
-ByteType* ByteType::instance_ = NULL;
-ShortType* ShortType::instance_ = NULL;
-CharType* CharType::instance_ = NULL;
-FloatType* FloatType::instance_ = NULL;
-LongLoType* LongLoType::instance_ = NULL;
-LongHiType* LongHiType::instance_ = NULL;
-DoubleLoType* DoubleLoType::instance_ = NULL;
-DoubleHiType* DoubleHiType::instance_ = NULL;
-IntegerType* IntegerType::instance_ = NULL;
-
-int32_t RegType::ConstantValue() const {
-  ScopedObjectAccess soa(Thread::Current());
-  LOG(FATAL) << "Unexpected call to ConstantValue: " << *this;
-  return 0;
-}
-
-int32_t RegType::ConstantValueLo() const {
-  ScopedObjectAccess soa(Thread::Current());
-  LOG(FATAL) << "Unexpected call to ConstantValueLo: " << *this;
-  return 0;
-}
-
-int32_t RegType::ConstantValueHi() const {
-  ScopedObjectAccess soa(Thread::Current());
-  LOG(FATAL) << "Unexpected call to ConstantValueHi: " << *this;
-  return 0;
-}
+const UndefinedType* UndefinedType::instance_ = nullptr;
+const ConflictType* ConflictType::instance_ = nullptr;
+const BooleanType* BooleanType::instance_ = nullptr;
+const ByteType* ByteType::instance_ = nullptr;
+const ShortType* ShortType::instance_ = nullptr;
+const CharType* CharType::instance_ = nullptr;
+const FloatType* FloatType::instance_ = nullptr;
+const LongLoType* LongLoType::instance_ = nullptr;
+const LongHiType* LongHiType::instance_ = nullptr;
+const DoubleLoType* DoubleLoType::instance_ = nullptr;
+const DoubleHiType* DoubleHiType::instance_ = nullptr;
+const IntegerType* IntegerType::instance_ = nullptr;
 
 PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
     : RegType(klass, descriptor, cache_id) {
-  CHECK(klass != NULL);
+  CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
@@ -142,222 +123,160 @@
     return "Integer";
 }
 
-DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                           uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new DoubleHiType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-DoubleHiType* DoubleHiType::GetInstance() {
-  CHECK(instance_ != NULL);
+const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
+                                                 const std::string& descriptor,
+                                                 uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new DoubleHiType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void DoubleHiType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                           uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new DoubleLoType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-DoubleLoType* DoubleLoType::GetInstance() {
-  CHECK(instance_ != NULL);
+const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
+                                                 const std::string& descriptor,
+                                                 uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new DoubleLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void DoubleLoType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                       uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new LongLoType(klass, descriptor, cache_id);
-  }
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                             uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new LongLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
-LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                       uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new LongHiType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-LongHiType* LongHiType::GetInstance() {
-  CHECK(instance_ != NULL);
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                             uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new LongHiType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void LongHiType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-LongLoType* LongLoType::GetInstance() {
-  CHECK(instance_ != NULL);
-  return instance_;
-}
-
 void LongLoType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new FloatType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-FloatType* FloatType::GetInstance() {
-  CHECK(instance_ != NULL);
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                           uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new FloatType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void FloatType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new CharType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-CharType* CharType::GetInstance() {
-  CHECK(instance_ != NULL);
+const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                         uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new CharType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void CharType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new ShortType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-ShortType* ShortType::GetInstance() {
-  CHECK(instance_ != NULL);
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                           uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new ShortType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void ShortType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new ByteType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-ByteType* ByteType::GetInstance() {
-  CHECK(instance_ != NULL);
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                         uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new ByteType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void ByteType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                         uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new IntegerType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-IntegerType* IntegerType::GetInstance() {
-  CHECK(instance_ != NULL);
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+                                               uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new IntegerType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void IntegerType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-ConflictType* ConflictType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                           uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new ConflictType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-ConflictType* ConflictType::GetInstance() {
-  CHECK(instance_ != NULL);
+const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
+                                                 const std::string& descriptor,
+                                                 uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new ConflictType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void ConflictType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
-BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
                                          uint16_t cache_id) {
-  if (BooleanType::instance == NULL) {
-    instance = new BooleanType(klass, descriptor, cache_id);
-  }
-  return BooleanType::instance;
-}
-
-BooleanType* BooleanType::GetInstance() {
-  CHECK(BooleanType::instance != NULL);
-  return BooleanType::instance;
+  CHECK(BooleanType::instance_ == nullptr);
+  instance_ = new BooleanType(klass, descriptor, cache_id);
+  return BooleanType::instance_;
 }
 
 void BooleanType::Destroy() {
-  if (BooleanType::instance != NULL) {
-    delete instance;
-    instance = NULL;
+  if (BooleanType::instance_ != nullptr) {
+    delete instance_;
+    instance_ = nullptr;
   }
 }
 
@@ -365,23 +284,18 @@
   return "Undefined";
 }
 
-UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                             uint16_t cache_id) {
-  if (instance_ == NULL) {
-    instance_ = new UndefinedType(klass, descriptor, cache_id);
-  }
-  return instance_;
-}
-
-UndefinedType* UndefinedType::GetInstance() {
-  CHECK(instance_ != NULL);
+const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
+                                                   const std::string& descriptor,
+                                                   uint16_t cache_id) {
+  CHECK(instance_ == nullptr);
+  instance_ = new UndefinedType(klass, descriptor, cache_id);
   return instance_;
 }
 
 void UndefinedType::Destroy() {
-  if (instance_ != NULL) {
+  if (instance_ != nullptr) {
     delete instance_;
-    instance_ = NULL;
+    instance_ = nullptr;
   }
 }
 
@@ -528,18 +442,6 @@
   return result.str();
 }
 
-ConstantType::ConstantType(uint32_t constant, uint16_t cache_id)
-    : RegType(NULL, "", cache_id), constant_(constant) {
-}
-
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (incoming_type.IsUndefined()) {
-    return *this;  // Undefined MERGE Undefined => Undefined
-  }
-  return reg_types->Conflict();
-}
-
 const RegType& RegType::HighHalf(RegTypeCache* cache) const {
   DCHECK(IsLowHalf());
   if (IsLongLo()) {
@@ -548,7 +450,8 @@
     return cache->DoubleHi();
   } else {
     DCHECK(IsImpreciseConstantLo());
-    return cache->FromCat2ConstHi(ConstantValue(), false);
+    const ConstantType* const_val = down_cast<const ConstantType*>(this);
+    return cache->FromCat2ConstHi(const_val->ConstantValue(), false);
   }
 }
 
@@ -586,24 +489,21 @@
 bool UnresolvedType::IsNonZeroReferenceTypes() const {
   return true;
 }
+
 std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
   std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
-  const RegType& _left(reg_type_cache_->GetFromId(refs.first));
-  RegType& __left(const_cast<RegType&>(_left));
-  UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
-
-  RegType& _right(
-      const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
-  UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
+  const RegType& left = reg_type_cache_->GetFromId(refs.first);
+  const RegType& right = reg_type_cache_->GetFromId(refs.second);
 
   std::set<uint16_t> types;
-  if (left->IsUnresolvedMergedReference()) {
-    types = left->GetMergedTypes();
+  if (left.IsUnresolvedMergedReference()) {
+    types = down_cast<const UnresolvedMergedType*>(&left)->GetMergedTypes();
   } else {
     types.insert(refs.first);
   }
-  if (right->IsUnresolvedMergedReference()) {
-    std::set<uint16_t> right_types = right->GetMergedTypes();
+  if (right.IsUnresolvedMergedReference()) {
+    std::set<uint16_t> right_types =
+        down_cast<const UnresolvedMergedType*>(&right)->GetMergedTypes();
     types.insert(right_types.begin(), right_types.end());
   } else {
     types.insert(refs.second);
@@ -619,7 +519,7 @@
 const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
   if (!IsUnresolvedTypes()) {
     mirror::Class* super_klass = GetClass()->GetSuperClass();
-    if (super_klass != NULL) {
+    if (super_klass != nullptr) {
       // A super class of a precise type isn't precise as a precise type indicates the register
       // holds exactly that type.
       std::string temp;
@@ -638,33 +538,6 @@
   }
 }
 
-bool RegType::CanAccess(const RegType& other) const {
-  if (Equals(other)) {
-    return true;  // Trivial accessibility.
-  } else {
-    bool this_unresolved = IsUnresolvedTypes();
-    bool other_unresolved = other.IsUnresolvedTypes();
-    if (!this_unresolved && !other_unresolved) {
-      return GetClass()->CanAccess(other.GetClass());
-    } else if (!other_unresolved) {
-      return other.GetClass()->IsPublic();  // Be conservative, only allow if other is public.
-    } else {
-      return false;  // More complicated test not possible on unresolved types, be conservative.
-    }
-  }
-}
-
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
-  if ((access_flags & kAccPublic) != 0) {
-    return true;
-  }
-  if (!IsUnresolvedTypes()) {
-    return GetClass()->CanAccessMember(klass, access_flags);
-  } else {
-    return false;  // More complicated test not possible on unresolved types, be conservative.
-  }
-}
-
 bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
     // Primitive arrays will always resolve
@@ -704,106 +577,38 @@
   return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
 }
 
-ImpreciseConstType::ImpreciseConstType(uint32_t constat, uint16_t cache_id)
-  : ConstantType(constat, cache_id) {
-}
-
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (lhs.Equals(rhs)) {
-    return true;
-  } else {
-    if (lhs.IsBoolean()) {
-      return rhs.IsBooleanTypes();
-    } else if (lhs.IsByte()) {
-      return rhs.IsByteTypes();
-    } else if (lhs.IsShort()) {
-      return rhs.IsShortTypes();
-    } else if (lhs.IsChar()) {
-      return rhs.IsCharTypes();
-    } else if (lhs.IsInteger()) {
-      return rhs.IsIntegralTypes();
-    } else if (lhs.IsFloat()) {
-      return rhs.IsFloatTypes();
-    } else if (lhs.IsLongLo()) {
-      return rhs.IsLongTypes();
-    } else if (lhs.IsDoubleLo()) {
-      return rhs.IsDoubleTypes();
-    } else {
-      CHECK(lhs.IsReferenceTypes())
-          << "Unexpected register type in IsAssignableFrom: '"
-          << lhs << "' := '" << rhs << "'";
-      if (rhs.IsZero()) {
-        return true;  // All reference types can be assigned null.
-      } else if (!rhs.IsReferenceTypes()) {
-        return false;  // Expect rhs to be a reference type.
-      } else if (lhs.IsJavaLangObject()) {
-        return true;  // All reference types can be assigned to Object.
-      } else if (!strict && !lhs.IsUnresolvedTypes() && lhs.GetClass()->IsInterface()) {
-        // If we're not strict allow assignment to any interface, see comment in ClassJoin.
-        return true;
-      } else if (lhs.IsJavaLangObjectArray()) {
-        return rhs.IsObjectArrayTypes();  // All reference arrays may be assigned to Object[]
-      } else if (lhs.HasClass() && rhs.HasClass() &&
-                 lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) {
-        // We're assignable from the Class point-of-view.
-        return true;
-      } else {
-        // Unresolved types are only assignable for null and equality.
-        return false;
-      }
-    }
-  }
-}
-
-bool RegType::IsAssignableFrom(const RegType& src) const {
-  return AssignableFrom(*this, src, false);
-}
-
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
-  return AssignableFrom(*this, src, true);
-}
-
-int32_t ConstantType::ConstantValueLo() const {
-  DCHECK(IsConstantLo());
-  return constant_;
-}
-
-int32_t ConstantType::ConstantValueHi() const {
-  if (IsConstantHi() || IsPreciseConstantHi() || IsImpreciseConstantHi()) {
-    return constant_;
-  } else {
-    DCHECK(false);
-    return 0;
-  }
-}
-
 static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
   return a.IsConstantTypes() ? b : a;
 }
 
 const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
   DCHECK(!Equals(incoming_type));  // Trivial equality handled by caller
-  if (IsConflict()) {
+  // Perform pointer equality tests for conflict to avoid virtual method dispatch.
+  const ConflictType& conflict = reg_types->Conflict();
+  if (this == &conflict) {
+    DCHECK(IsConflict());
     return *this;  // Conflict MERGE * => Conflict
-  } else if (incoming_type.IsConflict()) {
+  } else if (&incoming_type == &conflict) {
+    DCHECK(incoming_type.IsConflict());
     return incoming_type;  // * MERGE Conflict => Conflict
   } else if (IsUndefined() || incoming_type.IsUndefined()) {
-    return reg_types->Conflict();  // Unknown MERGE * => Conflict
+    return conflict;  // Unknown MERGE * => Conflict
   } else if (IsConstant() && incoming_type.IsConstant()) {
-    int32_t val1 = ConstantValue();
-    int32_t val2 = incoming_type.ConstantValue();
+    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
+    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
+    int32_t val1 = type1.ConstantValue();
+    int32_t val2 = type2.ConstantValue();
     if (val1 >= 0 && val2 >= 0) {
       // +ve1 MERGE +ve2 => MAX(+ve1, +ve2)
       if (val1 >= val2) {
-        if (!IsPreciseConstant()) {
+        if (!type1.IsPreciseConstant()) {
           return *this;
         } else {
           return reg_types->FromCat1Const(val1, false);
         }
       } else {
-        if (!incoming_type.IsPreciseConstant()) {
-          return incoming_type;
+        if (!type2.IsPreciseConstant()) {
+          return type2;
         } else {
           return reg_types->FromCat1Const(val2, false);
         }
@@ -811,30 +616,30 @@
     } else if (val1 < 0 && val2 < 0) {
       // -ve1 MERGE -ve2 => MIN(-ve1, -ve2)
       if (val1 <= val2) {
-        if (!IsPreciseConstant()) {
+        if (!type1.IsPreciseConstant()) {
           return *this;
         } else {
           return reg_types->FromCat1Const(val1, false);
         }
       } else {
-        if (!incoming_type.IsPreciseConstant()) {
-          return incoming_type;
+        if (!type2.IsPreciseConstant()) {
+          return type2;
         } else {
           return reg_types->FromCat1Const(val2, false);
         }
       }
     } else {
       // Values are +ve and -ve, choose smallest signed type in which they both fit
-      if (IsConstantByte()) {
-        if (incoming_type.IsConstantByte()) {
+      if (type1.IsConstantByte()) {
+        if (type2.IsConstantByte()) {
           return reg_types->ByteConstant();
-        } else if (incoming_type.IsConstantShort()) {
+        } else if (type2.IsConstantShort()) {
           return reg_types->ShortConstant();
         } else {
           return reg_types->IntConstant();
         }
-      } else if (IsConstantShort()) {
-        if (incoming_type.IsConstantShort()) {
+      } else if (type1.IsConstantShort()) {
+        if (type2.IsConstantShort()) {
           return reg_types->ShortConstant();
         } else {
           return reg_types->IntConstant();
@@ -844,12 +649,16 @@
       }
     }
   } else if (IsConstantLo() && incoming_type.IsConstantLo()) {
-    int32_t val1 = ConstantValueLo();
-    int32_t val2 = incoming_type.ConstantValueLo();
+    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
+    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
+    int32_t val1 = type1.ConstantValueLo();
+    int32_t val2 = type2.ConstantValueLo();
     return reg_types->FromCat2ConstLo(val1 | val2, false);
   } else if (IsConstantHi() && incoming_type.IsConstantHi()) {
-    int32_t val1 = ConstantValueHi();
-    int32_t val2 = incoming_type.ConstantValueHi();
+    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
+    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
+    int32_t val1 = type1.ConstantValueHi();
+    int32_t val2 = type2.ConstantValueHi();
     return reg_types->FromCat2ConstHi(val1 | val2, false);
   } else if (IsIntegralTypes() && incoming_type.IsIntegralTypes()) {
     if (IsBooleanTypes() && incoming_type.IsBooleanTypes()) {
@@ -889,12 +698,12 @@
       // Something that is uninitialized hasn't had its constructor called. Mark any merge
       // of this type with something that is initialized as conflicting. The cases of a merge
       // with itself, 0 or Object are handled above.
-      return reg_types->Conflict();
+      return conflict;
     } else {  // Two reference types, compute Join
       mirror::Class* c1 = GetClass();
       mirror::Class* c2 = incoming_type.GetClass();
-      DCHECK(c1 != NULL && !c1->IsPrimitive());
-      DCHECK(c2 != NULL && !c2->IsPrimitive());
+      DCHECK(c1 != nullptr && !c1->IsPrimitive());
+      DCHECK(c2 != nullptr && !c2->IsPrimitive());
       mirror::Class* join_class = ClassJoin(c1, c2);
       if (c1 == join_class && !IsPreciseReference()) {
         return *this;
@@ -906,7 +715,7 @@
       }
     }
   } else {
-    return reg_types->Conflict();  // Unexpected types => Conflict
+    return conflict;  // Unexpected types => Conflict
   }
 }
 
@@ -933,7 +742,7 @@
     mirror::Class* common_elem = ClassJoin(s_ct, t_ct);
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), &common_elem);
-    DCHECK(array_class != NULL);
+    DCHECK(array_class != nullptr);
     return array_class;
   } else {
     size_t s_depth = s->Depth();
@@ -969,7 +778,7 @@
   }
 }
 
-void RegType::VisitRoots(RootCallback* callback, void* arg) {
+void RegType::VisitRoots(RootCallback* callback, void* arg) const {
   if (!klass_.IsNull()) {
     callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
   }
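
The dominant refactor in reg_type.cc above removes the virtual ConstantValue()/ConstantValueLo()/ConstantValueHi() accessors from the RegType base (whose base implementations could only LOG(FATAL)) and instead has callers test the kind first and then down_cast to ConstantType, which alone owns the value. A minimal sketch of that check-then-downcast pattern, with hypothetical types and a plain static_cast standing in for ART's debug-checked down_cast:

    #include <cassert>
    #include <iostream>

    class RegKind {
     public:
      virtual ~RegKind() = default;
      virtual bool IsConstant() const { return false; }
      // Note: no virtual ConstantValue() here; non-constant kinds have no value.
    };

    class ConstantKind : public RegKind {
     public:
      explicit ConstantKind(int value) : value_(value) {}
      bool IsConstant() const override { return true; }
      int ConstantValue() const { return value_; }  // exists only on the subclass
     private:
      int value_;
    };

    int ValueOf(const RegKind& type) {
      // Mirrors the new call sites: the IsConstant() check is what makes the
      // downcast safe; ART's down_cast asserts this in debug builds.
      assert(type.IsConstant());
      return static_cast<const ConstantKind*>(&type)->ConstantValue();
    }

    int main() {
      ConstantKind five(5);
      std::cout << ValueOf(five) << "\n";  // 5
    }
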
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 378b4c9..d429dfd 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -60,7 +60,9 @@
   virtual bool IsUninitializedReference() const { return false; }
   virtual bool IsUninitializedThisReference() const { return false; }
   virtual bool IsUnresolvedAndUninitializedReference() const { return false; }
-  virtual bool IsUnresolvedAndUninitializedThisReference() const { return false; }
+  virtual bool IsUnresolvedAndUninitializedThisReference() const {
+    return false;
+  }
   virtual bool IsUnresolvedMergedReference() const { return false; }
   virtual bool IsUnresolvedSuperClass() const { return false; }
   virtual bool IsReference() const { return false; }
@@ -73,90 +75,64 @@
   virtual bool IsImpreciseConstant() const { return false; }
   virtual bool IsConstantTypes() const { return false; }
   bool IsConstant() const {
-    return IsPreciseConstant() || IsImpreciseConstant();
+    return IsImpreciseConstant() || IsPreciseConstant();
   }
   bool IsConstantLo() const {
-    return IsPreciseConstantLo() || IsImpreciseConstantLo();
+    return IsImpreciseConstantLo() || IsPreciseConstantLo();
   }
   bool IsPrecise() const {
-    return IsPreciseConstantLo() || IsPreciseConstant() || IsPreciseConstantHi();
+    return IsPreciseConstantLo() || IsPreciseConstant() ||
+           IsPreciseConstantHi();
   }
-  bool IsLongConstant() const {
-    return IsConstantLo();
-  }
+  bool IsLongConstant() const { return IsConstantLo(); }
   bool IsConstantHi() const {
     return (IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  bool IsLongConstantHigh() const {
-    return IsConstantHi();
-  }
+  bool IsLongConstantHigh() const { return IsConstantHi(); }
   virtual bool IsUninitializedTypes() const { return false; }
-  bool IsUnresolvedTypes() const {
-    return IsUnresolvedReference() || IsUnresolvedAndUninitializedReference() ||
-           IsUnresolvedAndUninitializedThisReference() ||
-           IsUnresolvedMergedReference() || IsUnresolvedSuperClass();
-  }
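+  // Note: overridden by each unresolved type below, replacing the old
+  // aggregate check.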
+  virtual bool IsUnresolvedTypes() const { return false; }
 
   bool IsLowHalf() const {
-    return (IsLongLo() || IsDoubleLo() || IsPreciseConstantLo() ||
-            IsImpreciseConstantLo());
+    return (IsLongLo() || IsDoubleLo() || IsPreciseConstantLo() || IsImpreciseConstantLo());
   }
   bool IsHighHalf() const {
-    return (IsLongHi() || IsDoubleHi() || IsPreciseConstantHi() ||
-            IsImpreciseConstantHi());
+    return (IsLongHi() || IsDoubleHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  bool IsLongOrDoubleTypes() const {
-    return IsLowHalf();
-  }
+  bool IsLongOrDoubleTypes() const { return IsLowHalf(); }
   // Check this is the low half, and that type_h is its matching high-half.
   inline bool CheckWidePair(const RegType& type_h) const {
     if (IsLowHalf()) {
-      return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
-              (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
-              (IsImpreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
+      return ((IsImpreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
               (IsImpreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
+              (IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
+              (IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
               (IsDoubleLo() && type_h.IsDoubleHi()) ||
               (IsLongLo() && type_h.IsLongHi()));
     }
     return false;
   }
   // The high half that corresponds to this low half
-  const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& HighHalf(RegTypeCache* cache) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsConstantBoolean() const {
-    return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
-  }
-  virtual bool IsConstantChar() const {
-    return false;
-  }
-  virtual bool IsConstantByte() const {
-    return false;
-  }
-  virtual bool IsConstantShort() const {
-    return false;
-  }
-  virtual bool IsOne() const {
-    return false;
-  }
-  virtual bool IsZero() const {
-    return false;
-  }
+  bool IsConstantBoolean() const;
+  virtual bool IsConstantChar() const { return false; }
+  virtual bool IsConstantByte() const { return false; }
+  virtual bool IsConstantShort() const { return false; }
+  virtual bool IsOne() const { return false; }
+  virtual bool IsZero() const { return false; }
   bool IsReferenceTypes() const {
     return IsNonZeroReferenceTypes() || IsZero();
   }
-  virtual bool IsNonZeroReferenceTypes() const {
-    return false;
-  }
+  virtual bool IsNonZeroReferenceTypes() const { return false; }
   bool IsCategory1Types() const {
-    return IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() || IsShort() ||
-        IsBoolean();
+    return IsChar() || IsInteger() || IsFloat() || IsConstant() || IsByte() ||
+           IsShort() || IsBoolean();
   }
   bool IsCategory2Types() const {
     return IsLowHalf();  // Don't expect explicit testing of high halves
   }
-  bool IsBooleanTypes() const {
-    return IsBoolean() || IsConstantBoolean();
-  }
+  bool IsBooleanTypes() const { return IsBoolean() || IsConstantBoolean(); }
   bool IsByteTypes() const {
     return IsConstantByte() || IsByte() || IsBoolean();
   }
@@ -167,48 +143,40 @@
     return IsChar() || IsBooleanTypes() || IsConstantChar();
   }
   bool IsIntegralTypes() const {
-    return IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() || IsBoolean();
+    return IsInteger() || IsConstant() || IsByte() || IsShort() || IsChar() ||
+           IsBoolean();
   }
-  // Give the constant value encoded, but this shouldn't be called in the general case.
-  virtual int32_t ConstantValue() const;
-  virtual int32_t ConstantValueLo() const;
-  virtual int32_t ConstantValueHi() const;
-  bool IsArrayIndexTypes() const {
-    return IsIntegralTypes();
-  }
+  bool IsArrayIndexTypes() const { return IsIntegralTypes(); }
   // Float type may be derived from any constant type
-  bool IsFloatTypes() const {
-    return IsFloat() || IsConstant();
-  }
-  bool IsLongTypes() const {
-    return IsLongLo() || IsLongConstant();
-  }
+  bool IsFloatTypes() const { return IsFloat() || IsConstant(); }
+  bool IsLongTypes() const { return IsLongLo() || IsLongConstant(); }
   bool IsLongHighTypes() const {
-    return (IsLongHi() ||
-            IsPreciseConstantHi() ||
-            IsImpreciseConstantHi());
+    return (IsLongHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  bool IsDoubleTypes() const {
-    return IsDoubleLo() || IsLongConstant();
-  }
+  bool IsDoubleTypes() const { return IsDoubleLo() || IsLongConstant(); }
   bool IsDoubleHighTypes() const {
     return (IsDoubleHi() || IsPreciseConstantHi() || IsImpreciseConstantHi());
   }
-  virtual bool IsLong() const {
-    return false;
+  virtual bool IsLong() const { return false; }
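+  // Non-virtual fast path: checks klass_ directly and, in debug builds,
+  // verifies that it agrees with the virtual HasClassVirtual().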
+  bool HasClass() const {
+    bool result = !klass_.IsNull();
+    DCHECK_EQ(result, HasClassVirtual());
+    return result;
   }
-  virtual bool HasClass() const {
-    return false;
-  }
+  virtual bool HasClassVirtual() const { return false; }
   bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   Primitive::Type GetPrimitiveType() const;
-  bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsJavaLangObjectArray() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::string& GetDescriptor() const {
-    DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
-                          !IsUnresolvedSuperClass()));
+    DCHECK(HasClass() ||
+           (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
+            !IsUnresolvedSuperClass()));
     return descriptor_;
   }
   mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -217,53 +185,65 @@
     DCHECK(HasClass());
     return klass_.Read();
   }
-  uint16_t GetId() const {
-    return cache_id_;
-  }
+  uint16_t GetId() const { return cache_id_; }
   const RegType& GetSuperClass(RegTypeCache* cache) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual std::string Dump() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
 
   // Can this type access other?
-  bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool CanAccess(const RegType& other) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type access a member with the given properties?
   bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Can this type be assigned by src?
-  // Note: Object and interface types may always be assigned to one another, see comment on
+  // Note: Object and interface types may always be assigned to one another;
+  // see comment on
   // ClassJoin.
-  bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsAssignableFrom(const RegType& src) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
+  // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
+  // allow assignment to
   // an interface from an Object.
   bool IsStrictlyAssignableFrom(const RegType& src) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Are these RegTypes the same?
-  bool Equals(const RegType& other) const {
-    return GetId() == other.GetId();
-  }
+  bool Equals(const RegType& other) const { return GetId() == other.GetId(); }
 
-  // Compute the merge of this register from one edge (path) with incoming_type from another.
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+  // Compute the merge of this register from one edge (path) with incoming_type
+  // from another.
+  const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
-   * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
-   * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
-   * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
+   * A basic Join operation on classes. For a pair of types S and T the Join,
+   * written S v T = J, is S <: J, T <: J and for-all U such that S <: U,
+   * T <: U then J <: U. That is, J is the parent of S and T such that there
+   * isn't a parent of both S and T that isn't also the parent of J (ie J
    * is the deepest (lowest upper bound) parent of S and T).
    *
-   * This operation applies for regular classes and arrays, however, for interface types there
-   * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
-   * order by introducing sets of types, however, the only operation permissible on an interface is
-   * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
-   * types until an invoke-interface call on the interface typed reference at runtime and allow
-   * the perversion of Object being assignable to an interface type (note, however, that we don't
-   * allow assignment of Object or Interface to any concrete class and are therefore type safe).
+   * This operation applies for regular classes and arrays, however, for
+   * interface types there needn't be a partial ordering on the types. We
+   * could solve the problem of a lack of a partial order by introducing sets
+   * of types, however, the only operation permissible on an interface is
+   * invoke-interface. In the tradition of Java verifiers [1] we defer the
+   * verification of interface types until an invoke-interface call on the
+   * interface typed reference at runtime and allow the perversion of Object
+   * being assignable to an interface type (note, however, that we don't
+   * allow assignment of Object or Interface to any concrete class and are
+   * therefore type safe).
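+   *
+   * For example, with S = java/lang/String and T = java/lang/Class, the join
+   * S v T is java/lang/Object, their closest common superclass.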
    *
    * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
    */
@@ -272,11 +252,12 @@
 
   virtual ~RegType() {}
 
-  void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VisitRoots(RootCallback* callback, void* arg) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  protected:
-  RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  RegType(mirror::Class* klass, const std::string& descriptor,
+          uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
@@ -285,414 +266,402 @@
 
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-
   const std::string descriptor_;
-  mutable GcRoot<mirror::Class> klass_;  // Non-const only due to moving classes.
+  mutable GcRoot<mirror::Class>
+      klass_;  // Non-const only due to moving classes.
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
 
  private:
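+  // Shared implementation of IsAssignableFrom and IsStrictlyAssignableFrom;
+  // 'strict' disallows assigning an Object to an interface type.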
+  static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   DISALLOW_COPY_AND_ASSIGN(RegType);
 };
 
 // Bottom type.
-class ConflictType : public RegType {
+class ConflictType FINAL : public RegType {
  public:
-  bool IsConflict() const {
-    return true;
-  }
+  bool IsConflict() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Conflict instance.
-  static ConflictType* GetInstance();
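+  // Marked PURE so the compiler may fold repeated calls.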
+  static const ConflictType* GetInstance() PURE;
 
   // Create the singleton instance.
-  static ConflictType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                      uint16_t cache_id)
+  static const ConflictType* CreateInstance(mirror::Class* klass,
+                                            const std::string& descriptor,
+                                            uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Destroy the singleton instance.
   static void Destroy();
 
  private:
-  ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : RegType(klass, descriptor, cache_id) {
-  }
+  ConflictType(mirror::Class* klass, const std::string& descriptor,
+               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(klass, descriptor, cache_id) {}
 
-  static ConflictType* instance_;
+  static const ConflictType* instance_;
 };
 
-// A variant of the bottom type used to specify an undefined value in the incoming registers.
+// A variant of the bottom type used to specify an undefined value in the
+// incoming registers.
 // Merging with UndefinedType yields ConflictType which is the true bottom.
-class UndefinedType : public RegType {
+class UndefinedType FINAL : public RegType {
  public:
-  bool IsUndefined() const {
-    return true;
-  }
+  bool IsUndefined() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the singleton Undefined instance.
-  static UndefinedType* GetInstance();
+  static const UndefinedType* GetInstance() PURE;
 
   // Create the singleton instance.
-  static UndefinedType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                       uint16_t cache_id)
+  static const UndefinedType* CreateInstance(mirror::Class* klass,
+                                             const std::string& descriptor,
+                                             uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Destroy the singleton instance.
   static void Destroy();
 
  private:
-  UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : RegType(klass, descriptor, cache_id) {
-  }
+  UndefinedType(mirror::Class* klass, const std::string& descriptor,
+                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(klass, descriptor, cache_id) {}
 
-  virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  static UndefinedType* instance_;
+  static const UndefinedType* instance_;
 };
 
 class PrimitiveType : public RegType {
  public:
-  PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class Cat1Type : public PrimitiveType {
  public:
-  Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Cat1Type(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 class IntegerType : public Cat1Type {
  public:
-  bool IsInteger() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id)
+  bool IsInteger() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const IntegerType* CreateInstance(mirror::Class* klass,
+                                           const std::string& descriptor,
+                                           uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static IntegerType* GetInstance();
+  static const IntegerType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  IntegerType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static IntegerType* instance_;
+  IntegerType(mirror::Class* klass, const std::string& descriptor,
+              uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const IntegerType* instance_;
 };
 
-class BooleanType : public Cat1Type {
+class BooleanType FINAL : public Cat1Type {
  public:
-  bool IsBoolean() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                     uint16_t cache_id)
+  bool IsBoolean() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const BooleanType* CreateInstance(mirror::Class* klass,
+                                           const std::string& descriptor,
+                                           uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static BooleanType* GetInstance();
+  static const BooleanType* GetInstance() PURE;
   static void Destroy();
- private:
-  BooleanType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
 
-  static BooleanType* instance;
+ private:
+  BooleanType(mirror::Class* klass, const std::string& descriptor,
+              uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+
+  static const BooleanType* instance_;
 };
 
-class ByteType : public Cat1Type {
+class ByteType FINAL : public Cat1Type {
  public:
-  bool IsByte() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                  uint16_t cache_id)
+  bool IsByte() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const ByteType* CreateInstance(mirror::Class* klass,
+                                        const std::string& descriptor,
+                                        uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ByteType* GetInstance();
+  static const ByteType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  ByteType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static ByteType* instance_;
+  ByteType(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const ByteType* instance_;
 };
 
-class ShortType : public Cat1Type {
+class ShortType FINAL : public Cat1Type {
  public:
-  bool IsShort() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id)
+  bool IsShort() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const ShortType* CreateInstance(mirror::Class* klass,
+                                         const std::string& descriptor,
+                                         uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static ShortType* GetInstance();
+  static const ShortType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  ShortType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static ShortType* instance_;
+  ShortType(mirror::Class* klass, const std::string& descriptor,
+            uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const ShortType* instance_;
 };
 
-class CharType : public Cat1Type {
+class CharType FINAL : public Cat1Type {
  public:
-  bool IsChar() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                  uint16_t cache_id)
+  bool IsChar() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const CharType* CreateInstance(mirror::Class* klass,
+                                        const std::string& descriptor,
+                                        uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static CharType* GetInstance();
+  static const CharType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  CharType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static CharType* instance_;
+  CharType(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const CharType* instance_;
 };
 
-class FloatType : public Cat1Type {
+class FloatType FINAL : public Cat1Type {
  public:
-  bool IsFloat() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                   uint16_t cache_id)
+  bool IsFloat() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const FloatType* CreateInstance(mirror::Class* klass,
+                                         const std::string& descriptor,
+                                         uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FloatType* GetInstance();
+  static const FloatType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  FloatType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat1Type(klass, descriptor, cache_id) {
-  }
-  static FloatType* instance_;
+  FloatType(mirror::Class* klass, const std::string& descriptor,
+            uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat1Type(klass, descriptor, cache_id) {}
+  static const FloatType* instance_;
 };
 
 class Cat2Type : public PrimitiveType {
  public:
-  Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  Cat2Type(mirror::Class* klass, const std::string& descriptor,
+           uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class LongLoType : public Cat2Type {
+class LongLoType FINAL : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsLongLo() const {
-    return true;
-  }
-  bool IsLong() const {
-    return true;
-  }
-  static LongLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                    uint16_t cache_id)
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsLongLo() const OVERRIDE { return true; }
+  bool IsLong() const OVERRIDE { return true; }
+  static const LongLoType* CreateInstance(mirror::Class* klass,
+                                          const std::string& descriptor,
+                                          uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static LongLoType* GetInstance();
+  static const LongLoType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  LongLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static LongLoType* instance_;
+  LongLoType(mirror::Class* klass, const std::string& descriptor,
+             uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const LongLoType* instance_;
 };
 
-class LongHiType : public Cat2Type {
+class LongHiType FINAL : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsLongHi() const {
-    return true;
-  }
-  static LongHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                    uint16_t cache_id)
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsLongHi() const OVERRIDE { return true; }
+  static const LongHiType* CreateInstance(mirror::Class* klass,
+                                          const std::string& descriptor,
+                                          uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static LongHiType* GetInstance();
+  static const LongHiType* GetInstance() PURE;
   static void Destroy();
+
  private:
-  LongHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static LongHiType* instance_;
+  LongHiType(mirror::Class* klass, const std::string& descriptor,
+             uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const LongHiType* instance_;
 };
 
-class DoubleLoType : public Cat2Type {
+class DoubleLoType FINAL : public Cat2Type {
  public:
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsDoubleLo() const {
-    return true;
-  }
-  bool IsDouble() const {
-    return true;
-  }
-  static DoubleLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsDoubleLo() const OVERRIDE { return true; }
+  bool IsDouble() const OVERRIDE { return true; }
+  static const DoubleLoType* CreateInstance(mirror::Class* klass,
+                                            const std::string& descriptor,
+                                            uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static const DoubleLoType* GetInstance() PURE;
+  static void Destroy();
+
+ private:
+  DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const DoubleLoType* instance_;
+};
+
+class DoubleHiType FINAL : public Cat2Type {
+ public:
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsDoubleHi() const OVERRIDE { return true; }
+  static const DoubleHiType* CreateInstance(mirror::Class* klass,
+                                      const std::string& descriptor,
                                       uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static DoubleLoType* GetInstance();
+  static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
- private:
-  DoubleLoType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static DoubleLoType* instance_;
-};
 
-class DoubleHiType : public Cat2Type {
- public:
-  std::string Dump() const;
-  virtual bool IsDoubleHi() const {
-    return true;
-  }
-  static DoubleHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
-                                      uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static DoubleHiType* GetInstance();
-  static void Destroy();
  private:
-  DoubleHiType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : Cat2Type(klass, descriptor, cache_id) {
-  }
-  static DoubleHiType* instance_;
+  DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+               uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : Cat2Type(klass, descriptor, cache_id) {}
+  static const DoubleHiType* instance_;
 };
 
 class ConstantType : public RegType {
  public:
-  ConstantType(uint32_t constat, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(nullptr, "", cache_id), constant_(constant) {
+  }
 
-  // If this is a 32-bit constant, what is the value? This value may be imprecise in which case
-  // the value represents part of the integer range of values that may be held in the register.
+  // If this is a 32-bit constant, what is the value? This value may be
+  // imprecise, in which case the value represents part of the integer range
+  // of values that may be held in the register.
   int32_t ConstantValue() const {
     DCHECK(IsConstantTypes());
     return constant_;
   }
-  int32_t ConstantValueLo() const;
-  int32_t ConstantValueHi() const;
 
-  bool IsZero() const {
+  int32_t ConstantValueLo() const {
+    DCHECK(IsConstantLo());
+    return constant_;
+  }
+
+  int32_t ConstantValueHi() const {
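+    // Only meaningful for the high half of a wide constant; debug builds
+    // abort otherwise and release builds return 0.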
+    if (IsConstantHi()) {
+      return constant_;
+    } else {
+      DCHECK(false);
+      return 0;
+    }
+  }
+
+  bool IsZero() const OVERRIDE {
     return IsPreciseConstant() && ConstantValue() == 0;
   }
-  bool IsOne() const {
+  bool IsOne() const OVERRIDE {
     return IsPreciseConstant() && ConstantValue() == 1;
   }
 
-  bool IsConstantChar() const {
+  bool IsConstantChar() const OVERRIDE {
     return IsConstant() && ConstantValue() >= 0 &&
            ConstantValue() <= std::numeric_limits<jchar>::max();
   }
-  bool IsConstantByte() const {
+  bool IsConstantByte() const OVERRIDE {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<jbyte>::min() &&
            ConstantValue() <= std::numeric_limits<jbyte>::max();
   }
-  bool IsConstantShort() const {
+  bool IsConstantShort() const OVERRIDE {
     return IsConstant() &&
            ConstantValue() >= std::numeric_limits<jshort>::min() &&
            ConstantValue() <= std::numeric_limits<jshort>::max();
   }
-  virtual bool IsConstantTypes() const { return true; }
+  virtual bool IsConstantTypes() const OVERRIDE { return true; }
 
  private:
   const uint32_t constant_;
 };
 
-class PreciseConstType : public ConstantType {
+class PreciseConstType FINAL : public ConstantType {
  public:
-  PreciseConstType(uint32_t constat, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstantType(constat, cache_id) {
-  }
+  PreciseConstType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
 
-  bool IsPreciseConstant() const {
-    return true;
-  }
+  bool IsPreciseConstant() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class PreciseConstLoType : public ConstantType {
+class PreciseConstLoType FINAL : public ConstantType {
  public:
-  PreciseConstLoType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstantType(constat, cache_id) {
-  }
-  bool IsPreciseConstantLo() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  PreciseConstLoType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsPreciseConstantLo() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class PreciseConstHiType : public ConstantType {
+class PreciseConstHiType FINAL : public ConstantType {
  public:
-  PreciseConstHiType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : ConstantType(constat, cache_id) {
-  }
-  bool IsPreciseConstantHi() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  PreciseConstHiType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsPreciseConstantHi() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class ImpreciseConstType : public ConstantType {
+class ImpreciseConstType FINAL : public ConstantType {
  public:
-  ImpreciseConstType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool IsImpreciseConstant() const {
-    return true;
-  }
+  ImpreciseConstType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsImpreciseConstant() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class ImpreciseConstLoType : public ConstantType {
+class ImpreciseConstLoType FINAL : public ConstantType {
  public:
-  ImpreciseConstLoType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstantType(constat, cache_id) {
-  }
-  bool IsImpreciseConstantLo() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsImpreciseConstantLo() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class ImpreciseConstHiType : public ConstantType {
+class ImpreciseConstHiType FINAL : public ConstantType {
  public:
-  ImpreciseConstHiType(uint32_t constat, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : ConstantType(constat, cache_id) {
-  }
-  bool IsImpreciseConstantHi() const {
-    return true;
-  }
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : ConstantType(constant, cache_id) {}
+  bool IsImpreciseConstantHi() const OVERRIDE { return true; }
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
+// Common parent of all uninitialized types. Uninitialized types are created
+// by "new" dex
 // instructions and must be passed to a constructor.
 class UninitializedType : public RegType {
  public:
-  UninitializedType(mirror::Class* klass, const std::string& descriptor, uint32_t allocation_pc,
-                    uint16_t cache_id)
-      : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {
-  }
+  UninitializedType(mirror::Class* klass, const std::string& descriptor,
+                    uint32_t allocation_pc, uint16_t cache_id)
+      : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
-  bool IsUninitializedTypes() const;
-  bool IsNonZeroReferenceTypes() const;
+  bool IsUninitializedTypes() const OVERRIDE;
+  bool IsNonZeroReferenceTypes() const OVERRIDE;
 
   uint32_t GetAllocationPc() const {
     DCHECK(IsUninitializedTypes());
@@ -704,30 +673,27 @@
 };
 
 // Similar to ReferenceType but not yet having been passed to a constructor.
-class UninitializedReferenceType : public UninitializedType {
+class UninitializedReferenceType FINAL : public UninitializedType {
  public:
-  UninitializedReferenceType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedReferenceType(mirror::Class* klass,
+                             const std::string& descriptor,
                              uint32_t allocation_pc, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UninitializedType(klass, descriptor, allocation_pc, cache_id) {
-  }
+      : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
 
-  bool IsUninitializedReference() const {
-    return true;
-  }
+  bool IsUninitializedReference() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
-class UnresolvedUninitializedRefType : public UninitializedType {
+// Similar to UnresolvedReferenceType but not yet having been passed to a
+// constructor.
+class UnresolvedUninitializedRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedRefType(const std::string& descriptor, uint32_t allocation_pc,
-                                 uint16_t cache_id)
+  UnresolvedUninitializedRefType(const std::string& descriptor,
+                                 uint32_t allocation_pc, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : UninitializedType(NULL, descriptor, allocation_pc, cache_id) {
     if (kIsDebugBuild) {
@@ -735,19 +701,22 @@
     }
   }
 
-  bool IsUnresolvedAndUninitializedReference() const {
-    return true;
-  }
+  bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// Similar to UninitializedReferenceType but special case for the this argument of a constructor.
-class UninitializedThisReferenceType : public UninitializedType {
+// Similar to UninitializedReferenceType but special case for the this argument
+// of a constructor.
+class UninitializedThisReferenceType FINAL : public UninitializedType {
  public:
-  UninitializedThisReferenceType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedThisReferenceType(mirror::Class* klass,
+                                 const std::string& descriptor,
                                  uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
@@ -756,23 +725,20 @@
     }
   }
 
-  virtual bool IsUninitializedThisReference() const {
-    return true;
-  }
+  bool IsUninitializedThisReference() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-class UnresolvedUninitializedThisRefType : public UninitializedType {
+class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedThisRefType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedUninitializedThisRefType(const std::string& descriptor,
+                                     uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       : UninitializedType(NULL, descriptor, 0, cache_id) {
     if (kIsDebugBuild) {
@@ -780,112 +746,108 @@
     }
   }
 
-  bool IsUnresolvedAndUninitializedThisReference() const {
-    return true;
-  }
+  bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// A type of register holding a reference to an Object of type GetClass or a sub-class.
-class ReferenceType : public RegType {
+// A type of register holding a reference to an Object of type GetClass or a
+// sub-class.
+class ReferenceType FINAL : public RegType {
  public:
-  ReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-     : RegType(klass, descriptor, cache_id) {
-  }
+  ReferenceType(mirror::Class* klass, const std::string& descriptor,
+                uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(klass, descriptor, cache_id) {}
 
-  bool IsReference() const {
-    return true;
-  }
+  bool IsReference() const OVERRIDE { return true; }
 
-  bool IsNonZeroReferenceTypes() const {
-    return true;
-  }
+  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
-// A type of register holding a reference to an Object of type GetClass and only an object of that
+// A type of register holding a reference to an Object of type GetClass and only
+// an object of that
 // type.
-class PreciseReferenceType : public RegType {
+class PreciseReferenceType FINAL : public RegType {
  public:
-  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
+  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+                       uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsPreciseReference() const {
-    return true;
-  }
+  bool IsPreciseReference() const OVERRIDE { return true; }
 
-  bool IsNonZeroReferenceTypes() const {
-    return true;
-  }
+  bool IsNonZeroReferenceTypes() const OVERRIDE { return true; }
 
-  bool HasClass() const {
-    return true;
-  }
+  bool HasClassVirtual() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
   UnresolvedType(const std::string& descriptor, uint16_t cache_id)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : RegType(NULL, descriptor, cache_id) {
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : RegType(NULL, descriptor, cache_id) {}
 
-  bool IsNonZeroReferenceTypes() const;
+  bool IsNonZeroReferenceTypes() const OVERRIDE;
 };
 
-// Similar to ReferenceType except the Class couldn't be loaded. Assignability and other tests made
+// Similar to ReferenceType except the Class couldn't be loaded. Assignability
+// and other tests made
 // of this type must be conservative.
-class UnresolvedReferenceType : public UnresolvedType {
+class UnresolvedReferenceType FINAL : public UnresolvedType {
  public:
   UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
-     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) {
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : UnresolvedType(descriptor, cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
   }
 
-  bool IsUnresolvedReference() const {
-    return true;
-  }
+  bool IsUnresolvedReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 };
 
 // Type representing the super-class of an unresolved type.
-class UnresolvedSuperClass : public UnresolvedType {
+class UnresolvedSuperClass FINAL : public UnresolvedType {
  public:
-  UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache, uint16_t cache_id)
+  UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
+                       uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UnresolvedType("", cache_id), unresolved_child_id_(child_id),
+      : UnresolvedType("", cache_id),
+        unresolved_child_id_(child_id),
         reg_type_cache_(reg_type_cache) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
   }
 
-  bool IsUnresolvedSuperClass() const {
-    return true;
-  }
+  bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
+
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
 
   uint16_t GetUnresolvedSuperClassChildId() const {
     DCHECK(IsUnresolvedSuperClass());
     return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -894,14 +856,17 @@
   const RegTypeCache* const reg_type_cache_;
 };
 
-// A merge of two unresolved types. If the types were resolved this may be Conflict or another
+// A merge of two unresolved types. If the types were resolved this may be
+// Conflict or another
 // known ReferenceType.
-class UnresolvedMergedType : public UnresolvedType {
+class UnresolvedMergedType FINAL : public UnresolvedType {
  public:
-  UnresolvedMergedType(uint16_t left_id, uint16_t right_id, const RegTypeCache* reg_type_cache,
-                       uint16_t cache_id)
+  UnresolvedMergedType(uint16_t left_id, uint16_t right_id,
+                       const RegTypeCache* reg_type_cache, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : UnresolvedType("", cache_id), reg_type_cache_(reg_type_cache), merged_types_(left_id, right_id) {
+      : UnresolvedType("", cache_id),
+        reg_type_cache_(reg_type_cache),
+        merged_types_(left_id, right_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -916,11 +881,11 @@
   // The complete set of merged types.
   std::set<uint16_t> GetMergedTypes() const;
 
-  bool IsUnresolvedMergedReference() const {
-    return true;
-  }
+  bool IsUnresolvedMergedReference() const OVERRIDE { return true; }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool IsUnresolvedTypes() const OVERRIDE { return true; }
+
+  std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c9..9024a7d 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -17,16 +17,19 @@
 #ifndef ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
 #define ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
 
+#include "class_linker.h"
+#include "mirror/class-inl.h"
+#include "mirror/string.h"
+#include "mirror/throwable.h"
 #include "reg_type.h"
 #include "reg_type_cache.h"
-#include "class_linker.h"
 
 namespace art {
 namespace verifier {
 
 inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
   DCHECK_LT(id, entries_.size());
-  RegType* result = entries_[id];
+  const RegType* result = entries_[id];
   DCHECK(result != NULL);
   return *result;
 }
@@ -40,6 +43,81 @@
   return FromCat1NonSmallConstant(value, precise);
 }
 
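+// These accessors return imprecise constants standing for a whole range of
+// values; an extreme value of the range serves as the representative.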
+inline const ImpreciseConstType& RegTypeCache::ByteConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::CharConstant() {
+  int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
+  const ConstantType& result = FromCat1Const(jchar_max, false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::ShortConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::IntConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::PosByteConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const ImpreciseConstType& RegTypeCache::PosShortConstant() {
+  const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+  DCHECK(result.IsImpreciseConstant());
+  return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+inline const PreciseReferenceType& RegTypeCache::JavaLangClass() {
+  const RegType* result = &FromClass("Ljava/lang/Class;", mirror::Class::GetJavaLangClass(), true);
+  DCHECK(result->IsPreciseReference());
+  return *down_cast<const PreciseReferenceType*>(result);
+}
+
+inline const PreciseReferenceType& RegTypeCache::JavaLangString() {
+  // String is final and therefore always precise.
+  const RegType* result = &FromClass("Ljava/lang/String;", mirror::String::GetJavaLangString(),
+                                     true);
+  DCHECK(result->IsPreciseReference());
+  return *down_cast<const PreciseReferenceType*>(result);
+}
+
+inline const RegType& RegTypeCache::JavaLangThrowable(bool precise) {
+  const RegType* result = &FromClass("Ljava/lang/Throwable;",
+                                     mirror::Throwable::GetJavaLangThrowable(), precise);
+  if (precise) {
+    DCHECK(result->IsPreciseReference());
+    return *down_cast<const PreciseReferenceType*>(result);
+  } else {
+    DCHECK(result->IsReference());
+    return *down_cast<const ReferenceType*>(result);
+  }
+}
+
+inline const RegType& RegTypeCache::JavaLangObject(bool precise) {
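+  // Note: the superclass of java.lang.Class is java.lang.Object.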
+  const RegType* result = &FromClass("Ljava/lang/Object;",
+                                     mirror::Class::GetJavaLangClass()->GetSuperClass(), precise);
+  if (precise) {
+    DCHECK(result->IsPreciseReference());
+    return *down_cast<const PreciseReferenceType*>(result);
+  } else {
+    DCHECK(result->IsReference());
+    return *down_cast<const ReferenceType*>(result);
+  }
+}
+
 }  // namespace verifier
 }  // namespace art
 #endif  // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 92a005b..bffec4b 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -21,15 +21,16 @@
 #include "dex_file-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
+#include "reg_type-inl.h"
 
 namespace art {
 namespace verifier {
 
 bool RegTypeCache::primitive_initialized_ = false;
 uint16_t RegTypeCache::primitive_count_ = 0;
-PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
 
-static bool MatchingPrecisionForClass(RegType* entry, bool precise)
+static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
     // We were or weren't looking for a precise reference and we found what we need.
@@ -98,7 +99,7 @@
 };
 
 const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
-  CHECK(RegTypeCache::primitive_initialized_);
+  DCHECK(RegTypeCache::primitive_initialized_);
   switch (prim_type) {
     case Primitive::kPrimBoolean:
       return *BooleanType::GetInstance();
@@ -123,7 +124,7 @@
 }
 
 bool RegTypeCache::MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) {
-  RegType* entry = entries_[idx];
+  const RegType* entry = entries_[idx];
   if (descriptor != entry->descriptor_) {
     return false;
   }
@@ -143,11 +144,11 @@
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(loader));
-  mirror::Class* klass = NULL;
+  mirror::Class* klass = nullptr;
   if (can_load_classes_) {
     klass = class_linker->FindClass(self, descriptor, class_loader);
   } else {
-    klass = class_linker->LookupClass(descriptor, loader);
+    klass = class_linker->LookupClass(self, descriptor, loader);
     if (klass != nullptr && !klass->IsLoaded()) {
       // We found the class, but without it being loaded it's not safe for use.
       klass = nullptr;
@@ -169,7 +170,7 @@
   // Class not found in the cache; we will create a new type for it.
   // Try resolving class.
   mirror::Class* klass = ResolveClass(descriptor, loader);
-  if (klass != NULL) {
+  if (klass != nullptr) {
     // Class resolved, first look for the class in the list of entries
     // Class was not found, must create new type.
     // To pass the verification, the type should be imprecise,
@@ -219,7 +220,7 @@
   } else {
     // Look for the reference in the list of entries to have.
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
         return *cur_entry;
       }
@@ -237,8 +238,8 @@
 }
 
 RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
-  if (kIsDebugBuild && can_load_classes) {
-    Thread::Current()->AssertThreadSuspensionIsAllowable();
+  if (kIsDebugBuild) {
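+    // Note: the (gAborting == 0) argument presumably relaxes the assert while the
+    // runtime is aborting, so constructing a cache on the abort path stays legal.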
+    Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
   }
   entries_.reserve(64);
   FillPrimitiveAndSmallConstantTypes();
@@ -251,7 +252,7 @@
     // All entries are from the global pool, nothing to delete.
     return;
   }
-  std::vector<RegType*>::iterator non_primitive_begin = entries_.begin();
+  std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
   std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
   STLDeleteContainerPointers(non_primitive_begin, entries_.end());
 }
@@ -271,7 +272,7 @@
     DoubleLoType::Destroy();
     DoubleHiType::Destroy();
     for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
-      PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
+      const PreciseConstType* type = small_precise_constants_[value - kMinSmallConstant];
       delete type;
       small_precise_constants_[value - kMinSmallConstant] = nullptr;
     }
@@ -281,14 +282,14 @@
 }
 
 template <class Type>
-Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
-  mirror::Class* klass = NULL;
+const Type* RegTypeCache::CreatePrimitiveTypeInstance(const std::string& descriptor) {
+  mirror::Class* klass = nullptr;
   // Try loading the class from linker.
   if (!descriptor.empty()) {
     klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
                                                                        descriptor.c_str());
   }
-  Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
+  const Type* entry = Type::CreateInstance(klass, descriptor, RegTypeCache::primitive_count_);
   RegTypeCache::primitive_count_++;
   return entry;
 }
@@ -330,10 +331,10 @@
   }
   // Check if entry already exists.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsUnresolvedMergedReference()) {
       std::set<uint16_t> cur_entry_types =
-          (down_cast<UnresolvedMergedType*>(cur_entry))->GetMergedTypes();
+          (down_cast<const UnresolvedMergedType*>(cur_entry))->GetMergedTypes();
       if (cur_entry_types == types) {
         return *cur_entry;
       }
@@ -353,10 +354,10 @@
 const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
   // Check if entry already exists.
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsUnresolvedSuperClass()) {
-      UnresolvedSuperClass* tmp_entry =
-          down_cast<UnresolvedSuperClass*>(cur_entry);
+      const UnresolvedSuperClass* tmp_entry =
+          down_cast<const UnresolvedSuperClass*>(cur_entry);
       uint16_t unresolved_super_child_id =
           tmp_entry->GetUnresolvedSuperClassChildId();
       if (unresolved_super_child_id == child.GetId()) {
@@ -370,27 +371,28 @@
 }
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
-  UninitializedType* entry = NULL;
+  UninitializedType* entry = nullptr;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedAndUninitializedReference() &&
-          down_cast<UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc() == allocation_pc &&
+          down_cast<const UnresolvedUninitializedRefType*>(cur_entry)->GetAllocationPc()
+              == allocation_pc &&
           (cur_entry->GetDescriptor() == descriptor)) {
-        return *down_cast<UnresolvedUninitializedRefType*>(cur_entry);
+        return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
       }
     }
     entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUninitializedReference() &&
-          down_cast<UninitializedReferenceType*>(cur_entry)
+          down_cast<const UninitializedReferenceType*>(cur_entry)
               ->GetAllocationPc() == allocation_pc &&
           cur_entry->GetClass() == klass) {
-        return *down_cast<UninitializedReferenceType*>(cur_entry);
+        return *down_cast<const UninitializedReferenceType*>(cur_entry);
       }
     }
     entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
@@ -405,7 +407,7 @@
   if (uninit_type.IsUnresolvedTypes()) {
     const std::string& descriptor(uninit_type.GetDescriptor());
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedReference() &&
           cur_entry->GetDescriptor() == descriptor) {
         return *cur_entry;
@@ -417,7 +419,7 @@
     if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
       // For uninitialized "this reference" look for reference types that are not precise.
       for (size_t i = primitive_count_; i < entries_.size(); i++) {
-        RegType* cur_entry = entries_[i];
+        const RegType* cur_entry = entries_[i];
         if (cur_entry->IsReference() && cur_entry->GetClass() == klass) {
           return *cur_entry;
         }
@@ -427,7 +429,7 @@
       // We're uninitialized due to allocation; look for or create a precise type, as allocations
       // may only create objects of that type.
       for (size_t i = primitive_count_; i < entries_.size(); i++) {
-        RegType* cur_entry = entries_[i];
+        const RegType* cur_entry = entries_[i];
         if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) {
           return *cur_entry;
         }
@@ -441,61 +443,24 @@
   return *entry;
 }
 
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::CharConstant() {
-  int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
-  const ConstantType& result =  FromCat1Const(jchar_max, false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::min(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::IntConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
-  const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
-  const ConstantType& result =  FromCat1Const(std::numeric_limits<jshort>::max(), false);
-  DCHECK(result.IsImpreciseConstant());
-  return *down_cast<const ImpreciseConstType*>(&result);
-}
-
 const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
   UninitializedType* entry;
   const std::string& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedAndUninitializedThisReference() &&
           cur_entry->GetDescriptor() == descriptor) {
-        return *down_cast<UninitializedType*>(cur_entry);
+        return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
     entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      RegType* cur_entry = entries_[i];
+      const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUninitializedThisReference() && cur_entry->GetClass() == klass) {
-        return *down_cast<UninitializedType*>(cur_entry);
+        return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
     entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
@@ -506,11 +471,11 @@
 
 const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
         cur_entry->IsPreciseConstant() == precise &&
-        (down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
-      return *down_cast<ConstantType*>(cur_entry);
+        (down_cast<const ConstantType*>(cur_entry))->ConstantValue() == value) {
+      return *down_cast<const ConstantType*>(cur_entry);
     }
   }
   ConstantType* entry;
@@ -525,10 +490,10 @@
 
 const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
-        (down_cast<ConstantType*>(cur_entry))->ConstantValueLo() == value) {
-      return *down_cast<ConstantType*>(cur_entry);
+        (down_cast<const ConstantType*>(cur_entry))->ConstantValueLo() == value) {
+      return *down_cast<const ConstantType*>(cur_entry);
     }
   }
   ConstantType* entry;
@@ -543,10 +508,10 @@
 
 const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
+    const RegType* cur_entry = entries_[i];
     if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
-        (down_cast<ConstantType*>(cur_entry))->ConstantValueHi() == value) {
-      return *down_cast<ConstantType*>(cur_entry);
+        (down_cast<const ConstantType*>(cur_entry))->ConstantValueHi() == value) {
+      return *down_cast<const ConstantType*>(cur_entry);
     }
   }
   ConstantType* entry;
@@ -583,15 +548,37 @@
 
 void RegTypeCache::Dump(std::ostream& os) {
   for (size_t i = 0; i < entries_.size(); i++) {
-    RegType* cur_entry = entries_[i];
-    if (cur_entry != NULL) {
+    const RegType* cur_entry = entries_[i];
+    if (cur_entry != nullptr) {
       os << i << ": " << cur_entry->Dump() << "\n";
     }
   }
 }
 
+void RegTypeCache::VisitStaticRoots(RootCallback* callback, void* arg) {
+  // Visit the primitive types. This is required because, if there are no active
+  // verifiers, they won't be in the entries array and thus won't be visited as roots.
+  if (primitive_initialized_) {
+    UndefinedType::GetInstance()->VisitRoots(callback, arg);
+    ConflictType::GetInstance()->VisitRoots(callback, arg);
+    BooleanType::GetInstance()->VisitRoots(callback, arg);
+    ByteType::GetInstance()->VisitRoots(callback, arg);
+    ShortType::GetInstance()->VisitRoots(callback, arg);
+    CharType::GetInstance()->VisitRoots(callback, arg);
+    IntegerType::GetInstance()->VisitRoots(callback, arg);
+    LongLoType::GetInstance()->VisitRoots(callback, arg);
+    LongHiType::GetInstance()->VisitRoots(callback, arg);
+    FloatType::GetInstance()->VisitRoots(callback, arg);
+    DoubleLoType::GetInstance()->VisitRoots(callback, arg);
+    DoubleHiType::GetInstance()->VisitRoots(callback, arg);
+    for (int32_t value = kMinSmallConstant; value <= kMaxSmallConstant; ++value) {
+      small_precise_constants_[value - kMinSmallConstant]->VisitRoots(callback, arg);
+    }
+  }
+}
+
 void RegTypeCache::VisitRoots(RootCallback* callback, void* arg) {
-  for (RegType* entry : entries_) {
+  for (const RegType* entry : entries_) {
     entry->VisitRoots(callback, arg);
   }
 }
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 8baf3ff..ff7b1f3 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -68,14 +68,6 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const RegType& FromUnresolvedSuperClass(const RegType& child)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // String is final and therefore always precise.
-    return From(NULL, "Ljava/lang/String;", true);
-  }
-  const RegType& JavaLangThrowable(bool precise)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return From(NULL, "Ljava/lang/Throwable;", precise);
-  }
   const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return FromCat1Const(0, true);
   }
@@ -85,48 +77,48 @@
   size_t GetCacheSize() {
     return entries_.size();
   }
-  const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const BooleanType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *BooleanType::GetInstance();
   }
-  const RegType& Byte() {
+  const ByteType& Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *ByteType::GetInstance();
   }
-  const RegType& Char()  {
+  const CharType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *CharType::GetInstance();
   }
-  const RegType& Short()  {
+  const ShortType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *ShortType::GetInstance();
   }
-  const RegType& Integer() {
+  const IntegerType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *IntegerType::GetInstance();
   }
-  const RegType& Float() {
+  const FloatType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *FloatType::GetInstance();
   }
-  const RegType& LongLo() {
+  const LongLoType& LongLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *LongLoType::GetInstance();
   }
-  const RegType& LongHi() {
+  const LongHiType& LongHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *LongHiType::GetInstance();
   }
-  const RegType& DoubleLo() {
+  const DoubleLoType& DoubleLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *DoubleLoType::GetInstance();
   }
-  const RegType& DoubleHi() {
+  const DoubleHiType& DoubleHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *DoubleHiType::GetInstance();
   }
-  const RegType& Undefined() {
+  const UndefinedType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return *UndefinedType::GetInstance();
   }
-  const RegType& Conflict() {
+  const ConflictType& Conflict() {
     return *ConflictType::GetInstance();
   }
-  const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return From(NULL, "Ljava/lang/Class;", precise);
-  }
-  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return From(NULL, "Ljava/lang/Object;", precise);
-  }
+
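+  // Well-known types. Defined in reg_type_cache-inl.h, where each result is
+  // down_cast to its concrete subclass.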
+  const PreciseReferenceType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const PreciseReferenceType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& JavaLangThrowable(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Create an uninitialized 'this' argument for the given type.
@@ -146,6 +138,8 @@
   const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
 
   void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
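+  // Visits the roots of the statically allocated primitive and small-constant
+  // types; these exist even when no RegTypeCache instance is alive.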
+  static void VisitStaticRoots(RootCallback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -159,17 +153,14 @@
   void AddEntry(RegType* new_entry);
 
   template <class Type>
-  static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
+  static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // The actual storage for the RegTypes.
-  std::vector<RegType*> entries_;
-
   // A quick look up for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
   static constexpr int32_t kMaxSmallConstant = 4;
-  static PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
 
   static constexpr size_t kNumPrimitivesAndSmallConstants =
       12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -180,6 +171,9 @@
   // Number of well known primitives that will be copied into a RegTypeCache upon construction.
   static uint16_t primitive_count_;
 
+  // The actual storage for the RegTypes.
+  std::vector<const RegType*> entries_;
+
   // Whether or not we're allowed to load classes.
   const bool can_load_classes_;
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df1..aad3b5a 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -21,6 +21,7 @@
 #include "base/casts.h"
 #include "common_runtime_test.h"
 #include "reg_type_cache-inl.h"
+#include "reg_type-inl.h"
 #include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 
@@ -346,7 +347,7 @@
   RegTypeCache cache(true);
   const RegType& imprecise_obj = cache.JavaLangObject(false);
   const RegType& precise_obj = cache.JavaLangObject(true);
-  const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
   EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,11 +360,11 @@
   // a hit second time.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
 
-  const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_1 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
 
   const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
@@ -375,9 +376,9 @@
   // Tests creating uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.Equals(ref_type));
   // Create an uninitialized type of this unresolved type
   const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
@@ -397,8 +398,8 @@
   // Tests types for proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache(true);
-  const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
-  const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+  const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
+  const RegType& unresolved_ref_another =
+      cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
   const RegType& resolved_ref = cache.JavaLangString();
   const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
   const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
@@ -424,7 +425,7 @@
   RegTypeCache cache(true);
   const RegType& ref_type = cache.JavaLangString();
   const RegType& ref_type_2 = cache.JavaLangString();
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+  const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
@@ -444,7 +445,7 @@
   RegTypeCache cache(true);
   const RegType& ref_type = cache.JavaLangObject(true);
   const RegType& ref_type_2 = cache.JavaLangObject(true);
-  const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+  const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
 
   EXPECT_TRUE(ref_type.Equals(ref_type_2));
   EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -459,9 +460,9 @@
   const RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
   // Merge two unresolved types.
-  const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+  const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
-  const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+  const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true);
   EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
 
   const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0..219e687 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,10 +25,135 @@
 namespace art {
 namespace verifier {
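+// Note: these RegisterLine helpers take the owning MethodVerifier as an explicit
+// argument; RegisterLine itself need not keep a back-pointer to its verifier.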
 
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline const RegType& RegisterLine::GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const {
   // The register index was validated during the static pass, so we don't need to check it here.
   DCHECK_LT(vsrc, num_regs_);
-  return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
+  return verifier->GetRegTypeCache()->GetFromId(line_[vsrc]);
+}
+
+inline bool RegisterLine::SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
+                                          const RegType& new_type) {
+  DCHECK_LT(vdst, num_regs_);
+  if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
+        << new_type << "'";
+    return false;
+  } else if (new_type.IsConflict()) {  // should only be set during a merge
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type;
+    return false;
+  } else {
+    line_[vdst] = new_type.GetId();
+  }
+  // Clear the monitor entry bits for this register.
+  ClearAllRegToLockDepths(vdst);
+  return true;
+}
+
+inline bool RegisterLine::SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst,
+                                              const RegType& new_type1,
+                                              const RegType& new_type2) {
+  DCHECK_LT(vdst + 1, num_regs_);
+  if (!new_type1.CheckWidePair(new_type2)) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
+        << new_type1 << "' '" << new_type2 << "'";
+    return false;
+  } else {
+    line_[vdst] = new_type1.GetId();
+    line_[vdst + 1] = new_type2.GetId();
+  }
+  // Clear the monitor entry bits for this register.
+  ClearAllRegToLockDepths(vdst);
+  ClearAllRegToLockDepths(vdst + 1);
+  return true;
+}
+
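+// The two result_ slots model the implicit method-result register pair: a wide
+// result occupies both halves, while a narrow result leaves result_[1] Undefined.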
+inline void RegisterLine::SetResultTypeToUnknown(MethodVerifier* verifier) {
+  result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
+  result_[1] = result_[0];
+}
+
+inline void RegisterLine::SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type) {
+  DCHECK(!new_type.IsLowHalf());
+  DCHECK(!new_type.IsHighHalf());
+  result_[0] = new_type.GetId();
+  result_[1] = verifier->GetRegTypeCache()->Undefined().GetId();
+}
+
+inline void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
+                                                    const RegType& new_type2) {
+  DCHECK(new_type1.CheckWidePair(new_type2));
+  result_[0] = new_type1.GetId();
+  result_[1] = new_type2.GetId();
+}
+
+inline void RegisterLine::CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc,
+                                 TypeCategory cat) {
+  DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
+  const RegType& type = GetRegisterType(verifier, vsrc);
+  if (!SetRegisterType(verifier, vdst, type)) {
+    return;
+  }
+  if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
+      (cat == kTypeCategoryRef && !type.IsReferenceTypes())) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type
+                                                 << " cat=" << static_cast<int>(cat);
+  } else if (cat == kTypeCategoryRef) {
+    CopyRegToLockDepth(vdst, vsrc);
+  }
+}
+
+inline void RegisterLine::CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc) {
+  const RegType& type_l = GetRegisterType(verifier, vsrc);
+  const RegType& type_h = GetRegisterType(verifier, vsrc + 1);
+
+  if (!type_l.CheckWidePair(type_h)) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
+                                                 << " type=" << type_l << "/" << type_h;
+  } else {
+    SetRegisterTypeWide(verifier, vdst, type_l, type_h);
+  }
+}
+
+inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
+                                             const RegType& check_type) {
+  // Verify the src register type against the check type, refining the type of the register.
+  const RegType& src_type = GetRegisterType(verifier, vsrc);
+  if (UNLIKELY(!check_type.IsAssignableFrom(src_type))) {
+    enum VerifyError fail_type;
+    if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
+      // Hard fail if one of the types is primitive, since they are concretely known.
+      fail_type = VERIFY_ERROR_BAD_CLASS_HARD;
+    } else if (check_type.IsUnresolvedTypes() || src_type.IsUnresolvedTypes()) {
+      fail_type = VERIFY_ERROR_NO_CLASS;
+    } else {
+      fail_type = VERIFY_ERROR_BAD_CLASS_SOFT;
+    }
+    verifier->Fail(fail_type) << "register v" << vsrc << " has type "
+                              << src_type << " but expected " << check_type;
+    return false;
+  }
+  if (check_type.IsLowHalf()) {
+    const RegType& src_type_h = GetRegisterType(verifier, vsrc + 1);
+    if (UNLIKELY(!src_type.CheckWidePair(src_type_h))) {
+      verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
+                                                   << src_type << "/" << src_type_h;
+      return false;
+    }
+  }
+  // The register at vsrc has a defined type; we know its lower/upper bound, but that is
+  // less precise than the subtype already in vsrc, so leave reference types as they are.
+  // Primitive types, once defined, are as precise as we can get. Constant types we might
+  // wish to refine, but constant propagation has unfortunately rendered this useless.
+  return true;
+}
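+
+// Illustrative call (variable names are hypothetical): checking that an int source
+// register is assignable before a narrowing operation:
+//   line->VerifyRegisterType(verifier, vsrc, verifier->GetRegTypeCache()->Integer());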
+
+inline bool RegisterLine::VerifyMonitorStackEmpty(MethodVerifier* verifier) const {
+  if (MonitorStackDepth() != 0) {
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
+    return false;
+  } else {
+    return true;
+  }
 }
 
 }  // namespace verifier
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056c..3139204 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -20,15 +20,16 @@
 #include "dex_instruction-inl.h"
 #include "method_verifier.h"
 #include "register_line-inl.h"
+#include "reg_type-inl.h"
 
 namespace art {
 namespace verifier {
 
-bool RegisterLine::CheckConstructorReturn() const {
+bool RegisterLine::CheckConstructorReturn(MethodVerifier* verifier) const {
   for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(i).IsUninitializedThisReference() ||
-        GetRegisterType(i).IsUnresolvedAndUninitializedThisReference()) {
-      verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
+    if (GetRegisterType(verifier, i).IsUninitializedThisReference() ||
+        GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) {
+      verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
           << "Constructor returning without calling superclass constructor";
       return false;
     }
@@ -36,122 +37,38 @@
   return true;
 }
 
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
-  DCHECK_LT(vdst, num_regs_);
-  if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
-        << new_type << "'";
-    return false;
-  } else if (new_type.IsConflict()) {  // should only be set during a merge
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type;
-    return false;
-  } else {
-    line_[vdst] = new_type.GetId();
-  }
-  // Clear the monitor entry bits for this register.
-  ClearAllRegToLockDepths(vdst);
-  return true;
-}
-
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
-                                       const RegType& new_type2) {
-  DCHECK_LT(vdst + 1, num_regs_);
-  if (!new_type1.CheckWidePair(new_type2)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
-        << new_type1 << "' '" << new_type2 << "'";
-    return false;
-  } else {
-    line_[vdst] = new_type1.GetId();
-    line_[vdst + 1] = new_type2.GetId();
-  }
-  // Clear the monitor entry bits for this register.
-  ClearAllRegToLockDepths(vdst);
-  ClearAllRegToLockDepths(vdst + 1);
-  return true;
-}
-
-void RegisterLine::SetResultTypeToUnknown() {
-  result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
-  result_[1] = result_[0];
-}
-
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
-  DCHECK(!new_type.IsLowHalf());
-  DCHECK(!new_type.IsHighHalf());
-  result_[0] = new_type.GetId();
-  result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
-}
-
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
-                                             const RegType& new_type2) {
-  DCHECK(new_type1.CheckWidePair(new_type2));
-  result_[0] = new_type1.GetId();
-  result_[1] = new_type2.GetId();
-}
-
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
+                                               bool is_range) {
   const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
   if (args_count < 1) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
-    return verifier_->GetRegTypeCache()->Conflict();
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+    return verifier->GetRegTypeCache()->Conflict();
   }
   /* Get the type of the 'this' argument, held in the first argument register */
   const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
-  const RegType& this_type = GetRegisterType(this_reg);
+  const RegType& this_type = GetRegisterType(verifier, this_reg);
   if (!this_type.IsReferenceTypes()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
                                                  << this_reg << " (type=" << this_type << ")";
-    return verifier_->GetRegTypeCache()->Conflict();
+    return verifier->GetRegTypeCache()->Conflict();
   }
   return this_type;
 }
 
-bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
-                                      const RegType& check_type) {
-  // Verify the src register type against the check type refining the type of the register
-  const RegType& src_type = GetRegisterType(vsrc);
-  if (!(check_type.IsAssignableFrom(src_type))) {
-    enum VerifyError fail_type;
-    if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
-      // Hard fail if one of the types is primitive, since they are concretely known.
-      fail_type = VERIFY_ERROR_BAD_CLASS_HARD;
-    } else if (check_type.IsUnresolvedTypes() || src_type.IsUnresolvedTypes()) {
-      fail_type = VERIFY_ERROR_NO_CLASS;
-    } else {
-      fail_type = VERIFY_ERROR_BAD_CLASS_SOFT;
-    }
-    verifier_->Fail(fail_type) << "register v" << vsrc << " has type "
-                               << src_type << " but expected " << check_type;
-    return false;
-  }
-  if (check_type.IsLowHalf()) {
-    const RegType& src_type_h = GetRegisterType(vsrc + 1);
-    if (!src_type.CheckWidePair(src_type_h)) {
-      verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
-                                                   << src_type << "/" << src_type_h;
-      return false;
-    }
-  }
-  // The register at vsrc has a defined type, we know the lower-upper-bound, but this is less
-  // precise than the subtype in vsrc so leave it for reference types. For primitive types
-  // if they are a defined type then they are as precise as we can get, however, for constant
-  // types we may wish to refine them. Unfortunately constant propagation has rendered this useless.
-  return true;
-}
-
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
+bool RegisterLine::VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc,
+                                          const RegType& check_type1,
                                           const RegType& check_type2) {
   DCHECK(check_type1.CheckWidePair(check_type2));
   // Verify the src register type against the check type refining the type of the register
-  const RegType& src_type = GetRegisterType(vsrc);
+  const RegType& src_type = GetRegisterType(verifier, vsrc);
   if (!check_type1.IsAssignableFrom(src_type)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
                                << " but expected " << check_type1;
     return false;
   }
-  const RegType& src_type_h = GetRegisterType(vsrc + 1);
+  const RegType& src_type_h = GetRegisterType(verifier, vsrc + 1);
   if (!src_type.CheckWidePair(src_type_h)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
         << src_type << "/" << src_type_h;
     return false;
   }
@@ -162,12 +79,12 @@
   return true;
 }
 
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type) {
   DCHECK(uninit_type.IsUninitializedTypes());
-  const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+  const RegType& init_type = verifier->GetRegTypeCache()->FromUninitialized(uninit_type);
   size_t changed = 0;
   for (uint32_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(i).Equals(uninit_type)) {
+    if (GetRegisterType(verifier, i).Equals(uninit_type)) {
       line_[i] = init_type.GetId();
       changed++;
     }
@@ -175,15 +92,15 @@
   DCHECK_GT(changed, 0u);
 }
 
-void RegisterLine::MarkAllRegistersAsConflicts() {
-  uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+void RegisterLine::MarkAllRegistersAsConflicts(MethodVerifier* verifier) {
+  uint16_t conflict_type_id = verifier->GetRegTypeCache()->Conflict().GetId();
   for (uint32_t i = 0; i < num_regs_; i++) {
     line_[i] = conflict_type_id;
   }
 }
 
-void RegisterLine::MarkAllRegistersAsConflictsExcept(uint32_t vsrc) {
-  uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+void RegisterLine::MarkAllRegistersAsConflictsExcept(MethodVerifier* verifier, uint32_t vsrc) {
+  uint16_t conflict_type_id = verifier->GetRegTypeCache()->Conflict().GetId();
   for (uint32_t i = 0; i < num_regs_; i++) {
     if (i != vsrc) {
       line_[i] = conflict_type_id;
@@ -191,8 +108,8 @@
   }
 }
 
-void RegisterLine::MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc) {
-  uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+void RegisterLine::MarkAllRegistersAsConflictsExceptWide(MethodVerifier* verifier, uint32_t vsrc) {
+  uint16_t conflict_type_id = verifier->GetRegTypeCache()->Conflict().GetId();
   for (uint32_t i = 0; i < num_regs_; i++) {
     if ((i != vsrc) && (i != (vsrc + 1))) {
       line_[i] = conflict_type_id;
@@ -200,11 +117,11 @@
   }
 }
 
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump(MethodVerifier* verifier) const {
   std::string result;
   for (size_t i = 0; i < num_regs_; i++) {
     result += StringPrintf("%zd:[", i);
-    result += GetRegisterType(i).Dump();
+    result += GetRegisterType(verifier, i).Dump();
     result += "],";
   }
   for (const auto& monitor : monitors_) {
@@ -213,52 +130,25 @@
   return result;
 }
 
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type) {
   for (size_t i = 0; i < num_regs_; i++) {
-    if (GetRegisterType(i).Equals(uninit_type)) {
-      line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
+    if (GetRegisterType(verifier, i).Equals(uninit_type)) {
+      line_[i] = verifier->GetRegTypeCache()->Conflict().GetId();
       ClearAllRegToLockDepths(i);
     }
   }
 }
 
-void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
-  DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
-  const RegType& type = GetRegisterType(vsrc);
-  if (!SetRegisterType(vdst, type)) {
-    return;
-  }
-  if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) ||
-      (cat == kTypeCategoryRef && !type.IsReferenceTypes())) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type
-                                                 << " cat=" << static_cast<int>(cat);
-  } else if (cat == kTypeCategoryRef) {
-    CopyRegToLockDepth(vdst, vsrc);
-  }
-}
-
-void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
-  const RegType& type_l = GetRegisterType(vsrc);
-  const RegType& type_h = GetRegisterType(vsrc + 1);
-
-  if (!type_l.CheckWidePair(type_h)) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
-                                                 << " type=" << type_l << "/" << type_h;
-  } else {
-    SetRegisterTypeWide(vdst, type_l, type_h);
-  }
-}
-
-void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
-  const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+void RegisterLine::CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference) {
+  const RegType& type = verifier->GetRegTypeCache()->GetFromId(result_[0]);
   if ((!is_reference && !type.IsCategory1Types()) ||
       (is_reference && !type.IsReferenceTypes())) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "copyRes1 v" << vdst << "<- result0"  << " type=" << type;
   } else {
-    DCHECK(verifier_->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
-    SetRegisterType(vdst, type);
-    result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
+    DCHECK(verifier->GetRegTypeCache()->GetFromId(result_[1]).IsUndefined());
+    SetRegisterType(verifier, vdst, type);
+    result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
   }
 }
 
@@ -266,178 +156,179 @@
  * Implement "move-result-wide". Copy the category-2 value from the result
  * register to another register, and reset the result register.
  */
-void RegisterLine::CopyResultRegister2(uint32_t vdst) {
-  const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
-  const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+void RegisterLine::CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst) {
+  const RegType& type_l = verifier->GetRegTypeCache()->GetFromId(result_[0]);
+  const RegType& type_h = verifier->GetRegTypeCache()->GetFromId(result_[1]);
   if (!type_l.IsCategory2Types()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "copyRes2 v" << vdst << "<- result0"  << " type=" << type_l;
   } else {
     DCHECK(type_l.CheckWidePair(type_h));  // Set should never allow this case
-    SetRegisterTypeWide(vdst, type_l, type_h);  // also sets the high
-    result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
-    result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
+    SetRegisterTypeWide(verifier, vdst, type_l, type_h);  // also sets the high half
+    result_[0] = verifier->GetRegTypeCache()->Undefined().GetId();
+    result_[1] = verifier->GetRegTypeCache()->Undefined().GetId();
   }
 }
 
-void RegisterLine::CheckUnaryOp(const Instruction* inst,
-                                const RegType& dst_type,
-                                const RegType& src_type) {
-  if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
-    SetRegisterType(inst->VRegA_12x(), dst_type);
+void RegisterLine::CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst,
+                                const RegType& dst_type, const RegType& src_type) {
+  if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
+    SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
   }
 }
 
-void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
+void RegisterLine::CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                                     const RegType& dst_type1, const RegType& dst_type2,
                                     const RegType& src_type1, const RegType& src_type2) {
-  if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
-    SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_12x(), src_type1, src_type2)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
+void RegisterLine::CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
                                       const RegType& dst_type1, const RegType& dst_type2,
                                       const RegType& src_type) {
-  if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
-    SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
+  if (VerifyRegisterType(verifier, inst->VRegB_12x(), src_type)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_12x(), dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
+void RegisterLine::CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
                                         const RegType& dst_type,
                                         const RegType& src_type1, const RegType& src_type2) {
-  if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
-    SetRegisterType(inst->VRegA_12x(), dst_type);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_12x(), src_type1, src_type2)) {
+    SetRegisterType(verifier, inst->VRegA_12x(), dst_type);
   }
 }
 
-void RegisterLine::CheckBinaryOp(const Instruction* inst,
+void RegisterLine::CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
                                  const RegType& dst_type,
                                  const RegType& src_type1, const RegType& src_type2,
                                  bool check_boolean_op) {
   const uint32_t vregB = inst->VRegB_23x();
   const uint32_t vregC = inst->VRegC_23x();
-  if (VerifyRegisterType(vregB, src_type1) &&
-      VerifyRegisterType(vregC, src_type2)) {
+  if (VerifyRegisterType(verifier, vregB, src_type1) &&
+      VerifyRegisterType(verifier, vregC, src_type2)) {
     if (check_boolean_op) {
       DCHECK(dst_type.IsInteger());
-      if (GetRegisterType(vregB).IsBooleanTypes() &&
-          GetRegisterType(vregC).IsBooleanTypes()) {
-        SetRegisterType(inst->VRegA_23x(), verifier_->GetRegTypeCache()->Boolean());
+      if (GetRegisterType(verifier, vregB).IsBooleanTypes() &&
+          GetRegisterType(verifier, vregC).IsBooleanTypes()) {
+        SetRegisterType(verifier, inst->VRegA_23x(), verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(inst->VRegA_23x(), dst_type);
+    SetRegisterType(verifier, inst->VRegA_23x(), dst_type);
   }
 }
 
-void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
+void RegisterLine::CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                                      const RegType& dst_type1, const RegType& dst_type2,
                                      const RegType& src_type1_1, const RegType& src_type1_2,
                                      const RegType& src_type2_1, const RegType& src_type2_2) {
-  if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
-      VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
-    SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_23x(), src_type1_1, src_type1_2) &&
+      VerifyRegisterTypeWide(verifier, inst->VRegC_23x(), src_type2_1, src_type2_2)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_23x(), dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
+void RegisterLine::CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
                                           const RegType& long_lo_type, const RegType& long_hi_type,
                                           const RegType& int_type) {
-  if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
-      VerifyRegisterType(inst->VRegC_23x(), int_type)) {
-    SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
+  if (VerifyRegisterTypeWide(verifier, inst->VRegB_23x(), long_lo_type, long_hi_type) &&
+      VerifyRegisterType(verifier, inst->VRegC_23x(), int_type)) {
+    SetRegisterTypeWide(verifier, inst->VRegA_23x(), long_lo_type, long_hi_type);
   }
 }
 
-void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
+void RegisterLine::CheckBinaryOp2addr(MethodVerifier* verifier, const Instruction* inst,
                                       const RegType& dst_type, const RegType& src_type1,
                                       const RegType& src_type2, bool check_boolean_op) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
-  if (VerifyRegisterType(vregA, src_type1) &&
-      VerifyRegisterType(vregB, src_type2)) {
+  if (VerifyRegisterType(verifier, vregA, src_type1) &&
+      VerifyRegisterType(verifier, vregB, src_type2)) {
     if (check_boolean_op) {
       DCHECK(dst_type.IsInteger());
-      if (GetRegisterType(vregA).IsBooleanTypes() &&
-          GetRegisterType(vregB).IsBooleanTypes()) {
-        SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
+      if (GetRegisterType(verifier, vregA).IsBooleanTypes() &&
+          GetRegisterType(verifier, vregB).IsBooleanTypes()) {
+        SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(vregA, dst_type);
+    SetRegisterType(verifier, vregA, dst_type);
   }
 }
 
-void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
+void RegisterLine::CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
                                           const RegType& dst_type1, const RegType& dst_type2,
                                           const RegType& src_type1_1, const RegType& src_type1_2,
                                           const RegType& src_type2_1, const RegType& src_type2_2) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
-  if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
-      VerifyRegisterTypeWide(vregB, src_type2_1, src_type2_2)) {
-    SetRegisterTypeWide(vregA, dst_type1, dst_type2);
+  if (VerifyRegisterTypeWide(verifier, vregA, src_type1_1, src_type1_2) &&
+      VerifyRegisterTypeWide(verifier, vregB, src_type2_1, src_type2_2)) {
+    SetRegisterTypeWide(verifier, vregA, dst_type1, dst_type2);
   }
 }
 
-void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
+void RegisterLine::CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
                                                const RegType& long_lo_type, const RegType& long_hi_type,
                                                const RegType& int_type) {
   const uint32_t vregA = inst->VRegA_12x();
   const uint32_t vregB = inst->VRegB_12x();
-  if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
-      VerifyRegisterType(vregB, int_type)) {
-    SetRegisterTypeWide(vregA, long_lo_type, long_hi_type);
+  if (VerifyRegisterTypeWide(verifier, vregA, long_lo_type, long_hi_type) &&
+      VerifyRegisterType(verifier, vregB, int_type)) {
+    SetRegisterTypeWide(verifier, vregA, long_lo_type, long_hi_type);
   }
 }
 
-void RegisterLine::CheckLiteralOp(const Instruction* inst,
+void RegisterLine::CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
                                   const RegType& dst_type, const RegType& src_type,
                                   bool check_boolean_op, bool is_lit16) {
   const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
   const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
-  if (VerifyRegisterType(vregB, src_type)) {
+  if (VerifyRegisterType(verifier, vregB, src_type)) {
     if (check_boolean_op) {
       DCHECK(dst_type.IsInteger());
       /* check vB with the call, then check the constant manually */
       const uint32_t val = is_lit16 ? inst->VRegC_22s() : inst->VRegC_22b();
-      if (GetRegisterType(vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
-        SetRegisterType(vregA, verifier_->GetRegTypeCache()->Boolean());
+      if (GetRegisterType(verifier, vregB).IsBooleanTypes() && (val == 0 || val == 1)) {
+        SetRegisterType(verifier, vregA, verifier->GetRegTypeCache()->Boolean());
         return;
       }
     }
-    SetRegisterType(vregA, dst_type);
+    SetRegisterType(verifier, vregA, dst_type);
   }
 }
 
-void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) {
+  const RegType& reg_type = GetRegisterType(verifier, reg_idx);
   if (!reg_type.IsReferenceTypes()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object ("
+        << reg_type << ")";
   } else if (monitors_.size() >= 32) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: " << monitors_.size();
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter stack overflow: "
+        << monitors_.size();
   } else {
     SetRegToLockDepth(reg_idx, monitors_.size());
     monitors_.push_back(insn_idx);
   }
 }
 
-void RegisterLine::PopMonitor(uint32_t reg_idx) {
-  const RegType& reg_type = GetRegisterType(reg_idx);
+void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
+  const RegType& reg_type = GetRegisterType(verifier, reg_idx);
   if (!reg_type.IsReferenceTypes()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
   } else if (monitors_.empty()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit stack underflow";
+    verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit stack underflow";
   } else {
     monitors_.pop_back();
     if (!IsSetLockDepth(reg_idx, monitors_.size())) {
       // Bug 3215458: Locks and unlocks are on objects; if that object is a literal then,
       // before dex format "036", the constant collector may create unlocks on the same
       // object but referenced via different registers.
-      ((verifier_->DexFileVersion() >= 36) ? verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
-                                           : verifier_->LogVerifyInfo())
+      ((verifier->DexFileVersion() >= 36) ? verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT)
+                                          : verifier->LogVerifyInfo())
             << "monitor-exit not unlocking the top of the monitor stack";
     } else {
       // Record the register was unlocked
@@ -446,41 +337,34 @@
   }
 }
 
-bool RegisterLine::VerifyMonitorStackEmpty() const {
-  if (MonitorStackDepth() != 0) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
-    return false;
-  } else {
-    return true;
-  }
-}
-
-bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
+bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) {
   bool changed = false;
   DCHECK(incoming_line != nullptr);
   for (size_t idx = 0; idx < num_regs_; idx++) {
     if (line_[idx] != incoming_line->line_[idx]) {
-      const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
-      const RegType& cur_type = GetRegisterType(idx);
-      const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+      const RegType& incoming_reg_type = incoming_line->GetRegisterType(verifier, idx);
+      const RegType& cur_type = GetRegisterType(verifier, idx);
+      const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier->GetRegTypeCache());
       changed = changed || !cur_type.Equals(new_type);
       line_[idx] = new_type.GetId();
     }
   }
-  if (monitors_.size() != incoming_line->monitors_.size()) {
-    LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
-                 << ", incoming depth=" << incoming_line->MonitorStackDepth() << ")";
-  } else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
-    for (uint32_t idx = 0; idx < num_regs_; idx++) {
-      size_t depths = reg_to_lock_depths_.count(idx);
-      size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
-      if (depths != incoming_depths) {
-        if (depths == 0 || incoming_depths == 0) {
-          reg_to_lock_depths_.erase(idx);
-        } else {
-          LOG(WARNING) << "mismatched stack depths for register v" << idx
-                       << ": " << depths  << " != " << incoming_depths;
-          break;
+  if (monitors_.size() > 0 || incoming_line->monitors_.size() > 0) {
+    if (monitors_.size() != incoming_line->monitors_.size()) {
+      LOG(WARNING) << "mismatched stack depths (depth=" << MonitorStackDepth()
+                     << ", incoming depth=" << incoming_line->MonitorStackDepth() << ")";
+    } else if (reg_to_lock_depths_ != incoming_line->reg_to_lock_depths_) {
+      for (uint32_t idx = 0; idx < num_regs_; idx++) {
+        size_t depths = reg_to_lock_depths_.count(idx);
+        size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
+        if (depths != incoming_depths) {
+          if (depths == 0 || incoming_depths == 0) {
+            reg_to_lock_depths_.erase(idx);
+          } else {
+            LOG(WARNING) << "mismatched stack depths for register v" << idx
+                << ": " << depths  << " != " << incoming_depths;
+            break;
+          }
         }
       }
     }
@@ -488,12 +372,13 @@
   return changed;
 }
 
-void RegisterLine::WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_bytes) {
+void RegisterLine::WriteReferenceBitMap(MethodVerifier* verifier,
+                                        std::vector<uint8_t>* data, size_t max_bytes) {
   for (size_t i = 0; i < num_regs_; i += 8) {
     uint8_t val = 0;
     for (size_t j = 0; j < 8 && (i + j) < num_regs_; j++) {
       // Note: we write 1 for a Reference but not for Null
-      if (GetRegisterType(i + j).IsNonZeroReferenceTypes()) {
+      if (GetRegisterType(verifier, i + j).IsNonZeroReferenceTypes()) {
         val |= 1 << j;
       }
     }
@@ -502,15 +387,9 @@
       continue;
     }
     DCHECK_LT(i / 8, max_bytes) << "val=" << static_cast<uint32_t>(val);
-    data.push_back(val);
+    data->push_back(val);
   }
 }
 
-std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  os << rhs.Dump();
-  return os;
-}
-
 }  // namespace verifier
 }  // namespace art
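
The WriteReferenceBitMap change above switches the output to an out-parameter but keeps the packing logic. For reference, the same one-bit-per-register packing in isolation -- a minimal sketch with a plain predicate standing in for ART's RegType checks, and without the max_bytes truncation handling (all names here are illustrative, not ART's):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Pack one bit per register into bytes, LSB-first within each byte,
    // mirroring the loop shape of RegisterLine::WriteReferenceBitMap.
    void WriteBitMap(const std::vector<bool>& is_ref, std::vector<uint8_t>* data) {
      for (size_t i = 0; i < is_ref.size(); i += 8) {
        uint8_t val = 0;
        for (size_t j = 0; j < 8 && (i + j) < is_ref.size(); j++) {
          if (is_ref[i + j]) {
            val |= 1 << j;  // register (i + j) holds a non-null reference
          }
        }
        data->push_back(val);
      }
    }
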
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index a9d0dbb..c7fd369 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -57,50 +57,54 @@
   }
 
   // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
-  void CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat)
+  void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
   // copies both halves of the register.
-  void CopyRegister2(uint32_t vdst, uint32_t vsrc)
+  void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Implement "move-result". Copy the category-1 value from the result register to another
   // register, and reset the result register.
-  void CopyResultRegister1(uint32_t vdst, bool is_reference)
+  void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Implement "move-result-wide". Copy the category-2 value from the result register to another
   // register, and reset the result register.
-  void CopyResultRegister2(uint32_t vdst)
+  void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Set the invisible result register to unknown
-  void SetResultTypeToUnknown() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Set the type of register N, verifying that the register is valid.  If "newType" is the "Lo"
   // part of a 64-bit value, register N+1 will be set to "newType+1".
   // The register index was validated during the static pass, so we don't need to check it here.
-  bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+  ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
+                                     const RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+  bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1,
+                           const RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /* Set the type of the "result" register. */
-  void SetResultRegisterType(const RegType& new_type)
+  void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get the type of register vsrc.
-  const RegType& GetRegisterType(uint32_t vsrc) const;
+  const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
 
-  bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+  ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
+                                        const RegType& check_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+  bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1,
+                              const RegType& check_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +114,7 @@
     reg_to_lock_depths_ = src->reg_to_lock_depths_;
   }
 
-  std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  std::string Dump(MethodVerifier* verifier) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FillWithGarbage() {
     memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +130,7 @@
    * to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
    * the new ones at the same time).
    */
-  void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+  void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
@@ -134,15 +138,15 @@
    * reference type. This is called when an appropriate constructor is invoked -- all copies of
    * the reference must be marked as initialized.
    */
-  void MarkRefsAsInitialized(const RegType& uninit_type)
+  void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
   * Update all registers to be Conflict, or all except vsrc (and vsrc+1 for wide values).
    */
-  void MarkAllRegistersAsConflicts();
-  void MarkAllRegistersAsConflictsExcept(uint32_t vsrc);
-  void MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc);
+  void MarkAllRegistersAsConflicts(MethodVerifier* verifier);
+  void MarkAllRegistersAsConflictsExcept(MethodVerifier* verifier, uint32_t vsrc);
+  void MarkAllRegistersAsConflictsExceptWide(MethodVerifier* verifier, uint32_t vsrc);
 
   /*
    * Check constraints on constructor return. Specifically, make sure that the "this" argument got
@@ -151,7 +155,7 @@
    * of the list in slot 0. If we see a register with an uninitialized slot 0 reference, we know it
    * somehow didn't get initialized.
    */
-  bool CheckConstructorReturn() const;
+  bool CheckConstructorReturn(MethodVerifier* verifier) const;
 
   // Compare two register lines. Returns 0 if they match.
   // Using this for a sort is unwise, since the value can change based on machine endianness.
@@ -173,28 +177,29 @@
    * The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
    * versions. We just need to make sure vA is >= 1 and then return vC.
    */
-  const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+  const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
+                                   bool is_range)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   /*
    * Verify types for a simple two-register instruction (e.g. "neg-int").
    * "dst_type" is stored into vA, and "src_type" is verified against vB.
    */
-  void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
+  void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type,
                     const RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckUnaryOpWide(const Instruction* inst,
+  void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                         const RegType& dst_type1, const RegType& dst_type2,
                         const RegType& src_type1, const RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckUnaryOpToWide(const Instruction* inst,
+  void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
                           const RegType& dst_type1, const RegType& dst_type2,
                           const RegType& src_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckUnaryOpFromWide(const Instruction* inst,
+  void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
                             const RegType& dst_type,
                             const RegType& src_type1, const RegType& src_type2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -204,18 +209,18 @@
    * "dst_type" is stored into vA, and "src_type1"/"src_type2" are verified
    * against vB/vC.
    */
-  void CheckBinaryOp(const Instruction* inst,
+  void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
                      const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
                      bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOpWide(const Instruction* inst,
+  void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
                          const RegType& dst_type1, const RegType& dst_type2,
                          const RegType& src_type1_1, const RegType& src_type1_2,
                          const RegType& src_type2_1, const RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOpWideShift(const Instruction* inst,
+  void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
                               const RegType& long_lo_type, const RegType& long_hi_type,
                               const RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -224,19 +229,19 @@
    * Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
    * are verified against vA/vB, then "dst_type" is stored into vA.
    */
-  void CheckBinaryOp2addr(const Instruction* inst,
+  void CheckBinaryOp2addr(MethodVerifier* verifier, const Instruction* inst,
                           const RegType& dst_type,
                           const RegType& src_type1, const RegType& src_type2,
                           bool check_boolean_op)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOp2addrWide(const Instruction* inst,
+  void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
                               const RegType& dst_type1, const RegType& dst_type2,
                               const RegType& src_type1_1, const RegType& src_type1_2,
                               const RegType& src_type2_1, const RegType& src_type2_2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CheckBinaryOp2addrWideShift(const Instruction* inst,
+  void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
                                    const RegType& long_lo_type, const RegType& long_hi_type,
                                    const RegType& int_type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -247,16 +252,18 @@
    *
    * If "check_boolean_op" is set, we use the constant value in vC.
    */
-  void CheckLiteralOp(const Instruction* inst,
+  void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
                       const RegType& dst_type, const RegType& src_type,
                       bool check_boolean_op, bool is_lit16)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
-  void PushMonitor(uint32_t reg_idx, int32_t insn_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked
-  void PopMonitor(uint32_t reg_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Stack of currently held monitors and where they were locked
   size_t MonitorStackDepth() const {
@@ -265,23 +272,23 @@
 
   // We expect no monitors to be held at certain points, such as when a method returns. Verify the stack
   // is empty, failing and returning false if not.
-  bool VerifyMonitorStackEmpty() const;
+  bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
 
-  bool MergeRegisters(const RegisterLine* incoming_line)
+  bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  size_t GetMaxNonZeroReferenceReg(size_t max_ref_reg) {
+  size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) {
     size_t i = static_cast<int>(max_ref_reg) < 0 ? 0 : max_ref_reg;
     for (; i < num_regs_; i++) {
-      if (GetRegisterType(i).IsNonZeroReferenceTypes()) {
+      if (GetRegisterType(verifier, i).IsNonZeroReferenceTypes()) {
         max_ref_reg = i;
       }
     }
     return max_ref_reg;
   }
 
-  // Write a bit at each register location that holds a reference
-  void WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_bytes);
+  // Write a bit at each register location that holds a reference.
+  void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
 
   size_t GetMonitorEnterCount() {
     return monitors_.size();
@@ -337,19 +344,17 @@
   }
 
   RegisterLine(size_t num_regs, MethodVerifier* verifier)
-      : verifier_(verifier), num_regs_(num_regs) {
+      : num_regs_(num_regs) {
     memset(&line_, 0, num_regs_ * sizeof(uint16_t));
-    SetResultTypeToUnknown();
+    SetResultTypeToUnknown(verifier);
   }
 
   // Storage for the result register's type, valid after an invocation
   uint16_t result_[2];
 
-  // Back link to the verifier
-  MethodVerifier* verifier_;
-
   // Length of reg_types_
   const uint32_t num_regs_;
+
   // A stack of monitor enter locations
   std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
   // A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
@@ -360,7 +365,6 @@
   // An array of RegType Ids associated with each dex register.
   uint16_t line_[0];
 };
-std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs);
 
 }  // namespace verifier
 }  // namespace art
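
The header change threads a MethodVerifier* through every call instead of storing a back link, so each RegisterLine sheds one pointer; since line_ is a flexible array allocated inline per dex register line, the fixed header cost is paid many times over. A rough before/after sketch of the layout rationale, with hypothetical struct names:

    #include <cstdint>

    class MethodVerifier;

    // Before: every line carried a back link used only to reach shared state.
    struct LineWithBackLink {
      uint16_t result_[2];
      MethodVerifier* verifier_;  // 8 bytes on 64-bit, duplicated per line
      uint32_t num_regs_;
      uint16_t line_[0];          // flexible array, allocated inline
    };

    // After: callers pass the verifier explicitly, e.g.
    //   line->SetRegisterType(verifier, vdst, type);
    struct LineWithoutBackLink {
      uint16_t result_[2];
      uint32_t num_regs_;
      uint16_t line_[0];
    };

    static_assert(sizeof(LineWithoutBackLink) < sizeof(LineWithBackLink),
                  "dropping the back link shrinks every register line");
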
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 7068a4d..cef604b 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -21,6 +21,7 @@
 #include "base/logging.h"
 #include "mirror/class.h"
 #include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 
 namespace art {
@@ -97,11 +98,18 @@
 jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_name;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup;
+jfieldID WellKnownClasses::java_lang_Throwable_cause;
+jfieldID WellKnownClasses::java_lang_Throwable_detailMessage;
+jfieldID WellKnownClasses::java_lang_Throwable_stackTrace;
+jfieldID WellKnownClasses::java_lang_Throwable_stackState;
+jfieldID WellKnownClasses::java_lang_Throwable_suppressedExceptions;
 jfieldID WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod;
 jfieldID WellKnownClasses::java_lang_reflect_Field_artField;
 jfieldID WellKnownClasses::java_lang_reflect_Proxy_h;
 jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity;
 jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress;
+jfieldID WellKnownClasses::java_util_Collections_EMPTY_LIST;
+jfieldID WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT;
 jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data;
 jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length;
 jfieldID WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset;
@@ -115,18 +123,32 @@
   return reinterpret_cast<jclass>(env->NewGlobalRef(c.get()));
 }
 
-static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) {
-  jfieldID fid = is_static ? env->GetStaticFieldID(c, name, signature) : env->GetFieldID(c, name, signature);
+static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
+                           const char* name, const char* signature) {
+  jfieldID fid = (is_static ?
+                  env->GetStaticFieldID(c, name, signature) :
+                  env->GetFieldID(c, name, signature));
   if (fid == NULL) {
-    LOG(FATAL) << "Couldn't find field \"" << name << "\" with signature \"" << signature << "\"";
+    ScopedObjectAccess soa(env);
+    std::ostringstream os;
+    WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
+    LOG(FATAL) << "Couldn't find field \"" << name << "\" with signature \"" << signature << "\": "
+               << os.str();
   }
   return fid;
 }
 
-jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static, const char* name, const char* signature) {
-  jmethodID mid = is_static ? env->GetStaticMethodID(c, name, signature) : env->GetMethodID(c, name, signature);
+jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static,
+                      const char* name, const char* signature) {
+  jmethodID mid = (is_static ?
+                   env->GetStaticMethodID(c, name, signature) :
+                   env->GetMethodID(c, name, signature));
   if (mid == NULL) {
-    LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\"";
+    ScopedObjectAccess soa(env);
+    std::ostringstream os;
+    WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
+    LOG(FATAL) << "Couldn't find method \"" << name << "\" with signature \"" << signature << "\": "
+               << os.str();
   }
   return mid;
 }
@@ -205,11 +227,18 @@
   java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
   java_lang_ThreadGroup_name = CacheField(env, java_lang_ThreadGroup, false, "name", "Ljava/lang/String;");
   java_lang_ThreadGroup_systemThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "systemThreadGroup", "Ljava/lang/ThreadGroup;");
+  java_lang_Throwable_cause = CacheField(env, java_lang_Throwable, false, "cause", "Ljava/lang/Throwable;");
+  java_lang_Throwable_detailMessage = CacheField(env, java_lang_Throwable, false, "detailMessage", "Ljava/lang/String;");
+  java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", "[Ljava/lang/StackTraceElement;");
+  java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "stackState", "Ljava/lang/Object;");
+  java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;");
   java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "Ljava/lang/reflect/ArtMethod;");
   java_lang_reflect_Field_artField = CacheField(env, java_lang_reflect_Field, false, "artField", "Ljava/lang/reflect/ArtField;");
   java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
   java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
   java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J");
+  java_util_Collections_EMPTY_LIST = CacheField(env, java_util_Collections, true, "EMPTY_LIST", "Ljava/util/List;");
+  libcore_util_EmptyArray_STACK_TRACE_ELEMENT = CacheField(env, libcore_util_EmptyArray, true, "STACK_TRACE_ELEMENT", "[Ljava/lang/StackTraceElement;");
   org_apache_harmony_dalvik_ddmc_Chunk_data = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "data", "[B");
   org_apache_harmony_dalvik_ddmc_Chunk_length = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "length", "I");
   org_apache_harmony_dalvik_ddmc_Chunk_offset = CacheField(env, org_apache_harmony_dalvik_ddmc_Chunk, false, "offset", "I");
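
CacheField and CacheMethod resolve JNI IDs once at startup and abort with a full class dump when a lookup fails, so a missing or renamed member is caught immediately. The same fail-fast pattern reduced to standard JNI calls, without the ART-specific DumpClass diagnostics (CacheFieldOrDie is a hypothetical name):

    #include <jni.h>

    #include <cstdio>
    #include <cstdlib>

    // Resolve a field ID once; abort so a missing field fails at startup
    // rather than at first use.
    static jfieldID CacheFieldOrDie(JNIEnv* env, jclass c, bool is_static,
                                    const char* name, const char* signature) {
      jfieldID fid = is_static ? env->GetStaticFieldID(c, name, signature)
                               : env->GetFieldID(c, name, signature);
      if (fid == nullptr) {
        env->ExceptionClear();  // the failed lookup raised NoSuchFieldError
        fprintf(stderr, "Couldn't find field \"%s\" with signature \"%s\"\n",
                name, signature);
        abort();
      }
      return fid;
    }
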
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index b10106c..3780733 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -114,8 +114,15 @@
   static jfieldID java_lang_ThreadGroup_mainThreadGroup;
   static jfieldID java_lang_ThreadGroup_name;
   static jfieldID java_lang_ThreadGroup_systemThreadGroup;
+  static jfieldID java_lang_Throwable_cause;
+  static jfieldID java_lang_Throwable_detailMessage;
+  static jfieldID java_lang_Throwable_stackTrace;
+  static jfieldID java_lang_Throwable_stackState;
+  static jfieldID java_lang_Throwable_suppressedExceptions;
   static jfieldID java_nio_DirectByteBuffer_capacity;
   static jfieldID java_nio_DirectByteBuffer_effectiveDirectAddress;
+  static jfieldID java_util_Collections_EMPTY_LIST;
+  static jfieldID libcore_util_EmptyArray_STACK_TRACE_ELEMENT;
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_data;
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_length;
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_offset;
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 7539990..74bfb7e 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -26,6 +26,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#include "sigchain.h"
+
 #if defined(__APPLE__)
 #define _NSIG NSIG
 #define sighandler_t sig_t
@@ -81,6 +83,9 @@
 
 // User's signal handlers
 static SignalAction user_sigactions[_NSIG];
+static bool initialized;
+static void* linked_sigaction_sym;
+static void* linked_sigprocmask_sym;
 
 static void log(const char* format, ...) {
   char buf[256];
@@ -102,6 +107,7 @@
   }
 }
 
+
 // Claim a signal chain for a particular signal.
 void ClaimSignalChain(int signal, struct sigaction* oldaction) {
   CheckSignalValid(signal);
@@ -163,14 +169,17 @@
   // Will only get here if the signal chain has not been claimed.  We want
   // to pass the sigaction on to the kernel via the real sigaction in libc.
 
-  void* linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
   if (linked_sigaction_sym == nullptr) {
-    linked_sigaction_sym = dlsym(RTLD_DEFAULT, "sigaction");
-    if (linked_sigaction_sym == nullptr ||
-        linked_sigaction_sym == reinterpret_cast<void*>(sigaction)) {
-      log("Unable to find next sigaction in signal chain");
-      abort();
-    }
+    // Perform lazy initialization.
+    // This will only occur outside of a signal context since we have
+    // not been initialized and therefore cannot be within the ART
+    // runtime.
+    InitializeSignalChain();
+  }
+
+  if (linked_sigaction_sym == nullptr) {
+    log("Unable to find next sigaction in signal chain");
+    abort();
   }
 
   typedef int (*SigAction)(int, const struct sigaction*, struct sigaction*);
@@ -198,14 +207,14 @@
   // Will only get here if the signal chain has not been claimed.  We want
   // to pass the sigaction on to the kernel via the real sigaction in libc.
 
-  void* linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
   if (linked_sigaction_sym == nullptr) {
-    linked_sigaction_sym = dlsym(RTLD_DEFAULT, "sigaction");
-    if (linked_sigaction_sym == nullptr ||
-        linked_sigaction_sym == reinterpret_cast<void*>(sigaction)) {
-      log("Unable to find next sigaction in signal chain");
-      abort();
-    }
+    // Perform lazy initialization.
+    InitializeSignalChain();
+  }
+
+  if (linked_sigaction_sym == nullptr) {
+    log("Unable to find next sigaction in signal chain");
+    abort();
   }
 
   typedef int (*SigAction)(int, const struct sigaction*, struct sigaction*);
@@ -235,14 +244,14 @@
     new_set_ptr = &tmpset;
   }
 
-  void* linked_sigprocmask_sym = dlsym(RTLD_NEXT, "sigprocmask");
   if (linked_sigprocmask_sym == nullptr) {
-    linked_sigprocmask_sym = dlsym(RTLD_DEFAULT, "sigprocmask");
-    if (linked_sigprocmask_sym == nullptr ||
-        linked_sigprocmask_sym == reinterpret_cast<void*>(sigprocmask)) {
-      log("Unable to find next sigprocmask in signal chain");
-      abort();
-    }
+    // Perform lazy initialization.
+    InitializeSignalChain();
+  }
+
+  if (linked_sigprocmask_sym == nullptr) {
+    log("Unable to find next sigprocmask in signal chain");
+    abort();
   }
 
   typedef int (*SigProcMask)(int how, const sigset_t*, sigset_t*);
@@ -250,5 +259,36 @@
   return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
 }
 }   // extern "C"
+
+void InitializeSignalChain() {
+  // Warning: don't call this from within a signal context, as it calls
+  // dlsym. Calling into the dynamic linker takes locks, and if a signal
+  // arrives while one of those locks is already held, dlsym will block
+  // trying to re-enter the mutex and never return.
+  if (initialized) {
+    // Don't initialize twice.
+    return;
+  }
+  linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
+  if (linked_sigaction_sym == nullptr) {
+    linked_sigaction_sym = dlsym(RTLD_DEFAULT, "sigaction");
+    if (linked_sigaction_sym == nullptr ||
+        linked_sigaction_sym == reinterpret_cast<void*>(sigaction)) {
+      linked_sigaction_sym = nullptr;
+    }
+  }
+
+  linked_sigprocmask_sym = dlsym(RTLD_NEXT, "sigprocmask");
+  if (linked_sigprocmask_sym == nullptr) {
+    linked_sigprocmask_sym = dlsym(RTLD_DEFAULT, "sigprocmask");
+    if (linked_sigprocmask_sym == nullptr ||
+        linked_sigprocmask_sym == reinterpret_cast<void*>(sigprocmask)) {
+      linked_sigprocmask_sym = nullptr;
+    }
+  }
+  initialized = true;
+}
 }   // namespace art
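
The sigchain change hoists the dlsym lookups into InitializeSignalChain so they never run inside a signal handler, where taking the dynamic linker's locks could deadlock. The core resolution idiom in isolation (illustrative only; on glibc, RTLD_NEXT requires _GNU_SOURCE):

    #include <dlfcn.h>
    #include <signal.h>

    using SigActionFn = int (*)(int, const struct sigaction*, struct sigaction*);
    static SigActionFn g_real_sigaction = nullptr;

    // Resolve the next sigaction in link order, falling back to the default
    // namespace; reject the fallback if it merely found our own interposer.
    static void ResolveRealSigaction() {
      void* sym = dlsym(RTLD_NEXT, "sigaction");
      if (sym == nullptr) {
        sym = dlsym(RTLD_DEFAULT, "sigaction");
        if (sym == reinterpret_cast<void*>(sigaction)) {
          sym = nullptr;  // found our interposer, not the real libc entry
        }
      }
      g_real_sigaction = reinterpret_cast<SigActionFn>(sym);
    }
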
 
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
index a4ce81c..5bc4026 100644
--- a/sigchainlib/sigchain.h
+++ b/sigchainlib/sigchain.h
@@ -21,6 +21,8 @@
 
 namespace art {
 
+void InitializeSignalChain();
+
 void ClaimSignalChain(int signal, struct sigaction* oldaction);
 
 void UnclaimSignalChain(int signal);
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 7929554..e914bd9 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -14,138 +14,57 @@
  * limitations under the License.
  */
 
-#include <stdio.h>
-#include <memory>
-
-#include "class_linker.h"
-#include "dex_file-inl.h"
-#include "gc_map.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
+#include "check_reference_map_visitor.h"
 #include "jni.h"
-#include "verifier/method_verifier.h"
 
 namespace art {
 
-#define IS_IN_REF_BITMAP(ref_bitmap, reg) \
-    (((reg) < m->GetCodeItem()->registers_size_) && \
-     ((*((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
+#define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
+  int t[] = {__VA_ARGS__}; \
+  int t_size = sizeof(t) / sizeof(*t); \
+  CheckReferences(t, t_size, m->NativePcOffset(m->ToNativePc(native_pc_offset))); \
+} while (false)
 
-#define CHECK_REGS_CONTAIN_REFS(...)     \
-  do {                                   \
-    int t[] = {__VA_ARGS__};             \
-    int t_size = sizeof(t) / sizeof(*t);      \
-    for (int i = 0; i < t_size; ++i)          \
-      CHECK(IS_IN_REF_BITMAP(ref_bitmap, t[i])) \
-          << "Error: Reg @ " << i << "-th argument is not in GC map"; \
-  } while (false)
-
-struct ReferenceMap2Visitor : public StackVisitor {
-  explicit ReferenceMap2Visitor(Thread* thread)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(thread, NULL) {
-  }
+struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
+  explicit ReferenceMap2Visitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : CheckReferenceMapVisitor(thread) {}
 
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (CheckReferenceMapVisitor::VisitFrame()) {
+      return true;
+    }
     mirror::ArtMethod* m = GetMethod();
-    if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
-      return true;
-    }
-    LOG(INFO) << "At " << PrettyMethod(m, false);
-
-    NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
-
-    if (m->IsCalleeSaveMethod()) {
-      LOG(WARNING) << "no PC for " << PrettyMethod(m);
-      return true;
-    }
-
-    const uint8_t* ref_bitmap = NULL;
     std::string m_name(m->GetName());
 
     // Given the method name and the number of times the method has been called,
     // we know the Dex registers with live reference values. Assert that what we
     // find is what is expected.
     if (m_name.compare("f") == 0) {
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x03U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8);  // v8: this
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x06U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 1);  // v8: this, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x08U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0cU)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0eU)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x10U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 3, 1);  // v8: this, v3: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x13U)));
-      CHECK(ref_bitmap);
+      CHECK_REGS_CONTAIN_REFS(0x03U, 8);  // v8: this
+      CHECK_REGS_CONTAIN_REFS(0x06U, 8, 1);  // v8: this, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x08U, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x0cU, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x0eU, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x10U, 8, 3, 1);  // v8: this, v3: y, v1: x
       // v2 is added because of the instruction at DexPC 0024. Merging an Object with 0 yields Object. See:
       //   0024: move-object v3, v2
       //   0025: goto 0013
       // Detailed dex instructions for ReferenceMap.java are at the end of this function.
       // CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1);  // v8: this, v3: y, v2: y, v1: x
       // We eliminate the non-live registers at a return, so only v3 is live:
-      CHECK_REGS_CONTAIN_REFS(3);  // v3: y
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x18U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1aU)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1dU)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1fU)));
-      CHECK(ref_bitmap);
+      CHECK_REGS_CONTAIN_REFS(0x13U);  // v3: y
+      CHECK_REGS_CONTAIN_REFS(0x18U, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x1aU, 8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x1dU, 8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
       // v5 is removed from the root set because there is a "merge" operation.
       // See 0015: if-nez v2, 001f.
-      CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x21U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x27U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x29U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2cU)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2fU)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 4, 3, 2, 1);  // v8: this, v4: ex, v3: y, v2: y, v1: x
-
-      ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x32U)));
-      CHECK(ref_bitmap);
-      CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1, 0);  // v8: this, v3: y, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x1fU, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x21U, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x27U, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x29U, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x2cU, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x2fU, 8, 4, 3, 2, 1);  // v8: this, v4: ex, v3: y, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x32U, 8, 3, 2, 1, 0);  // v8: this, v3: y, v2: y, v1: x, v0: ex
     }
 
     return true;
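
The rewritten CHECK_REGS_CONTAIN_REFS relies on a standard variadic-macro idiom: capture __VA_ARGS__ in a local array so the element count falls out of sizeof. Just that idiom, self-contained, with a stub standing in for the visitor's CheckReferences:

    #include <cstdio>

    // Stand-in for CheckReferenceMapVisitor::CheckReferences.
    static void CheckReferences(const int* regs, int count, int native_pc_offset) {
      printf("pc offset 0x%x: %d registers expected live\n", native_pc_offset, count);
      (void)regs;
    }

    #define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
      int t[] = {__VA_ARGS__};                                  \
      int t_size = sizeof(t) / sizeof(*t);                      \
      CheckReferences(t, t_size, native_pc_offset);             \
    } while (false)

    int main() {
      CHECK_REGS_CONTAIN_REFS(0x06, 8, 1);  // v8 and v1 expected to hold references
      return 0;
    }
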
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 30a0d59..c40de7e 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -14,54 +14,29 @@
  * limitations under the License.
  */
 
-#include <stdio.h>
-#include <memory>
-
-#include "class_linker.h"
-#include "gc_map.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
+#include "check_reference_map_visitor.h"
 #include "jni.h"
-#include "scoped_thread_state_change.h"
 
 namespace art {
 
-#define REG(reg_bitmap, reg) \
-    (((reg) < m->GetCodeItem()->registers_size_) && \
-     ((*((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
-
-#define CHECK_REGS(...) if (!IsShadowFrame()) { \
-    int t[] = {__VA_ARGS__}; \
-    int t_size = sizeof(t) / sizeof(*t); \
-    for (int i = 0; i < t_size; ++i) \
-      CHECK(REG(reg_bitmap, t[i])) << "Error: Reg " << i << " is not in RegisterMap"; \
-  }
+#define CHECK_REGS(...) do { \
+  int t[] = {__VA_ARGS__}; \
+  int t_size = sizeof(t) / sizeof(*t); \
+  CheckReferences(t, t_size, GetNativePcOffset()); \
+} while (false)
 
 static int gJava_StackWalk_refmap_calls = 0;
 
-struct TestReferenceMapVisitor : public StackVisitor {
-  explicit TestReferenceMapVisitor(Thread* thread)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(thread, NULL) {
-  }
+class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
+ public:
+  explicit TestReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      : CheckReferenceMapVisitor(thread) {}
 
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::ArtMethod* m = GetMethod();
-    CHECK(m != NULL);
-    LOG(INFO) << "At " << PrettyMethod(m, false);
-
-    if (m->IsCalleeSaveMethod() || m->IsNative()) {
-      LOG(WARNING) << "no PC for " << PrettyMethod(m);
-      CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
+    if (CheckReferenceMapVisitor::VisitFrame()) {
       return true;
     }
-    const uint8_t* reg_bitmap = NULL;
-    if (!IsShadowFrame()) {
-      NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
-      reg_bitmap = map.FindBitMap(GetNativePcOffset());
-    }
+    mirror::ArtMethod* m = GetMethod();
     StringPiece m_name(m->GetName());
 
     // Given the method name and the number of times the method has been called,
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index f8d92cc..e907fd1 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -1,3 +1,4 @@
+b17411468 passes
 b2296099 passes
 b2302318 passes
 b2487514 passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index c089c52..8d7bf01 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -30,6 +30,7 @@
     }
 
     public static void main(String args[]) throws Exception {
+        b17411468();
         b2296099Test();
         b2302318Test();
         b2487514Test();
@@ -61,6 +62,17 @@
         minDoubleWith3ConstsTest();
     }
 
+    public static void b17411468() {
+      // b/17411468 - inline Math.round failure.
+      double d1 = 1.0;
+      double d2 = Math.round(d1);
+      if (d1 == d2) {
+        System.out.println("b17411468 passes");
+      } else {
+        System.out.println("b17411468 fails: Math.round(" + d1 + ") returned " + d2);
+      }
+    }
+
     public static double minDouble(double a, double b, double c) {
         return Math.min(Math.min(a, b), c);
     }
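
The b17411468 check above exercises an inlined Math.round(double): the result (a long in Java) is assigned back to a double, and 1.0 must round-trip exactly. The same arithmetic expressed in C++ terms, using std::llround as a rough analogue (it rounds half away from zero rather than half up, which does not matter for 1.0):

    #include <cmath>
    #include <cstdio>

    int main() {
      double d1 = 1.0;
      double d2 = static_cast<double>(std::llround(d1));  // integral round-trip
      if (d1 == d2) {
        printf("round(1.0) round-trips exactly\n");
      } else {
        printf("FAIL: round(%f) gave %f\n", d1, d2);
      }
      return 0;
    }
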
diff --git a/test/121-modifiers/build b/test/121-modifiers/build
new file mode 100644
index 0000000..d73be86
--- /dev/null
+++ b/test/121-modifiers/build
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# The classes are pre-compiled and modified with ASM.
+#
+# To reproduce, compile the source files. Asm.java needs the ASM libraries (core and tree). Then
+# run Asm, which produces Inf.out and NonInf.out. Rename these to class files and put them
+# into the classes directory (this assumes the ASM libraries are named asm.jar and asm-tree.jar):
+#
+# javac Inf.java NonInf.java Main.java
+# javac -cp asm.jar:asm-tree.jar:. Asm.java
+# java -cp asm.jar:asm-tree.jar:. Asm
+# mv Inf.out classes/Inf.class
+# mv NonInf.out classes/NonInf.class
+# mv Main.class A.class A\$B.class A\$C.class classes/
+
+${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/121-modifiers/classes/A$B.class b/test/121-modifiers/classes/A$B.class
new file mode 100644
index 0000000..bd7ebfe
--- /dev/null
+++ b/test/121-modifiers/classes/A$B.class
Binary files differ
diff --git a/test/121-modifiers/classes/A$C.class b/test/121-modifiers/classes/A$C.class
new file mode 100644
index 0000000..3ae872e
--- /dev/null
+++ b/test/121-modifiers/classes/A$C.class
Binary files differ
diff --git a/test/121-modifiers/classes/A.class b/test/121-modifiers/classes/A.class
new file mode 100644
index 0000000..d89d029
--- /dev/null
+++ b/test/121-modifiers/classes/A.class
Binary files differ
diff --git a/test/121-modifiers/classes/Inf.class b/test/121-modifiers/classes/Inf.class
new file mode 100644
index 0000000..e8dd680
--- /dev/null
+++ b/test/121-modifiers/classes/Inf.class
Binary files differ
diff --git a/test/121-modifiers/classes/Main.class b/test/121-modifiers/classes/Main.class
new file mode 100644
index 0000000..e044074
--- /dev/null
+++ b/test/121-modifiers/classes/Main.class
Binary files differ
diff --git a/test/121-modifiers/classes/NonInf.class b/test/121-modifiers/classes/NonInf.class
new file mode 100644
index 0000000..0f1e826
--- /dev/null
+++ b/test/121-modifiers/classes/NonInf.class
Binary files differ
diff --git a/test/121-modifiers/expected.txt b/test/121-modifiers/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/121-modifiers/expected.txt
diff --git a/test/121-modifiers/info.txt b/test/121-modifiers/info.txt
new file mode 100644
index 0000000..943cbf8
--- /dev/null
+++ b/test/121-modifiers/info.txt
@@ -0,0 +1 @@
+This is a test checking the modifier (access flags) handling of ART.
diff --git a/test/121-modifiers/src/Asm.java b/test/121-modifiers/src/Asm.java
new file mode 100644
index 0000000..f120622
--- /dev/null
+++ b/test/121-modifiers/src/Asm.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.objectweb.asm.*;
+import org.objectweb.asm.tree.*;
+import java.io.*;
+import java.util.*;
+
+public class Asm {
+  /*
+
+  Overall class access flags:
+
+      0x0001 |  // public
+      0x0010 |  // final
+      0x0020 |  // super
+      0x0200 |  // interface
+      0x0400 |  // abstract
+      0x1000 |  // synthetic
+      0x2000 |  // annotation
+      0x4000 ;  // enum
+
+  */
+
+  public final static int INTERFACE_DEFINED_BITS =
+      0x0001 |  // public, may be set.
+      0x0010 |  // final, must not be set.
+      0x0020 |  // super, must not be set.
+      0x0200 |  // interface, must be set.
+      0x0400 |  // abstract, must be set.
+      0x1000 |  // synthetic, may be set.
+      0x2000 |  // annotation, may be set (annotation implies interface)
+      0x4000 ;  // enum, must not be set.
+
+  public final static int CLASS_DEFINED_BITS =
+      0x0001 |  // public, may be set.
+      0x0010 |  // final, may be set.
+      0x0020 |  // super, may be set.
+      0x0200 |  // interface, must not be set.
+      0x0400 |  // abstract, may be set.
+      0x1000 |  // synthetic, may be set.
+      0x2000 |  // annotation, must not be set.
+      0x4000 ;  // enum, may be set.
+
+  public final static int FIELD_DEFINED_BITS =
+       0x0001 |  // public
+       0x0002 |  // private
+       0x0004 |  // protected
+       0x0008 |  // static
+       0x0010 |  // final
+       0x0040 |  // volatile
+       0x0080 |  // transient
+       0x1000 |  // synthetic
+       0x4000 ;  // enum
+
+  public final static int METHOD_DEFINED_BITS =
+       0x0001 |  // public
+       0x0002 |  // private
+       0x0004 |  // protected
+       0x0008 |  // static
+       0x0010 |  // final
+       0x0020 |  // synchronized
+       0x0040 |  // bridge
+       0x0080 |  // varargs
+       0x0100 |  // native
+       0x0400 |  // abstract
+       0x0800 |  // strictfp
+       0x1000 ;  // synthetic
+
+  public static void main(String args[]) throws Exception {
+    modify("Inf");
+    modify("NonInf");
+  }
+
+  private static void modify(String clazz) throws Exception {
+    ClassNode classNode = new ClassNode();
+    ClassReader cr = new ClassReader(clazz);
+    cr.accept(classNode, 0);
+
+    modify(classNode);
+
+    ClassWriter cw = new ClassWriter(0);
+    classNode.accept(cw);
+    byte[] b = cw.toByteArray();
+    OutputStream out = new FileOutputStream(clazz + ".out");
+    out.write(b, 0, b.length);
+    out.close();
+  }
+
+  private static void modify(ClassNode classNode) throws Exception {
+    int classFlagsOr = 0xFFFF;
+    // Check whether classNode is an interface or class.
+    if ((classNode.access & Opcodes.ACC_INTERFACE) == 0) {
+      classFlagsOr ^= CLASS_DEFINED_BITS;
+    } else {
+      classFlagsOr ^= INTERFACE_DEFINED_BITS;
+    }
+    classNode.access |= classFlagsOr;
+
+    // Fields.
+    int fieldFlagsOr = 0xFFFF ^ FIELD_DEFINED_BITS;
+    for (FieldNode fieldNode : (List<FieldNode>) classNode.fields) {
+      fieldNode.access |= fieldFlagsOr;
+    }
+
+    // Methods.
+    int methodFlagsOr = 0xFFFF ^ METHOD_DEFINED_BITS;
+    for (MethodNode methodNode : (List<MethodNode>) classNode.methods) {
+      methodNode.access |= methodFlagsOr;
+    }
+  }
+}
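
Asm forces on every access-flag bit outside the defined set, so the runtime under test must mask the undefined ones off before reporting modifiers through reflection. The mask arithmetic in isolation, using the same field constants as the test:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kFieldDefinedBits =
          0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0010 |  // public..final
          0x0040 | 0x0080 | 0x1000 | 0x4000;  // volatile, transient, synthetic, enum
      const uint32_t kUndefinedBits = 0xFFFF ^ kFieldDefinedBits;

      uint32_t access = 0x0001 | 0x0010;           // public final, as javac wrote it
      uint32_t mangled = access | kUndefinedBits;  // what Asm writes to the class file

      // A conforming runtime reports only the defined bits back.
      uint32_t reported = mangled & kFieldDefinedBits;
      assert((reported & kUndefinedBits) == 0);
      assert(reported == access);
      return 0;
    }
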
diff --git a/test/121-modifiers/src/Inf.java b/test/121-modifiers/src/Inf.java
new file mode 100644
index 0000000..1dadae0
--- /dev/null
+++ b/test/121-modifiers/src/Inf.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Inf {
+
+  public final int I = 0;
+
+}
\ No newline at end of file
diff --git a/test/121-modifiers/src/Main.java b/test/121-modifiers/src/Main.java
new file mode 100644
index 0000000..e21b789
--- /dev/null
+++ b/test/121-modifiers/src/Main.java
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// These classes are to check the additional flags for inner classes.
+class A {
+  private static class B {
+  }
+  protected static interface C {
+  }
+}
+
+public class Main {
+  public final static int INTERFACE_DEFINED_BITS =
+      0x0001 |  // public, may be set.
+      0x0002 |  // private, may be flagged by inner class.
+      0x0004 |  // protected, may be flagged by inner class.
+      0x0008 |  // static, may be flagged by inner class.
+      0x0010 |  // final, must not be set.
+      0x0020 |  // super, must not be set.
+      0x0200 |  // interface, must be set.
+      0x0400 |  // abstract, must be set.
+      0x1000 |  // synthetic, may be set.
+      0x2000 |  // annotation, may be set (annotation implies interface)
+      0x4000 ;  // enum, must not be set.
+
+  public final static int CLASS_DEFINED_BITS =
+      0x0001 |  // public, may be set.
+      0x0002 |  // private, may be flagged by inner class.
+      0x0004 |  // protected, may be flagged by inner class.
+      0x0008 |  // static, may be flagged by inner class.
+      0x0010 |  // final, may be set.
+      0x0020 |  // super, may be set.
+      0x0200 |  // interface, must not be set.
+      0x0400 |  // abstract, may be set.
+      0x1000 |  // synthetic, may be set.
+      0x2000 |  // annotation, must not be set.
+      0x4000 ;  // enum, may be set.
+
+  public final static int FIELD_DEFINED_BITS =
+       0x0001 |  // public
+       0x0002 |  // private
+       0x0004 |  // protected
+       0x0008 |  // static
+       0x0010 |  // final
+       0x0040 |  // volatile
+       0x0080 |  // transient
+       0x1000 |  // synthetic
+       0x4000 ;  // enum
+
+  public final static int METHOD_DEFINED_BITS =
+       0x0001 |  // public
+       0x0002 |  // private
+       0x0004 |  // protected
+       0x0008 |  // static
+       0x0010 |  // final
+       0x0020 |  // synchronized
+       0x0040 |  // bridge
+       0x0080 |  // varargs
+       0x0100 |  // native
+       0x0400 |  // abstract
+       0x0800 |  // strictfp
+       0x1000 ;  // synthetic
+
+  public static void main(String args[]) throws Exception {
+    check("Inf");
+    check("NonInf");
+    check("A");
+    check("A$B");
+  }
+
+  private static void check(String className) throws Exception {
+    Class<?> clazz = Class.forName(className);
+    if (className.equals("Inf")) {
+      if (!clazz.isInterface()) {
+        throw new RuntimeException("Expected an interface.");
+      }
+      int undefinedBits = 0xFFFF ^ INTERFACE_DEFINED_BITS;
+      if ((clazz.getModifiers() & undefinedBits) != 0) {
+        System.out.println("Clazz.getModifiers(): " + Integer.toBinaryString(clazz.getModifiers()));
+        System.out.println("INTERFACE_DEF_BITS: " + Integer.toBinaryString(INTERFACE_DEFINED_BITS));
+        throw new RuntimeException("Undefined bits for an interface: " + className);
+      }
+    } else {
+      if (clazz.isInterface()) {
+        throw new RuntimeException("Expected a class.");
+      }
+      int undefinedBits = 0xFFFF ^ CLASS_DEFINED_BITS;
+      if ((clazz.getModifiers() & undefinedBits) != 0) {
+        System.out.println("Clazz.getModifiers(): " + Integer.toBinaryString(clazz.getModifiers()));
+        System.out.println("CLASS_DEF_BITS: " + Integer.toBinaryString(CLASS_DEFINED_BITS));
+        throw new RuntimeException("Undefined bits for a class: " + className);
+      }
+    }
+
+    // Check fields.
+    for (java.lang.reflect.Field f : clazz.getDeclaredFields()) {
+      String name = f.getName();
+      int undefinedBits = 0xFFFF ^ FIELD_DEFINED_BITS;
+      if ((f.getModifiers() & undefinedBits) != 0) {
+        System.out.println("f.getModifiers(): " + Integer.toBinaryString(f.getModifiers()));
+        System.out.println("FIELD_DEF_BITS: " + Integer.toBinaryString(FIELD_DEFINED_BITS));
+        throw new RuntimeException("Unexpected field bits: " + name);
+      }
+      if (name.equals("I")) {
+        // Interface field, just check generically.
+      } else {
+        // Check the name, see that the corresponding bit is set.
+        int bitmask = getFieldMask(name);
+        if ((bitmask & f.getModifiers()) == 0) {
+          throw new RuntimeException("Expected field bit not set.");
+        }
+      }
+    }
+
+    // Check methods.
+    for (java.lang.reflect.Method m : clazz.getDeclaredMethods()) {
+      String name = m.getName();
+      int undefinedBits = 0xFFFF ^ METHOD_DEFINED_BITS;
+      if ((m.getModifiers() & undefinedBits) != 0) {
+          System.out.println("m.getModifiers(): " + Integer.toBinaryString(m.getModifiers()));
+          System.out.println("METHOD_DEF_BITS: " + Integer.toBinaryString(METHOD_DEFINED_BITS));
+        throw new RuntimeException("Unexpected method bits: " + name);
+      }
+      // Check the name, see that the corresponding bit is set.
+      int bitmask = getMethodMask(name);
+      if ((bitmask & m.getModifiers()) == 0) {
+        throw new RuntimeException("Expected method bit not set.");
+      }
+    }
+  }
+
+  private static int getFieldMask(String name) {
+    int index = name.indexOf("Field");
+    if (index > 0) {
+      String shortS = name.substring(0, index);
+      if (shortS.equals("public")) {
+        return 0x0001;
+      }
+      if (shortS.equals("private")) {
+        return 0x0002;
+      }
+      if (shortS.equals("protected")) {
+        return 0x0004;
+      }
+      if (shortS.equals("static")) {
+        return 0x0008;
+      }
+      if (shortS.equals("transient")) {
+        return 0x0080;
+      }
+      if (shortS.equals("volatile")) {
+        return 0x0040;
+      }
+      if (shortS.equals("final")) {
+        return 0x0010;
+      }
+    }
+    throw new RuntimeException("Unexpected field name " + name);
+  }
+
+  private static int getMethodMask(String name) {
+    int index = name.indexOf("Method");
+    if (index > 0) {
+      String shortS = name.substring(0, index);
+      if (shortS.equals("public")) {
+        return 0x0001;
+      }
+      if (shortS.equals("private")) {
+        return 0x0002;
+      }
+      if (shortS.equals("protected")) {
+        return 0x0004;
+      }
+      if (shortS.equals("static")) {
+        return 0x0008;
+      }
+      if (shortS.equals("synchronized")) {
+        return 0x0020;
+      }
+      if (shortS.equals("varargs")) {
+        return 0x0080;
+      }
+      if (shortS.equals("final")) {
+        return 0x0010;
+      }
+      if (shortS.equals("native")) {
+        return 0x0100;
+      }
+      if (shortS.equals("abstract")) {
+        return 0x0400;
+      }
+      if (shortS.equals("strictfp")) {
+        return 0x0800;
+      }
+    }
+    throw new RuntimeException("Unexpected method name " + name);
+  }
+}
diff --git a/test/121-modifiers/src/NonInf.java b/test/121-modifiers/src/NonInf.java
new file mode 100644
index 0000000..52e4882
--- /dev/null
+++ b/test/121-modifiers/src/NonInf.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public abstract class NonInf {
+
+  public int publicField;
+  private int privateField;
+  protected int protectedField;
+  static int staticField;
+  transient int transientField;
+  volatile int volatileField;
+  final int finalField;
+
+  public NonInf() {
+    publicField = 0;
+    privateField = 1;
+    protectedField = 2;
+    staticField = 3;
+    transientField = 4;
+    volatileField = 5;
+    finalField = 6;
+  }
+
+  public native void nativeMethod();
+
+  private int privateMethod() {
+    return 0;
+  }
+
+  protected int protectedMethod() {
+    return 0;
+  }
+
+  public int publicMethod() {
+    return 0;
+  }
+
+  public abstract int abstractMethod();
+
+  public synchronized int synchronizedMethod() {
+    return 0;
+  }
+
+  public static int staticMethod() {
+    return 0;
+  }
+
+  public strictfp double strictfpMethod() {
+    return 0.0;
+  }
+
+  public int varargsMethod(Object... args) {
+    return 0;
+  }
+
+  public final int finalMethod() {
+    return 0;
+  }
+}
\ No newline at end of file
diff --git a/test/121-simple-suspend-check/expected.txt b/test/121-simple-suspend-check/expected.txt
new file mode 100644
index 0000000..7ef22e9
--- /dev/null
+++ b/test/121-simple-suspend-check/expected.txt
@@ -0,0 +1 @@
+PASS
diff --git a/test/121-simple-suspend-check/info.txt b/test/121-simple-suspend-check/info.txt
new file mode 100644
index 0000000..61611f9
--- /dev/null
+++ b/test/121-simple-suspend-check/info.txt
@@ -0,0 +1 @@
+Simple test to ensure the compiler emits suspend checks on loops.
diff --git a/test/121-simple-suspend-check/src/Main.java b/test/121-simple-suspend-check/src/Main.java
new file mode 100644
index 0000000..80daf37
--- /dev/null
+++ b/test/121-simple-suspend-check/src/Main.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String args[]) {
+    SpinThread thread = new SpinThread();
+    thread.setDaemon(true);
+    thread.start();
+    Runtime.getRuntime().gc();
+    try {
+      Thread.sleep(3000);
+    } catch (InterruptedException ie) { /* ignore */ }
+    Runtime.getRuntime().gc();
+    System.out.println("PASS");
+  }
+}
+
+class SpinThread extends Thread {
+  public void run() {
+    while (true) {}
+  }
+}
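
A collector can only run once every thread has reached a safepoint, so if the compiler failed to emit a suspend check in the empty while (true) body, the spinning thread would never yield and the second gc() above would block until the external timeout fires. A harness could also fail fast on its own; a minimal sketch, reusing the SpinThread class above, with the class name and the ten-second budget being assumptions of this sketch:

// Hypothetical harness variant (not part of the test): detect the hang
// directly instead of relying on the outer "timeout" wrapper.
public class GcWatchdog {
  public static void main(String[] args) throws InterruptedException {
    SpinThread spinner = new SpinThread();
    spinner.setDaemon(true);
    spinner.start();
    Thread gcThread = new Thread(new Runnable() {
      public void run() {
        Runtime.getRuntime().gc();
      }
    });
    gcThread.start();
    gcThread.join(10000);  // Give the GC ten seconds to suspend all threads.
    if (gcThread.isAlive()) {
      throw new AssertionError("GC stuck: loop compiled without a suspend check?");
    }
    System.out.println("PASS");
  }
}
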
diff --git a/test/122-npe/expected.txt b/test/122-npe/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/122-npe/expected.txt
diff --git a/test/122-npe/info.txt b/test/122-npe/info.txt
new file mode 100644
index 0000000..eef46d8
--- /dev/null
+++ b/test/122-npe/info.txt
@@ -0,0 +1 @@
+Test that our NPE checks and stack traces work.
diff --git a/test/122-npe/src/Main.java b/test/122-npe/src/Main.java
new file mode 100644
index 0000000..2fdcb9c
--- /dev/null
+++ b/test/122-npe/src/Main.java
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test that null pointer exceptions are thrown by the VM.
+ */
+public class Main {
+  private int f;
+  public static void main(String[] args) {
+    methodOne();
+  }
+
+  static void methodOne() {
+    methodTwo();
+  }
+
+  private int callSpecial() {
+    return f;
+  }
+
+  final int callFinal() {
+    return f;
+  }
+
+  static void methodTwo() {
+    NullPointerException npe = null;
+
+    int thisLine = 41;
+
+    new Object().getClass(); // Ensure compiled.
+    try {
+      ((Object) null).getClass();
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 4);
+
+    new Main().callSpecial();  // Ensure compiled.
+    try {
+      ((Main) null).callSpecial();  // Test invokespecial.
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 8);
+
+    new Main().callFinal();  // Ensure compiled.
+    try {
+      ((Main) null).callFinal();  // Test invokevirtual on final.
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 8);
+
+    try {
+      ((Value) null).objectField.toString();
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((Value) null).intField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useFloat(((Value) null).floatField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useLong(((Value) null).longField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useDouble(((Value) null).doubleField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).objectField = "Fisk";
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).intField = 42;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).floatField = 42.0F;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).longField = 42L;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).doubleField = 42.0d;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((Value) null).byteField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      if (((Value) null).booleanField) { }
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((Value) null).charField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((Value) null).shortField);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).byteField = 42;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).booleanField = true;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).charField = '\u0042';
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Value) null).shortField = 42;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Object[]) null)[0].toString();
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((int[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useFloat(((float[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useLong(((long[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useDouble(((double[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((Object[]) null)[0] = "Fisk";
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((int[]) null)[0] = 42;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((float[]) null)[0] = 42.0F;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((long[]) null)[0] = 42L;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((double[]) null)[0] = 42.0d;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((byte[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      if (((boolean[]) null)[0]) { }
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((char[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((short[]) null)[0]);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((byte[]) null)[0] = 42;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((boolean[]) null)[0] = true;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((char[]) null)[0] = '\u0042';
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      ((short[]) null)[0] = 42;
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((Object[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((int[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((float[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((long[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((double[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((byte[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((boolean[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((char[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      useInt(((short[]) null).length);
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 7);
+
+    try {
+      Interface i = null;
+      i.methodInterface();  // Test null on invokeinterface.
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 8);
+
+    try {
+      Object o = null;
+      o.toString();  // Test null on invokevirtual.
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 8);
+
+    npe = null;
+    try {
+      String s = null;
+      try {
+        throw new AssertionError();
+      } finally {
+        // Cause an implicit NPE.
+        s.getClass();
+      }
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 13);
+
+    npe = null;
+    try {
+      String s = null;
+      try {
+        throw new AssertionError();
+      } catch (AssertionError ex) {
+      }
+      s.getClass();
+    } catch (NullPointerException e) {
+      npe = e;
+    }
+    check(npe, thisLine += 14);
+  }
+
+  static void check(NullPointerException npe, int firstLine) {
+    final boolean debug = false;
+    if (debug) {
+      System.out.print("Got to line ");
+      System.out.print(firstLine);
+      System.out.println();
+    }
+    StackTraceElement[] trace = npe.getStackTrace();
+    checkElement(trace[0], "Main", "methodTwo", "Main.java", firstLine);
+    checkElement(trace[1], "Main", "methodOne", "Main.java", 27);
+    checkElement(trace[2], "Main", "main", "Main.java", 23);
+  }
+
+  static void checkElement(StackTraceElement element,
+                           String declaringClass, String methodName,
+                           String fileName, int lineNumber) {
+    assertEquals(declaringClass, element.getClassName());
+    assertEquals(methodName, element.getMethodName());
+    assertEquals(fileName, element.getFileName());
+    assertEquals(lineNumber, element.getLineNumber());
+  }
+
+  static void assertEquals(Object expected, Object actual) {
+    if (!expected.equals(actual)) {
+      String msg = "Expected \"" + expected + "\" but got \"" + actual + "\"";
+      throw new AssertionError(msg);
+    }
+  }
+
+  static void assertEquals(int expected, int actual) {
+    if (expected != actual) {
+      throw new AssertionError("Expected " + expected + " got " + actual);
+    }
+  }
+
+  interface Interface {
+    void methodInterface();
+  }
+
+  static void useInt(int i) {
+  }
+
+  static void useFloat(float f) {
+  }
+
+  static void useDouble(double d) {
+  }
+
+  static void useLong(long l) {
+  }
+
+  static class Value {
+    Object objectField;
+    int intField;
+    float floatField;
+    long longField;
+    double doubleField;
+    byte byteField;
+    boolean booleanField;
+    char charField;
+    short shortField;
+  }
+}
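
The thisLine += N bookkeeping is deliberately coupled to the source layout: every edit to methodTwo() must keep the increments in sync with the actual line numbers. A layout-independent alternative, shown here only as a hypothetical helper (the name and the fixed stack index are assumptions; some VMs may elide frames), would derive the line at runtime:

// Hypothetical helper (not used by the test): return the source line of the
// call site, so each expected line needs only a small fixed offset to the
// faulting statement instead of hand-counted increments.
static int currentLine() {
  // Index 0 is this method; index 1 is the caller.
  return new Throwable().getStackTrace()[1].getLineNumber();
}

A call such as check(npe, currentLine() minus a small fixed offset), placed right after each catch block, would then track the faulting statement without manual counting.
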
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
index 2c6d1c2..07c407b 100644
--- a/test/401-optimizing-compiler/src/Main.java
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -97,6 +97,11 @@
     if (exception == null) {
       throw new Error("Missing NullPointerException");
     }
+
+    result = $opt$InvokeVirtualMethod();
+    if (result != 42) {
+      throw new Error("Unexpected result: " + result);
+    }
   }
 
   public static void invokePrivate() {
@@ -205,5 +210,13 @@
     m.o = new Main();
   }
 
+  public static int $opt$InvokeVirtualMethod() {
+    return new Main().virtualMethod();
+  }
+
+  public int virtualMethod() {
+    return 42;
+  }
+
   Object o;
 }
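
$opt$InvokeVirtualMethod() exercises only a monomorphic invokevirtual on a freshly allocated Main (the $opt$ prefix is what routes a method to the optimizing compiler in this test). A natural follow-up, sketched below as a hypothetical extension rather than part of this change, would route the same call through an overriding subclass so the dispatch is actually polymorphic:

// Hypothetical extension (not in this change): verify that invokevirtual
// dispatches to the override when the static receiver type is Main.
class SubMain extends Main {
  public int virtualMethod() {
    return 43;
  }
}

// Added to Main alongside the existing $opt$ methods:
public static int $opt$InvokeVirtualMethodOnSub() {
  Main m = new SubMain();
  return m.virtualMethod();  // Expected to return 43, not 42.
}
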
diff --git a/test/etc/host-run-test-jar b/test/etc/host-run-test-jar
index 49aa912..d2b3fb1 100755
--- a/test/etc/host-run-test-jar
+++ b/test/etc/host-run-test-jar
@@ -27,6 +27,8 @@
 DEX2OAT=""
 FALSE_BIN="/bin/false"
 HAVE_IMAGE="y"
+TIME_OUT="y"
+TIME_OUT_VALUE=5m
 exe="${ANDROID_HOST_OUT}/bin/dalvikvm32"
 main="Main"
 
@@ -65,10 +67,12 @@
         shift
     elif [ "x$1" = "x--debug" ]; then
         DEBUGGER="y"
+        TIME_OUT="n"
         shift
     elif [ "x$1" = "x--gdb" ]; then
         GDB="y"
         DEV_MODE="y"
+        TIME_OUT="n"
         shift
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
@@ -199,6 +203,10 @@
 
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 cmdline="$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $PATCHOAT $DEX2OAT $JNI_OPTS $FLAGS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar $main"
+if [ "$TIME_OUT" = "y" ]; then
+  # Add timeout command if time out is desired.
+  cmdline="timeout $TIME_OUT_VALUE $cmdline"
+fi
 if [ "$DEV_MODE" = "y" ]; then
   if [ "$PREBUILD" = "y" ]; then
     echo "$mkdir_cmd && $prebuild_cmd && $cmdline"
@@ -221,5 +229,9 @@
   # If we are execing /bin/false we might not be on the same ISA as libsigchain.so
   # ld.so will helpfully warn us of this. Unfortunately this messes up our error
   # checking so we will just filter out the error with a grep.
-  LD_PRELOAD=libsigchain.so $cmdline "$@" 2>&1 | grep -v -E "^ERROR: ld\.so: object '.+\.so' from LD_PRELOAD cannot be preloaded: ignored\.$"
+  LD_PRELOAD=libsigchain.so $cmdline "$@" 2>&1 | grep -v -E "^ERROR: ld\.so: object '.+\.so' from LD_PRELOAD cannot be preloaded.*: ignored\.$"
+  # Add extra detail if time out is enabled.
+  if [ "${PIPESTATUS[0]}" = "124" ] && [ "$TIME_OUT" = "y" ]; then
+    echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+  fi
 fi
\ No newline at end of file