Merge "Rosalloc fast path in assembly for arm64."
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index a561c5f..c53479c 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -89,7 +89,11 @@
 HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
 TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
 
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
 # Classpath for Jack compilation: we only need core-libart.
-HOST_JACK_CLASSPATH   := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
-TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
+HOST_JACK_CLASSPATH_DEPENDENCIES   := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
+HOST_JACK_CLASSPATH                := $(foreach dep,$(HOST_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
+TARGET_JACK_CLASSPATH              := $(foreach dep,$(TARGET_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+endif
 endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 6295e15..fdfd94c 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -230,6 +230,7 @@
   runtime/jni_internal_test.cc \
   runtime/proxy_test.cc \
   runtime/reflection_test.cc \
+  compiler/compiled_method_test.cc \
   compiler/dex/gvn_dead_code_elimination_test.cc \
   compiler/dex/global_value_numbering_test.cc \
   compiler/dex/local_value_numbering_test.cc \
@@ -237,6 +238,7 @@
   compiler/dex/mir_optimization_test.cc \
   compiler/dex/type_inference_test.cc \
   compiler/dwarf/dwarf_test.cc \
+  compiler/driver/compiled_method_storage_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
   compiler/image_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 20c8023..aaac126 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -54,6 +54,7 @@
 	dex/verification_results.cc \
 	dex/vreg_analysis.cc \
 	dex/quick_compiler_callbacks.cc \
+	driver/compiled_method_storage.cc \
 	driver/compiler_driver.cc \
 	driver/compiler_options.cc \
 	driver/dex_compilation_unit.cc \
@@ -78,6 +79,7 @@
 	optimizing/instruction_simplifier.cc \
 	optimizing/intrinsics.cc \
 	optimizing/licm.cc \
+	optimizing/load_store_elimination.cc \
 	optimizing/locations.cc \
 	optimizing/nodes.cc \
 	optimizing/optimization.cc \
@@ -87,13 +89,13 @@
 	optimizing/primitive_type_propagation.cc \
 	optimizing/reference_type_propagation.cc \
 	optimizing/register_allocator.cc \
+	optimizing/sharpening.cc \
 	optimizing/side_effects_analysis.cc \
 	optimizing/ssa_builder.cc \
 	optimizing/ssa_liveness_analysis.cc \
 	optimizing/ssa_phi_elimination.cc \
 	optimizing/stack_map_stream.cc \
 	trampolines/trampoline_compiler.cc \
-	utils/arena_bit_vector.cc \
 	utils/assembler.cc \
 	utils/swap_space.cc \
 	buffered_output_stream.cc \
@@ -152,6 +154,7 @@
 	dex/quick/mips/target_mips.cc \
 	dex/quick/mips/utility_mips.cc \
 	jni/quick/mips/calling_convention_mips.cc \
+	optimizing/code_generator_mips.cc \
 	utils/mips/assembler_mips.cc \
 	utils/mips/managed_register_mips.cc \
 
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 5e345db..6fd4575 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -51,7 +51,7 @@
     dwarf::WriteDebugFrameCIE(is64bit, dwarf::DW_EH_PE_absptr, dwarf::Reg(8),
                               initial_opcodes, kCFIFormat, &debug_frame_data_);
     std::vector<uintptr_t> debug_frame_patches;
-    dwarf::WriteDebugFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi,
+    dwarf::WriteDebugFrameFDE(is64bit, 0, 0, actual_asm.size(), ArrayRef<const uint8_t>(actual_cfi),
                               kCFIFormat, &debug_frame_data_, &debug_frame_patches);
     ReformatCfi(Objdump(false, "-W"), &lines);
     // Pretty-print assembly.
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 58a2f96..c37ceca 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -54,22 +54,22 @@
                                                             method->GetDexMethodIndex()));
   }
   if (compiled_method != nullptr) {
-    const SwapVector<uint8_t>* code = compiled_method->GetQuickCode();
-    uint32_t code_size = code->size();
+    ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
+    uint32_t code_size = code.size();
     CHECK_NE(0u, code_size);
-    const SwapVector<uint8_t>* vmap_table = compiled_method->GetVmapTable();
-    uint32_t vmap_table_offset = vmap_table->empty() ? 0u
-        : sizeof(OatQuickMethodHeader) + vmap_table->size();
-    const SwapVector<uint8_t>* mapping_table = compiled_method->GetMappingTable();
-    bool mapping_table_used = mapping_table != nullptr && !mapping_table->empty();
-    size_t mapping_table_size = mapping_table_used ? mapping_table->size() : 0U;
+    ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
+    uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+        : sizeof(OatQuickMethodHeader) + vmap_table.size();
+    ArrayRef<const uint8_t> mapping_table = compiled_method->GetMappingTable();
+    bool mapping_table_used = !mapping_table.empty();
+    size_t mapping_table_size = mapping_table.size();
     uint32_t mapping_table_offset = !mapping_table_used ? 0u
-        : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size;
-    const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
-    bool gc_map_used = gc_map != nullptr && !gc_map->empty();
-    size_t gc_map_size = gc_map_used ? gc_map->size() : 0U;
+        : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size;
+    ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap();
+    bool gc_map_used = !gc_map.empty();
+    size_t gc_map_size = gc_map.size();
     uint32_t gc_map_offset = !gc_map_used ? 0u
-        : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size + gc_map_size;
+        : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size;
     OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
                                        compiled_method->GetFrameSizeInBytes(),
                                        compiled_method->GetCoreSpillMask(),
@@ -77,25 +77,30 @@
 
     header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
     std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
-    size_t size = sizeof(method_header) + code_size + vmap_table->size() + mapping_table_size +
-        gc_map_size;
-    size_t code_offset = compiled_method->AlignCode(size - code_size);
-    size_t padding = code_offset - (size - code_size);
-    chunk->reserve(padding + size);
+    const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
+    const size_t size =
+        gc_map_size + mapping_table_size + vmap_table.size() + sizeof(method_header) + code_size;
+    chunk->reserve(size + max_padding);
     chunk->resize(sizeof(method_header));
     memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
-    chunk->insert(chunk->begin(), vmap_table->begin(), vmap_table->end());
+    chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
     if (mapping_table_used) {
-      chunk->insert(chunk->begin(), mapping_table->begin(), mapping_table->end());
+      chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
     }
     if (gc_map_used) {
-      chunk->insert(chunk->begin(), gc_map->begin(), gc_map->end());
+      chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
     }
+    chunk->insert(chunk->end(), code.begin(), code.end());
+    CHECK_EQ(chunk->size(), size);
+    const void* unaligned_code_ptr = chunk->data() + (size - code_size);
+    size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
+    size_t padding = compiled_method->AlignCode(offset) - offset;
+    // Make sure no resizing takes place.
+    CHECK_GE(chunk->capacity(), chunk->size() + padding);
     chunk->insert(chunk->begin(), padding, 0);
-    chunk->insert(chunk->end(), code->begin(), code->end());
-    CHECK_EQ(padding + size, chunk->size());
-    const void* code_ptr = &(*chunk)[code_offset];
-    MakeExecutable(code_ptr, code->size());
+    const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
+    CHECK_EQ(code_ptr, static_cast<const void*>(chunk->data() + (chunk->size() - code_size)));
+    MakeExecutable(code_ptr, code.size());
     const void* method_code = CompiledMethod::CodePointer(code_ptr,
                                                           compiled_method->GetInstructionSet());
     LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 74ef35e..9551d22 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -15,27 +15,22 @@
  */
 
 #include "compiled_method.h"
+
+#include "driver/compiled_method_storage.h"
 #include "driver/compiler_driver.h"
+#include "utils/swap_space.h"
 
 namespace art {
 
 CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
-                           const ArrayRef<const uint8_t>& quick_code, bool owns_code_array)
-    : compiler_driver_(compiler_driver), instruction_set_(instruction_set),
-      owns_code_array_(owns_code_array), quick_code_(nullptr) {
-  if (owns_code_array_) {
-    // If we are supposed to own the code, don't deduplicate it.
-    quick_code_ = new SwapVector<uint8_t>(quick_code.begin(), quick_code.end(),
-                                          compiler_driver_->GetSwapSpaceAllocator());
-  } else {
-    quick_code_ = compiler_driver_->DeduplicateCode(quick_code);
-  }
+                           const ArrayRef<const uint8_t>& quick_code)
+    : compiler_driver_(compiler_driver),
+      instruction_set_(instruction_set),
+      quick_code_(compiler_driver_->GetCompiledMethodStorage()->DeduplicateCode(quick_code)) {
 }
 
 CompiledCode::~CompiledCode() {
-  if (owns_code_array_) {
-    delete quick_code_;
-  }
+  compiler_driver_->GetCompiledMethodStorage()->ReleaseCode(quick_code_);
 }
 
 bool CompiledCode::operator==(const CompiledCode& rhs) const {
@@ -104,59 +99,28 @@
   }
 }
 
-const std::vector<uint32_t>& CompiledCode::GetOatdataOffsetsToCompliledCodeOffset() const {
-  CHECK_NE(0U, oatdata_offsets_to_compiled_code_offset_.size());
-  return oatdata_offsets_to_compiled_code_offset_;
-}
-
-void CompiledCode::AddOatdataOffsetToCompliledCodeOffset(uint32_t offset) {
-  oatdata_offsets_to_compiled_code_offset_.push_back(offset);
-}
-
 CompiledMethod::CompiledMethod(CompilerDriver* driver,
                                InstructionSet instruction_set,
                                const ArrayRef<const uint8_t>& quick_code,
                                const size_t frame_size_in_bytes,
                                const uint32_t core_spill_mask,
                                const uint32_t fp_spill_mask,
-                               DefaultSrcMap* src_mapping_table,
+                               const ArrayRef<const SrcMapElem>& src_mapping_table,
                                const ArrayRef<const uint8_t>& mapping_table,
                                const ArrayRef<const uint8_t>& vmap_table,
                                const ArrayRef<const uint8_t>& native_gc_map,
                                const ArrayRef<const uint8_t>& cfi_info,
                                const ArrayRef<const LinkerPatch>& patches)
-    : CompiledCode(driver, instruction_set, quick_code, !driver->DedupeEnabled()),
-      owns_arrays_(!driver->DedupeEnabled()),
+    : CompiledCode(driver, instruction_set, quick_code),
       frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
       fp_spill_mask_(fp_spill_mask),
-      patches_(patches.begin(), patches.end(), driver->GetSwapSpaceAllocator()) {
-  if (owns_arrays_) {
-    if (src_mapping_table == nullptr) {
-      src_mapping_table_ = new SwapSrcMap(driver->GetSwapSpaceAllocator());
-    } else {
-      src_mapping_table_ = new SwapSrcMap(src_mapping_table->begin(), src_mapping_table->end(),
-                                          driver->GetSwapSpaceAllocator());
-    }
-    mapping_table_ = mapping_table.empty() ?
-        nullptr : new SwapVector<uint8_t>(mapping_table.begin(), mapping_table.end(),
-                                          driver->GetSwapSpaceAllocator());
-    vmap_table_ = new SwapVector<uint8_t>(vmap_table.begin(), vmap_table.end(),
-                                          driver->GetSwapSpaceAllocator());
-    gc_map_ = native_gc_map.empty() ? nullptr :
-        new SwapVector<uint8_t>(native_gc_map.begin(), native_gc_map.end(),
-                                driver->GetSwapSpaceAllocator());
-    cfi_info_ = cfi_info.empty() ? nullptr :
-        new SwapVector<uint8_t>(cfi_info.begin(), cfi_info.end(), driver->GetSwapSpaceAllocator());
-  } else {
-    src_mapping_table_ = src_mapping_table == nullptr ?
-        driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
-        driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(*src_mapping_table));
-    mapping_table_ = mapping_table.empty() ?
-        nullptr : driver->DeduplicateMappingTable(mapping_table);
-    vmap_table_ = driver->DeduplicateVMapTable(vmap_table);
-    gc_map_ = native_gc_map.empty() ? nullptr : driver->DeduplicateGCMap(native_gc_map);
-    cfi_info_ = cfi_info.empty() ? nullptr : driver->DeduplicateCFIInfo(cfi_info);
-  }
+      src_mapping_table_(
+          driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)),
+      mapping_table_(driver->GetCompiledMethodStorage()->DeduplicateMappingTable(mapping_table)),
+      vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
+      gc_map_(driver->GetCompiledMethodStorage()->DeduplicateGCMap(native_gc_map)),
+      cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
+      patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
 }
 
 CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
@@ -166,13 +130,13 @@
     const size_t frame_size_in_bytes,
     const uint32_t core_spill_mask,
     const uint32_t fp_spill_mask,
-    DefaultSrcMap* src_mapping_table,
+    const ArrayRef<const SrcMapElem>& src_mapping_table,
     const ArrayRef<const uint8_t>& mapping_table,
     const ArrayRef<const uint8_t>& vmap_table,
     const ArrayRef<const uint8_t>& native_gc_map,
     const ArrayRef<const uint8_t>& cfi_info,
     const ArrayRef<const LinkerPatch>& patches) {
-  SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
+  SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
   CompiledMethod* ret = alloc.allocate(1);
   alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
                   fp_spill_mask, src_mapping_table, mapping_table, vmap_table, native_gc_map,
@@ -180,22 +144,20 @@
   return ret;
 }
 
-
-
 void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) {
-  SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
+  SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
   alloc.destroy(m);
   alloc.deallocate(m, 1);
 }
 
 CompiledMethod::~CompiledMethod() {
-  if (owns_arrays_) {
-    delete src_mapping_table_;
-    delete mapping_table_;
-    delete vmap_table_;
-    delete gc_map_;
-    delete cfi_info_;
-  }
+  CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
+  storage->ReleaseLinkerPatches(patches_);
+  storage->ReleaseCFIInfo(cfi_info_);
+  storage->ReleaseGCMap(gc_map_);
+  storage->ReleaseVMapTable(vmap_table_);
+  storage->ReleaseMappingTable(mapping_table_);
+  storage->ReleaseSrcMappingTable(src_mapping_table_);
 }
 
 }  // namespace art
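
The constructor/destructor pair above is now symmetric: every table acquired through a Deduplicate*() call is handed back through the matching Release*() call, in reverse order of acquisition. With deduplication enabled the release is a no-op and the dedupe set retains the canonical copy; with it disabled, each compiled method owns a private swap-space copy that the release frees. A hedged sketch of the same pairing as a scoped holder (ScopedDedupedCode is hypothetical, not part of this change):

    // Hypothetical RAII wrapper enforcing the Deduplicate/Release pairing that
    // CompiledMethod's constructor and destructor implement field by field.
    class ScopedDedupedCode {
     public:
      ScopedDedupedCode(CompiledMethodStorage* storage, ArrayRef<const uint8_t> code)
          : storage_(storage), code_(storage->DeduplicateCode(code)) {}
      ~ScopedDedupedCode() { storage_->ReleaseCode(code_); }
      const LengthPrefixedArray<uint8_t>* get() const { return code_; }
     private:
      CompiledMethodStorage* const storage_;
      const LengthPrefixedArray<uint8_t>* const code_;
    };
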
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index a4d2387..15a4ba0 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -23,19 +23,20 @@
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
+#include "length_prefixed_array.h"
 #include "method_reference.h"
 #include "utils/array_ref.h"
-#include "utils/swap_space.h"
 
 namespace art {
 
 class CompilerDriver;
+class CompiledMethodStorage;
 
 class CompiledCode {
  public:
   // For Quick to supply a code blob
   CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
-               const ArrayRef<const uint8_t>& quick_code, bool owns_code_array);
+               const ArrayRef<const uint8_t>& quick_code);
 
   virtual ~CompiledCode();
 
@@ -43,8 +44,8 @@
     return instruction_set_;
   }
 
-  const SwapVector<uint8_t>* GetQuickCode() const {
-    return quick_code_;
+  ArrayRef<const uint8_t> GetQuickCode() const {
+    return GetArray(quick_code_);
   }
 
   bool operator==(const CompiledCode& rhs) const;
@@ -66,41 +67,46 @@
   static const void* CodePointer(const void* code_pointer,
                                  InstructionSet instruction_set);
 
-  const std::vector<uint32_t>& GetOatdataOffsetsToCompliledCodeOffset() const;
-  void AddOatdataOffsetToCompliledCodeOffset(uint32_t offset);
+ protected:
+  template <typename T>
+  static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array) {
+    if (array == nullptr) {
+      return ArrayRef<const T>();
+    }
+    DCHECK_NE(array->size(), 0u);
+    return ArrayRef<const T>(&array->At(0), array->size());
+  }
+
+  CompilerDriver* GetCompilerDriver() {
+    return compiler_driver_;
+  }
 
  private:
   CompilerDriver* const compiler_driver_;
 
   const InstructionSet instruction_set_;
 
-  // If we own the code array (means that we free in destructor).
-  const bool owns_code_array_;
-
   // Used to store the PIC code for Quick.
-  SwapVector<uint8_t>* quick_code_;
-
-  // There are offsets from the oatdata symbol to where the offset to
-  // the compiled method will be found. These are computed by the
-  // OatWriter and then used by the ElfWriter to add relocations so
-  // that MCLinker can update the values to the location in the linked .so.
-  std::vector<uint32_t> oatdata_offsets_to_compiled_code_offset_;
+  const LengthPrefixedArray<uint8_t>* const quick_code_;
 };
 
 class SrcMapElem {
  public:
   uint32_t from_;
   int32_t to_;
-
-  // Lexicographical compare.
-  bool operator<(const SrcMapElem& other) const {
-    if (from_ != other.from_) {
-      return from_ < other.from_;
-    }
-    return to_ < other.to_;
-  }
 };
 
+inline bool operator<(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+  if (lhs.from_ != rhs.from_) {
+    return lhs.from_ < rhs.from_;
+  }
+  return lhs.to_ < rhs.to_;
+}
+
+inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+  return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_;
+}
+
 template <class Allocator>
 class SrcMap FINAL : public std::vector<SrcMapElem, Allocator> {
  public:
@@ -151,7 +157,6 @@
 };
 
 using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>;
-using SwapSrcMap = SrcMap<SwapAllocator<SrcMapElem>>;
 
 
 enum LinkerPatchType {
@@ -273,6 +278,9 @@
     uint32_t method_idx_;       // Method index for Call/Method patches.
     uint32_t type_idx_;         // Type index for Type patches.
     uint32_t element_offset_;   // Element offset in the dex cache arrays.
+    static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
+    static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
+    static_assert(sizeof(element_offset_) == sizeof(cmp1_), "needed by relational operators");
   };
   union {
     uint32_t cmp2_;             // Used for relational operators.
@@ -313,7 +321,7 @@
                  const size_t frame_size_in_bytes,
                  const uint32_t core_spill_mask,
                  const uint32_t fp_spill_mask,
-                 DefaultSrcMap* src_mapping_table,
+                 const ArrayRef<const SrcMapElem>& src_mapping_table,
                  const ArrayRef<const uint8_t>& mapping_table,
                  const ArrayRef<const uint8_t>& vmap_table,
                  const ArrayRef<const uint8_t>& native_gc_map,
@@ -329,7 +337,7 @@
       const size_t frame_size_in_bytes,
       const uint32_t core_spill_mask,
       const uint32_t fp_spill_mask,
-      DefaultSrcMap* src_mapping_table,
+      const ArrayRef<const SrcMapElem>& src_mapping_table,
       const ArrayRef<const uint8_t>& mapping_table,
       const ArrayRef<const uint8_t>& vmap_table,
       const ArrayRef<const uint8_t>& native_gc_map,
@@ -350,35 +358,31 @@
     return fp_spill_mask_;
   }
 
-  const SwapSrcMap& GetSrcMappingTable() const {
-    DCHECK(src_mapping_table_ != nullptr);
-    return *src_mapping_table_;
+  ArrayRef<const SrcMapElem> GetSrcMappingTable() const {
+    return GetArray(src_mapping_table_);
   }
 
-  SwapVector<uint8_t> const* GetMappingTable() const {
-    return mapping_table_;
+  ArrayRef<const uint8_t> GetMappingTable() const {
+    return GetArray(mapping_table_);
   }
 
-  const SwapVector<uint8_t>* GetVmapTable() const {
-    DCHECK(vmap_table_ != nullptr);
-    return vmap_table_;
+  ArrayRef<const uint8_t> GetVmapTable() const {
+    return GetArray(vmap_table_);
   }
 
-  SwapVector<uint8_t> const* GetGcMap() const {
-    return gc_map_;
+  ArrayRef<const uint8_t> GetGcMap() const {
+    return GetArray(gc_map_);
   }
 
-  const SwapVector<uint8_t>* GetCFIInfo() const {
-    return cfi_info_;
+  ArrayRef<const uint8_t> GetCFIInfo() const {
+    return GetArray(cfi_info_);
   }
 
   ArrayRef<const LinkerPatch> GetPatches() const {
-    return ArrayRef<const LinkerPatch>(patches_);
+    return GetArray(patches_);
   }
 
  private:
-  // Whether or not the arrays are owned by the compiled method or dedupe sets.
-  const bool owns_arrays_;
   // For quick code, the size of the activation used by the code.
   const size_t frame_size_in_bytes_;
   // For quick code, a bit mask describing spilled GPR callee-save registers.
@@ -386,19 +390,19 @@
   // For quick code, a bit mask describing spilled FPR callee-save registers.
   const uint32_t fp_spill_mask_;
   // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset.
-  SwapSrcMap* src_mapping_table_;
+  const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_;
   // For quick code, a uleb128 encoded map from native PC offset to dex PC as well as dex PC to
   // native PC offset. Size prefixed.
-  SwapVector<uint8_t>* mapping_table_;
+  const LengthPrefixedArray<uint8_t>* const mapping_table_;
   // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
-  SwapVector<uint8_t>* vmap_table_;
+  const LengthPrefixedArray<uint8_t>* const vmap_table_;
   // For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
   // are live.
-  SwapVector<uint8_t>* gc_map_;
+  const LengthPrefixedArray<uint8_t>* const gc_map_;
   // For quick code, a FDE entry for the debug_frame section.
-  SwapVector<uint8_t>* cfi_info_;
+  const LengthPrefixedArray<uint8_t>* const cfi_info_;
   // For quick code, linker patches needed by the method.
-  const SwapVector<LinkerPatch> patches_;
+  const LengthPrefixedArray<LinkerPatch>* const patches_;
 };
 
 }  // namespace art
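
All accessors above now funnel through GetArray(), which maps a null LengthPrefixedArray<T>* to an empty ArrayRef<const T>, so call sites such as common_compiler_test.cc can drop their null checks and query .empty() instead. The layout this assumes is a single allocation holding a size field immediately followed by the elements. A simplified sketch of that layout (ART's real length_prefixed_array.h additionally handles element alignment and iteration):

    template <typename T>
    class LengthPrefixedArraySketch {
     public:
      explicit LengthPrefixedArraySketch(size_t size) : size_(size) {}
      size_t size() const { return size_; }
      const T& At(size_t index) const { return data_[index]; }
      // Bytes to allocate for the header plus 'num' trailing elements,
      // matching the ComputeSize() used by CopyArray()/ReleaseArray().
      static size_t ComputeSize(size_t num) {
        return sizeof(LengthPrefixedArraySketch<T>) + num * sizeof(T);
      }
     private:
      size_t size_;
      T data_[0];  // Elements live directly after the header (GCC extension).
    };
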
diff --git a/compiler/compiled_method_test.cc b/compiler/compiled_method_test.cc
new file mode 100644
index 0000000..99ee875
--- /dev/null
+++ b/compiler/compiled_method_test.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "compiled_method.h"
+
+namespace art {
+
+TEST(CompiledMethod, SrcMapElemOperators) {
+  SrcMapElem elems[] = {
+      { 1u, -1 },
+      { 1u, 0 },
+      { 1u, 1 },
+      { 2u, -1 },
+      { 2u, 0 },    // Index 4.
+      { 2u, 1 },
+      { 2u, 0 },    // Index 6: deliberately identical to the element at index 4.
+  };
+
+  for (size_t i = 0; i != arraysize(elems); ++i) {
+    for (size_t j = 0; j != arraysize(elems); ++j) {
+      bool expected = (i != 6u ? i : 4u) == (j != 6u ? j : 4u);
+      EXPECT_EQ(expected, elems[i] == elems[j]) << i << " " << j;
+    }
+  }
+
+  for (size_t i = 0; i != arraysize(elems); ++i) {
+    for (size_t j = 0; j != arraysize(elems); ++j) {
+      bool expected = (i != 6u ? i : 4u) < (j != 6u ? j : 4u);
+      EXPECT_EQ(expected, elems[i] < elems[j]) << i << " " << j;
+    }
+  }
+}
+
+TEST(CompiledMethod, LinkerPatchOperators) {
+  const DexFile* dex_file1 = reinterpret_cast<const DexFile*>(1);
+  const DexFile* dex_file2 = reinterpret_cast<const DexFile*>(2);
+  LinkerPatch patches[] = {
+      LinkerPatch::MethodPatch(16u, dex_file1, 1000u),
+      LinkerPatch::MethodPatch(16u, dex_file1, 1001u),
+      LinkerPatch::MethodPatch(16u, dex_file2, 1000u),
+      LinkerPatch::MethodPatch(16u, dex_file2, 1001u),  // Index 3.
+      LinkerPatch::CodePatch(16u, dex_file1, 1000u),
+      LinkerPatch::CodePatch(16u, dex_file1, 1001u),
+      LinkerPatch::CodePatch(16u, dex_file2, 1000u),
+      LinkerPatch::CodePatch(16u, dex_file2, 1001u),
+      LinkerPatch::RelativeCodePatch(16u, dex_file1, 1000u),
+      LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u),
+      LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u),
+      LinkerPatch::RelativeCodePatch(16u, dex_file2, 1001u),
+      LinkerPatch::TypePatch(16u, dex_file1, 1000u),
+      LinkerPatch::TypePatch(16u, dex_file1, 1001u),
+      LinkerPatch::TypePatch(16u, dex_file2, 1000u),
+      LinkerPatch::TypePatch(16u, dex_file2, 1001u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2001u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2001u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2001u),
+      LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2001u),
+      LinkerPatch::MethodPatch(32u, dex_file1, 1000u),
+      LinkerPatch::MethodPatch(32u, dex_file1, 1001u),
+      LinkerPatch::MethodPatch(32u, dex_file2, 1000u),
+      LinkerPatch::MethodPatch(32u, dex_file2, 1001u),
+      LinkerPatch::CodePatch(32u, dex_file1, 1000u),
+      LinkerPatch::CodePatch(32u, dex_file1, 1001u),
+      LinkerPatch::CodePatch(32u, dex_file2, 1000u),
+      LinkerPatch::CodePatch(32u, dex_file2, 1001u),
+      LinkerPatch::RelativeCodePatch(32u, dex_file1, 1000u),
+      LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u),
+      LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u),
+      LinkerPatch::RelativeCodePatch(32u, dex_file2, 1001u),
+      LinkerPatch::TypePatch(32u, dex_file1, 1000u),
+      LinkerPatch::TypePatch(32u, dex_file1, 1001u),
+      LinkerPatch::TypePatch(32u, dex_file2, 1000u),
+      LinkerPatch::TypePatch(32u, dex_file2, 1001u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2001u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2001u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2000u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2001u),
+      LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2001u),
+      LinkerPatch::MethodPatch(16u, dex_file2, 1001u),  // Identical to the patch at index 3.
+  };
+  constexpr size_t last_index = arraysize(patches) - 1u;
+
+  for (size_t i = 0; i != arraysize(patches); ++i) {
+    for (size_t j = 0; j != arraysize(patches); ++j) {
+      bool expected = (i != last_index ? i : 3u) == (j != last_index ? j : 3u);
+      EXPECT_EQ(expected, patches[i] == patches[j]) << i << " " << j;
+    }
+  }
+
+  for (size_t i = 0; i != arraysize(patches); ++i) {
+    for (size_t j = 0; j != arraysize(patches); ++j) {
+      bool expected = (i != last_index ? i : 3u) < (j != last_index ? j : 3u);
+      EXPECT_EQ(expected, patches[i] < patches[j]) << i << " " << j;
+    }
+  }
+}
+
+}  // namespace art
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ff7ddc1..4836041 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -356,7 +356,7 @@
         0,
         0,
         0,
-        nullptr,                                     // src_mapping_table
+        ArrayRef<const SrcMapElem>(),                // src_mapping_table
         ArrayRef<const uint8_t>(),                   // mapping_table
         ArrayRef<const uint8_t>(builder.GetData()),  // vmap_table
         ArrayRef<const uint8_t>(),                   // gc_map
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 4de3410..445859c 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -18,6 +18,7 @@
 
 #include "gvn_dead_code_elimination.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/macros.h"
 #include "base/allocator.h"
@@ -26,7 +27,6 @@
 #include "dex_instruction.h"
 #include "dex/mir_graph.h"
 #include "local_value_numbering.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 097abdc..2da8a98 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -19,6 +19,7 @@
 
 #include <stdint.h>
 
+#include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/bit_utils.h"
 #include "base/scoped_arena_containers.h"
@@ -30,7 +31,6 @@
 #include "mir_method_info.h"
 #include "reg_location.h"
 #include "reg_storage.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index cde99b3..d68835a 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -22,6 +22,7 @@
 #endif
 
 #include "base/bit_vector-inl.h"
+#include "base/stringprintf.h"
 #include "dex/mir_graph.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
@@ -1165,7 +1166,7 @@
       cu_->compiler_driver, cu_->instruction_set,
       ArrayRef<const uint8_t>(code_buffer_),
       frame_size_, core_spill_mask_, fp_spill_mask_,
-      &src_mapping_table_,
+      ArrayRef<const SrcMapElem>(src_mapping_table_),
       ArrayRef<const uint8_t>(encoded_mapping_table_),
       ArrayRef<const uint8_t>(vmap_encoder.GetData()),
       ArrayRef<const uint8_t>(native_gc_map_),
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index d9d0434..dceb118 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -18,6 +18,7 @@
 
 #include "mir_to_lir-inl.h"
 
+#include "base/stringprintf.h"
 #include "dex/compiler_ir.h"
 #include "dex/dataflow_iterator-inl.h"
 #include "dex/mir_graph.h"
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
new file mode 100644
index 0000000..bc5c6ca
--- /dev/null
+++ b/compiler/driver/compiled_method_storage.cc
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiled_method_storage.h"
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/logging.h"
+#include "compiled_method.h"
+#include "thread-inl.h"
+#include "utils.h"
+#include "utils/dedupe_set-inl.h"
+#include "utils/swap_space.h"
+
+namespace art {
+
+namespace {  // anonymous namespace
+
+template <typename T>
+const LengthPrefixedArray<T>* CopyArray(SwapSpace* swap_space, const ArrayRef<const T>& array) {
+  DCHECK(!array.empty());
+  SwapAllocator<uint8_t> allocator(swap_space);
+  void* storage = allocator.allocate(LengthPrefixedArray<T>::ComputeSize(array.size()));
+  LengthPrefixedArray<T>* array_copy = new(storage) LengthPrefixedArray<T>(array.size());
+  std::copy(array.begin(), array.end(), array_copy->begin());
+  return array_copy;
+}
+
+template <typename T>
+void ReleaseArray(SwapSpace* swap_space, const LengthPrefixedArray<T>* array) {
+  SwapAllocator<uint8_t> allocator(swap_space);
+  size_t size = LengthPrefixedArray<T>::ComputeSize(array->size());
+  array->~LengthPrefixedArray<T>();
+  allocator.deallocate(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(array)), size);
+}
+
+}  // anonymous namespace
+
+template <typename T, typename DedupeSetType>
+inline const LengthPrefixedArray<T>* CompiledMethodStorage::AllocateOrDeduplicateArray(
+    const ArrayRef<const T>& data,
+    DedupeSetType* dedupe_set) {
+  if (data.empty()) {
+    return nullptr;
+  } else if (!DedupeEnabled()) {
+    return CopyArray(swap_space_.get(), data);
+  } else {
+    return dedupe_set->Add(Thread::Current(), data);
+  }
+}
+
+template <typename T>
+inline void CompiledMethodStorage::ReleaseArrayIfNotDeduplicated(
+    const LengthPrefixedArray<T>* array) {
+  if (array != nullptr && !DedupeEnabled()) {
+    ReleaseArray(swap_space_.get(), array);
+  }
+}
+
+template <typename ContentType>
+class CompiledMethodStorage::DedupeHashFunc {
+ private:
+  static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+  size_t operator()(const ArrayRef<ContentType>& array) const {
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+    // TODO: More reasonable assertion.
+    // static_assert(IsPowerOfTwo(sizeof(ContentType)),
+    //    "ContentType is not power of two, don't know whether array layout is as assumed");
+    uint32_t len = sizeof(ContentType) * array.size();
+    if (kUseMurmur3Hash) {
+      static constexpr uint32_t c1 = 0xcc9e2d51;
+      static constexpr uint32_t c2 = 0x1b873593;
+      static constexpr uint32_t r1 = 15;
+      static constexpr uint32_t r2 = 13;
+      static constexpr uint32_t m = 5;
+      static constexpr uint32_t n = 0xe6546b64;
+
+      uint32_t hash = 0;
+
+      const int nblocks = len / 4;
+      typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+      const unaligned_uint32_t* blocks = reinterpret_cast<const unaligned_uint32_t*>(data);
+      int i;
+      for (i = 0; i < nblocks; i++) {
+        uint32_t k = blocks[i];
+        k *= c1;
+        k = (k << r1) | (k >> (32 - r1));
+        k *= c2;
+
+        hash ^= k;
+        hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+      }
+
+      const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+      uint32_t k1 = 0;
+
+      switch (len & 3) {
+        case 3:
+          k1 ^= tail[2] << 16;
+          FALLTHROUGH_INTENDED;
+        case 2:
+          k1 ^= tail[1] << 8;
+          FALLTHROUGH_INTENDED;
+        case 1:
+          k1 ^= tail[0];
+
+          k1 *= c1;
+          k1 = (k1 << r1) | (k1 >> (32 - r1));
+          k1 *= c2;
+          hash ^= k1;
+      }
+
+      hash ^= len;
+      hash ^= (hash >> 16);
+      hash *= 0x85ebca6b;
+      hash ^= (hash >> 13);
+      hash *= 0xc2b2ae35;
+      hash ^= (hash >> 16);
+
+      return hash;
+    } else {
+      size_t hash = 0x811c9dc5;
+      for (uint32_t i = 0; i < len; ++i) {
+        hash = (hash * 16777619) ^ data[i];
+      }
+      hash += hash << 13;
+      hash ^= hash >> 7;
+      hash += hash << 3;
+      hash ^= hash >> 17;
+      hash += hash << 5;
+      return hash;
+    }
+  }
+};
+
+template <typename T>
+class CompiledMethodStorage::LengthPrefixedArrayAlloc {
+ public:
+  explicit LengthPrefixedArrayAlloc(SwapSpace* swap_space)
+      : swap_space_(swap_space) {
+  }
+
+  const LengthPrefixedArray<T>* Copy(const ArrayRef<const T>& array) {
+    return CopyArray(swap_space_, array);
+  }
+
+  void Destroy(const LengthPrefixedArray<T>* array) {
+    ReleaseArray(swap_space_, array);
+  }
+
+ private:
+  SwapSpace* const swap_space_;
+};
+
+CompiledMethodStorage::CompiledMethodStorage(int swap_fd)
+    : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
+      dedupe_enabled_(true),
+      dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_src_mapping_table_("dedupe source mapping table",
+                                LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())),
+      dedupe_mapping_table_("dedupe mapping table",
+                            LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_vmap_table_("dedupe vmap table",
+                         LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_gc_map_("dedupe gc map", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_linker_patches_("dedupe linker patches",
+                             LengthPrefixedArrayAlloc<LinkerPatch>(swap_space_.get())) {
+}
+
+CompiledMethodStorage::~CompiledMethodStorage() {
+  // All done by member destructors.
+}
+
+void CompiledMethodStorage::DumpMemoryUsage(std::ostream& os, bool extended) const {
+  if (swap_space_.get() != nullptr) {
+    os << " swap=" << PrettySize(swap_space_->GetSize());
+  }
+  if (extended) {
+    Thread* self = Thread::Current();
+    os << "\nCode dedupe: " << dedupe_code_.DumpStats(self);
+    os << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats(self);
+    os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self);
+    os << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats(self);
+    os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self);
+  }
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCode(
+    const ArrayRef<const uint8_t>& code) {
+  return AllocateOrDeduplicateArray(code, &dedupe_code_);
+}
+
+void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray<uint8_t>* code) {
+  ReleaseArrayIfNotDeduplicated(code);
+}
+
+const LengthPrefixedArray<SrcMapElem>* CompiledMethodStorage::DeduplicateSrcMappingTable(
+    const ArrayRef<const SrcMapElem>& src_map) {
+  return AllocateOrDeduplicateArray(src_map, &dedupe_src_mapping_table_);
+}
+
+void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map) {
+  ReleaseArrayIfNotDeduplicated(src_map);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMappingTable(
+    const ArrayRef<const uint8_t>& table) {
+  return AllocateOrDeduplicateArray(table, &dedupe_mapping_table_);
+}
+
+void CompiledMethodStorage::ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table) {
+  ReleaseArrayIfNotDeduplicated(table);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
+    const ArrayRef<const uint8_t>& table) {
+  return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_);
+}
+
+void CompiledMethodStorage::ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table) {
+  ReleaseArrayIfNotDeduplicated(table);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateGCMap(
+    const ArrayRef<const uint8_t>& gc_map) {
+  return AllocateOrDeduplicateArray(gc_map, &dedupe_gc_map_);
+}
+
+void CompiledMethodStorage::ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map) {
+  ReleaseArrayIfNotDeduplicated(gc_map);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCFIInfo(
+    const ArrayRef<const uint8_t>& cfi_info) {
+  return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_);
+}
+
+void CompiledMethodStorage::ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info) {
+  ReleaseArrayIfNotDeduplicated(cfi_info);
+}
+
+const LengthPrefixedArray<LinkerPatch>* CompiledMethodStorage::DeduplicateLinkerPatches(
+    const ArrayRef<const LinkerPatch>& linker_patches) {
+  return AllocateOrDeduplicateArray(linker_patches, &dedupe_linker_patches_);
+}
+
+void CompiledMethodStorage::ReleaseLinkerPatches(
+    const LengthPrefixedArray<LinkerPatch>* linker_patches) {
+  ReleaseArrayIfNotDeduplicated(linker_patches);
+}
+
+}  // namespace art
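
A short usage sketch of the storage built above: byte-identical inputs dedupe to pointer-identical storage while deduplication is enabled, and every Deduplicate*() must still be paired with the matching Release*(), which only frees when deduplication was off (illustrative, assumptions as labeled in the comments):

    CompiledMethodStorage storage(/* swap_fd */ -1);  // -1: plain heap, no swap file.
    const uint8_t raw[] = { 1u, 2u, 3u };
    const LengthPrefixedArray<uint8_t>* a =
        storage.DeduplicateCode(ArrayRef<const uint8_t>(raw));
    const LengthPrefixedArray<uint8_t>* b =
        storage.DeduplicateCode(ArrayRef<const uint8_t>(raw));
    CHECK_EQ(a, b);          // Same bytes, same canonical allocation.
    storage.ReleaseCode(a);  // No-op while DedupeEnabled(); frees a copy otherwise.
    storage.ReleaseCode(b);
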
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
new file mode 100644
index 0000000..ef10b67
--- /dev/null
+++ b/compiler/driver/compiled_method_storage.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
+#define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
+
+#include <iosfwd>
+#include <memory>
+
+#include "base/macros.h"
+#include "length_prefixed_array.h"
+#include "utils/array_ref.h"
+#include "utils/dedupe_set.h"
+#include "utils/swap_space.h"
+
+namespace art {
+
+class LinkerPatch;
+class SrcMapElem;
+
+class CompiledMethodStorage {
+ public:
+  explicit CompiledMethodStorage(int swap_fd);
+  ~CompiledMethodStorage();
+
+  void DumpMemoryUsage(std::ostream& os, bool extended) const;
+
+  void SetDedupeEnabled(bool dedupe_enabled) {
+    dedupe_enabled_ = dedupe_enabled;
+  }
+  bool DedupeEnabled() const {
+    return dedupe_enabled_;
+  }
+
+  SwapAllocator<void> GetSwapSpaceAllocator() {
+    return SwapAllocator<void>(swap_space_.get());
+  }
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
+  void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);
+
+  const LengthPrefixedArray<SrcMapElem>* DeduplicateSrcMappingTable(
+      const ArrayRef<const SrcMapElem>& src_map);
+  void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map);
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateMappingTable(const ArrayRef<const uint8_t>& table);
+  void ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table);
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
+  void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateGCMap(const ArrayRef<const uint8_t>& gc_map);
+  void ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map);
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
+  void ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info);
+
+  const LengthPrefixedArray<LinkerPatch>* DeduplicateLinkerPatches(
+      const ArrayRef<const LinkerPatch>& linker_patches);
+  void ReleaseLinkerPatches(const LengthPrefixedArray<LinkerPatch>* linker_patches);
+
+ private:
+  template <typename T, typename DedupeSetType>
+  const LengthPrefixedArray<T>* AllocateOrDeduplicateArray(const ArrayRef<const T>& data,
+                                                           DedupeSetType* dedupe_set);
+
+  template <typename T>
+  void ReleaseArrayIfNotDeduplicated(const LengthPrefixedArray<T>* array);
+
+  // Deduplication data structures.
+  template <typename ContentType>
+  class DedupeHashFunc;
+
+  template <typename T>
+  class LengthPrefixedArrayAlloc;
+
+  template <typename T>
+  using ArrayDedupeSet = DedupeSet<ArrayRef<const T>,
+                                   LengthPrefixedArray<T>,
+                                   LengthPrefixedArrayAlloc<T>,
+                                   size_t,
+                                   DedupeHashFunc<const T>,
+                                   4>;
+
+  // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
+  // as other fields rely on this.
+  std::unique_ptr<SwapSpace> swap_space_;
+
+  bool dedupe_enabled_;
+
+  ArrayDedupeSet<uint8_t> dedupe_code_;
+  ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_;
+  ArrayDedupeSet<uint8_t> dedupe_mapping_table_;
+  ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
+  ArrayDedupeSet<uint8_t> dedupe_gc_map_;
+  ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
+  ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
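
The ArrayDedupeSet<T> alias reads as: look up by a borrowed ArrayRef<const T> view, store a canonical LengthPrefixedArray<T> copied into swap space by LengthPrefixedArrayAlloc<T>, hash with DedupeHashFunc into a size_t, and split the set into 4 hash-sharded sub-sets so compiler threads contend on different locks. A toy single-shard analogue of the Add() semantics (illustrative only; the real implementation lives in utils/dedupe_set.h):

    #include <cstdint>
    #include <set>
    #include <vector>

    class ToyDedupeSet {
     public:
      // Returns the canonical copy of 'data', creating it on first sight;
      // repeated calls with equal content return the same pointer.
      const std::vector<uint8_t>* Add(const std::vector<uint8_t>& data) {
        return &*storage_.insert(data).first;
      }
     private:
      std::set<std::vector<uint8_t>> storage_;
    };
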
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
new file mode 100644
index 0000000..c6dbd24
--- /dev/null
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "compiled_method_storage.h"
+#include "compiled_method.h"
+#include "compiler_driver.h"
+#include "compiler_options.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+
+namespace art {
+
+TEST(CompiledMethodStorage, Deduplicate) {
+  CompilerOptions compiler_options;
+  VerificationResults verification_results(&compiler_options);
+  DexFileToMethodInlinerMap method_inliner_map;
+  CompilerDriver driver(&compiler_options,
+                        &verification_results,
+                        &method_inliner_map,
+                        Compiler::kOptimizing, kNone,
+                        nullptr,
+                        false,
+                        nullptr,
+                        nullptr,
+                        nullptr,
+                        1u,
+                        false,
+                        false,
+                        "",
+                        false,
+                        nullptr,
+                        -1,
+                        "");
+  CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
+
+  ASSERT_TRUE(storage->DedupeEnabled());  // The default.
+
+  const uint8_t raw_code1[] = { 1u, 2u, 3u };
+  const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u };
+  ArrayRef<const uint8_t> code[] = {
+      ArrayRef<const uint8_t>(raw_code1),
+      ArrayRef<const uint8_t>(raw_code2),
+  };
+  const SrcMapElem raw_src_map1[] = { { 1u, 2u }, { 3u, 4u }, { 5u, 6u } };
+  const SrcMapElem raw_src_map2[] = { { 8u, 7u }, { 6u, 5u }, { 4u, 3u }, { 2u, 1u } };
+  ArrayRef<const SrcMapElem> src_map[] = {
+      ArrayRef<const SrcMapElem>(raw_src_map1),
+      ArrayRef<const SrcMapElem>(raw_src_map2),
+  };
+  const uint8_t raw_mapping_table1[] = { 5, 6, 7 };
+  const uint8_t raw_mapping_table2[] = { 7, 6, 5, 4 };
+  ArrayRef<const uint8_t> mapping_table[] = {
+      ArrayRef<const uint8_t>(raw_mapping_table1),
+      ArrayRef<const uint8_t>(raw_mapping_table2),
+  };
+  const uint8_t raw_vmap_table1[] = { 2, 4, 6 };
+  const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 };
+  ArrayRef<const uint8_t> vmap_table[] = {
+      ArrayRef<const uint8_t>(raw_vmap_table1),
+      ArrayRef<const uint8_t>(raw_vmap_table2),
+  };
+  const uint8_t raw_gc_map1[] = { 9, 8, 7 };
+  const uint8_t raw_gc_map2[] = { 6, 7, 8, 9 };
+  ArrayRef<const uint8_t> gc_map[] = {
+      ArrayRef<const uint8_t>(raw_gc_map1),
+      ArrayRef<const uint8_t>(raw_gc_map2),
+  };
+  const uint8_t raw_cfi_info1[] = { 1, 3, 5 };
+  const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 };
+  ArrayRef<const uint8_t> cfi_info[] = {
+      ArrayRef<const uint8_t>(raw_cfi_info1),
+      ArrayRef<const uint8_t>(raw_cfi_info2),
+  };
+  const LinkerPatch raw_patches1[] = {
+      LinkerPatch::CodePatch(0u, nullptr, 1u),
+      LinkerPatch::MethodPatch(4u, nullptr, 1u),
+  };
+  const LinkerPatch raw_patches2[] = {
+      LinkerPatch::CodePatch(0u, nullptr, 1u),
+      LinkerPatch::MethodPatch(4u, nullptr, 2u),
+  };
+  ArrayRef<const LinkerPatch> patches[] = {
+      ArrayRef<const LinkerPatch>(raw_patches1),
+      ArrayRef<const LinkerPatch>(raw_patches2),
+  };
+
+  std::vector<CompiledMethod*> compiled_methods;
+  compiled_methods.reserve(1u << 7);
+  for (auto&& c : code) {
+    for (auto&& s : src_map) {
+      for (auto&& m : mapping_table) {
+        for (auto&& v : vmap_table) {
+          for (auto&& g : gc_map) {
+            for (auto&& f : cfi_info) {
+              for (auto&& p : patches) {
+                compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
+                        &driver, kNone, c, 0u, 0u, 0u, s, m, v, g, f, p));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  constexpr size_t code_bit = 1u << 6;
+  constexpr size_t src_map_bit = 1u << 5;
+  constexpr size_t mapping_table_bit = 1u << 4;
+  constexpr size_t vmap_table_bit = 1u << 3;
+  constexpr size_t gc_map_bit = 1u << 2;
+  constexpr size_t cfi_info_bit = 1u << 1;
+  constexpr size_t patches_bit = 1u << 0;
+  CHECK_EQ(compiled_methods.size(), 1u << 7);
+  for (size_t i = 0; i != compiled_methods.size(); ++i) {
+    for (size_t j = 0; j != compiled_methods.size(); ++j) {
+      CompiledMethod* lhs = compiled_methods[i];
+      CompiledMethod* rhs = compiled_methods[j];
+      bool same_code = ((i ^ j) & code_bit) == 0u;
+      bool same_src_map = ((i ^ j) & src_map_bit) == 0u;
+      bool same_mapping_table = ((i ^ j) & mapping_table_bit) == 0u;
+      bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u;
+      bool same_gc_map = ((i ^ j) & gc_map_bit) == 0u;
+      bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u;
+      bool same_patches = ((i ^ j) & patches_bit) == 0u;
+      ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data())
+          << i << " " << j;
+      ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data())
+          << i << " " << j;
+      ASSERT_EQ(same_mapping_table, lhs->GetMappingTable().data() == rhs->GetMappingTable().data())
+          << i << " " << j;
+      ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data())
+          << i << " " << j;
+      ASSERT_EQ(same_gc_map, lhs->GetGcMap().data() == rhs->GetGcMap().data())
+          << i << " " << j;
+      ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data())
+          << i << " " << j;
+      ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data())
+          << i << " " << j;
+    }
+  }
+  for (CompiledMethod* method : compiled_methods) {
+    CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&driver, method);
+  }
+}
+
+}  // namespace art
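
The test enumerates the full 2^7 = 128-way cross product, so component k of method i is choice (i >> k) & 1; methods i and j share component k exactly when bit k of i ^ j is clear, which is what each ASSERT_EQ checks against pointer equality of the deduplicated arrays. A worked instance of that bit arithmetic (values chosen arbitrarily):

    // i = 0x56 and j = 0x52 differ only in bit 2, the gc_map bit,
    // so all components except the GC map must dedupe to the same pointer.
    static_assert(((0x56 ^ 0x52) & (1u << 2)) != 0, "GC maps differ");
    static_assert(((0x56 ^ 0x52) & (1u << 6)) == 0, "code is shared");
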
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index b956584..8750aa8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -348,9 +348,8 @@
                                const std::string& dump_cfg_file_name, bool dump_cfg_append,
                                CumulativeLogger* timer, int swap_fd,
                                const std::string& profile_file)
-    : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
-      swap_space_allocator_(new SwapAllocator<void>(swap_space_.get())),
-      profile_present_(false), compiler_options_(compiler_options),
+    : profile_present_(false),
+      compiler_options_(compiler_options),
       verification_results_(verification_results),
       method_inliner_map_(method_inliner_map),
       compiler_(Compiler::Create(this, compiler_kind)),
@@ -369,7 +368,6 @@
       had_hard_verifier_failure_(false),
       thread_count_(thread_count),
       stats_(new AOTCompilationStats),
-      dedupe_enabled_(true),
       dump_stats_(dump_stats),
       dump_passes_(dump_passes),
       dump_cfg_file_name_(dump_cfg_file_name),
@@ -377,12 +375,7 @@
       timings_logger_(timer),
       compiler_context_(nullptr),
       support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
-      dedupe_code_("dedupe code", *swap_space_allocator_),
-      dedupe_src_mapping_table_("dedupe source mapping table", *swap_space_allocator_),
-      dedupe_mapping_table_("dedupe mapping table", *swap_space_allocator_),
-      dedupe_vmap_table_("dedupe vmap table", *swap_space_allocator_),
-      dedupe_gc_map_("dedupe gc map", *swap_space_allocator_),
-      dedupe_cfi_info_("dedupe cfi info", *swap_space_allocator_) {
+      compiled_method_storage_(swap_fd) {
   DCHECK(compiler_options_ != nullptr);
   DCHECK(verification_results_ != nullptr);
   DCHECK(method_inliner_map_ != nullptr);
@@ -402,36 +395,6 @@
   }
 }
 
-SwapVector<uint8_t>* CompilerDriver::DeduplicateCode(const ArrayRef<const uint8_t>& code) {
-  DCHECK(dedupe_enabled_);
-  return dedupe_code_.Add(Thread::Current(), code);
-}
-
-SwapSrcMap* CompilerDriver::DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map) {
-  DCHECK(dedupe_enabled_);
-  return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const ArrayRef<const uint8_t>& code) {
-  DCHECK(dedupe_enabled_);
-  return dedupe_mapping_table_.Add(Thread::Current(), code);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateVMapTable(const ArrayRef<const uint8_t>& code) {
-  DCHECK(dedupe_enabled_);
-  return dedupe_vmap_table_.Add(Thread::Current(), code);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateGCMap(const ArrayRef<const uint8_t>& code) {
-  DCHECK(dedupe_enabled_);
-  return dedupe_gc_map_.Add(Thread::Current(), code);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info) {
-  DCHECK(dedupe_enabled_);
-  return dedupe_cfi_info_.Add(Thread::Current(), cfi_info);
-}
-
 CompilerDriver::~CompilerDriver() {
   Thread* self = Thread::Current();
   {
@@ -447,6 +410,7 @@
   compiler_->UnInit();
 }
 
+
 #define CREATE_TRAMPOLINE(type, abi, offset) \
     if (Is64BitInstructionSet(instruction_set_)) { \
       return CreateTrampoline64(instruction_set_, abi, \
@@ -2642,16 +2606,7 @@
   oss << " native alloc=" << PrettySize(allocated_space) << " free="
       << PrettySize(free_space);
 #endif
-  if (swap_space_.get() != nullptr) {
-    oss << " swap=" << PrettySize(swap_space_->GetSize());
-  }
-  if (extended) {
-    oss << "\nCode dedupe: " << dedupe_code_.DumpStats();
-    oss << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats();
-    oss << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats();
-    oss << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats();
-    oss << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats();
-  }
+  compiled_method_storage_.DumpMemoryUsage(oss, extended);
   return oss.str();
 }
 
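The hunks above replace the driver's in-line swap space and dedupe members with a
single delegate. Pieced together from the calls visible in this diff alone (the
new driver/compiled_method_storage.h itself is not shown in this excerpt, so
anything beyond these members is an assumption), the delegate's shape is roughly:

    // Sketch reconstructed from usage in this diff; not the authoritative header.
    class CompiledMethodStorage {
     public:
      explicit CompiledMethodStorage(int swap_fd);   // swap_fd == -1: no swap file.
      void SetDedupeEnabled(bool dedupe_enabled);    // Forwarded from CompilerDriver.
      bool DedupeEnabled() const;
      void DumpMemoryUsage(std::ostream& os, bool extended) const;
      // The Deduplicate* entry points removed from CompilerDriver above are
      // presumed to move here as well.
    };
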
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 0dc8261..485cdcf 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -30,6 +30,7 @@
 #include "class_reference.h"
 #include "compiler.h"
 #include "dex_file.h"
+#include "driver/compiled_method_storage.h"
 #include "invoke_type.h"
 #include "method_reference.h"
 #include "mirror/class.h"  // For mirror::Class::Status.
@@ -38,10 +39,7 @@
 #include "runtime.h"
 #include "safe_map.h"
 #include "thread_pool.h"
-#include "utils/array_ref.h"
-#include "utils/dedupe_set.h"
 #include "utils/dex_cache_arrays_layout.h"
-#include "utils/swap_space.h"
 
 namespace art {
 
@@ -80,8 +78,6 @@
   kQuickAbi
 };
 
-static constexpr bool kUseMurmur3Hash = true;
-
 class CompilerDriver {
  public:
   // Create a compiler targeting the requested "instruction_set".
@@ -388,10 +384,6 @@
     support_boot_image_fixup_ = support_boot_image_fixup;
   }
 
-  SwapAllocator<void>& GetSwapSpaceAllocator() {
-    return *swap_space_allocator_.get();
-  }
-
   bool WriteElf(const std::string& android_root,
                 bool is_host,
                 const std::vector<const DexFile*>& dex_files,
@@ -431,10 +423,10 @@
   }
 
   void SetDedupeEnabled(bool dedupe_enabled) {
-    dedupe_enabled_ = dedupe_enabled;
+    compiled_method_storage_.SetDedupeEnabled(dedupe_enabled);
   }
   bool DedupeEnabled() const {
-    return dedupe_enabled_;
+    return compiled_method_storage_.DedupeEnabled();
   }
 
   // Checks if class specified by type_idx is one of the image_classes_
@@ -455,13 +447,6 @@
                                        uint16_t class_def_idx,
                                        const DexFile& dex_file) const;
 
-  SwapVector<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
-  SwapSrcMap* DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map);
-  SwapVector<uint8_t>* DeduplicateMappingTable(const ArrayRef<const uint8_t>& code);
-  SwapVector<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& code);
-  SwapVector<uint8_t>* DeduplicateGCMap(const ArrayRef<const uint8_t>& code);
-  SwapVector<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
-
   // Should the compiler run on this method given profile information?
   bool SkipCompilation(const std::string& method_name);
 
@@ -479,6 +464,10 @@
     return compiler_kind_;
   }
 
+  CompiledMethodStorage* GetCompiledMethodStorage() {
+    return &compiled_method_storage_;
+  }
+
  private:
   // Return whether the declaring class of `resolved_member` is
   // available to `referrer_class` for read or write access using two
@@ -599,11 +588,6 @@
                       ThreadPool* thread_pool, TimingLogger* timings)
       REQUIRES(!Locks::mutator_lock_);
 
-  // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
-  // as other fields rely on this.
-  std::unique_ptr<SwapSpace> swap_space_;
-  std::unique_ptr<SwapAllocator<void> > swap_space_allocator_;
-
   ProfileFile profile_file_;
   bool profile_present_;
 
@@ -663,7 +647,6 @@
   class AOTCompilationStats;
   std::unique_ptr<AOTCompilationStats> stats_;
 
-  bool dedupe_enabled_;
   bool dump_stats_;
   const bool dump_passes_;
   const std::string dump_cfg_file_name_;
@@ -678,93 +661,7 @@
 
   bool support_boot_image_fixup_;
 
-  // DeDuplication data structures, these own the corresponding byte arrays.
-  template <typename ContentType>
-  class DedupeHashFunc {
-   public:
-    size_t operator()(const ArrayRef<ContentType>& array) const {
-      const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
-      static_assert(IsPowerOfTwo(sizeof(ContentType)),
-          "ContentType is not power of two, don't know whether array layout is as assumed");
-      uint32_t len = sizeof(ContentType) * array.size();
-      if (kUseMurmur3Hash) {
-        static constexpr uint32_t c1 = 0xcc9e2d51;
-        static constexpr uint32_t c2 = 0x1b873593;
-        static constexpr uint32_t r1 = 15;
-        static constexpr uint32_t r2 = 13;
-        static constexpr uint32_t m = 5;
-        static constexpr uint32_t n = 0xe6546b64;
-
-        uint32_t hash = 0;
-
-        const int nblocks = len / 4;
-        typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
-        const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
-        int i;
-        for (i = 0; i < nblocks; i++) {
-          uint32_t k = blocks[i];
-          k *= c1;
-          k = (k << r1) | (k >> (32 - r1));
-          k *= c2;
-
-          hash ^= k;
-          hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
-        }
-
-        const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
-        uint32_t k1 = 0;
-
-        switch (len & 3) {
-          case 3:
-            k1 ^= tail[2] << 16;
-            FALLTHROUGH_INTENDED;
-          case 2:
-            k1 ^= tail[1] << 8;
-            FALLTHROUGH_INTENDED;
-          case 1:
-            k1 ^= tail[0];
-
-            k1 *= c1;
-            k1 = (k1 << r1) | (k1 >> (32 - r1));
-            k1 *= c2;
-            hash ^= k1;
-        }
-
-        hash ^= len;
-        hash ^= (hash >> 16);
-        hash *= 0x85ebca6b;
-        hash ^= (hash >> 13);
-        hash *= 0xc2b2ae35;
-        hash ^= (hash >> 16);
-
-        return hash;
-      } else {
-        size_t hash = 0x811c9dc5;
-        for (uint32_t i = 0; i < len; ++i) {
-          hash = (hash * 16777619) ^ data[i];
-        }
-        hash += hash << 13;
-        hash ^= hash >> 7;
-        hash += hash << 3;
-        hash ^= hash >> 17;
-        hash += hash << 5;
-        return hash;
-      }
-    }
-  };
-
-  DedupeSet<ArrayRef<const uint8_t>,
-            SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_code_;
-  DedupeSet<ArrayRef<SrcMapElem>,
-            SwapSrcMap, size_t, DedupeHashFunc<SrcMapElem>, 4> dedupe_src_mapping_table_;
-  DedupeSet<ArrayRef<const uint8_t>,
-            SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_mapping_table_;
-  DedupeSet<ArrayRef<const uint8_t>,
-            SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_vmap_table_;
-  DedupeSet<ArrayRef<const uint8_t>,
-            SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_gc_map_;
-  DedupeSet<ArrayRef<const uint8_t>,
-            SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_;
+  CompiledMethodStorage compiled_method_storage_;
 
   friend class CompileClassVisitor;
   DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
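The deleted block above carried both the hash functions (Murmur3 and the FNV-style
fallback) and six content-keyed DedupeSet members. The mechanism, reduced to
standard C++ for illustration (the real DedupeSet is sharded and swap-backed;
this sketch is neither):

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    // The fallback hash from the deleted DedupeHashFunc, kept byte-for-byte.
    size_t HashBytes(const uint8_t* data, size_t len) {
      size_t hash = 0x811c9dc5;
      for (size_t i = 0; i < len; ++i) {
        hash = (hash * 16777619) ^ data[i];
      }
      hash += hash << 13;
      hash ^= hash >> 7;
      hash += hash << 3;
      hash ^= hash >> 17;
      hash += hash << 5;
      return hash;
    }

    // Content-keyed dedupe: equal byte arrays map to one shared, owned copy,
    // which is what later lets OatWriter compare plain data pointers.
    using DedupeStore =
        std::unordered_multimap<size_t, std::unique_ptr<std::vector<uint8_t>>>;

    const std::vector<uint8_t>* Deduplicate(DedupeStore* store,
                                            const std::vector<uint8_t>& bytes) {
      size_t h = HashBytes(bytes.data(), bytes.size());
      auto range = store->equal_range(h);
      for (auto it = range.first; it != range.second; ++it) {
        if (*it->second == bytes) {
          return it->second.get();  // Already stored: share the existing copy.
        }
      }
      auto it = store->emplace(h, std::make_unique<std::vector<uint8_t>>(bytes));
      return it->second.get();
    }
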
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index a07d27c..3ba380e 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -126,7 +126,7 @@
                      initial_opcodes, kCFIFormat, &debug_frame_data_);
   std::vector<uintptr_t> debug_frame_patches;
   std::vector<uintptr_t> expected_patches { 28 };  // NOLINT
-  WriteDebugFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(),
+  WriteDebugFrameFDE(is64bit, 0, 0x01000000, 0x01000000, ArrayRef<const uint8_t>(*opcodes.data()),
                      kCFIFormat, &debug_frame_data_, &debug_frame_patches);
 
   EXPECT_EQ(expected_patches, debug_frame_patches);
@@ -142,7 +142,8 @@
   std::vector<uintptr_t> debug_frame_patches;
   std::vector<uintptr_t> expected_patches { 32 };  // NOLINT
   WriteDebugFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
-                     opcodes.data(), kCFIFormat, &debug_frame_data_, &debug_frame_patches);
+                     ArrayRef<const uint8_t>(*opcodes.data()),
+                     kCFIFormat, &debug_frame_data_, &debug_frame_patches);
   DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000");
 
   EXPECT_EQ(expected_patches, debug_frame_patches);
@@ -179,7 +180,8 @@
                      initial_opcodes, kCFIFormat, &debug_frame_data_);
   std::vector<uintptr_t> debug_frame_patches;
   WriteDebugFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
-                     opcodes.data(), kCFIFormat, &debug_frame_data_, &debug_frame_patches);
+                     ArrayRef<const uint8_t>(*opcodes.data()),
+                     kCFIFormat, &debug_frame_data_, &debug_frame_patches);
 
   CheckObjdumpOutput(is64bit, "-W");
 }
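These test updates track the WriteDebugFrameFDE signature change below. The
pattern ArrayRef<const uint8_t>(*opcodes.data()) reads oddly at first: data() on
the opcode writer is assumed (consistent with its use here) to return a pointer
to the backing std::vector<uint8_t>, which ArrayRef can wrap without copying:

    std::vector<uint8_t> bytes = {0x0c, 0x0d, 0x00};  // Stand-in for CFI opcodes.
    const std::vector<uint8_t>* data = &bytes;        // What opcodes.data() yields.
    ArrayRef<const uint8_t> ref(*data);               // Non-owning view of bytes.
    // ref.data() == bytes.data() and ref.size() == bytes.size(); no copy is made.
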
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index b7eff19..f3fba4b 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -25,6 +25,7 @@
 #include "dwarf/dwarf_constants.h"
 #include "dwarf/register.h"
 #include "dwarf/writer.h"
+#include "utils/array_ref.h"
 
 namespace art {
 namespace dwarf {
@@ -70,21 +71,19 @@
       writer.PushUint8(DW_EH_PE_absptr | DW_EH_PE_udata4);  // R: Pointer encoding.
     }
   }
-  writer.PushData(opcodes.data());
+  writer.PushData(*opcodes.data());
   writer.Pad(is64bit ? 8 : 4);
   writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4);
 }
 
 // Write frame description entry (FDE) to .debug_frame or .eh_frame section.
-template<typename Vector>
+inline
 void WriteDebugFrameFDE(bool is64bit, size_t cie_offset,
                         uint64_t initial_address, uint64_t address_range,
-                        const Vector* opcodes,
+                        const ArrayRef<const uint8_t>& opcodes,
                         CFIFormat format,
                         std::vector<uint8_t>* debug_frame,
                         std::vector<uintptr_t>* debug_frame_patches) {
-  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
-
   Writer<> writer(debug_frame);
   size_t fde_header_start = writer.data()->size();
   writer.PushUint32(0);  // Length placeholder.
@@ -125,7 +124,7 @@
   writer.PushUint32(debug_abbrev_offset);
   writer.PushUint8(entries.Is64bit() ? 8 : 4);
   size_t entries_offset = writer.data()->size();
-  writer.PushData(entries.data());
+  writer.PushData(*entries.data());
   writer.UpdateUint32(start, writer.data()->size() - start - 4);
   // Copy patch locations and make them relative to .debug_info section.
   for (uintptr_t patch_location : entries.GetPatchLocations()) {
@@ -181,7 +180,7 @@
   writer.PushUint8(0);  // Terminate file list.
   writer.UpdateUint32(header_length_pos, writer.data()->size() - header_length_pos - 4);
   size_t opcodes_offset = writer.data()->size();
-  writer.PushData(opcodes.data());
+  writer.PushData(*opcodes.data());
   writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4);
   // Copy patch locations and make them relative to .debug_line section.
   for (uintptr_t patch_location : opcodes.GetPatchLocations()) {
diff --git a/compiler/dwarf/writer.h b/compiler/dwarf/writer.h
index 42c32c4..00b9dfa 100644
--- a/compiler/dwarf/writer.h
+++ b/compiler/dwarf/writer.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_DWARF_WRITER_H_
 #define ART_COMPILER_DWARF_WRITER_H_
 
+#include <type_traits>
 #include <vector>
 #include "base/bit_utils.h"
 #include "base/logging.h"
@@ -119,9 +120,10 @@
   }
 
   template<typename Vector2>
-  void PushData(const Vector2* buffer) {
-    static_assert(std::is_same<typename Vector2::value_type, uint8_t>::value, "Invalid value type");
-    data_->insert(data_->end(), buffer->begin(), buffer->end());
+  void PushData(const Vector2& buffer) {
+    static_assert(std::is_same<typename std::add_const<typename Vector2::value_type>::type,
+                               const uint8_t>::value, "Invalid value type");
+    data_->insert(data_->end(), buffer.begin(), buffer.end());
   }
 
   void UpdateUint32(size_t offset, uint32_t value) {
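The add_const step in the new static_assert is what lets PushData take both
owning and non-owning buffers: std::vector<uint8_t> has value_type uint8_t while
ArrayRef<const uint8_t> has value_type const uint8_t, and mapping both through
add_const makes the two cases compare equal. A self-contained check of that
type identity:

    #include <cstdint>
    #include <type_traits>

    static_assert(std::is_same<std::add_const<uint8_t>::type, const uint8_t>::value,
                  "vector<uint8_t>-style buffers pass the assert");
    static_assert(std::is_same<std::add_const<const uint8_t>::type, const uint8_t>::value,
                  "ArrayRef<const uint8_t>-style buffers pass the assert");
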
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index c10ffeb..3a9e312 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -182,8 +182,8 @@
   WriteDebugFrameCIE(isa, address_type, format, debug_frame);
   for (const OatWriter::DebugInfo& mi : method_infos) {
     if (!mi.deduped_) {  // Only one FDE per unique address.
-      const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
-      if (opcodes != nullptr) {
+      ArrayRef<const uint8_t> opcodes = mi.compiled_method_->GetCFIInfo();
+      if (!opcodes.empty()) {
         address_to_fde_offset_map.emplace(mi.low_pc_, debug_frame->size());
         WriteDebugFrameFDE(Is64BitInstructionSet(isa), cie_offset,
                            mi.low_pc_, mi.high_pc_ - mi.low_pc_,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4310be6..0e5a97f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -796,7 +796,7 @@
                   offset, kNativeObjectRelocationTypeArtFieldArray });
           offset += header_size;
           // Forward individual fields so that we can quickly find where they belong.
-          for (size_t i = 0, count = cur_fields->Length(); i < count; ++i) {
+          for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
             // Need to forward arrays separately from fields.
             ArtField* field = &cur_fields->At(i);
             auto it2 = native_object_relocations_.find(field);
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3d1b42f..b563c80 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -160,7 +160,7 @@
   Runtime* runtime = Runtime::Current();
 
   // Check if the method is already compiled.
-  if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+  if (runtime->GetJit()->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
     VLOG(jit) << "Already compiled " << PrettyMethod(method);
     return true;
   }
@@ -207,10 +207,7 @@
     result = true;
   } else {
     TimingLogger::ScopedTiming t2("LinkCode", &logger);
-    OatFile::OatMethod oat_method(nullptr, 0);
-    if (AddToCodeCache(method, compiled_method, &oat_method)) {
-      oat_method.LinkMethod(method);
-      CHECK(runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) << PrettyMethod(method);
+    if (AddToCodeCache(method, compiled_method)) {
       result = true;
     }
   }
@@ -227,57 +224,57 @@
 }
 
 bool JitCompiler::AddToCodeCache(ArtMethod* method,
-                                 const CompiledMethod* compiled_method,
-                                 OatFile::OatMethod* out_method) {
+                                 const CompiledMethod* compiled_method) {
   Runtime* runtime = Runtime::Current();
   JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
-  const auto* quick_code = compiled_method->GetQuickCode();
-  if (quick_code == nullptr) {
+  auto const quick_code = compiled_method->GetQuickCode();
+  if (quick_code.empty()) {
     return false;
   }
-  const auto code_size = quick_code->size();
+  const auto code_size = quick_code.size();
   Thread* const self = Thread::Current();
-  auto* const mapping_table = compiled_method->GetMappingTable();
-  auto* const vmap_table = compiled_method->GetVmapTable();
-  auto* const gc_map = compiled_method->GetGcMap();
+  auto const mapping_table = compiled_method->GetMappingTable();
+  auto const vmap_table = compiled_method->GetVmapTable();
+  auto const gc_map = compiled_method->GetGcMap();
   uint8_t* mapping_table_ptr = nullptr;
   uint8_t* vmap_table_ptr = nullptr;
   uint8_t* gc_map_ptr = nullptr;
 
-  if (mapping_table != nullptr) {
+  if (!mapping_table.empty()) {
     // Write out pre-header stuff.
     mapping_table_ptr = code_cache->AddDataArray(
-        self, mapping_table->data(), mapping_table->data() + mapping_table->size());
+        self, mapping_table.data(), mapping_table.data() + mapping_table.size());
     if (mapping_table_ptr == nullptr) {
       return false;  // Out of data cache.
     }
   }
 
-  if (vmap_table != nullptr) {
+  if (!vmap_table.empty()) {
     vmap_table_ptr = code_cache->AddDataArray(
-        self, vmap_table->data(), vmap_table->data() + vmap_table->size());
+        self, vmap_table.data(), vmap_table.data() + vmap_table.size());
     if (vmap_table_ptr == nullptr) {
       return false;  // Out of data cache.
     }
   }
 
-  if (gc_map != nullptr) {
+  if (!gc_map.empty()) {
     gc_map_ptr = code_cache->AddDataArray(
-        self, gc_map->data(), gc_map->data() + gc_map->size());
+        self, gc_map.data(), gc_map.data() + gc_map.size());
     if (gc_map_ptr == nullptr) {
       return false;  // Out of data cache.
     }
   }
 
   uint8_t* const code = code_cache->CommitCode(self,
+                                               method,
                                                mapping_table_ptr,
                                                vmap_table_ptr,
                                                gc_map_ptr,
                                                compiled_method->GetFrameSizeInBytes(),
                                                compiled_method->GetCoreSpillMask(),
                                                compiled_method->GetFpSpillMask(),
-                                               compiled_method->GetQuickCode()->data(),
-                                               compiled_method->GetQuickCode()->size());
+                                               compiled_method->GetQuickCode().data(),
+                                               compiled_method->GetQuickCode().size());
 
   if (code == nullptr) {
     return false;
@@ -285,13 +282,6 @@
 
   const size_t thumb_offset = compiled_method->CodeDelta();
   const uint32_t code_offset = sizeof(OatQuickMethodHeader) + thumb_offset;
-  *out_method = OatFile::OatMethod(code, code_offset);
-  DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
-  DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
-  DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
-  DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
-  DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
-  DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
   VLOG(jit)
       << "JIT added "
       << PrettyMethod(method) << "@" << method
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 757f3f3..913a6d0 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -59,8 +59,8 @@
   // This is in the compiler since the runtime doesn't have access to the compiled method
   // structures.
   bool AddToCodeCache(ArtMethod* method,
-                      const CompiledMethod* compiled_method,
-                      OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
+                      const CompiledMethod* compiled_method)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(JitCompiler);
 };
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 34f0802..52a2382 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -487,7 +487,7 @@
                                                  frame_size,
                                                  main_jni_conv->CoreSpillMask(),
                                                  main_jni_conv->FpSpillMask(),
-                                                 nullptr,  // src_mapping_table.
+                                                 ArrayRef<const SrcMapElem>(),
                                                  ArrayRef<const uint8_t>(),  // mapping_table.
                                                  ArrayRef<const uint8_t>(),  // vmap_table.
                                                  ArrayRef<const uint8_t>(),  // native_gc_map.
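Here and throughout the rest of this change, a default-constructed ArrayRef
replaces the old null-pointer convention for "no data":

    ArrayRef<const SrcMapElem> no_src_map;  // data() == nullptr, size() == 0.
    if (no_src_map.empty()) {
      // Equivalent to the old `src_mapping_table == nullptr` case.
    }
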
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index cb9ea38..ac38f3d 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -85,8 +85,7 @@
                                                       const CompiledMethod* compiled_method,
                                                       MethodReference method_ref,
                                                       uint32_t max_extra_space) {
-  DCHECK(compiled_method->GetQuickCode() != nullptr);
-  uint32_t quick_code_size = compiled_method->GetQuickCode()->size();
+  uint32_t quick_code_size = compiled_method->GetQuickCode().size();
   uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
   uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
   // Adjust for extra space required by the subclass.
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 6f234a8..57018af 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -74,7 +74,7 @@
   // Now that we have the actual offset where the code will be placed, locate the ADRP insns
   // that actually require the thunk.
   uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
-  ArrayRef<const uint8_t> code(*compiled_method->GetQuickCode());
+  ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
   uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
   DCHECK(compiled_method != nullptr);
   for (const LinkerPatch& patch : compiled_method->GetPatches()) {
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 857d584..2a426b5 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -237,7 +237,7 @@
     CHECK(!compiled_method_refs_.empty());
     CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u);
     CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
-    uint32_t method1_size = compiled_methods_[0]->GetQuickCode()->size();
+    uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
     uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
     uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
     ASSERT_EQ(b_diff & 3u, 0u);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index e357662..92cf8ca 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -74,8 +74,8 @@
     compiled_method_refs_.push_back(method_ref);
     compiled_methods_.emplace_back(new CompiledMethod(
         &driver_, instruction_set_, code,
-        0u, 0u, 0u, nullptr, ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
-        ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
+        0u, 0u, 0u, ArrayRef<const SrcMapElem>(), ArrayRef<const uint8_t>(),
+        ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
         patches));
   }
 
@@ -93,7 +93,7 @@
 
       offset += sizeof(OatQuickMethodHeader);
       uint32_t quick_code_offset = offset + compiled_method->CodeDelta();
-      const auto& code = *compiled_method->GetQuickCode();
+      const auto code = compiled_method->GetQuickCode();
       offset += code.size();
 
       method_offset_map_.map.Put(compiled_method_refs_[idx], quick_code_offset);
@@ -125,7 +125,7 @@
 
       out_.WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
       offset += sizeof(OatQuickMethodHeader);
-      ArrayRef<const uint8_t> code(*compiled_method->GetQuickCode());
+      ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
       if (!compiled_method->GetPatches().empty()) {
         patched_code_.assign(code.begin(), code.end());
         code = ArrayRef<const uint8_t>(patched_code_);
@@ -164,7 +164,7 @@
       ++idx;
     }
     CHECK_NE(idx, compiled_method_refs_.size());
-    CHECK_EQ(compiled_methods_[idx]->GetQuickCode()->size(), expected_code.size());
+    CHECK_EQ(compiled_methods_[idx]->GetQuickCode().size(), expected_code.size());
 
     auto result = method_offset_map_.FindMethodOffset(method_ref);
     CHECK(result.first);  // Must have been linked.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 2d9d91a..06576cc 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -63,9 +63,9 @@
       EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask());
       uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(quick_oat_code), 2);
       quick_oat_code = reinterpret_cast<const void*>(oat_code_aligned);
-      const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
-      EXPECT_TRUE(quick_code != nullptr);
-      size_t code_size = quick_code->size() * sizeof(quick_code[0]);
+      ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
+      EXPECT_FALSE(quick_code.empty());
+      size_t code_size = quick_code.size() * sizeof(quick_code[0]);
       EXPECT_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size))
           << PrettyMethod(method) << " " << code_size;
       CHECK_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 640698b..dcb23bf 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -172,7 +172,7 @@
 }
 
 struct OatWriter::GcMapDataAccess {
-  static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+  static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
     return compiled_method->GetGcMap();
   }
 
@@ -194,7 +194,7 @@
 };
 
 struct OatWriter::MappingTableDataAccess {
-  static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+  static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
     return compiled_method->GetMappingTable();
   }
 
@@ -216,7 +216,7 @@
 };
 
 struct OatWriter::VmapTableDataAccess {
-  static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+  static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
     return compiled_method->GetVmapTable();
   }
 
@@ -388,8 +388,8 @@
       // Derived from CompiledMethod.
       uint32_t quick_code_offset = 0;
 
-      const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
-      uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+      ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
+      uint32_t code_size = quick_code.size() * sizeof(uint8_t);
       uint32_t thumb_offset = compiled_method->CodeDelta();
 
       // Deduplicate code arrays if we are not producing debuggable code.
@@ -428,7 +428,7 @@
       uint32_t vmap_table_offset = method_header->vmap_table_offset_;
       // If we don't have quick code, then we must have a vmap, as that is how the dex2dex
       // compiler records its transformations.
-      DCHECK(quick_code != nullptr || vmap_table_offset != 0);
+      DCHECK(!quick_code.empty() || vmap_table_offset != 0);
       uint32_t gc_map_offset = method_header->gc_map_offset_;
       // The code offset was 0 when the mapping/vmap table offset was set, so it's set
       // to 0-offset and we need to adjust it by code_offset.
@@ -496,12 +496,12 @@
         } else {
           status = mirror::Class::kStatusNotReady;
         }
-        const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
-        if (gc_map != nullptr) {
-          size_t gc_map_size = gc_map->size() * sizeof(gc_map[0]);
+        ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap();
+        if (!gc_map.empty()) {
+          size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
           bool is_native = it.MemberIsNative();
           CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
-              << gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
+              << gc_map_size << " " << (is_native ? "true" : "false") << " "
               << (status < mirror::Class::kStatusVerified) << " " << status << " "
               << PrettyMethod(it.GetMemberIndex(), *dex_file_);
         }
@@ -519,30 +519,22 @@
  private:
   struct CodeOffsetsKeyComparator {
     bool operator()(const CompiledMethod* lhs, const CompiledMethod* rhs) const {
-      if (lhs->GetQuickCode() != rhs->GetQuickCode()) {
-        return lhs->GetQuickCode() < rhs->GetQuickCode();
+      // Code is deduplicated by CompilerDriver, so comparing data pointers suffices.
+      if (lhs->GetQuickCode().data() != rhs->GetQuickCode().data()) {
+        return lhs->GetQuickCode().data() < rhs->GetQuickCode().data();
       }
       // If the code is the same, all other fields are likely to be the same as well.
-      if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
-        return lhs->GetMappingTable() < rhs->GetMappingTable();
+      if (UNLIKELY(lhs->GetMappingTable().data() != rhs->GetMappingTable().data())) {
+        return lhs->GetMappingTable().data() < rhs->GetMappingTable().data();
       }
-      if (UNLIKELY(lhs->GetVmapTable() != rhs->GetVmapTable())) {
-        return lhs->GetVmapTable() < rhs->GetVmapTable();
+      if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) {
+        return lhs->GetVmapTable().data() < rhs->GetVmapTable().data();
       }
-      if (UNLIKELY(lhs->GetGcMap() != rhs->GetGcMap())) {
-        return lhs->GetGcMap() < rhs->GetGcMap();
+      if (UNLIKELY(lhs->GetGcMap().data() != rhs->GetGcMap().data())) {
+        return lhs->GetGcMap().data() < rhs->GetGcMap().data();
       }
-      const auto& lhs_patches = lhs->GetPatches();
-      const auto& rhs_patches = rhs->GetPatches();
-      if (UNLIKELY(lhs_patches.size() != rhs_patches.size())) {
-        return lhs_patches.size() < rhs_patches.size();
-      }
-      auto rit = rhs_patches.begin();
-      for (const LinkerPatch& lpatch : lhs_patches) {
-        if (UNLIKELY(!(lpatch == *rit))) {
-          return lpatch < *rit;
-        }
-        ++rit;
+      if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) {
+        return lhs->GetPatches().data() < rhs->GetPatches().data();
       }
       return false;
     }
@@ -583,17 +575,17 @@
       DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
       DCHECK_EQ(DataAccess::GetOffset(oat_class, method_offsets_index_), 0u);
 
-      const SwapVector<uint8_t>* map = DataAccess::GetData(compiled_method);
-      uint32_t map_size = map == nullptr ? 0 : map->size() * sizeof((*map)[0]);
+      ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method);
+      uint32_t map_size = map.size() * sizeof(map[0]);
       if (map_size != 0u) {
-        auto lb = dedupe_map_.lower_bound(map);
-        if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(map, lb->first)) {
+        auto lb = dedupe_map_.lower_bound(map.data());
+        if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(map.data(), lb->first)) {
           DataAccess::SetOffset(oat_class, method_offsets_index_, lb->second);
         } else {
           DataAccess::SetOffset(oat_class, method_offsets_index_, offset_);
-          dedupe_map_.PutBefore(lb, map, offset_);
+          dedupe_map_.PutBefore(lb, map.data(), offset_);
           offset_ += map_size;
-          writer_->oat_header_->UpdateChecksum(&(*map)[0], map_size);
+          writer_->oat_header_->UpdateChecksum(&map[0], map_size);
         }
       }
       ++method_offsets_index_;
@@ -605,7 +597,7 @@
  private:
   // Deduplication is already done on a pointer basis by the compiler driver,
   // so we can simply compare the pointers to find out if things are duplicated.
-  SafeMap<const SwapVector<uint8_t>*, uint32_t> dedupe_map_;
+  SafeMap<const uint8_t*, uint32_t> dedupe_map_;
 };
 
 class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
@@ -647,7 +639,7 @@
       UNREACHABLE();
     }
 
-    if (compiled_method != nullptr && compiled_method->GetQuickCode()->size() != 0) {
+    if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0) {
       method->SetEntryPointFromQuickCompiledCodePtrSize(
           reinterpret_cast<void*>(offsets.code_offset_), pointer_size_);
     }
@@ -713,10 +705,8 @@
       size_t file_offset = file_offset_;
       OutputStream* out = out_;
 
-      const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
-      // Need a wrapper if we create a copy for patching.
-      ArrayRef<const uint8_t> wrapped(*quick_code);
-      uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+      ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
+      uint32_t code_size = quick_code.size() * sizeof(uint8_t);
 
       // Deduplicate code arrays.
       const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
@@ -753,8 +743,8 @@
         DCHECK_OFFSET_();
 
         if (!compiled_method->GetPatches().empty()) {
-          patched_code_.assign(quick_code->begin(), quick_code->end());
-          wrapped = ArrayRef<const uint8_t>(patched_code_);
+          patched_code_.assign(quick_code.begin(), quick_code.end());
+          quick_code = ArrayRef<const uint8_t>(patched_code_);
           for (const LinkerPatch& patch : compiled_method->GetPatches()) {
             if (patch.Type() == kLinkerPatchCallRelative) {
               // NOTE: Relative calls across oat files are not supported.
@@ -781,8 +771,8 @@
           }
         }
 
-        writer_->oat_header_->UpdateChecksum(wrapped.data(), code_size);
-        if (!out->WriteFully(wrapped.data(), code_size)) {
+        writer_->oat_header_->UpdateChecksum(quick_code.data(), code_size);
+        if (!out->WriteFully(quick_code.data(), code_size)) {
           ReportWriteFailure("method code", it);
           return false;
         }
@@ -947,14 +937,14 @@
       ++method_offsets_index_;
 
       // Write deduplicated map.
-      const SwapVector<uint8_t>* map = DataAccess::GetData(compiled_method);
-      size_t map_size = map == nullptr ? 0 : map->size() * sizeof((*map)[0]);
+      ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method);
+      size_t map_size = map.size() * sizeof(map[0]);
       DCHECK((map_size == 0u && map_offset == 0u) ||
             (map_size != 0u && map_offset != 0u && map_offset <= offset_))
           << map_size << " " << map_offset << " " << offset_ << " "
           << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " for " << DataAccess::Name();
       if (map_size != 0u && map_offset == offset_) {
-        if (UNLIKELY(!out->WriteFully(&(*map)[0], map_size))) {
+        if (UNLIKELY(!out->WriteFully(&map[0], map_size))) {
           ReportWriteFailure(it);
           return false;
         }
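The comparator and map rewrites above lean on one invariant: the driver's storage
deduplicates contents, so equal tables share a single backing array and identity
of the data pointer implies identity of the content. A minimal sketch of the
keying scheme (plain std::map standing in for SafeMap):

    #include <cstdint>
    #include <map>

    std::map<const uint8_t*, uint32_t> dedupe_map;  // Data pointer -> file offset.

    uint32_t OffsetFor(const uint8_t* data, uint32_t next_offset) {
      auto it = dedupe_map.find(data);
      if (it != dedupe_map.end()) {
        return it->second;  // Same pointer => same bytes => reuse the offset.
      }
      dedupe_map.emplace(data, next_offset);
      return next_offset;
    }
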
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 5dd5be3..ed193c7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -774,11 +774,12 @@
                                                        &string_init_offset);
   // Replace calls to String.<init> with StringFactory.
   if (is_string_init) {
-    HInvokeStaticOrDirect::DispatchInfo dispatch_info = ComputeDispatchInfo(is_string_init,
-                                                                            string_init_offset,
-                                                                            target_method,
-                                                                            direct_method,
-                                                                            direct_code);
+    HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+        HInvokeStaticOrDirect::MethodLoadKind::kStringInit,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        dchecked_integral_cast<uint64_t>(string_init_offset),
+        0U
+    };
     HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
         arena_,
         number_of_arguments - 1,
@@ -841,11 +842,12 @@
       clinit_check = ProcessClinitCheckForInvoke(dex_pc, method_idx, &clinit_check_requirement);
     }
 
-    HInvokeStaticOrDirect::DispatchInfo dispatch_info = ComputeDispatchInfo(is_string_init,
-                                                                            string_init_offset,
-                                                                            target_method,
-                                                                            direct_method,
-                                                                            direct_code);
+    HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+        HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        0u,
+        0U
+    };
     invoke = new (arena_) HInvokeStaticOrDirect(arena_,
                                                 number_of_arguments,
                                                 return_type,
@@ -958,77 +960,6 @@
   return clinit_check;
 }
 
-HInvokeStaticOrDirect::DispatchInfo HGraphBuilder::ComputeDispatchInfo(
-    bool is_string_init,
-    int32_t string_init_offset,
-    MethodReference target_method,
-    uintptr_t direct_method,
-    uintptr_t direct_code) {
-  HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
-  HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
-  uint64_t method_load_data = 0u;
-  uint64_t direct_code_ptr = 0u;
-
-  if (is_string_init) {
-    // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
-    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kStringInit;
-    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    method_load_data = string_init_offset;
-  } else if (target_method.dex_file == outer_compilation_unit_->GetDexFile() &&
-      target_method.dex_method_index == outer_compilation_unit_->GetDexMethodIndex()) {
-    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
-    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
-  } else {
-    if (direct_method != 0u) {  // Should we use a direct pointer to the method?
-      if (direct_method != static_cast<uintptr_t>(-1)) {  // Is the method pointer known now?
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
-        method_load_data = direct_method;
-      } else {  // The direct pointer will be known at link time.
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
-      }
-    } else {  // Use dex cache.
-      DCHECK(target_method.dex_file == dex_compilation_unit_->GetDexFile());
-      DexCacheArraysLayout layout =
-          compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
-      if (layout.Valid()) {  // Can we use PC-relative access to the dex cache arrays?
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
-        method_load_data = layout.MethodOffset(target_method.dex_method_index);
-      } else {  // We must go through the ArtMethod's pointer to resolved methods.
-        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
-      }
-    }
-    if (direct_code != 0u) {  // Should we use a direct pointer to the code?
-      if (direct_code != static_cast<uintptr_t>(-1)) {  // Is the code pointer known now?
-        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
-        direct_code_ptr = direct_code;
-      } else if (compiler_driver_->IsImage() ||
-          target_method.dex_file == dex_compilation_unit_->GetDexFile()) {
-        // Use PC-relative calls for invokes within a multi-dex oat file.
-        // TODO: Recognize when the target dex file is within the current oat file for
-        // app compilation. At the moment we recognize only the boot image as multi-dex.
-        // NOTE: This will require changing the ARM backend which currently falls
-        // through from kCallPCRelative to kDirectCodeFixup for different dex files.
-        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
-      } else {  // The direct pointer will be known at link time.
-        // NOTE: This is used for app->boot calls when compiling an app against
-        // a relocatable but not yet relocated image.
-        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
-      }
-    } else {  // We must use the code pointer from the ArtMethod.
-      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-    }
-  }
-
-  if (graph_->IsDebuggable()) {
-    // For debuggable apps always use the code pointer from ArtMethod
-    // so that we don't circumvent instrumentation stubs if installed.
-    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-  }
-
-  return HInvokeStaticOrDirect::DispatchInfo {
-    method_load_kind, code_ptr_location, method_load_data, direct_code_ptr };
-}
-
 bool HGraphBuilder::SetupInvokeArguments(HInvoke* invoke,
                                          uint32_t number_of_vreg_arguments,
                                          uint32_t* args,
@@ -1241,12 +1172,14 @@
                                                            field_index,
                                                            dex_pc);
     } else {
+      uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
       field_set = new (arena_) HInstanceFieldSet(null_check,
                                                  value,
                                                  field_type,
                                                  resolved_field->GetOffset(),
                                                  resolved_field->IsVolatile(),
                                                  field_index,
+                                                 class_def_index,
                                                  *dex_file_,
                                                  dex_compilation_unit_->GetDexCache(),
                                                  dex_pc);
@@ -1261,11 +1194,13 @@
                                                            field_index,
                                                            dex_pc);
     } else {
+      uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
       field_get = new (arena_) HInstanceFieldGet(null_check,
                                                  field_type,
                                                  resolved_field->GetOffset(),
                                                  resolved_field->IsVolatile(),
                                                  field_index,
+                                                 class_def_index,
                                                  *dex_file_,
                                                  dex_compilation_unit_->GetDexCache(),
                                                  dex_pc);
@@ -1407,6 +1342,8 @@
     cls = new (arena_) HClinitCheck(constant, dex_pc);
     current_block_->AddInstruction(cls);
   }
+
+  uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
   if (is_put) {
     // We need to keep the class alive before loading the value.
     Temporaries temps(graph_);
@@ -1419,6 +1356,7 @@
                                                                 resolved_field->GetOffset(),
                                                                 resolved_field->IsVolatile(),
                                                                 field_index,
+                                                                class_def_index,
                                                                 *dex_file_,
                                                                 dex_cache_,
                                                                 dex_pc));
@@ -1428,6 +1366,7 @@
                                                                 resolved_field->GetOffset(),
                                                                 resolved_field->IsVolatile(),
                                                                 field_index,
+                                                                class_def_index,
                                                                 *dex_file_,
                                                                 dex_cache_,
                                                                 dex_pc));
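With ComputeDispatchInfo gone from the builder, every invoke initially gets the
most conservative dispatch, spelled as an aggregate initializer; the smarter
selection removed here presumably reappears in the new sharpening pass rather
than in the builder. Since the aggregate has no field names at the use sites,
here is the same initializer annotated with the field order used by the removed
function's locals:

    // Conservative default: load the method through the dex cache and call
    // through the ArtMethod pointer. Field order: method_load_kind,
    // code_ptr_location, method_load_data, direct_code_ptr.
    HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
        HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
        0u,  // No method_load_data for kDexCacheViaMethod.
        0u   // No direct_code_ptr for kCallArtMethod.
    };
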
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6910d51..9eaa4b6 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -276,12 +276,6 @@
                                      uint32_t dex_pc,
                                      HInvoke* invoke);
 
-  HInvokeStaticOrDirect::DispatchInfo ComputeDispatchInfo(bool is_string_init,
-                                                          int32_t string_init_offset,
-                                                          MethodReference target_method,
-                                                          uintptr_t direct_method,
-                                                          uintptr_t direct_code);
-
   bool SetupInvokeArguments(HInvoke* invoke,
                             uint32_t number_of_vreg_arguments,
                             uint32_t* args,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6a743eb..a1bb5e0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -32,6 +32,10 @@
 #include "code_generator_x86_64.h"
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "code_generator_mips.h"
+#endif
+
 #ifdef ART_ENABLE_CODEGEN_mips64
 #include "code_generator_mips64.h"
 #endif
@@ -375,13 +379,17 @@
 
   if (invoke->IsInvokeStaticOrDirect()) {
     HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
-    if (call->IsStringInit()) {
-      locations->AddTemp(visitor->GetMethodLocation());
-    } else if (call->IsRecursive()) {
-      locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
-    } else {
-      locations->AddTemp(visitor->GetMethodLocation());
-      locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+    switch (call->GetMethodLoadKind()) {
+      case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+        locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
+        break;
+      case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
+        locations->AddTemp(visitor->GetMethodLocation());
+        locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+        break;
+      default:
+        locations->AddTemp(visitor->GetMethodLocation());
+        break;
     }
   } else {
     locations->AddTemp(visitor->GetMethodLocation());
@@ -742,11 +750,12 @@
     }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
-    case kMips:
-      UNUSED(compiler_options);
-      UNUSED(graph);
-      UNUSED(isa_features);
-      return nullptr;
+    case kMips: {
+      return new mips::CodeGeneratorMIPS(graph,
+                                         *isa_features.AsMipsInstructionSetFeatures(),
+                                         compiler_options,
+                                         stats);
+    }
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64: {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b04dfc0..47b6f30 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -172,6 +172,7 @@
                                OptimizingCompilerStats* stats = nullptr);
   virtual ~CodeGenerator() {}
 
+  // Get the graph. This is the outermost graph, never the graph of a method being inlined.
   HGraph* GetGraph() const { return graph_; }
 
   HBasicBlock* GetNextBlockToEmit() const;
@@ -431,6 +432,12 @@
                              uint32_t dex_pc,
                              SlowPathCode* slow_path) = 0;
 
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return a fall-back dispatch info that should be used instead.
+  virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) = 0;
+
   // Generate a call to a static or direct method.
   virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
   // Generate a call to a virtual method.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 92a5878..8d9794b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5155,26 +5155,51 @@
   }
 }
 
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) {
+  if (desired_dispatch_info.method_load_kind ==
+      HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
+    // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+    return HInvokeStaticOrDirect::DispatchInfo {
+      HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+      HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+      0u,
+      0u
+    };
+  }
+  if (desired_dispatch_info.code_ptr_location ==
+      HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
+    const DexFile& outer_dex_file = GetGraph()->GetDexFile();
+    if (&outer_dex_file != target_method.dex_file) {
+      // Calls across dex files are more likely to exceed the available BL range,
+      // so use absolute patch with fixup if available and kCallArtMethod otherwise.
+      HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
+          (desired_dispatch_info.method_load_kind ==
+           HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
+          ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
+          : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+      return HInvokeStaticOrDirect::DispatchInfo {
+        desired_dispatch_info.method_load_kind,
+        code_ptr_location,
+        desired_dispatch_info.method_load_data,
+        0u
+      };
+    }
+  }
+  return desired_dispatch_info;
+}
+
 void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   // For better instruction scheduling we load the direct code pointer before the method pointer.
-  bool direct_code_loaded = false;
   switch (invoke->GetCodePtrLocation()) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      if (IsSameDexFile(*invoke->GetTargetMethod().dex_file, GetGraph()->GetDexFile())) {
-        break;
-      }
-      // Calls across dex files are more likely to exceed the available BL range,
-      // so use absolute patch by falling through to kDirectCodeFixup.
-      FALLTHROUGH_INTENDED;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
       // LR = code address from literal pool with link-time patch.
       __ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
-      direct_code_loaded = true;
       break;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
       // LR = invoke->GetDirectCodePtr();
       __ LoadImmediate(LR, invoke->GetDirectCodePtr());
-      direct_code_loaded = true;
       break;
     default:
       break;
@@ -5197,8 +5222,10 @@
                      DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
-      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
-      FALLTHROUGH_INTENDED;
+      // TODO: Implement this type.
+      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
       Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
       Register method_reg;
@@ -5227,20 +5254,14 @@
       __ bl(GetFrameEntryLabel());
       break;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      if (!direct_code_loaded) {
-        relative_call_patches_.emplace_back(invoke->GetTargetMethod());
-        __ Bind(&relative_call_patches_.back().label);
-        Label label;
-        __ bl(&label);  // Arbitrarily branch to the instruction after BL, override at link time.
-        __ Bind(&label);
-        break;
-      }
-      // If we loaded the direct code above, fall through.
-      FALLTHROUGH_INTENDED;
+      relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+      __ Bind(&relative_call_patches_.back().label);
+      // Arbitrarily branch to the BL itself, override at link time.
+      __ bl(&relative_call_patches_.back().label);
+      break;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
       // LR prepared above for better instruction scheduling.
-      DCHECK(direct_code_loaded);
       // LR()
       __ blx(LR);
       break;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6900933..cef1095 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -362,6 +362,12 @@
 
   Label* GetFrameEntryLabel() { return &frame_entry_label_; }
 
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return a fall-back dispatch info that should be used instead.
+  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) OVERRIDE;
+
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f68b11b..b0be446 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1580,6 +1580,21 @@
   HandleBinaryOp(instruction);
 }
 
+void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
+    HArm64IntermediateAddress* instruction) {
+  __ Add(OutputRegister(instruction),
+         InputRegisterAt(instruction, 0),
+         Operand(InputOperandAt(instruction, 1)));
+}
+
 void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1593,14 +1608,16 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
   Primitive::Type type = instruction->GetType();
   Register obj = InputRegisterAt(instruction, 0);
-  Location index = locations->InAt(1);
+  Location index = instruction->GetLocations()->InAt(1);
   size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
   MemOperand source = HeapOperand(obj);
+  CPURegister dest = OutputCPURegister(instruction);
+
   MacroAssembler* masm = GetVIXLAssembler();
   UseScratchRegisterScope temps(masm);
+  // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
   BlockPoolsScope block_pools(masm);
 
   if (index.IsConstant()) {
@@ -1608,15 +1625,26 @@
     source = HeapOperand(obj, offset);
   } else {
     Register temp = temps.AcquireSameSizeAs(obj);
-    __ Add(temp, obj, offset);
+    if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+      // We do not need to compute the intermediate address from the array: the
+      // input instruction has done it already. See the comment in
+      // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+      if (kIsDebugBuild) {
+        HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+        DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+      }
+      temp = obj;
+    } else {
+      __ Add(temp, obj, offset);
+    }
     source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
   }
 
-  codegen_->Load(type, OutputCPURegister(instruction), source);
+  codegen_->Load(type, dest, source);
   codegen_->MaybeRecordImplicitNullCheck(instruction);
 
-  if (type == Primitive::kPrimNot) {
-    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
+  if (instruction->GetType() == Primitive::kPrimNot) {
+    GetAssembler()->MaybeUnpoisonHeapReference(dest.W());
   }
 }
 
@@ -1670,7 +1698,18 @@
     } else {
       UseScratchRegisterScope temps(masm);
       Register temp = temps.AcquireSameSizeAs(array);
-      __ Add(temp, array, offset);
+      if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+        // We do not need to compute the intermediate address from the array: the
+        // input instruction has done it already. See the comment in
+        // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+        if (kIsDebugBuild) {
+          HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+          DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+        }
+        temp = array;
+      } else {
+        __ Add(temp, array, offset);
+      }
       destination = HeapOperand(temp,
                                 XRegisterFrom(index),
                                 LSL,
@@ -1680,6 +1719,7 @@
     codegen_->MaybeRecordImplicitNullCheck(instruction);
   } else {
     DCHECK(needs_write_barrier);
+    DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
     vixl::Label done;
     SlowPathCodeARM64* slow_path = nullptr;
     {
@@ -2786,6 +2826,13 @@
   return false;
 }
 
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method ATTRIBUTE_UNUSED) {
+  // On arm64 we support all dispatch types.
+  return desired_dispatch_info;
+}
+
 void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   // For better instruction scheduling we load the direct code pointer before the method pointer.
   bool direct_code_loaded = false;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a068b48..ab684ea 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -382,12 +382,18 @@
                      uint32_t dex_pc,
                      SlowPathCode* slow_path);
 
-  ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
+  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
 
   bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
     return false;
   }
 
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return a fall-back info that should be used instead.
+  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) OVERRIDE;
+
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
 
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
new file mode 100644
index 0000000..e6b9273
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -0,0 +1,4228 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips.h"
+
+#include "arch/mips/entrypoints_direct_mips.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "art_method.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "gc/accounting/card_table.h"
+#include "intrinsics.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils/assembler.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace mips {
+
+static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = A0;
+
+// We need extra temporary/scratch registers (in addition to AT) in some cases.
+static constexpr Register TMP = T8;
+static constexpr FRegister FTMP = F8;
+
+// ART Thread Register.
+static constexpr Register TR = S1;
+
+Location MipsReturnLocation(Primitive::Type return_type) {
+  switch (return_type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot:
+      return Location::RegisterLocation(V0);
+
+    case Primitive::kPrimLong:
+      return Location::RegisterPairLocation(V0, V1);
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      return Location::FpuRegisterLocation(F0);
+
+    case Primitive::kPrimVoid:
+      return Location();
+  }
+  UNREACHABLE();
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const {
+  return MipsReturnLocation(type);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
+  return Location::RegisterLocation(kMethodRegisterArgument);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) {
+  Location next_location;
+
+  switch (type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      uint32_t gp_index = gp_index_++;
+      if (gp_index < calling_convention.GetNumberOfRegisters()) {
+        next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
+      } else {
+        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+        next_location = Location::StackSlot(stack_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      uint32_t gp_index = gp_index_;
+      gp_index_ += 2;
+      if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
+        if (calling_convention.GetRegisterAt(gp_index) == A1) {
+          gp_index_++;  // Skip A1, and use A2_A3 instead.
+          gp_index++;
+        }
+        Register low_even = calling_convention.GetRegisterAt(gp_index);
+        Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
+        DCHECK_EQ(low_even + 1, high_odd);
+        next_location = Location::RegisterPairLocation(low_even, high_odd);
+      } else {
+        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+        next_location = Location::DoubleStackSlot(stack_offset);
+      }
+      break;
+    }
+
+    // Note: both float and double types are stored in even FPU registers. On a 32-bit FPU,
+    // a double takes up the even/odd pair, while floats are stored in even registers only.
+    // On a 64-bit FPU, both doubles and floats are stored in even registers only.
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      uint32_t float_index = float_index_++;
+      if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
+        next_location = Location::FpuRegisterLocation(
+            calling_convention.GetFpuRegisterAt(float_index));
+      } else {
+        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+        next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+                                                     : Location::StackSlot(stack_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unexpected parameter type " << type;
+      break;
+  }
+
+  // Space on the stack is reserved for all arguments.
+  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
+
+  return next_location;
+}
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
+  return MipsReturnLocation(type);
+}
+
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+    __ Bind(GetEntryLabel());
+    if (instruction_->CanThrowIntoCatchBlock()) {
+      // Live registers will be restored in the catch block if caught.
+      SaveLiveRegisters(codegen, instruction_->GetLocations());
+    }
+    // We're moving two locations to locations that could overlap, so we need a parallel
+    // move resolver.
+    InvokeRuntimeCallingConvention calling_convention;
+    codegen->EmitParallelMoves(locations->InAt(0),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                               Primitive::kPrimInt,
+                               locations->InAt(1),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+                               Primitive::kPrimInt);
+    mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+                                instruction_,
+                                instruction_->GetDexPc(),
+                                this,
+                                IsDirectEntrypoint(kQuickThrowArrayBounds));
+    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+  }
+
+  bool IsFatal() const OVERRIDE { return true; }
+
+  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+
+ private:
+  HBoundsCheck* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
+};
+
+class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+    __ Bind(GetEntryLabel());
+    if (instruction_->CanThrowIntoCatchBlock()) {
+      // Live registers will be restored in the catch block if caught.
+      SaveLiveRegisters(codegen, instruction_->GetLocations());
+    }
+    mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+                                instruction_,
+                                instruction_->GetDexPc(),
+                                this,
+                                IsDirectEntrypoint(kQuickThrowDivZero));
+    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+  }
+
+  bool IsFatal() const OVERRIDE { return true; }
+
+  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+
+ private:
+  HDivZeroCheck* const instruction_;
+  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
+};
+
+class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  LoadClassSlowPathMIPS(HLoadClass* cls,
+                        HInstruction* at,
+                        uint32_t dex_pc,
+                        bool do_clinit)
+      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+  }
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = at_->GetLocations();
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
+
+    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+                                            : QUICK_ENTRY_POINT(pInitializeType);
+    bool direct = do_clinit_ ? IsDirectEntrypoint(kQuickInitializeStaticStorage)
+                             : IsDirectEntrypoint(kQuickInitializeType);
+
+    mips_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this, direct);
+    if (do_clinit_) {
+      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+    } else {
+      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+    }
+
+    // Move the class to the desired location.
+    Location out = locations->Out();
+    if (out.IsValid()) {
+      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+      Primitive::Type type = at_->GetType();
+      mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
+    }
+
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+
+ private:
+  // The class this slow path will load.
+  HLoadClass* const cls_;
+
+  // The instruction where this slow path is happening.
+  // (Might be the load class or an initialization check).
+  HInstruction* const at_;
+
+  // The dex PC of `at_`.
+  const uint32_t dex_pc_;
+
+  // Whether to initialize the class.
+  const bool do_clinit_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
+};
+
+class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit LoadStringSlowPathMIPS(HLoadString* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+    mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+                                instruction_,
+                                instruction_->GetDexPc(),
+                                this,
+                                IsDirectEntrypoint(kQuickResolveString));
+    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+    Primitive::Type type = instruction_->GetType();
+    mips_codegen->MoveLocation(locations->Out(),
+                               calling_convention.GetReturnLocation(type),
+                               type);
+
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+
+ private:
+  HLoadString* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
+};
+
+class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : instruction_(instr) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+    __ Bind(GetEntryLabel());
+    if (instruction_->CanThrowIntoCatchBlock()) {
+      // Live registers will be restored in the catch block if caught.
+      SaveLiveRegisters(codegen, instruction_->GetLocations());
+    }
+    mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+                                instruction_,
+                                instruction_->GetDexPc(),
+                                this,
+                                IsDirectEntrypoint(kQuickThrowNullPointer));
+    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
+  }
+
+  bool IsFatal() const OVERRIDE { return true; }
+
+  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+
+ private:
+  HNullCheck* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
+};
+
+class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
+      : instruction_(instruction), successor_(successor) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
+    mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+                                instruction_,
+                                instruction_->GetDexPc(),
+                                this,
+                                IsDirectEntrypoint(kQuickTestSuspend));
+    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+    RestoreLiveRegisters(codegen, instruction_->GetLocations());
+    if (successor_ == nullptr) {
+      __ B(GetReturnLabel());
+    } else {
+      __ B(mips_codegen->GetLabelOf(successor_));
+    }
+  }
+
+  MipsLabel* GetReturnLabel() {
+    DCHECK(successor_ == nullptr);
+    return &return_label_;
+  }
+
+  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+
+ private:
+  HSuspendCheck* const instruction_;
+  // If not null, the block to branch to after the suspend check.
+  HBasicBlock* const successor_;
+
+  // If `successor_` is null, the label to branch to after the suspend check.
+  MipsLabel return_label_;
+
+  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
+};
+
+class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+    uint32_t dex_pc = instruction_->GetDexPc();
+    DCHECK(instruction_->IsCheckCast()
+           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    // We're moving two locations to locations that could overlap, so we need a parallel
+    // move resolver.
+    InvokeRuntimeCallingConvention calling_convention;
+    codegen->EmitParallelMoves(locations->InAt(1),
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+                               Primitive::kPrimNot,
+                               object_class,
+                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+                               Primitive::kPrimNot);
+
+    if (instruction_->IsInstanceOf()) {
+      mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+                                  instruction_,
+                                  dex_pc,
+                                  this,
+                                  IsDirectEntrypoint(kQuickInstanceofNonTrivial));
+      Primitive::Type ret_type = instruction_->GetType();
+      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
+      mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
+      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
+                           uint32_t,
+                           const mirror::Class*,
+                           const mirror::Class*>();
+    } else {
+      DCHECK(instruction_->IsCheckCast());
+      mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+                                  instruction_,
+                                  dex_pc,
+                                  this,
+                                  IsDirectEntrypoint(kQuickCheckCast));
+      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+    }
+
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+
+ private:
+  HInstruction* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
+};
+
+class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+  explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+    : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, instruction_->GetLocations());
+    DCHECK(instruction_->IsDeoptimize());
+    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
+    uint32_t dex_pc = deoptimize->GetDexPc();
+    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+    mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+                                instruction_,
+                                dex_pc,
+                                this,
+                                IsDirectEntrypoint(kQuickDeoptimize));
+  }
+
+  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+
+ private:
+  HInstruction* const instruction_;
+  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
+};
+
+CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
+                                     const MipsInstructionSetFeatures& isa_features,
+                                     const CompilerOptions& compiler_options,
+                                     OptimizingCompilerStats* stats)
+    : CodeGenerator(graph,
+                    kNumberOfCoreRegisters,
+                    kNumberOfFRegisters,
+                    kNumberOfRegisterPairs,
+                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+                                        arraysize(kCoreCalleeSaves)),
+                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+                                        arraysize(kFpuCalleeSaves)),
+                    compiler_options,
+                    stats),
+      block_labels_(nullptr),
+      location_builder_(graph, this),
+      instruction_visitor_(graph, this),
+      move_resolver_(graph->GetArena(), this),
+      assembler_(&isa_features),
+      isa_features_(isa_features) {
+  // Save RA (containing the return address) to mimic Quick.
+  AddAllocatedRegister(Location::RegisterLocation(RA));
+}
+
+#undef __
+#define __ down_cast<MipsAssembler*>(GetAssembler())->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
+  // Ensure that we fix up branches.
+  __ FinalizeCode();
+
+  // Adjust native pc offsets in stack maps.
+  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+    uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+    uint32_t new_position = __ GetAdjustedPosition(old_position);
+    DCHECK_GE(new_position, old_position);
+    stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+  }
+
+  // Adjust pc offsets for the disassembly information.
+  if (disasm_info_ != nullptr) {
+    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
+    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
+    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
+    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
+      it.second.start = __ GetAdjustedPosition(it.second.start);
+      it.second.end = __ GetAdjustedPosition(it.second.end);
+    }
+    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
+      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
+      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
+    }
+  }
+
+  CodeGenerator::Finalize(allocator);
+}
+
+MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
+  return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverMIPS::EmitMove(size_t index) {
+  DCHECK_LT(index, moves_.size());
+  MoveOperands* move = moves_[index];
+  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
+}
+
+void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
+  DCHECK_LT(index, moves_.size());
+  MoveOperands* move = moves_[index];
+  Primitive::Type type = move->GetType();
+  Location loc1 = move->GetDestination();
+  Location loc2 = move->GetSource();
+
+  DCHECK(!loc1.IsConstant());
+  DCHECK(!loc2.IsConstant());
+
+  if (loc1.Equals(loc2)) {
+    return;
+  }
+
+  if (loc1.IsRegister() && loc2.IsRegister()) {
+    // Swap 2 GPRs.
+    Register r1 = loc1.AsRegister<Register>();
+    Register r2 = loc2.AsRegister<Register>();
+    __ Move(TMP, r2);
+    __ Move(r2, r1);
+    __ Move(r1, TMP);
+  } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
+    FRegister f1 = loc1.AsFpuRegister<FRegister>();
+    FRegister f2 = loc2.AsFpuRegister<FRegister>();
+    if (type == Primitive::kPrimFloat) {
+      __ MovS(FTMP, f2);
+      __ MovS(f2, f1);
+      __ MovS(f1, FTMP);
+    } else {
+      DCHECK_EQ(type, Primitive::kPrimDouble);
+      __ MovD(FTMP, f2);
+      __ MovD(f2, f1);
+      __ MovD(f1, FTMP);
+    }
+  } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
+             (loc1.IsFpuRegister() && loc2.IsRegister())) {
+    // Swap FPR and GPR.
+    DCHECK_EQ(type, Primitive::kPrimFloat);  // Can only swap a float.
+    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+                                        : loc2.AsFpuRegister<FRegister>();
+    Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>()
+                                    : loc2.AsRegister<Register>();
+    __ Move(TMP, r2);
+    __ Mfc1(r2, f1);
+    __ Mtc1(TMP, f1);
+  } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
+    // Swap 2 GPR register pairs.
+    Register r1 = loc1.AsRegisterPairLow<Register>();
+    Register r2 = loc2.AsRegisterPairLow<Register>();
+    __ Move(TMP, r2);
+    __ Move(r2, r1);
+    __ Move(r1, TMP);
+    r1 = loc1.AsRegisterPairHigh<Register>();
+    r2 = loc2.AsRegisterPairHigh<Register>();
+    __ Move(TMP, r2);
+    __ Move(r2, r1);
+    __ Move(r1, TMP);
+  } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
+             (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
+    // Swap FPR and GPR register pair.
+    DCHECK_EQ(type, Primitive::kPrimDouble);
+    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+                                        : loc2.AsFpuRegister<FRegister>();
+    Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
+                                          : loc2.AsRegisterPairLow<Register>();
+    Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
+                                          : loc2.AsRegisterPairHigh<Register>();
+    // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
+    // then swap the high 32 bits of the same FPR: mtc1 makes the high 32 bits of an FPR
+    // unpredictable, so the following mfhc1 would read an undefined value.
+    __ Mfc1(TMP, f1);
+    __ Mfhc1(AT, f1);
+    __ Mtc1(r2_l, f1);
+    __ Mthc1(r2_h, f1);
+    __ Move(r2_l, TMP);
+    __ Move(r2_h, AT);
+  } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
+    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+  } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
+    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+  } else {
+    LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
+  }
+}
+
+void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
+  __ Pop(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::SpillScratch(int reg) {
+  __ Push(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
+  // Allocate a scratch register other than TMP, if available.
+  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
+  // automatically unspilled when the scratch scope object is destroyed).
+  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
+  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
+  int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+  for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
+    __ LoadFromOffset(kLoadWord,
+                      Register(ensure_scratch.GetRegister()),
+                      SP,
+                      index1 + stack_offset);
+    __ LoadFromOffset(kLoadWord,
+                      TMP,
+                      SP,
+                      index2 + stack_offset);
+    __ StoreToOffset(kStoreWord,
+                     Register(ensure_scratch.GetRegister()),
+                     SP,
+                     index2 + stack_offset);
+    __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
+  }
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+  return dwarf::Reg::MipsCore(static_cast<int>(reg));
+}
+
+// TODO: mapping of floating-point registers to DWARF.
+
+void CodeGeneratorMIPS::GenerateFrameEntry() {
+  __ Bind(&frame_entry_label_);
+
+  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+
+  if (do_overflow_check) {
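+    // Probe the word `GetStackOverflowReservedBytes(kMips)` below SP. If the frame
+    // would overflow the stack, this load faults in the protected region, so the
+    // runtime can turn the fault into a StackOverflowError. Loading into ZERO
+    // discards the value.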
+    __ LoadFromOffset(kLoadWord,
+                      ZERO,
+                      SP,
+                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+    RecordPcInfo(nullptr, 0);
+  }
+
+  if (HasEmptyFrame()) {
+    return;
+  }
+
+  // Make sure the frame size isn't unreasonably large.
+  if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
+    LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+  }
+
+  // Spill callee-saved registers.
+  // Note that their cumulative size is small and they can be indexed using
+  // 16-bit offsets.
+
+  // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+  uint32_t ofs = FrameEntrySpillSize();
+  bool unaligned_float = ofs & 0x7;
+  bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+  __ IncreaseFrameSize(ofs);
+
+  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+    Register reg = kCoreCalleeSaves[i];
+    if (allocated_registers_.ContainsCoreRegister(reg)) {
+      ofs -= kMipsWordSize;
+      __ Sw(reg, SP, ofs);
+      __ cfi().RelOffset(DWARFReg(reg), ofs);
+    }
+  }
+
+  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+    FRegister reg = kFpuCalleeSaves[i];
+    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+      ofs -= kMipsDoublewordSize;
+      // TODO: Change the frame to avoid unaligned accesses for fpu registers.
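+      // Sdc1 needs an 8-byte-aligned address, so when the spill offset is only
+      // 4-byte aligned the double is stored as two 32-bit halves.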
+      if (unaligned_float) {
+        if (fpu_32bit) {
+          __ Swc1(reg, SP, ofs);
+          __ Swc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+        } else {
+          __ Mfhc1(TMP, reg);
+          __ Swc1(reg, SP, ofs);
+          __ Sw(TMP, SP, ofs + 4);
+        }
+      } else {
+        __ Sdc1(reg, SP, ofs);
+      }
+      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
+    }
+  }
+
+  // Allocate the rest of the frame and store the current method pointer
+  // at its end.
+
+  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+  static_assert(IsInt<16>(kCurrentMethodStackOffset),
+                "kCurrentMethodStackOffset must fit into int16_t");
+  __ Sw(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+}
+
+void CodeGeneratorMIPS::GenerateFrameExit() {
+  __ cfi().RememberState();
+
+  if (!HasEmptyFrame()) {
+    // Deallocate the rest of the frame.
+
+    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+    // Restore callee-saved registers.
+    // Note that their cumulative size is small and they can be indexed using
+    // 16-bit offsets.
+
+    // TODO: increment/decrement SP in one step instead of two or remove this comment.
+
+    uint32_t ofs = 0;
+    bool unaligned_float = FrameEntrySpillSize() & 0x7;
+    bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+
+    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+      FRegister reg = kFpuCalleeSaves[i];
+      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+        if (unaligned_float) {
+          if (fpu_32bit) {
+            __ Lwc1(reg, SP, ofs);
+            __ Lwc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+          } else {
+            __ Lwc1(reg, SP, ofs);
+            __ Lw(TMP, SP, ofs + 4);
+            __ Mthc1(TMP, reg);
+          }
+        } else {
+          __ Ldc1(reg, SP, ofs);
+        }
+        ofs += kMipsDoublewordSize;
+        // TODO: __ cfi().Restore(DWARFReg(reg));
+      }
+    }
+
+    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+      Register reg = kCoreCalleeSaves[i];
+      if (allocated_registers_.ContainsCoreRegister(reg)) {
+        __ Lw(reg, SP, ofs);
+        ofs += kMipsWordSize;
+        __ cfi().Restore(DWARFReg(reg));
+      }
+    }
+
+    DCHECK_EQ(ofs, FrameEntrySpillSize());
+    __ DecreaseFrameSize(ofs);
+  }
+
+  __ Jr(RA);
+  __ Nop();
+
+  __ cfi().RestoreState();
+  __ cfi().DefCFAOffset(GetFrameSize());
+}
+
+void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
+  __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorMIPS::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+  if (src.Equals(dst)) {
+    return;
+  }
+
+  if (src.IsConstant()) {
+    MoveConstant(dst, src.GetConstant());
+  } else {
+    if (Primitive::Is64BitType(dst_type)) {
+      Move64(dst, src);
+    } else {
+      Move32(dst, src);
+    }
+  }
+}
+
+void CodeGeneratorMIPS::Move32(Location destination, Location source) {
+  if (source.Equals(destination)) {
+    return;
+  }
+
+  if (destination.IsRegister()) {
+    if (source.IsRegister()) {
+      __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
+    } else if (source.IsFpuRegister()) {
+      __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
+    } else {
+      DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
+    }
+  } else if (destination.IsFpuRegister()) {
+    if (source.IsRegister()) {
+      __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
+    } else if (source.IsFpuRegister()) {
+      __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+    } else {
+      DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+      __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+    }
+  } else {
+    DCHECK(destination.IsStackSlot()) << destination;
+    if (source.IsRegister()) {
+      __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
+    } else if (source.IsFpuRegister()) {
+      __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
+    } else {
+      DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+      __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+      __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
+    }
+  }
+}
+
+void CodeGeneratorMIPS::Move64(Location destination, Location source) {
+  if (source.Equals(destination)) {
+    return;
+  }
+
+  if (destination.IsRegisterPair()) {
+    if (source.IsRegisterPair()) {
+      __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+      __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+    } else if (source.IsFpuRegister()) {
+      Register dst_high = destination.AsRegisterPairHigh<Register>();
+      Register dst_low =  destination.AsRegisterPairLow<Register>();
+      FRegister src = source.AsFpuRegister<FRegister>();
+      __ Mfc1(dst_low, src);
+      __ Mfhc1(dst_high, src);
+    } else {
+      DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+      int32_t off = source.GetStackIndex();
+      Register r = destination.AsRegisterPairLow<Register>();
+      __ LoadFromOffset(kLoadDoubleword, r, SP, off);
+    }
+  } else if (destination.IsFpuRegister()) {
+    if (source.IsRegisterPair()) {
+      FRegister dst = destination.AsFpuRegister<FRegister>();
+      Register src_high = source.AsRegisterPairHigh<Register>();
+      Register src_low = source.AsRegisterPairLow<Register>();
+      __ Mtc1(src_low, dst);
+      __ Mthc1(src_high, dst);
+    } else if (source.IsFpuRegister()) {
+      __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+    } else {
+      DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+      __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+    }
+  } else {
+    DCHECK(destination.IsDoubleStackSlot()) << destination;
+    int32_t off = destination.GetStackIndex();
+    if (source.IsRegisterPair()) {
+      __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, off);
+    } else if (source.IsFpuRegister()) {
+      __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, off);
+    } else {
+      DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+      __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+      __ StoreToOffset(kStoreWord, TMP, SP, off);
+      __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
+      __ StoreToOffset(kStoreWord, TMP, SP, off + 4);
+    }
+  }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
+  if (c->IsIntConstant() || c->IsNullConstant()) {
+    // Move 32 bit constant.
+    int32_t value = GetInt32ValueOf(c);
+    if (destination.IsRegister()) {
+      Register dst = destination.AsRegister<Register>();
+      __ LoadConst32(dst, value);
+    } else {
+      DCHECK(destination.IsStackSlot())
+          << "Cannot move " << c->DebugName() << " to " << destination;
+      __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+    }
+  } else if (c->IsLongConstant()) {
+    // Move 64 bit constant.
+    int64_t value = GetInt64ValueOf(c);
+    if (destination.IsRegisterPair()) {
+      Register r_h = destination.AsRegisterPairHigh<Register>();
+      Register r_l = destination.AsRegisterPairLow<Register>();
+      __ LoadConst64(r_h, r_l, value);
+    } else {
+      DCHECK(destination.IsDoubleStackSlot())
+          << "Cannot move " << c->DebugName() << " to " << destination;
+      __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+    }
+  } else if (c->IsFloatConstant()) {
+    // Move 32 bit float constant.
+    int32_t value = GetInt32ValueOf(c);
+    if (destination.IsFpuRegister()) {
+      __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
+    } else {
+      DCHECK(destination.IsStackSlot())
+          << "Cannot move " << c->DebugName() << " to " << destination;
+      __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+    }
+  } else {
+    // Move 64 bit double constant.
+    DCHECK(c->IsDoubleConstant()) << c->DebugName();
+    int64_t value = GetInt64ValueOf(c);
+    if (destination.IsFpuRegister()) {
+      FRegister fd = destination.AsFpuRegister<FRegister>();
+      __ LoadDConst64(fd, value, TMP);
+    } else {
+      DCHECK(destination.IsDoubleStackSlot())
+          << "Cannot move " << c->DebugName() << " to " << destination;
+      __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+    }
+  }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
+  DCHECK(destination.IsRegister());
+  Register dst = destination.AsRegister<Register>();
+  __ LoadConst32(dst, value);
+}
+
+void CodeGeneratorMIPS::Move(HInstruction* instruction,
+                             Location location,
+                             HInstruction* move_for) {
+  LocationSummary* locations = instruction->GetLocations();
+  Primitive::Type type = instruction->GetType();
+  DCHECK_NE(type, Primitive::kPrimVoid);
+
+  if (instruction->IsCurrentMethod()) {
+    Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+  } else if (locations != nullptr && locations->Out().Equals(location)) {
+    return;
+  } else if (instruction->IsIntConstant()
+             || instruction->IsLongConstant()
+             || instruction->IsNullConstant()) {
+    MoveConstant(location, instruction->AsConstant());
+  } else if (instruction->IsTemporary()) {
+    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+    if (temp_location.IsStackSlot()) {
+      Move32(location, temp_location);
+    } else {
+      DCHECK(temp_location.IsDoubleStackSlot());
+      Move64(location, temp_location);
+    }
+  } else if (instruction->IsLoadLocal()) {
+    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+    if (Primitive::Is64BitType(type)) {
+      Move64(location, Location::DoubleStackSlot(stack_slot));
+    } else {
+      Move32(location, Location::StackSlot(stack_slot));
+    }
+  } else {
+    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+    if (Primitive::Is64BitType(type)) {
+      Move64(location, locations->Out());
+    } else {
+      Move32(location, locations->Out());
+    }
+  }
+}
+
+void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
+Location CodeGeneratorMIPS::GetStackLocation(HLoadLocal* load) const {
+  Primitive::Type type = load->GetType();
+
+  switch (type) {
+    case Primitive::kPrimNot:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unexpected type " << type;
+  }
+
+  LOG(FATAL) << "Unreachable";
+  return Location::NoLocation();
+}
+
+void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
+  MipsLabel done;
+  Register card = AT;
+  Register temp = TMP;
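+  // Storing a null reference does not need a write barrier.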
+  __ Beqz(value, &done);
+  __ LoadFromOffset(kLoadWord,
+                    card,
+                    TR,
+                    Thread::CardTableOffset<kMipsWordSize>().Int32Value());
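+  // The card for `object` lives at (biased card table base) + (object >> kCardShift).
+  // The base is biased so that its least significant byte equals the dirty-card
+  // value, which is why storing `card` itself marks the card dirty.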
+  __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
+  __ Addu(temp, card, temp);
+  __ Sb(card, temp, 0);
+  __ Bind(&done);
+}
+
+void CodeGeneratorMIPS::SetupBlockedRegisters(bool is_baseline) const {
+  // Don't allocate the dalvik style register pair passing.
+  blocked_register_pairs_[A1_A2] = true;
+
+  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
+  blocked_core_registers_[ZERO] = true;
+  blocked_core_registers_[K0] = true;
+  blocked_core_registers_[K1] = true;
+  blocked_core_registers_[GP] = true;
+  blocked_core_registers_[SP] = true;
+  blocked_core_registers_[RA] = true;
+
+  // AT and TMP(T8) are used as temporary/scratch registers
+  // (similar to how AT is used by MIPS assemblers).
+  blocked_core_registers_[AT] = true;
+  blocked_core_registers_[TMP] = true;
+  blocked_fpu_registers_[FTMP] = true;
+
+  // Reserve suspend and thread registers.
+  blocked_core_registers_[S0] = true;
+  blocked_core_registers_[TR] = true;
+
+  // Reserve T9 for function calls.
+  blocked_core_registers_[T9] = true;
+
+  // Reserve odd-numbered FPU registers.
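+  // On a 32-bit FPU a double occupies an even/odd pair, so allocating only even
+  // registers lets float and double share the same allocation scheme.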
+  for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
+    blocked_fpu_registers_[i] = true;
+  }
+
+  if (is_baseline) {
+    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+      blocked_core_registers_[kCoreCalleeSaves[i]] = true;
+    }
+
+    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
+    }
+  }
+
+  UpdateBlockedPairRegisters();
+}
+
+void CodeGeneratorMIPS::UpdateBlockedPairRegisters() const {
+  for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+    MipsManagedRegister current =
+        MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+    if (blocked_core_registers_[current.AsRegisterPairLow()]
+        || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+      blocked_register_pairs_[i] = true;
+    }
+  }
+}
+
+Location CodeGeneratorMIPS::AllocateFreeRegister(Primitive::Type type) const {
+  switch (type) {
+    case Primitive::kPrimLong: {
+      size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
+      MipsManagedRegister pair =
+          MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+      DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
+      DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
+
+      blocked_core_registers_[pair.AsRegisterPairLow()] = true;
+      blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
+      UpdateBlockedPairRegisters();
+      return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
+    }
+
+    case Primitive::kPrimByte:
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
+      // Block all register pairs that contain `reg`.
+      for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+        MipsManagedRegister current =
+            MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+        if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
+          blocked_register_pairs_[i] = true;
+        }
+      }
+      return Location::RegisterLocation(reg);
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFRegisters);
+      return Location::FpuRegisterLocation(reg);
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << type;
+  }
+
+  UNREACHABLE();
+}
+
+size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+  __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
+  return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+  __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
+  return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+  __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
+  return kMipsDoublewordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+  __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
+  return kMipsDoublewordSize;
+}
+
+void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
+  stream << MipsManagedRegister::FromCoreRegister(Register(reg));
+}
+
+void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+  stream << MipsManagedRegister::FromFRegister(FRegister(reg));
+}
+
+void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
+                                      HInstruction* instruction,
+                                      uint32_t dex_pc,
+                                      SlowPathCode* slow_path) {
+  InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+                instruction,
+                dex_pc,
+                slow_path,
+                IsDirectEntrypoint(entrypoint));
+}
+
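+// The o32 ABI requires the caller of a function taking register arguments to
+// reserve 16 bytes (4 words) of home space for $a0-$a3.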
+constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
+
+void CodeGeneratorMIPS::InvokeRuntime(int32_t entry_point_offset,
+                                      HInstruction* instruction,
+                                      uint32_t dex_pc,
+                                      SlowPathCode* slow_path,
+                                      bool is_direct_entrypoint) {
+  if (is_direct_entrypoint) {
+    // Reserve argument space on the stack (for $a0-$a3) for
+    // entrypoints that directly reference native implementations.
+    // The called function may use this space to store $a0-$a3 regs.
+    __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+  }
+  __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+  __ Jalr(T9);
+  __ Nop();
+  if (is_direct_entrypoint) {
+    __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+  }
+  RecordPcInfo(instruction, dex_pc, slow_path);
+}
+
+void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
+                                                                    Register class_reg) {
+  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
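+  // If the class status is below kStatusInitialized, initialization has not
+  // finished yet, so go to the slow path.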
+  __ Blt(TMP, AT, slow_path->GetEntryLabel());
+  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
+  __ Sync(0);
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
+  __ Sync(0);  // Only stype 0 is supported.
+}
+
+void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
+                                                        HBasicBlock* successor) {
+  SuspendCheckSlowPathMIPS* slow_path =
+    new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+  codegen_->AddSlowPath(slow_path);
+
+  __ LoadFromOffset(kLoadUnsignedHalfword,
+                    TMP,
+                    TR,
+                    Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
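+  // Any non-zero thread flag (e.g. a pending suspend request) diverts to the
+  // slow path, which calls pTestSuspend.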
+  if (successor == nullptr) {
+    __ Bnez(TMP, slow_path->GetEntryLabel());
+    __ Bind(slow_path->GetReturnLabel());
+  } else {
+    __ Beqz(TMP, codegen_->GetLabelOf(successor));
+    __ B(slow_path->GetEntryLabel());
+    // slow_path will return to GetLabelOf(successor).
+  }
+}
+
+InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
+                                                           CodeGeneratorMIPS* codegen)
+      : HGraphVisitor(graph),
+        assembler_(codegen->GetAssembler()),
+        codegen_(codegen) {}
+
+void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+  DCHECK_EQ(instruction->InputCount(), 2U);
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  Primitive::Type type = instruction->GetResultType();
+  switch (type) {
+    case Primitive::kPrimInt: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      HInstruction* right = instruction->InputAt(1);
+      bool can_use_imm = false;
+      if (right->IsConstant()) {
+        int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
+        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
+          can_use_imm = IsUint<16>(imm);
+        } else if (instruction->IsAdd()) {
+          can_use_imm = IsInt<16>(imm);
+        } else {
+          DCHECK(instruction->IsSub());
+          can_use_imm = IsInt<16>(-imm);
+        }
+      }
+      if (can_use_imm)
+        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
+      else
+        locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      // TODO: can 2nd param be const?
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      if (instruction->IsAdd() || instruction->IsSub()) {
+        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      } else {
+        DCHECK(instruction->IsAnd() || instruction->IsOr() || instruction->IsXor());
+        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      DCHECK(instruction->IsAdd() || instruction->IsSub());
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
+  }
+}
+
+void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+  Primitive::Type type = instruction->GetType();
+  LocationSummary* locations = instruction->GetLocations();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register lhs = locations->InAt(0).AsRegister<Register>();
+      Location rhs_location = locations->InAt(1);
+
+      Register rhs_reg = ZERO;
+      int32_t rhs_imm = 0;
+      bool use_imm = rhs_location.IsConstant();
+      if (use_imm) {
+        rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+      } else {
+        rhs_reg = rhs_location.AsRegister<Register>();
+      }
+
+      if (instruction->IsAnd()) {
+        if (use_imm)
+          __ Andi(dst, lhs, rhs_imm);
+        else
+          __ And(dst, lhs, rhs_reg);
+      } else if (instruction->IsOr()) {
+        if (use_imm)
+          __ Ori(dst, lhs, rhs_imm);
+        else
+          __ Or(dst, lhs, rhs_reg);
+      } else if (instruction->IsXor()) {
+        if (use_imm)
+          __ Xori(dst, lhs, rhs_imm);
+        else
+          __ Xor(dst, lhs, rhs_reg);
+      } else if (instruction->IsAdd()) {
+        if (use_imm)
+          __ Addiu(dst, lhs, rhs_imm);
+        else
+          __ Addu(dst, lhs, rhs_reg);
+      } else {
+        DCHECK(instruction->IsSub());
+        if (use_imm)
+          __ Addiu(dst, lhs, -rhs_imm);
+        else
+          __ Subu(dst, lhs, rhs_reg);
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      // TODO: can the 2nd input be a constant?
+      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+      if (instruction->IsAnd()) {
+        __ And(dst_low, lhs_low, rhs_low);
+        __ And(dst_high, lhs_high, rhs_high);
+      } else if (instruction->IsOr()) {
+        __ Or(dst_low, lhs_low, rhs_low);
+        __ Or(dst_high, lhs_high, rhs_high);
+      } else if (instruction->IsXor()) {
+        __ Xor(dst_low, lhs_low, rhs_low);
+        __ Xor(dst_high, lhs_high, rhs_high);
+      } else if (instruction->IsAdd()) {
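+        // The carry out of the low word is 1 iff the unsigned sum wrapped,
+        // i.e. iff dst_low < lhs_low.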
+        __ Addu(dst_low, lhs_low, rhs_low);
+        __ Sltu(TMP, dst_low, lhs_low);
+        __ Addu(dst_high, lhs_high, rhs_high);
+        __ Addu(dst_high, dst_high, TMP);
+      } else {
+        DCHECK(instruction->IsSub());
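+        // The borrow into the high word is 1 iff the unsigned difference
+        // wrapped, i.e. iff lhs_low < dst_low.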
+        __ Subu(dst_low, lhs_low, rhs_low);
+        __ Sltu(TMP, lhs_low, dst_low);
+        __ Subu(dst_high, lhs_high, rhs_high);
+        __ Subu(dst_high, dst_high, TMP);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+      if (instruction->IsAdd()) {
+        if (type == Primitive::kPrimFloat) {
+          __ AddS(dst, lhs, rhs);
+        } else {
+          __ AddD(dst, lhs, rhs);
+        }
+      } else {
+        DCHECK(instruction->IsSub());
+        if (type == Primitive::kPrimFloat) {
+          __ SubS(dst, lhs, rhs);
+        } else {
+          __ SubD(dst, lhs, rhs);
+        }
+      }
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected binary operation type " << type;
+  }
+}
+
+void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
+  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+  Primitive::Type type = instr->GetResultType();
+  switch (type) {
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong: {
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+      locations->SetOut(Location::RequiresRegister());
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected shift type " << type;
+  }
+}
+
+static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
+
+void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
+  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+  LocationSummary* locations = instr->GetLocations();
+  Primitive::Type type = instr->GetType();
+
+  Location rhs_location = locations->InAt(1);
+  bool use_imm = rhs_location.IsConstant();
+  Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
+  int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
+  uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
+  uint32_t shift_value = rhs_imm & shift_mask;
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register lhs = locations->InAt(0).AsRegister<Register>();
+      if (use_imm) {
+        if (instr->IsShl()) {
+          __ Sll(dst, lhs, shift_value);
+        } else if (instr->IsShr()) {
+          __ Sra(dst, lhs, shift_value);
+        } else {
+          __ Srl(dst, lhs, shift_value);
+        }
+      } else {
+        if (instr->IsShl()) {
+          __ Sllv(dst, lhs, rhs_reg);
+        } else if (instr->IsShr()) {
+          __ Srav(dst, lhs, rhs_reg);
+        } else {
+          __ Srlv(dst, lhs, rhs_reg);
+        }
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+      if (use_imm) {
+        if (shift_value == 0) {
+          codegen_->Move64(locations->Out(), locations->InAt(0));
+        } else if (shift_value < kMipsBitsPerWord) {
+          if (instr->IsShl()) {
+            __ Sll(dst_low, lhs_low, shift_value);
+            __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+            __ Sll(dst_high, lhs_high, shift_value);
+            __ Or(dst_high, dst_high, TMP);
+          } else if (instr->IsShr()) {
+            __ Sra(dst_high, lhs_high, shift_value);
+            __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+            __ Srl(dst_low, lhs_low, shift_value);
+            __ Or(dst_low, dst_low, TMP);
+          } else {
+            __ Srl(dst_high, lhs_high, shift_value);
+            __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+            __ Srl(dst_low, lhs_low, shift_value);
+            __ Or(dst_low, dst_low, TMP);
+          }
+        } else {
+          shift_value -= kMipsBitsPerWord;
+          if (instr->IsShl()) {
+            __ Sll(dst_high, lhs_low, shift_value);
+            __ Move(dst_low, ZERO);
+          } else if (instr->IsShr()) {
+            __ Sra(dst_low, lhs_high, shift_value);
+            __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
+          } else {
+            __ Srl(dst_low, lhs_high, shift_value);
+            __ Move(dst_high, ZERO);
+          }
+        }
+      } else {
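+        // Variable-amount 64-bit shift: shift each word by (shamt & 31) and
+        // fold in the bits crossing the word boundary. Nor computes ~shamt, so
+        // a shift by 1 followed by a shift by (~shamt & 31) totals 32 - shamt.
+        // If bit 5 of the shift amount is set (shamt & 32), the words swap.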
+        MipsLabel done;
+        if (instr->IsShl()) {
+          __ Sllv(dst_low, lhs_low, rhs_reg);
+          __ Nor(AT, ZERO, rhs_reg);
+          __ Srl(TMP, lhs_low, 1);
+          __ Srlv(TMP, TMP, AT);
+          __ Sllv(dst_high, lhs_high, rhs_reg);
+          __ Or(dst_high, dst_high, TMP);
+          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+          __ Beqz(TMP, &done);
+          __ Move(dst_high, dst_low);
+          __ Move(dst_low, ZERO);
+        } else if (instr->IsShr()) {
+          __ Srav(dst_high, lhs_high, rhs_reg);
+          __ Nor(AT, ZERO, rhs_reg);
+          __ Sll(TMP, lhs_high, 1);
+          __ Sllv(TMP, TMP, AT);
+          __ Srlv(dst_low, lhs_low, rhs_reg);
+          __ Or(dst_low, dst_low, TMP);
+          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+          __ Beqz(TMP, &done);
+          __ Move(dst_low, dst_high);
+          __ Sra(dst_high, dst_high, 31);
+        } else {
+          __ Srlv(dst_high, lhs_high, rhs_reg);
+          __ Nor(AT, ZERO, rhs_reg);
+          __ Sll(TMP, lhs_high, 1);
+          __ Sllv(TMP, TMP, AT);
+          __ Srlv(dst_low, lhs_low, rhs_reg);
+          __ Or(dst_low, dst_low, TMP);
+          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+          __ Beqz(TMP, &done);
+          __ Move(dst_low, dst_high);
+          __ Move(dst_high, ZERO);
+        }
+        __ Bind(&done);
+      }
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected shift operation type " << type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(instruction->GetType())) {
+    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+  } else {
+    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  Location index = locations->InAt(1);
+  Primitive::Type type = instruction->GetType();
+
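+  // A constant index is folded into the load offset; otherwise the index is
+  // scaled by the element size into TMP (TIMES_n is the log2 of the size).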
+  switch (type) {
+    case Primitive::kPrimBoolean: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+      Register out = locations->Out().AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+      } else {
+        __ Addu(TMP, obj, index.AsRegister<Register>());
+        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimByte: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+      Register out = locations->Out().AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+      } else {
+        __ Addu(TMP, obj, index.AsRegister<Register>());
+        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimShort: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+      Register out = locations->Out().AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+        __ Addu(TMP, obj, TMP);
+        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimChar: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+      Register out = locations->Out().AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+        __ Addu(TMP, obj, TMP);
+        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Register out = locations->Out().AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        __ LoadFromOffset(kLoadWord, out, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+        __ Addu(TMP, obj, TMP);
+        __ LoadFromOffset(kLoadWord, out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+      Register out = locations->Out().AsRegisterPairLow<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+        __ Addu(TMP, obj, TMP);
+        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+      FRegister out = locations->Out().AsFpuRegister<FRegister>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        __ LoadSFromOffset(out, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+        __ Addu(TMP, obj, TMP);
+        __ LoadSFromOffset(out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+      FRegister out = locations->Out().AsFpuRegister<FRegister>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        __ LoadDFromOffset(out, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+        __ Addu(TMP, obj, TMP);
+        __ LoadDFromOffset(out, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << instruction->GetType();
+      UNREACHABLE();
+  }
+  codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  Register out = locations->Out().AsRegister<Register>();
+  __ LoadFromOffset(kLoadWord, out, obj, offset);
+  codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
+  Primitive::Type value_type = instruction->GetComponentType();
+  bool is_object = value_type == Primitive::kPrimNot;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction,
+      is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+  if (is_object) {
+    InvokeRuntimeCallingConvention calling_convention;
+    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  } else {
+    locations->SetInAt(0, Location::RequiresRegister());
+    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+      locations->SetInAt(2, Location::RequiresFpuRegister());
+    } else {
+      locations->SetInAt(2, Location::RequiresRegister());
+    }
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  Location index = locations->InAt(1);
+  Primitive::Type value_type = instruction->GetComponentType();
+  bool needs_runtime_call = locations->WillCall();
+  bool needs_write_barrier =
+      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+
+  switch (value_type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+      Register value = locations->InAt(2).AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+        __ StoreToOffset(kStoreByte, value, obj, offset);
+      } else {
+        __ Addu(TMP, obj, index.AsRegister<Register>());
+        __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+      Register value = locations->InAt(2).AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+        __ StoreToOffset(kStoreHalfword, value, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+        __ Addu(TMP, obj, TMP);
+        __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimInt:
+    case Primitive::kPrimNot: {
+      if (!needs_runtime_call) {
+        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+        Register value = locations->InAt(2).AsRegister<Register>();
+        if (index.IsConstant()) {
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          __ StoreToOffset(kStoreWord, value, obj, offset);
+        } else {
+          DCHECK(index.IsRegister()) << index;
+          __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+          __ Addu(TMP, obj, TMP);
+          __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+        }
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        if (needs_write_barrier) {
+          DCHECK_EQ(value_type, Primitive::kPrimNot);
+          codegen_->MarkGCCard(obj, value);
+        }
+      } else {
+        DCHECK_EQ(value_type, Primitive::kPrimNot);
+        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+                                instruction,
+                                instruction->GetDexPc(),
+                                nullptr,
+                                IsDirectEntrypoint(kQuickAputObject));
+        CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+      }
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+      Register value = locations->InAt(2).AsRegisterPairLow<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+        __ Addu(TMP, obj, TMP);
+        __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimFloat: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+      DCHECK(locations->InAt(2).IsFpuRegister());
+      FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        __ StoreSToOffset(value, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+        __ Addu(TMP, obj, TMP);
+        __ StoreSToOffset(value, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimDouble: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+      DCHECK(locations->InAt(2).IsFpuRegister());
+      FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+        __ StoreDToOffset(value, obj, offset);
+      } else {
+        __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+        __ Addu(TMP, obj, TMP);
+        __ StoreDToOffset(value, TMP, data_offset);
+      }
+      break;
+    }
+
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << instruction->GetType();
+      UNREACHABLE();
+  }
+
+  // For ints and objects, the implicit null check was already recorded inside
+  // the switch above.
+  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+}
+
+void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+      ? LocationSummary::kCallOnSlowPath
+      : LocationSummary::kNoCall;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  if (instruction->HasUses()) {
+    locations->SetOut(Location::SameAsFirstInput());
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  BoundsCheckSlowPathMIPS* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  Register index = locations->InAt(0).AsRegister<Register>();
+  Register length = locations->InAt(1).AsRegister<Register>();
+
+  // The length is limited to the maximum positive signed 32-bit integer, so an
+  // unsigned comparison of index against length catches index < 0 (the index
+  // wraps to a large unsigned value) and length <= index in a single branch.
+  __ Bgeu(index, length, slow_path->GetEntryLabel());
+}
+
+void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction,
+      LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathMIPS uses this register too.
+  locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  Register cls = locations->InAt(1).AsRegister<Register>();
+  Register obj_cls = locations->GetTemp(0).AsRegister<Register>();
+
+  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  // TODO: Avoid this check if we know `obj` is not null.
+  __ Beqz(obj, slow_path->GetExitLabel());
+  // Compare the class of `obj` with `cls`.
+  __ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+  __ Bne(obj_cls, cls, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (check->HasUses()) {
+    locations->SetOut(Location::SameAsFirstInput());
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
+  // We assume the class is not null.
+  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+      check->GetLoadClass(),
+      check,
+      check->GetDexPc(),
+      true);
+  codegen_->AddSlowPath(slow_path);
+  GenerateClassInitializationCheck(slow_path,
+                                   check->GetLocations()->InAt(0).AsRegister<Register>());
+}
+
+void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
+  Primitive::Type in_type = compare->InputAt(0)->GetType();
+
+  LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
+      ? LocationSummary::kCall
+      : LocationSummary::kNoCall;
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
+
+  switch (in_type) {
+    case Primitive::kPrimLong:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      // Output overlaps because it is written before doing the low comparison.
+      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      break;
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      InvokeRuntimeCallingConvention calling_convention;
+      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+  //  0 if: left == right
+  //  1 if: left  > right
+  // -1 if: left  < right
+  switch (in_type) {
+    case Primitive::kPrimLong: {
+      MipsLabel done;
+      Register res = locations->Out().AsRegister<Register>();
+      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register lhs_low  = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register rhs_low  = locations->InAt(1).AsRegisterPairLow<Register>();
+      // TODO: more efficient (direct) comparison with a constant.
+      __ Slt(TMP, lhs_high, rhs_high);
+      __ Slt(AT, rhs_high, lhs_high);  // Inverted: is actually gt.
+      __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
+      __ Bnez(res, &done);             // If we compared ==, check if lower bits are also equal.
+      __ Sltu(TMP, lhs_low, rhs_low);
+      __ Sltu(AT, rhs_low, lhs_low);   // Inverted: is actually gt.
+      __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
+      __ Bind(&done);
+      break;
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      int32_t entry_point_offset;
+      bool direct;
+      if (in_type == Primitive::kPrimFloat) {
+        if (instruction->IsGtBias()) {
+          entry_point_offset = QUICK_ENTRY_POINT(pCmpgFloat);
+          direct = IsDirectEntrypoint(kQuickCmpgFloat);
+        } else {
+          entry_point_offset = QUICK_ENTRY_POINT(pCmplFloat);
+          direct = IsDirectEntrypoint(kQuickCmplFloat);
+        }
+      } else {
+        if (instruction->IsGtBias()) {
+          entry_point_offset = QUICK_ENTRY_POINT(pCmpgDouble);
+          direct = IsDirectEntrypoint(kQuickCmpgDouble);
+        } else {
+          entry_point_offset = QUICK_ENTRY_POINT(pCmplDouble);
+          direct = IsDirectEntrypoint(kQuickCmplDouble);
+        }
+      }
+      codegen_->InvokeRuntime(entry_point_offset,
+                              instruction,
+                              instruction->GetDexPc(),
+                              nullptr,
+                              direct);
+      if (in_type == Primitive::kPrimFloat) {
+        if (instruction->IsGtBias()) {
+          CheckEntrypointTypes<kQuickCmpgFloat, int32_t, float, float>();
+        } else {
+          CheckEntrypointTypes<kQuickCmplFloat, int32_t, float, float>();
+        }
+      } else {
+        if (instruction->IsGtBias()) {
+          CheckEntrypointTypes<kQuickCmpgDouble, int32_t, double, double>();
+        } else {
+          CheckEntrypointTypes<kQuickCmplDouble, int32_t, double, double>();
+        }
+      }
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unimplemented compare type " << in_type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitCondition(HCondition* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (instruction->NeedsMaterialization()) {
+    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCondition(HCondition* instruction) {
+  if (!instruction->NeedsMaterialization()) {
+    return;
+  }
+  // TODO: generalize to long
+  DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
+  LocationSummary* locations = instruction->GetLocations();
+  Register dst = locations->Out().AsRegister<Register>();
+
+  Register lhs = locations->InAt(0).AsRegister<Register>();
+  Location rhs_location = locations->InAt(1);
+
+  Register rhs_reg = ZERO;
+  int64_t rhs_imm = 0;
+  bool use_imm = rhs_location.IsConstant();
+  if (use_imm) {
+    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+  } else {
+    rhs_reg = rhs_location.AsRegister<Register>();
+  }
+
+  IfCondition if_cond = instruction->GetCondition();
+
+  switch (if_cond) {
+    case kCondEQ:
+    case kCondNE:
+      if (use_imm && IsUint<16>(rhs_imm)) {
+        __ Xori(dst, lhs, rhs_imm);
+      } else {
+        if (use_imm) {
+          rhs_reg = TMP;
+          __ LoadConst32(rhs_reg, rhs_imm);
+        }
+        __ Xor(dst, lhs, rhs_reg);
+      }
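+      // dst now holds lhs ^ rhs: sltiu maps 0 to 1 for kCondEQ; sltu maps any
+      // non-zero value to 1 for kCondNE.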
+      if (if_cond == kCondEQ) {
+        __ Sltiu(dst, dst, 1);
+      } else {
+        __ Sltu(dst, ZERO, dst);
+      }
+      break;
+
+    case kCondLT:
+    case kCondGE:
+      if (use_imm && IsInt<16>(rhs_imm)) {
+        __ Slti(dst, lhs, rhs_imm);
+      } else {
+        if (use_imm) {
+          rhs_reg = TMP;
+          __ LoadConst32(rhs_reg, rhs_imm);
+        }
+        __ Slt(dst, lhs, rhs_reg);
+      }
+      if (if_cond == kCondGE) {
+        // Simulate lhs >= rhs via !(lhs < rhs) since there's
+        // only the slt instruction but no sge.
+        __ Xori(dst, dst, 1);
+      }
+      break;
+
+    case kCondLE:
+    case kCondGT:
+      if (use_imm && IsInt<16>(rhs_imm + 1)) {
+        // Simulate lhs <= rhs via lhs < rhs + 1.
+        __ Slti(dst, lhs, rhs_imm + 1);
+        if (if_cond == kCondGT) {
+          // Simulate lhs > rhs via !(lhs <= rhs) since there's
+          // only the slti instruction but no sgti.
+          __ Xori(dst, dst, 1);
+        }
+      } else {
+        if (use_imm) {
+          rhs_reg = TMP;
+          __ LoadConst32(rhs_reg, rhs_imm);
+        }
+        __ Slt(dst, rhs_reg, lhs);
+        if (if_cond == kCondLE) {
+          // Simulate lhs <= rhs via !(rhs < lhs) since there's
+          // only the slt instruction but no sle.
+          __ Xori(dst, dst, 1);
+        }
+      }
+      break;
+
+    case kCondB:
+    case kCondAE:
+      // Use sltiu instruction if rhs_imm is in range [0, 32767] or in
+      // [max_unsigned - 32767 = 0xffff8000, max_unsigned = 0xffffffff].
+      if (use_imm &&
+          (IsUint<15>(rhs_imm) ||
+              IsUint<15>(rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+        if (IsUint<15>(rhs_imm)) {
+          __ Sltiu(dst, lhs, rhs_imm);
+        } else {
+          // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+          // and then used as unsigned integer (range [0xffff8000, 0xffffffff]).
+          __ Sltiu(dst, lhs, rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+        }
+      } else {
+        if (use_imm) {
+          rhs_reg = TMP;
+          __ LoadConst32(rhs_reg, rhs_imm);
+        }
+        __ Sltu(dst, lhs, rhs_reg);
+      }
+      if (if_cond == kCondAE) {
+        // Simulate lhs >= rhs via !(lhs < rhs) since there's
+        // only the sltu instruction but no sgeu.
+        __ Xori(dst, dst, 1);
+      }
+      break;
+
+    case kCondBE:
+    case kCondA:
+      // Use sltiu instruction if rhs_imm is in range [0, 32766] or in
+      // [max_unsigned - 32767 - 1 = 0xffff7fff, max_unsigned - 1 = 0xfffffffe].
+      // lhs <= rhs is simulated via lhs < rhs + 1.
+      if (use_imm && (rhs_imm != -1) &&
+          (IsUint<15>(rhs_imm + 1) ||
+              IsUint<15>(rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+        if (IsUint<15>(rhs_imm + 1)) {
+          // Simulate lhs <= rhs via lhs < rhs + 1.
+          __ Sltiu(dst, lhs, rhs_imm + 1);
+        } else {
+          // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+          // and then used as unsigned integer (range [0xffff8000, 0xffffffff] where rhs_imm
+          // is in range [0xffff7fff, 0xfffffffe] since lhs <= rhs is simulated via lhs < rhs + 1).
+          __ Sltiu(dst, lhs, rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+        }
+        if (if_cond == kCondA) {
+          // Simulate lhs > rhs via !(lhs <= rhs) since there's
+          // only the sltiu instruction but no sgtiu.
+          __ Xori(dst, dst, 1);
+        }
+      } else {
+        if (use_imm) {
+          rhs_reg = TMP;
+          __ LoadConst32(rhs_reg, rhs_imm);
+        }
+        __ Sltu(dst, rhs_reg, lhs);
+        if (if_cond == kCondBE) {
+          // Simulate lhs <= rhs via !(rhs < lhs) since there's
+          // only the sltu instruction but no sleu.
+          __ Xori(dst, dst, 1);
+        }
+      }
+      break;
+  }
+}
+
+void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
+  Primitive::Type type = div->GetResultType();
+  LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong)
+      ? LocationSummary::kCall
+      : LocationSummary::kNoCall;
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+  switch (type) {
+    case Primitive::kPrimInt:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+
+    case Primitive::kPrimLong: {
+      InvokeRuntimeCallingConvention calling_convention;
+      locations->SetInAt(0, Location::RegisterPairLocation(
+          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+      locations->SetInAt(1, Location::RegisterPairLocation(
+          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+      locations->SetOut(calling_convention.GetReturnLocation(type));
+      break;
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected div type " << type;
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
+  Primitive::Type type = instruction->GetType();
+  LocationSummary* locations = instruction->GetLocations();
+  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register lhs = locations->InAt(0).AsRegister<Register>();
+      Register rhs = locations->InAt(1).AsRegister<Register>();
+      if (isR6) {
+        __ DivR6(dst, lhs, rhs);
+      } else {
+        __ DivR2(dst, lhs, rhs);
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
+                              instruction,
+                              instruction->GetDexPc(),
+                              nullptr,
+                              IsDirectEntrypoint(kQuickLdiv));
+      CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
+      break;
+    }
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+      if (type == Primitive::kPrimFloat) {
+        __ DivS(dst, lhs, rhs);
+      } else {
+        __ DivD(dst, lhs, rhs);
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected div type " << type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+      ? LocationSummary::kCallOnSlowPath
+      : LocationSummary::kNoCall;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+  if (instruction->HasUses()) {
+    locations->SetOut(Location::SameAsFirstInput());
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+  codegen_->AddSlowPath(slow_path);
+  Location value = instruction->GetLocations()->InAt(0);
+  Primitive::Type type = instruction->GetType();
+
+  switch (type) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt: {
+      if (value.IsConstant()) {
+        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+          __ B(slow_path->GetEntryLabel());
+        } else {
+          // A division by a non-zero constant is valid. We don't need to
+          // perform any check, so simply fall through.
+        }
+      } else {
+        DCHECK(value.IsRegister()) << value;
+        __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      if (value.IsConstant()) {
+        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+          __ B(slow_path->GetEntryLabel());
+        } else {
+          // A division by a non-zero constant is valid. We don't need to
+          // perform any check, so simply fall through.
+        }
+      } else {
+        DCHECK(value.IsRegisterPair()) << value;
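+        // A 64-bit value is zero iff the OR of its two halves is zero.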
+        __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
+        __ Beqz(TMP, slow_path->GetEntryLabel());
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+  }
+}
+
+void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitExit(HExit* exit) {
+  exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
+  got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
+  DCHECK(!successor->IsExitBlock());
+  HBasicBlock* block = got->GetBlock();
+  HInstruction* previous = got->GetPrevious();
+  HLoopInformation* info = block->GetLoopInformation();
+
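+  // On a loop back edge with a pending suspend check, emit the check; when no
+  // suspension is requested it branches to the successor itself.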
+  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+    return;
+  }
+  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+  }
+  if (!codegen_->GoesToNextBlock(block, successor)) {
+    __ B(codegen_->GetLabelOf(successor));
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
+  HandleGoto(got, got->GetSuccessor());
+}
+
+void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+  try_boundary->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
+  if (!successor->IsExitBlock()) {
+    HandleGoto(try_boundary, successor);
+  }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
+                                                         MipsLabel* true_target,
+                                                         MipsLabel* false_target,
+                                                         MipsLabel* always_true_target) {
+  HInstruction* cond = instruction->InputAt(0);
+  HCondition* condition = cond->AsCondition();
+
+  if (cond->IsIntConstant()) {
+    int32_t cond_value = cond->AsIntConstant()->GetValue();
+    if (cond_value == 1) {
+      if (always_true_target != nullptr) {
+        __ B(always_true_target);
+      }
+      return;
+    } else {
+      DCHECK_EQ(cond_value, 0);
+    }
+  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
+    // The condition instruction has been materialized, compare the output to 0.
+    Location cond_val = instruction->GetLocations()->InAt(0);
+    DCHECK(cond_val.IsRegister());
+    __ Bnez(cond_val.AsRegister<Register>(), true_target);
+  } else {
+    // The condition instruction has not been materialized, use its inputs as
+    // the comparison and its condition as the branch condition.
+    Register lhs = condition->GetLocations()->InAt(0).AsRegister<Register>();
+    Location rhs_location = condition->GetLocations()->InAt(1);
+    Register rhs_reg = ZERO;
+    int32_t rhs_imm = 0;
+    bool use_imm = rhs_location.IsConstant();
+    if (use_imm) {
+      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+    } else {
+      rhs_reg = rhs_location.AsRegister<Register>();
+    }
+
+    IfCondition if_cond = condition->GetCondition();
+    if (use_imm && rhs_imm == 0) {
+      switch (if_cond) {
+        case kCondEQ:
+          __ Beqz(lhs, true_target);
+          break;
+        case kCondNE:
+          __ Bnez(lhs, true_target);
+          break;
+        case kCondLT:
+          __ Bltz(lhs, true_target);
+          break;
+        case kCondGE:
+          __ Bgez(lhs, true_target);
+          break;
+        case kCondLE:
+          __ Blez(lhs, true_target);
+          break;
+        case kCondGT:
+          __ Bgtz(lhs, true_target);
+          break;
+        case kCondB:
+          break;  // always false
+        case kCondBE:
+          __ Beqz(lhs, true_target);  // <= 0 if zero
+          break;
+        case kCondA:
+          __ Bnez(lhs, true_target);  // > 0 if non-zero
+          break;
+        case kCondAE:
+          __ B(true_target);  // always true
+          break;
+      }
+    } else {
+      if (use_imm) {
+        // TODO: more efficient comparison with 16-bit constants without loading them into TMP.
+        rhs_reg = TMP;
+        __ LoadConst32(rhs_reg, rhs_imm);
+      }
+      switch (if_cond) {
+        case kCondEQ:
+          __ Beq(lhs, rhs_reg, true_target);
+          break;
+        case kCondNE:
+          __ Bne(lhs, rhs_reg, true_target);
+          break;
+        case kCondLT:
+          __ Blt(lhs, rhs_reg, true_target);
+          break;
+        case kCondGE:
+          __ Bge(lhs, rhs_reg, true_target);
+          break;
+        case kCondLE:
+          __ Bge(rhs_reg, lhs, true_target);
+          break;
+        case kCondGT:
+          __ Blt(rhs_reg, lhs, true_target);
+          break;
+        case kCondB:
+          __ Bltu(lhs, rhs_reg, true_target);
+          break;
+        case kCondAE:
+          __ Bgeu(lhs, rhs_reg, true_target);
+          break;
+        case kCondBE:
+          __ Bgeu(rhs_reg, lhs, true_target);
+          break;
+        case kCondA:
+          __ Bltu(rhs_reg, lhs, true_target);
+          break;
+      }
+    }
+  }
+  if (false_target != nullptr) {
+    __ B(false_target);
+  }
+}
+
+void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+  HInstruction* cond = if_instr->InputAt(0);
+  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+    locations->SetInAt(0, Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
+  MipsLabel* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+  MipsLabel* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+  MipsLabel* always_true_target = true_target;
+  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+                                if_instr->IfTrueSuccessor())) {
+    always_true_target = nullptr;
+  }
+  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+                                if_instr->IfFalseSuccessor())) {
+    false_target = nullptr;
+  }
+  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
+}
+
+void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+  LocationSummary* locations = new (GetGraph()->GetArena())
+      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+  HInstruction* cond = deoptimize->InputAt(0);
+  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+    locations->SetInAt(0, Location::RequiresRegister());
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+      DeoptimizationSlowPathMIPS(deoptimize);
+  codegen_->AddSlowPath(slow_path);
+  MipsLabel* slow_path_entry = slow_path->GetEntryLabel();
+  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
+}
+
+void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
+  Primitive::Type field_type = field_info.GetFieldType();
+  bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+  bool generate_volatile = field_info.IsVolatile() && is_wide;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (generate_volatile) {
+    InvokeRuntimeCallingConvention calling_convention;
+    // Need A0 to hold base + offset.
+    locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+    if (field_type == Primitive::kPrimLong) {
+      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
+    } else {
+      locations->SetOut(Location::RequiresFpuRegister());
+      // Need some temp core registers since FP results are returned in core registers.
+      Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
+      locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
+      locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
+    }
+  } else {
+    if (Primitive::IsFloatingPointType(instruction->GetType())) {
+      locations->SetOut(Location::RequiresFpuRegister());
+    } else {
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+    }
+  }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
+                                                  const FieldInfo& field_info,
+                                                  uint32_t dex_pc) {
+  Primitive::Type type = field_info.GetFieldType();
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  LoadOperandType load_type = kLoadUnsignedByte;
+  bool is_volatile = field_info.IsVolatile();
+
+  switch (type) {
+    case Primitive::kPrimBoolean:
+      load_type = kLoadUnsignedByte;
+      break;
+    case Primitive::kPrimByte:
+      load_type = kLoadSignedByte;
+      break;
+    case Primitive::kPrimShort:
+      load_type = kLoadSignedHalfword;
+      break;
+    case Primitive::kPrimChar:
+      load_type = kLoadUnsignedHalfword;
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimNot:
+      load_type = kLoadWord;
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      load_type = kLoadDoubleword;
+      break;
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+
+  if (is_volatile && load_type == kLoadDoubleword) {
+    InvokeRuntimeCallingConvention calling_convention;
+    __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+               obj, field_info.GetFieldOffset().Uint32Value());
+    // Do an implicit null check.
+    __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+    codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Load),
+                            instruction,
+                            dex_pc,
+                            nullptr,
+                            IsDirectEntrypoint(kQuickA64Load));
+    CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
+    if (type == Primitive::kPrimDouble) {
+      // Need to move to FP regs since FP results are returned in core registers.
+      __ Mtc1(locations->GetTemp(1).AsRegister<Register>(),
+              locations->Out().AsFpuRegister<FRegister>());
+      __ Mthc1(locations->GetTemp(2).AsRegister<Register>(),
+               locations->Out().AsFpuRegister<FRegister>());
+    }
+  } else {
+    if (!Primitive::IsFloatingPointType(type)) {
+      Register dst;
+      if (type == Primitive::kPrimLong) {
+        DCHECK(locations->Out().IsRegisterPair());
+        dst = locations->Out().AsRegisterPairLow<Register>();
+      } else {
+        DCHECK(locations->Out().IsRegister());
+        dst = locations->Out().AsRegister<Register>();
+      }
+      __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+    } else {
+      DCHECK(locations->Out().IsFpuRegister());
+      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+      if (type == Primitive::kPrimFloat) {
+        __ LoadSFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+      } else {
+        __ LoadDFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+      }
+    }
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+  }
+}
+
+void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
+  Primitive::Type field_type = field_info.GetFieldType();
+  bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+  bool generate_volatile = field_info.IsVolatile() && is_wide;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (generate_volatile) {
+    InvokeRuntimeCallingConvention calling_convention;
+    // Need A0 to hold base + offset.
+    locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+    if (field_type == Primitive::kPrimLong) {
+      locations->SetInAt(1, Location::RegisterPairLocation(
+          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+    } else {
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      // Pass FP parameters in core registers.
+      locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+      locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+    }
+  } else {
+    if (Primitive::IsFloatingPointType(field_type)) {
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+    } else {
+      locations->SetInAt(1, Location::RequiresRegister());
+    }
+  }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
+                                                  const FieldInfo& field_info,
+                                                  uint32_t dex_pc) {
+  Primitive::Type type = field_info.GetFieldType();
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  StoreOperandType store_type = kStoreByte;
+  bool is_volatile = field_info.IsVolatile();
+
+  switch (type) {
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+      store_type = kStoreByte;
+      break;
+    case Primitive::kPrimShort:
+    case Primitive::kPrimChar:
+      store_type = kStoreHalfword;
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimNot:
+      store_type = kStoreWord;
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      store_type = kStoreDoubleword;
+      break;
+    case Primitive::kPrimVoid:
+      LOG(FATAL) << "Unreachable type " << type;
+      UNREACHABLE();
+  }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+  }
+
+  if (is_volatile && store_type == kStoreDoubleword) {
+    InvokeRuntimeCallingConvention calling_convention;
+    __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+               obj, field_info.GetFieldOffset().Uint32Value());
+    // Do an implicit null check.
+    __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+    codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+    if (type == Primitive::kPrimDouble) {
+      // Pass FP parameters in core registers.
+      __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
+              locations->InAt(1).AsFpuRegister<FRegister>());
+      __ Mfhc1(locations->GetTemp(2).AsRegister<Register>(),
+               locations->InAt(1).AsFpuRegister<FRegister>());
+    }
+    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
+                            instruction,
+                            dex_pc,
+                            nullptr,
+                            IsDirectEntrypoint(kQuickA64Store));
+    CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
+  } else {
+    if (!Primitive::IsFloatingPointType(type)) {
+      Register src;
+      if (type == Primitive::kPrimLong) {
+        DCHECK(locations->InAt(1).IsRegisterPair());
+        src = locations->InAt(1).AsRegisterPairLow<Register>();
+      } else {
+        DCHECK(locations->InAt(1).IsRegister());
+        src = locations->InAt(1).AsRegister<Register>();
+      }
+      __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+    } else {
+      DCHECK(locations->InAt(1).IsFpuRegister());
+      FRegister src = locations->InAt(1).AsFpuRegister<FRegister>();
+      if (type == Primitive::kPrimFloat) {
+        __ StoreSToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+      } else {
+        __ StoreDToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+      }
+    }
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
+  }
+
+  // TODO: memory barriers?
+  if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+    DCHECK(locations->InAt(1).IsRegister());
+    Register src = locations->InAt(1).AsRegister<Register>();
+    codegen_->MarkGCCard(obj, src);
+  }
+
+  if (is_volatile) {
+    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+  }
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+  LocationSummary::CallKind call_kind =
+      instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  // The output overlaps the inputs.
+  // Note that TypeCheckSlowPathMIPS uses this register too.
+  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Register obj = locations->InAt(0).AsRegister<Register>();
+  Register cls = locations->InAt(1).AsRegister<Register>();
+  Register out = locations->Out().AsRegister<Register>();
+
+  MipsLabel done;
+
+  // Return 0 if `obj` is null.
+  // TODO: Avoid this check if we know `obj` is not null.
+  __ Move(out, ZERO);
+  __ Beqz(obj, &done);
+
+  // Compare the class of `obj` with `cls`.
+  __ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+  if (instruction->IsExactCheck()) {
+    // Classes must be equal for the instanceof to succeed.
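+    // Xor yields zero iff the classes are equal; Sltiu(out, out, 1) then
+    // materializes (out == 0) as a 0/1 boolean.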
+    __ Xor(out, out, cls);
+    __ Sltiu(out, out, 1);
+  } else {
+    // If the classes are not equal, we go into a slow path.
+    DCHECK(locations->OnlyCallsOnSlowPath());
+    SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+    codegen_->AddSlowPath(slow_path);
+    __ Bne(out, cls, slow_path->GetEntryLabel());
+    __ LoadConst32(out, 1);
+    __ Bind(slow_path->GetExitLabel());
+  }
+
+  __ Bind(&done);
+}
+
+void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
+  InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
+  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
+}
+
+void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+  HandleInvoke(invoke);
+  // The register T0 is required to be used for the hidden argument in
+  // art_quick_imt_conflict_trampoline, so add the hidden argument.
+  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+      invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+  Location receiver = invoke->GetLocations()->InAt(0);
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+  // Set the hidden argument.
+  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
+                 invoke->GetDexMethodIndex());
+
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+  } else {
+    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+  }
+  codegen_->MaybeRecordImplicitNullCheck(invoke);
+  // temp = temp->GetImtEntryAt(method_offset);
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+  // T9 = temp->GetEntryPoint();
+  __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+  // T9();
+  __ Jalr(T9);
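+  // Fill the jump's branch delay slot.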
+  __ Nop();
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  // TODO: intrinsic function.
+  HandleInvoke(invoke);
+}
+
+void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+  // When we do not run baseline, explicit clinit checks triggered by static
+  // invokes must have been pruned by art::PrepareForRegisterAllocation.
+  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+  // TODO: intrinsic function.
+  HandleInvoke(invoke);
+}
+
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen ATTRIBUTE_UNUSED) {
+  if (invoke->GetLocations()->Intrinsified()) {
+    // TODO: intrinsic function.
+    return true;
+  }
+  return false;
+}
+
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method ATTRIBUTE_UNUSED) {
+  switch (desired_dispatch_info.method_load_kind) {
+    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+      // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
+      return HInvokeStaticOrDirect::DispatchInfo {
+        HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        0u,
+        0u
+      };
+    default:
+      break;
+  }
+  switch (desired_dispatch_info.code_ptr_location) {
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+      // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
+      return HInvokeStaticOrDirect::DispatchInfo {
+        desired_dispatch_info.method_load_kind,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        desired_dispatch_info.method_load_data,
+        0u
+      };
+    default:
+      return desired_dispatch_info;
+  }
+}
+
+void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+  // All registers are assumed to be correctly set up per the calling convention.
+
+  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
+  switch (invoke->GetMethodLoadKind()) {
+    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+      // temp = thread->string_init_entrypoint
+      __ LoadFromOffset(kLoadWord,
+                        temp.AsRegister<Register>(),
+                        TR,
+                        invoke->GetStringInitOffset());
+      break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+      break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
+      __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
+      break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+      // TODO: Implement these types.
+      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
+    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
+      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+      Register reg = temp.AsRegister<Register>();
+      Register method_reg;
+      if (current_method.IsRegister()) {
+        method_reg = current_method.AsRegister<Register>();
+      } else {
+        // TODO: use the appropriate DCHECK() here if possible.
+        // DCHECK(invoke->GetLocations()->Intrinsified());
+        DCHECK(!current_method.IsValid());
+        method_reg = reg;
+        __ Lw(reg, SP, kCurrentMethodStackOffset);
+      }
+
+      // temp = temp->dex_cache_resolved_methods_;
+      __ LoadFromOffset(kLoadWord,
+                        reg,
+                        method_reg,
+                        ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
+      // temp = temp[index_in_cache]
+      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+      __ LoadFromOffset(kLoadWord,
+                        reg,
+                        reg,
+                        CodeGenerator::GetCachePointerOffset(index_in_cache));
+      break;
+    }
+  }
+
+  switch (invoke->GetCodePtrLocation()) {
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
+      __ Jalr(&frame_entry_label_, T9);
+      break;
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+      // T9 = invoke->GetDirectCodePtr();
+      __ LoadConst32(T9, invoke->GetDirectCodePtr());
+      // T9()
+      __ Jalr(T9);
+      __ Nop();
+      break;
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+      // TODO: Implement these types.
+      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
+      // T9 = callee_method->entry_point_from_quick_compiled_code_;
+      __ LoadFromOffset(kLoadWord,
+                        T9,
+                        callee_method.AsRegister<Register>(),
+                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+                            kMipsWordSize).Int32Value());
+      // T9()
+      __ Jalr(T9);
+      __ Nop();
+      break;
+  }
+  DCHECK(!IsLeafMethod());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+  // When we do not run baseline, explicit clinit checks triggered by static
+  // invokes must have been pruned by art::PrepareForRegisterAllocation.
+  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+    return;
+  }
+
+  LocationSummary* locations = invoke->GetLocations();
+  codegen_->GenerateStaticOrDirectCall(invoke,
+                                       locations->HasTemps()
+                                           ? locations->GetTemp(0)
+                                           : Location::NoLocation());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+  // TODO: Try to generate intrinsics code.
+  LocationSummary* locations = invoke->GetLocations();
+  Location receiver = locations->InAt(0);
+  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+      invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+  // temp = object->GetClass();
+  if (receiver.IsStackSlot()) {
+    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+  } else {
+    DCHECK(receiver.IsRegister());
+    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+  }
+  codegen_->MaybeRecordImplicitNullCheck(invoke);
+  // temp = temp->GetMethodAt(method_offset);
+  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+  // T9 = temp->GetEntryPoint();
+  __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+  // T9();
+  __ Jalr(T9);
+  __ Nop();
+  DCHECK(!codegen_->IsLeafMethod());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
+  InvokeRuntimeCallingConvention calling_convention;
+  CodeGenerator::CreateLoadClassLocationSummary(
+      cls,
+      Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+      Location::RegisterLocation(V0));
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
+  LocationSummary* locations = cls->GetLocations();
+  if (cls->NeedsAccessCheck()) {
+    codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+                            cls,
+                            cls->GetDexPc(),
+                            nullptr,
+                            IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess));
+    return;
+  }
+
+  Register out = locations->Out().AsRegister<Register>();
+  Register current_method = locations->InAt(0).AsRegister<Register>();
+  if (cls->IsReferrersClass()) {
+    DCHECK(!cls->CanCallRuntime());
+    DCHECK(!cls->MustGenerateClinitCheck());
+    __ LoadFromOffset(kLoadWord, out, current_method,
+                      ArtMethod::DeclaringClassOffset().Int32Value());
+  } else {
+    DCHECK(cls->CanCallRuntime());
+    __ LoadFromOffset(kLoadWord, out, current_method,
+                      ArtMethod::DexCacheResolvedTypesOffset(kMipsPointerSize).Int32Value());
+    __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+    SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+        cls,
+        cls,
+        cls->GetDexPc(),
+        cls->MustGenerateClinitCheck());
+    codegen_->AddSlowPath(slow_path);
+    __ Beqz(out, slow_path->GetEntryLabel());
+    if (cls->MustGenerateClinitCheck()) {
+      GenerateClassInitializationCheck(slow_path, out);
+    } else {
+      __ Bind(slow_path->GetExitLabel());
+    }
+  }
+}
+
+static int32_t GetExceptionTlsOffset() {
+  return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+}
+
+void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
+  Register out = load->GetLocations()->Out().AsRegister<Register>();
+  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
+  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitLoadLocal(HLoadLocal* load) {
+  load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
+  // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
+  codegen_->AddSlowPath(slow_path);
+
+  LocationSummary* locations = load->GetLocations();
+  Register out = locations->Out().AsRegister<Register>();
+  Register current_method = locations->InAt(0).AsRegister<Register>();
+  __ LoadFromOffset(kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
+  __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
+  __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+  __ Beqz(out, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitLocal(HLocal* local) {
+  local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLocal(HLocal* local) {
+  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+  locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+  // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+  if (instruction->IsEnter()) {
+    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLockObject),
+                            instruction,
+                            instruction->GetDexPc(),
+                            nullptr,
+                            IsDirectEntrypoint(kQuickLockObject));
+    CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
+  } else {
+    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pUnlockObject),
+                            instruction,
+                            instruction->GetDexPc(),
+                            nullptr,
+                            IsDirectEntrypoint(kQuickUnlockObject));
+    CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
+  }
+}
+
+void LocationsBuilderMIPS::VisitMul(HMul* mul) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+  switch (mul->GetResultType()) {
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
+  Primitive::Type type = instruction->GetType();
+  LocationSummary* locations = instruction->GetLocations();
+  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register lhs = locations->InAt(0).AsRegister<Register>();
+      Register rhs = locations->InAt(1).AsRegister<Register>();
+
+      if (isR6) {
+        __ MulR6(dst, lhs, rhs);
+      } else {
+        __ MulR2(dst, lhs, rhs);
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+      // Extra checks needed because of the existence of the A1_A2 register pair.
+      // The algorithm below is wrong if dst_high is either lhs_low or rhs_low
+      // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
+      DCHECK_NE(dst_high, lhs_low);
+      DCHECK_NE(dst_high, rhs_low);
+
+      // A_B * C_D
+      // dst_hi:  [ low(A*D) + low(B*C) + hi(B*D) ]
+      // dst_lo:  [ low(B*D) ]
+      // Note: R2 and R6 MUL produce the low 32 bits of the multiplication result.
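+      // E.g. with 32-bit halves, (2^32*A + B) * (2^32*C + D)
+      //     = 2^64*(A*C) + 2^32*(A*D + B*C) + B*D;
+      // the 2^64 term and the high halves of A*D and B*C fall outside 64 bits,
+      // so only low(A*D), low(B*C) and the full B*D product are needed.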
+
+      if (isR6) {
+        __ MulR6(TMP, lhs_high, rhs_low);
+        __ MulR6(dst_high, lhs_low, rhs_high);
+        __ Addu(dst_high, dst_high, TMP);
+        __ MuhuR6(TMP, lhs_low, rhs_low);
+        __ Addu(dst_high, dst_high, TMP);
+        __ MulR6(dst_low, lhs_low, rhs_low);
+      } else {
+        __ MulR2(TMP, lhs_high, rhs_low);
+        __ MulR2(dst_high, lhs_low, rhs_high);
+        __ Addu(dst_high, dst_high, TMP);
+        __ MultuR2(lhs_low, rhs_low);
+        __ Mfhi(TMP);
+        __ Addu(dst_high, dst_high, TMP);
+        __ Mflo(dst_low);
+      }
+      break;
+    }
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+      if (type == Primitive::kPrimFloat) {
+        __ MulS(dst, lhs, rhs);
+      } else {
+        __ MulD(dst, lhs, rhs);
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected mul type " << type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+  switch (neg->GetResultType()) {
+    case Primitive::kPrimInt:
+    case Primitive::kPrimLong:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
+  Primitive::Type type = instruction->GetType();
+  LocationSummary* locations = instruction->GetLocations();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register src = locations->InAt(0).AsRegister<Register>();
+      __ Subu(dst, ZERO, src);
+      break;
+    }
+    case Primitive::kPrimLong: {
+      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+      Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
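+      // 64-bit negation: negate the low word, then subtract a borrow (set iff
+      // the negated low word is non-zero) from the negated high word.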
+      __ Subu(dst_low, ZERO, src_low);
+      __ Sltu(TMP, ZERO, dst_low);
+      __ Subu(dst_high, ZERO, src_high);
+      __ Subu(dst_high, dst_high, TMP);
+      break;
+    }
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+      FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+      if (type == Primitive::kPrimFloat) {
+        __ NegS(dst, src);
+      } else {
+        __ NegD(dst, src);
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected neg type " << type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
+  InvokeRuntimeCallingConvention calling_convention;
+  Register current_method_register = calling_convention.GetRegisterAt(2);
+  __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+  // Move a uint16_t value to a register.
+  __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+  codegen_->InvokeRuntime(
+      GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+      instruction,
+      instruction->GetDexPc(),
+      nullptr,
+      IsDirectEntrypoint(kQuickAllocArrayWithAccessCheck));
+  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
+                       void*, uint32_t, int32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
+  InvokeRuntimeCallingConvention calling_convention;
+  Register current_method_register = calling_convention.GetRegisterAt(1);
+  __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+  // Move a uint16_t value to a register.
+  __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+  codegen_->InvokeRuntime(
+      GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+      instruction,
+      instruction->GetDexPc(),
+      nullptr,
+      IsDirectEntrypoint(kQuickAllocObjectWithAccessCheck));
+  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
+  Primitive::Type type = instruction->GetType();
+  LocationSummary* locations = instruction->GetLocations();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register src = locations->InAt(0).AsRegister<Register>();
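+      // Nor with ZERO computes ~(src | 0), i.e. bitwise not.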
+      __ Nor(dst, src, ZERO);
+      break;
+    }
+
+    case Primitive::kPrimLong: {
+      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+      Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
+      __ Nor(dst_high, src_high, ZERO);
+      __ Nor(dst_low, src_low, ZERO);
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+  }
+}
+
+void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
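+  // The input is a boolean (0 or 1), so XOR-ing it with 1 flips it.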
+  __ Xori(locations->Out().AsRegister<Register>(),
+          locations->InAt(0).AsRegister<Register>(),
+          1);
+}
+
+void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
+  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+      ? LocationSummary::kCallOnSlowPath
+      : LocationSummary::kNoCall;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+  locations->SetInAt(0, Location::RequiresRegister());
+  if (instruction->HasUses()) {
+    locations->SetOut(Location::SameAsFirstInput());
+  }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
+  if (codegen_->CanMoveNullCheckToUser(instruction)) {
+    return;
+  }
+  Location obj = instruction->GetLocations()->InAt(0);
+
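+  // Load into ZERO: the value is discarded, but a null object faults here and
+  // the fault handler converts the fault into a NullPointerException.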
+  __ Lw(ZERO, obj.AsRegister<Register>(), 0);
+  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
+  SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  Location obj = instruction->GetLocations()->InAt(0);
+
+  __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
+  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
+    GenerateImplicitNullCheck(instruction);
+  } else {
+    GenerateExplicitNullCheck(instruction);
+  }
+}
+
+void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+  if (location.IsStackSlot()) {
+    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+  } else if (location.IsDoubleStackSlot()) {
+    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+  }
+  locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
+                                                         ATTRIBUTE_UNUSED) {
+  // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
+                                                        ATTRIBUTE_UNUSED) {
+  // Nothing to do, the method is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+    locations->SetInAt(i, Location::Any());
+  }
+  locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitRem(HRem* rem) {
+  Primitive::Type type = rem->GetResultType();
+  LocationSummary::CallKind call_kind =
+      (type == Primitive::kPrimInt) ? LocationSummary::kNoCall : LocationSummary::kCall;
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+  switch (type) {
+    case Primitive::kPrimInt:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+
+    case Primitive::kPrimLong: {
+      InvokeRuntimeCallingConvention calling_convention;
+      locations->SetInAt(0, Location::RegisterPairLocation(
+          calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+      locations->SetInAt(1, Location::RegisterPairLocation(
+          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+      locations->SetOut(calling_convention.GetReturnLocation(type));
+      break;
+    }
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble: {
+      InvokeRuntimeCallingConvention calling_convention;
+      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+      locations->SetOut(calling_convention.GetReturnLocation(type));
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected rem type " << type;
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
+  Primitive::Type type = instruction->GetType();
+  LocationSummary* locations = instruction->GetLocations();
+  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+  switch (type) {
+    case Primitive::kPrimInt: {
+      Register dst = locations->Out().AsRegister<Register>();
+      Register lhs = locations->InAt(0).AsRegister<Register>();
+      Register rhs = locations->InAt(1).AsRegister<Register>();
+      if (isR6) {
+        __ ModR6(dst, lhs, rhs);
+      } else {
+        __ ModR2(dst, lhs, rhs);
+      }
+      break;
+    }
+    case Primitive::kPrimLong: {
+      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
+                              instruction,
+                              instruction->GetDexPc(),
+                              nullptr,
+                              IsDirectEntrypoint(kQuickLmod));
+      CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
+      break;
+    }
+    case Primitive::kPrimFloat: {
+      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf),
+                              instruction, instruction->GetDexPc(),
+                              nullptr,
+                              IsDirectEntrypoint(kQuickFmodf));
+      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
+      break;
+    }
+    case Primitive::kPrimDouble: {
+      codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod),
+                              instruction, instruction->GetDexPc(),
+                              nullptr,
+                              IsDirectEntrypoint(kQuickFmod));
+      CheckEntrypointTypes<kQuickFmod, double, double, double>();
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unexpected rem type " << type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+  memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+  Primitive::Type return_type = ret->InputAt(0)->GetType();
+  locations->SetInAt(0, MipsReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+  codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
+  ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+  codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitShl(HShl* shl) {
+  HandleShift(shl);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
+  HandleShift(shl);
+}
+
+void LocationsBuilderMIPS::VisitShr(HShr* shr) {
+  HandleShift(shr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
+  HandleShift(shr);
+}
+
+void LocationsBuilderMIPS::VisitStoreLocal(HStoreLocal* store) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+  Primitive::Type field_type = store->InputAt(1)->GetType();
+  switch (field_type) {
+    case Primitive::kPrimNot:
+    case Primitive::kPrimBoolean:
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+      break;
+
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+      break;
+
+    default:
+      LOG(FATAL) << "Unimplemented local type " << field_type;
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+  HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+                                                 instruction->GetFieldType(),
+                                                 calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+                                                 instruction->GetFieldType(),
+                                                 calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+                                                 instruction->GetFieldType(),
+                                                 calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+                                                 instruction->GetFieldType(),
+                                                 calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+  HBasicBlock* block = instruction->GetBlock();
+  if (block->GetLoopInformation() != nullptr) {
+    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
+    // The back edge will generate the suspend check.
+    return;
+  }
+  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
+    // The goto will generate the suspend check.
+    return;
+  }
+  GenerateSuspendCheck(instruction, nullptr);
+}
+
+void LocationsBuilderMIPS::VisitTemporary(HTemporary* temp) {
+  temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
+  // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
+  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+                          instruction,
+                          instruction->GetDexPc(),
+                          nullptr,
+                          IsDirectEntrypoint(kQuickDeliverException));
+  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
+void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+  Primitive::Type input_type = conversion->GetInputType();
+  Primitive::Type result_type = conversion->GetResultType();
+  DCHECK_NE(input_type, result_type);
+
+  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+  }
+
+  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
+      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
+    call_kind = LocationSummary::kCall;
+  }
+
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+  if (call_kind == LocationSummary::kNoCall) {
+    if (Primitive::IsFloatingPointType(input_type)) {
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+    } else {
+      locations->SetInAt(0, Location::RequiresRegister());
+    }
+
+    if (Primitive::IsFloatingPointType(result_type)) {
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+    } else {
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+    }
+  } else {
+    InvokeRuntimeCallingConvention calling_convention;
+
+    if (Primitive::IsFloatingPointType(input_type)) {
+      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+    } else {
+      DCHECK_EQ(input_type, Primitive::kPrimLong);
+      locations->SetInAt(0, Location::RegisterPairLocation(
+                 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+    }
+
+    locations->SetOut(calling_convention.GetReturnLocation(result_type));
+  }
+}
+
+void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+  LocationSummary* locations = conversion->GetLocations();
+  Primitive::Type result_type = conversion->GetResultType();
+  Primitive::Type input_type = conversion->GetInputType();
+  bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+
+  DCHECK_NE(input_type, result_type);
+
+  if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) {
+    Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+    Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+    Register src = locations->InAt(0).AsRegister<Register>();
+
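+    // Sign-extend to 64 bits: copy the low word, then replicate the sign bit
+    // into the high word with an arithmetic shift right by 31.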
+    __ Move(dst_low, src);
+    __ Sra(dst_high, src, 31);
+  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
+    Register dst = locations->Out().AsRegister<Register>();
+    Register src = (input_type == Primitive::kPrimLong)
+        ? locations->InAt(0).AsRegisterPairLow<Register>()
+        : locations->InAt(0).AsRegister<Register>();
+
+    switch (result_type) {
+      case Primitive::kPrimChar:
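+        // char is an unsigned 16-bit type, so zero-extend by masking.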
+        __ Andi(dst, src, 0xFFFF);
+        break;
+      case Primitive::kPrimByte:
+        if (has_sign_extension) {
+          __ Seb(dst, src);
+        } else {
+          __ Sll(dst, src, 24);
+          __ Sra(dst, dst, 24);
+        }
+        break;
+      case Primitive::kPrimShort:
+        if (has_sign_extension) {
+          __ Seh(dst, src);
+        } else {
+          __ Sll(dst, src, 16);
+          __ Sra(dst, dst, 16);
+        }
+        break;
+      case Primitive::kPrimInt:
+        __ Move(dst, src);
+        break;
+
+      default:
+        LOG(FATAL) << "Unexpected type conversion from " << input_type
+                   << " to " << result_type;
+    }
+  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
+    if (input_type != Primitive::kPrimLong) {
+      Register src = locations->InAt(0).AsRegister<Register>();
+      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+      __ Mtc1(src, FTMP);
+      if (result_type == Primitive::kPrimFloat) {
+        __ Cvtsw(dst, FTMP);
+      } else {
+        __ Cvtdw(dst, FTMP);
+      }
+    } else {
+      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
+                                                                    : QUICK_ENTRY_POINT(pL2d);
+      bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
+                                                           : IsDirectEntrypoint(kQuickL2d);
+      codegen_->InvokeRuntime(entry_offset,
+                              conversion,
+                              conversion->GetDexPc(),
+                              nullptr,
+                              direct);
+      if (result_type == Primitive::kPrimFloat) {
+        CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+      } else {
+        CheckEntrypointTypes<kQuickL2d, double, int64_t>();
+      }
+    }
+  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
+    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
+    int32_t entry_offset;
+    bool direct;
+    if (result_type != Primitive::kPrimLong) {
+      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
+                                                           : QUICK_ENTRY_POINT(pD2iz);
+      direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2iz)
+                                                     : IsDirectEntrypoint(kQuickD2iz);
+    } else {
+      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
+                                                           : QUICK_ENTRY_POINT(pD2l);
+      direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
+                                                     : IsDirectEntrypoint(kQuickD2l);
+    }
+    codegen_->InvokeRuntime(entry_offset,
+                            conversion,
+                            conversion->GetDexPc(),
+                            nullptr,
+                            direct);
+    if (result_type != Primitive::kPrimLong) {
+      if (input_type == Primitive::kPrimFloat) {
+        CheckEntrypointTypes<kQuickF2iz, int32_t, float>();
+      } else {
+        CheckEntrypointTypes<kQuickD2iz, int32_t, double>();
+      }
+    } else {
+      if (input_type == Primitive::kPrimFloat) {
+        CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+      } else {
+        CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+      }
+    }
+  } else if (Primitive::IsFloatingPointType(result_type) &&
+             Primitive::IsFloatingPointType(input_type)) {
+    FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+    FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+    if (result_type == Primitive::kPrimFloat) {
+      __ Cvtsd(dst, src);
+    } else {
+      __ Cvtds(dst, src);
+    }
+  } else {
+    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+                << " to " << result_type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
+  HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
+  HandleShift(ushr);
+}
+
+void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
+  HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, this should be removed by the PrepareForRegisterAllocation pass.
+  LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+  // Nothing to do, this should be removed by the PrepareForRegisterAllocation pass.
+  LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+  VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitFakeString(HFakeString* instruction) {
+  DCHECK(codegen_->IsBaseline());
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+  DCHECK(codegen_->IsBaseline());
+  // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+  int32_t lower_bound = switch_instr->GetStartValue();
+  int32_t num_entries = switch_instr->GetNumEntries();
+  LocationSummary* locations = switch_instr->GetLocations();
+  Register value_reg = locations->InAt(0).AsRegister<Register>();
+  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+  // Create a set of compare/jumps.
+  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+  for (int32_t i = 0; i < num_entries; ++i) {
+    int32_t case_value = lower_bound + i;
+    MipsLabel* successor_label = codegen_->GetLabelOf(successors[i]);
+    if (case_value == 0) {
+      __ Beqz(value_reg, successor_label);
+    } else {
+      __ LoadConst32(TMP, case_value);
+      __ Beq(value_reg, TMP, successor_label);
+    }
+  }
+
+  // Insert the default branch for every other value.
+  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+    __ B(codegen_->GetLabelOf(default_block));
+  }
+}
+
+void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  // The trampoline uses the same calling convention as dex calls, except that
+  // instead of loading arg0/a0 with the target Method*, arg0/a0 will contain
+  // the method_idx.
+  HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+}
+
+#undef __
+#undef QUICK_ENTRY_POINT
+
+}  // namespace mips
+}  // namespace art
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
new file mode 100644
index 0000000..059131d
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.h
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+
+#include "code_generator.h"
+#include "dex/compiler_enums.h"
+#include "driver/compiler_options.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/mips/assembler_mips.h"
+
+namespace art {
+namespace mips {
+
+// InvokeDexCallingConvention registers
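+// (A0 is not part of the argument registers: on entry it holds the ArtMethod*.)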
+
+static constexpr Register kParameterCoreRegisters[] =
+    { A1, A2, A3 };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+static constexpr FRegister kParameterFpuRegisters[] =
+    { F12, F14 };
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+
+
+// InvokeRuntimeCallingConvention registers
+
+static constexpr Register kRuntimeParameterCoreRegisters[] =
+    { A0, A1, A2, A3 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+    arraysize(kRuntimeParameterCoreRegisters);
+
+static constexpr FRegister kRuntimeParameterFpuRegisters[] =
+    { F12, F14 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+    arraysize(kRuntimeParameterFpuRegisters);
+
+
+static constexpr Register kCoreCalleeSaves[] =
+    { S0, S1, S2, S3, S4, S5, S6, S7, FP, RA };
+static constexpr FRegister kFpuCalleeSaves[] =
+    { F20, F22, F24, F26, F28, F30 };
+
+
+class CodeGeneratorMIPS;
+
+class InvokeDexCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+  InvokeDexCallingConvention()
+      : CallingConvention(kParameterCoreRegisters,
+                          kParameterCoreRegistersLength,
+                          kParameterFpuRegisters,
+                          kParameterFpuRegistersLength,
+                          kMipsPointerSize) {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionVisitor {
+ public:
+  InvokeDexCallingConventionVisitorMIPS() {}
+  virtual ~InvokeDexCallingConventionVisitorMIPS() {}
+
+  Location GetNextLocation(Primitive::Type type) OVERRIDE;
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
+  Location GetMethodLocation() const OVERRIDE;
+
+ private:
+  InvokeDexCallingConvention calling_convention;
+
+  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS);
+};
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+  InvokeRuntimeCallingConvention()
+      : CallingConvention(kRuntimeParameterCoreRegisters,
+                          kRuntimeParameterCoreRegistersLength,
+                          kRuntimeParameterFpuRegisters,
+                          kRuntimeParameterFpuRegistersLength,
+                          kMipsPointerSize) {}
+
+  Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionMIPS() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(A1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(A0);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(V0, V1)
+        : Location::RegisterLocation(V0);
+  }
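+  // 64-bit values to store are passed in the (A2, A3) pair. For instance field
+  // sets A1 holds the object (see GetObjectLocation()), so a 32-bit value goes
+  // in A2; static sets can use A1.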
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(A2, A3)
+        : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::FpuRegisterLocation(F0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS);
+};
+
+class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
+ public:
+  ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
+      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+  void EmitMove(size_t index) OVERRIDE;
+  void EmitSwap(size_t index) OVERRIDE;
+  void SpillScratch(int reg) OVERRIDE;
+  void RestoreScratch(int reg) OVERRIDE;
+
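+  // Swaps the contents of two stack slots; `double_slot` selects a 64-bit
+  // (two-word) exchange.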
+  void Exchange(int index1, int index2, bool double_slot);
+
+  MipsAssembler* GetAssembler() const;
+
+ private:
+  CodeGeneratorMIPS* const codegen_;
+
+  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS);
+};
+
+class SlowPathCodeMIPS : public SlowPathCode {
+ public:
+  SlowPathCodeMIPS() : entry_label_(), exit_label_() {}
+
+  MipsLabel* GetEntryLabel() { return &entry_label_; }
+  MipsLabel* GetExitLabel() { return &exit_label_; }
+
+ private:
+  MipsLabel entry_label_;
+  MipsLabel exit_label_;
+
+  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS);
+};
+
+class LocationsBuilderMIPS : public HGraphVisitor {
+ public:
+  LocationsBuilderMIPS(HGraph* graph, CodeGeneratorMIPS* codegen)
+      : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super)     \
+  void Visit##name(H##name* instr) OVERRIDE;
+
+  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+               << " (id " << instruction->GetId() << ")";
+  }
+
+ private:
+  void HandleInvoke(HInvoke* invoke);
+  void HandleBinaryOp(HBinaryOperation* operation);
+  void HandleShift(HBinaryOperation* operation);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+  InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
+
+  CodeGeneratorMIPS* const codegen_;
+
+  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
+};
+
+class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+ public:
+  InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super)     \
+  void Visit##name(H##name* instr) OVERRIDE;
+
+  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+  void VisitInstruction(HInstruction* instruction) OVERRIDE {
+    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+               << " (id " << instruction->GetId() << ")";
+  }
+
+  MipsAssembler* GetAssembler() const { return assembler_; }
+
+ private:
+  void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
+  void GenerateMemoryBarrier(MemBarrierKind kind);
+  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+  void HandleBinaryOp(HBinaryOperation* operation);
+  void HandleShift(HBinaryOperation* operation);
+  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+  void GenerateImplicitNullCheck(HNullCheck* instruction);
+  void GenerateExplicitNullCheck(HNullCheck* instruction);
+  void GenerateTestAndBranch(HInstruction* instruction,
+                             MipsLabel* true_target,
+                             MipsLabel* false_target,
+                             MipsLabel* always_true_target);
+  void HandleGoto(HInstruction* got, HBasicBlock* successor);
+
+  MipsAssembler* const assembler_;
+  CodeGeneratorMIPS* const codegen_;
+
+  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS);
+};
+
+class CodeGeneratorMIPS : public CodeGenerator {
+ public:
+  CodeGeneratorMIPS(HGraph* graph,
+                    const MipsInstructionSetFeatures& isa_features,
+                    const CompilerOptions& compiler_options,
+                    OptimizingCompilerStats* stats = nullptr);
+  virtual ~CodeGeneratorMIPS() {}
+
+  void GenerateFrameEntry() OVERRIDE;
+  void GenerateFrameExit() OVERRIDE;
+
+  void Bind(HBasicBlock* block) OVERRIDE;
+
+  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+  void Move32(Location destination, Location source);
+  void Move64(Location destination, Location source);
+  void MoveConstant(Location location, HConstant* c);
+
+  size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+
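+  // FPU spill slots are doubleword-sized so that spilled doubles fit.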
+  size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
+
+  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+    return assembler_.GetLabelLocation(GetLabelOf(block));
+  }
+
+  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+  MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+  const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+
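+  // Marks the GC card of `object` after a reference `value` has been stored into it.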
+  void MarkGCCard(Register object, Register value);
+
+  // Register allocation.
+
+  void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
+  // AllocateFreeRegister() is only used when allocating registers locally
+  // during CompileBaseline().
+  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+
+  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+  // Blocks all register pairs made out of blocked core registers.
+  void UpdateBlockedPairRegisters() const;
+
+  InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+
+  const MipsInstructionSetFeatures& GetInstructionSetFeatures() const {
+    return isa_features_;
+  }
+
+  MipsLabel* GetLabelOf(HBasicBlock* block) const {
+    return CommonGetLabelOf<MipsLabel>(block_labels_, block);
+  }
+
+  void Initialize() OVERRIDE {
+    block_labels_ = CommonInitializeLabels<MipsLabel>();
+  }
+
+  void Finalize(CodeAllocator* allocator) OVERRIDE;
+
+  // Code generation helpers.
+
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+
+  void MoveConstant(Location destination, int32_t value);
+
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+  // Generate code to invoke a runtime entry point.
+  void InvokeRuntime(QuickEntrypointEnum entrypoint,
+                     HInstruction* instruction,
+                     uint32_t dex_pc,
+                     SlowPathCode* slow_path) OVERRIDE;
+
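+  // MIPS-specific overload taking the raw entrypoint offset; `is_direct_entrypoint`
+  // selects the call sequence used for entrypoints that MIPS invokes directly.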
+  void InvokeRuntime(int32_t offset,
+                     HInstruction* instruction,
+                     uint32_t dex_pc,
+                     SlowPathCode* slow_path,
+                     bool is_direct_entrypoint);
+
+  ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+
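+  // On 32-bit MIPS a long value occupies a pair of core registers.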
+  bool NeedsTwoRegisters(Primitive::Type type) const {
+    return type == Primitive::kPrimLong;
+  }
+
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return fall-back info that should be used instead.
+  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) OVERRIDE;
+
+  void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+  void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
+                           Location temp ATTRIBUTE_UNUSED) OVERRIDE {
+    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+  }
+
+  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
+                              Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+  }
+
+ private:
+  // Labels for each block that will be compiled.
+  MipsLabel* block_labels_;
+  MipsLabel frame_entry_label_;
+  LocationsBuilderMIPS location_builder_;
+  InstructionCodeGeneratorMIPS instruction_visitor_;
+  ParallelMoveResolverMIPS move_resolver_;
+  MipsAssembler assembler_;
+  const MipsInstructionSetFeatures& isa_features_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
+};
+
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index f561c97..55efd5f 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -342,8 +342,7 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
-                                                        : locations->Out();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
     uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -2529,6 +2528,37 @@
   return false;
 }
 
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method ATTRIBUTE_UNUSED) {
+  switch (desired_dispatch_info.method_load_kind) {
+    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+      // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
+      return HInvokeStaticOrDirect::DispatchInfo {
+        HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        0u,
+        0u
+      };
+    default:
+      break;
+  }
+  switch (desired_dispatch_info.code_ptr_location) {
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+      // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
+      return HInvokeStaticOrDirect::DispatchInfo {
+        desired_dispatch_info.method_load_kind,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        desired_dispatch_info.method_load_data,
+        0u
+      };
+    default:
+      return desired_dispatch_info;
+  }
+}
+
 void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   // All registers are assumed to be correctly set up per the calling convention.
 
@@ -2548,13 +2578,11 @@
       __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
-      // TODO: Implement this type. (Needs literal support.) At the moment, the
-      // CompilerDriver will not direct the backend to use this type for MIPS.
-      LOG(FATAL) << "Unsupported!";
-      UNREACHABLE();
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
-      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
-      FALLTHROUGH_INTENDED;
+      // TODO: Implement these types.
+      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
       Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
       GpuRegister reg = temp.AsRegister<GpuRegister>();
@@ -2594,12 +2622,12 @@
       // LR()
       __ Jalr(T9);
       break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
-      // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
-      FALLTHROUGH_INTENDED;
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
-      // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
-      FALLTHROUGH_INTENDED;
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+      // TODO: Implement these types.
+      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // T9 = callee_method->entry_point_from_quick_compiled_code_;
       __ LoadFromOffset(kLoadDoubleword,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 7799437..9bbd027 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -217,9 +217,6 @@
   Mips64Assembler* GetAssembler() const { return assembler_; }
 
  private:
-  // Generate code for the given suspend check. If not null, `successor`
-  // is the block to branch to if the suspend check is not needed, and after
-  // the suspend call.
   void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
   void GenerateMemoryBarrier(MemBarrierKind kind);
   void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
@@ -329,6 +326,12 @@
 
   bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
 
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return fall-back info that should be used instead.
+  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) OVERRIDE;
+
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
                            Location temp ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 963eec2..0df7e3b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3757,6 +3757,34 @@
   }
 }
 
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method ATTRIBUTE_UNUSED) {
+  if (desired_dispatch_info.method_load_kind ==
+      HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
+    // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+    return HInvokeStaticOrDirect::DispatchInfo {
+      HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+      HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+      0u,
+      0u
+    };
+  }
+  switch (desired_dispatch_info.code_ptr_location) {
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+      // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
+      // (Though the direct CALL ptr16:32 is available for consideration).
+      return HInvokeStaticOrDirect::DispatchInfo {
+        desired_dispatch_info.method_load_kind,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        desired_dispatch_info.method_load_data,
+        0u
+      };
+    default:
+      return desired_dispatch_info;
+  }
+}
 
 void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
   Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
@@ -3777,8 +3805,10 @@
       __ Bind(&method_patches_.back().label);  // Bind the label at the end of the "movl" insn.
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
-      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
-      FALLTHROUGH_INTENDED;
+      // TODO: Implement this type.
+      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
     case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
       Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
       Register method_reg;
@@ -3814,9 +3844,9 @@
     }
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
-      // (Though the direct CALL ptr16:32 is available for consideration).
-      FALLTHROUGH_INTENDED;
+      // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // (callee_method + offset_of_quick_compiled_code)()
       __ call(Address(callee_method.AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fdfc5ab..ac3d06c 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -333,6 +333,12 @@
   // Helper method to move a 64bits value between two locations.
   void Move64(Location destination, Location source);
 
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return fall-back info that should be used instead.
+  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) OVERRIDE;
+
   // Generate a call to a static or direct method.
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   // Generate a call to a virtual method.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ed2e4ca..5218d70 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -473,6 +473,24 @@
   UNREACHABLE();
 }
 
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method ATTRIBUTE_UNUSED) {
+  switch (desired_dispatch_info.code_ptr_location) {
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+      // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
+      return HInvokeStaticOrDirect::DispatchInfo {
+        desired_dispatch_info.method_load_kind,
+        HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+        desired_dispatch_info.method_load_data,
+        0u
+      };
+    default:
+      return desired_dispatch_info;
+  }
+}
+
 void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
                                                      Location temp) {
   // All registers are assumed to be correctly set up.
@@ -539,8 +557,9 @@
     }
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
     case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
-      // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
-      FALLTHROUGH_INTENDED;
+      // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+      LOG(FATAL) << "Unsupported";
+      UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // (callee_method + offset_of_quick_compiled_code)()
       __ call(Address(callee_method.AsRegister<CpuRegister>(),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dc86a48..fc485f5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -335,6 +335,12 @@
     return false;
   }
 
+  // Check if the desired_dispatch_info is supported. If it is, return it;
+  // otherwise return fall-back info that should be used instead.
+  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+      MethodReference target_method) OVERRIDE;
+
   void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
   void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
 
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index fe5af2f..57de41f 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -20,6 +20,8 @@
 #include "arch/arm/instruction_set_features_arm.h"
 #include "arch/arm/registers_arm.h"
 #include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/registers_mips.h"
 #include "arch/mips64/instruction_set_features_mips64.h"
 #include "arch/mips64/registers_mips64.h"
 #include "arch/x86/instruction_set_features_x86.h"
@@ -29,6 +31,7 @@
 #include "builder.h"
 #include "code_generator_arm.h"
 #include "code_generator_arm64.h"
+#include "code_generator_mips.h"
 #include "code_generator_mips64.h"
 #include "code_generator_x86.h"
 #include "code_generator_x86_64.h"
@@ -43,6 +46,7 @@
 #include "ssa_liveness_analysis.h"
 #include "utils.h"
 #include "utils/arm/managed_register_arm.h"
+#include "utils/mips/managed_register_mips.h"
 #include "utils/mips64/managed_register_mips64.h"
 #include "utils/x86/managed_register_x86.h"
 
@@ -177,6 +181,14 @@
     Run(allocator, codegenARM64, has_result, expected);
   }
 
+  std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+      MipsInstructionSetFeatures::FromCppDefines());
+  mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+  codegenMIPS.CompileBaseline(&allocator, true);
+  if (kRuntimeISA == kMips) {
+    Run(allocator, codegenMIPS, has_result, expected);
+  }
+
   std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
       Mips64InstructionSetFeatures::FromCppDefines());
   mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
@@ -234,6 +246,11 @@
         X86_64InstructionSetFeatures::FromCppDefines());
     x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
     RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+  } else if (kRuntimeISA == kMips) {
+    std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+        MipsInstructionSetFeatures::FromCppDefines());
+    mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+    RunCodeOptimized(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kMips64) {
     std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
         Mips64InstructionSetFeatures::FromCppDefines());
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 4abe5e9..e1a8c9c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -203,19 +203,23 @@
 
   int64_t value = CodeGenerator::GetInt64ValueOf(constant);
 
-  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
-      instr->IsCompare() || instr->IsBoundsCheck()) {
+  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+    // Uses logical operations.
+    return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+  } else if (instr->IsNeg()) {
+    // Uses mov -immediate.
+    return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+  } else {
+    DCHECK(instr->IsAdd() ||
+           instr->IsArm64IntermediateAddress() ||
+           instr->IsBoundsCheck() ||
+           instr->IsCompare() ||
+           instr->IsCondition() ||
+           instr->IsSub());
     // Uses aliases of ADD/SUB instructions.
     // If `value` does not fit but `-value` does, VIXL will automatically use
     // the 'opposite' instruction.
     return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
-  } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
-    // Uses logical operations.
-    return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
-  } else {
-    DCHECK(instr->IsNeg());
-    // Uses mov -immediate.
-    return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
   }
 }
 
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 0a1758a..c36de84 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -16,11 +16,11 @@
 
 #include "gvn.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/bit_vector-inl.h"
 #include "side_effects_analysis.h"
 #include "utils.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index aa375f6..de60cf2 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -49,6 +49,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -57,6 +58,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -66,6 +68,7 @@
                                                            MemberOffset(43),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -77,6 +80,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -85,6 +89,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -128,6 +133,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -150,6 +156,7 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
+                                                          kUnknownClassDefIndex,
                                                           graph->GetDexFile(),
                                                           dex_cache,
                                                           0));
@@ -159,6 +166,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -168,6 +176,7 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
+                                                          kUnknownClassDefIndex,
                                                           graph->GetDexFile(),
                                                           dex_cache,
                                                           0));
@@ -208,6 +217,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -230,6 +240,7 @@
                                                                  MemberOffset(42),
                                                                  false,
                                                                  kUnknownFieldIndex,
+                                                                 kUnknownClassDefIndex,
                                                                  graph->GetDexFile(),
                                                                  dex_cache,
                                                                  0));
@@ -244,6 +255,7 @@
                                                                MemberOffset(42),
                                                                false,
                                                                kUnknownFieldIndex,
+                                                               kUnknownClassDefIndex,
                                                                graph->GetDexFile(),
                                                                dex_cache,
                                                                0));
@@ -253,6 +265,7 @@
                                                                MemberOffset(42),
                                                                false,
                                                                kUnknownFieldIndex,
+                                                               kUnknownClassDefIndex,
                                                                graph->GetDexFile(),
                                                                dex_cache,
                                                                0));
@@ -264,6 +277,7 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
+                                                          kUnknownClassDefIndex,
                                                           graph->GetDexFile(),
                                                           dex_cache,
                                                           0));
@@ -364,6 +378,7 @@
                                                              MemberOffset(42),
                                                              false,
                                                              kUnknownFieldIndex,
+                                                             kUnknownClassDefIndex,
                                                              graph->GetDexFile(),
                                                              dex_cache,
                                                              0));
@@ -388,6 +403,7 @@
                                            MemberOffset(42),
                                            false,
                                            kUnknownFieldIndex,
+                                           kUnknownClassDefIndex,
                                            graph->GetDexFile(),
                                            dex_cache,
                                            0),
@@ -413,6 +429,7 @@
                                            MemberOffset(42),
                                            false,
                                            kUnknownFieldIndex,
+                                           kUnknownClassDefIndex,
                                            graph->GetDexFile(),
                                            dex_cache,
                                            0),
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e2aca30..0aaa6b3 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -32,6 +32,7 @@
 #include "optimizing_compiler.h"
 #include "reference_type_propagation.h"
 #include "register_allocator.h"
+#include "sharpening.h"
 #include "ssa_phi_elimination.h"
 #include "scoped_thread_state_change.h"
 #include "thread.h"
@@ -396,12 +397,14 @@
   HDeadCodeElimination dce(callee_graph, stats_);
   HConstantFolding fold(callee_graph);
   ReferenceTypePropagation type_propagation(callee_graph, handles_);
+  HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
   InstructionSimplifier simplify(callee_graph, stats_);
   IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
 
   HOptimization* optimizations[] = {
     &intrinsics,
     &type_propagation,
+    &sharpening,
     &simplify,
     &dce,
     &fold,
@@ -415,6 +418,7 @@
   size_t number_of_instructions_budget = kMaximumNumberOfHInstructions;
   if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
     HInliner inliner(callee_graph,
+                     codegen_,
                      outer_compilation_unit_,
                      dex_compilation_unit,
                      compiler_driver_,
@@ -484,7 +488,7 @@
         return false;
       }
 
-      if (!same_dex_file && current->NeedsDexCache()) {
+      if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
         VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
                        << " could not be inlined because " << current->DebugName()
                        << " it is in a different dex file and requires access to the dex cache";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index bce5915..0f6a945 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -22,6 +22,7 @@
 
 namespace art {
 
+class CodeGenerator;
 class CompilerDriver;
 class DexCompilationUnit;
 class HGraph;
@@ -31,6 +32,7 @@
 class HInliner : public HOptimization {
  public:
   HInliner(HGraph* outer_graph,
+           CodeGenerator* codegen,
            const DexCompilationUnit& outer_compilation_unit,
            const DexCompilationUnit& caller_compilation_unit,
            CompilerDriver* compiler_driver,
@@ -40,6 +42,7 @@
       : HOptimization(outer_graph, kInlinerPassName, stats),
         outer_compilation_unit_(outer_compilation_unit),
         caller_compilation_unit_(caller_compilation_unit),
+        codegen_(codegen),
         compiler_driver_(compiler_driver),
         depth_(depth),
         number_of_inlined_instructions_(0),
@@ -57,6 +60,7 @@
 
   const DexCompilationUnit& outer_compilation_unit_;
   const DexCompilationUnit& caller_compilation_unit_;
+  CodeGenerator* const codegen_;
   CompilerDriver* const compiler_driver_;
   const size_t depth_;
   size_t number_of_inlined_instructions_;
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 7814eb9..b97dc1a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -625,9 +625,9 @@
   // Try to fold an HCompare into this HCondition.
 
   // This simplification is currently supported on x86, x86_64, ARM and ARM64.
-  // TODO: Implement it for MIPS64.
+  // TODO: Implement it for MIPS and MIPS64.
   InstructionSet instruction_set = GetGraph()->GetInstructionSet();
-  if (instruction_set == kMips64) {
+  if (instruction_set == kMips || instruction_set == kMips64) {
     return;
   }
 
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 4b2d36f..eb79f46 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -16,8 +16,65 @@
 
 #include "instruction_simplifier_arm64.h"
 
+#include "mirror/array-inl.h"
+
 namespace art {
 namespace arm64 {
 
+void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
+                                                                     HInstruction* array,
+                                                                     HInstruction* index,
+                                                                     int access_size) {
+  if (index->IsConstant() ||
+      (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
+    // When the index is a constant, the whole address computation can be
+    // folded into the memory access instruction, so do not split the access.
+    return;
+  }
+  if (access->IsArraySet() &&
+      access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) {
+    // The access may require a runtime call or the original array pointer.
+    return;
+  }
+
+  // Proceed to extract the base address computation.
+  ArenaAllocator* arena = GetGraph()->GetArena();
+
+  HIntConstant* offset =
+      GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
+  HArm64IntermediateAddress* address =
+      new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
+  access->GetBlock()->InsertInstructionBefore(address, access);
+  access->ReplaceInput(address, 0);
+  // Both instructions must depend on GC to prevent any instruction that can
+  // trigger GC to be inserted between the two.
+  access->AddSideEffects(SideEffects::DependsOnGC());
+  DCHECK(address->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+  DCHECK(access->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+  // TODO: Code generation for HArrayGet and HArraySet will check whether the input address
+  // is an HArm64IntermediateAddress and generate appropriate code.
+  // We would like to replace the `HArrayGet` and `HArraySet` with custom instructions (maybe
+  // `HArm64Load` and `HArm64Store`). We defer these changes because these new instructions would
+  // not bring any advantages yet.
+  // Also see the comments in
+  // `InstructionCodeGeneratorARM64::VisitArrayGet()` and
+  // `InstructionCodeGeneratorARM64::VisitArraySet()`.
+  RecordSimplification();
+}
+
+void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
+  TryExtractArrayAccessAddress(instruction,
+                               instruction->GetArray(),
+                               instruction->GetIndex(),
+                               Primitive::ComponentSize(instruction->GetType()));
+}
+
+void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
+  TryExtractArrayAccessAddress(instruction,
+                               instruction->GetArray(),
+                               instruction->GetIndex(),
+                               Primitive::ComponentSize(instruction->GetComponentType()));
+}
+
 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d7f4eae..4b697db 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -35,6 +35,14 @@
     }
   }
 
+  void TryExtractArrayAccessAddress(HInstruction* access,
+                                    HInstruction* array,
+                                    HInstruction* index,
+                                    int access_size);
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+  void VisitArraySet(HArraySet* instruction) OVERRIDE;
+
   OptimizingCompilerStats* stats_;
 };
 
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 58e479a..0a5acc3 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -961,6 +961,14 @@
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
 }
 void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The UnsafeCASObject intrinsic does not always work when heap
+  // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+  // off temporarily as a quick fix.
+  // TODO(rpl): Fix it and turn it back on.
+  if (kPoisonHeapReferences) {
+    return;
+  }
+
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4da94ee..059abf0 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1087,6 +1087,14 @@
   CreateIntIntIntIntIntToInt(arena_, invoke);
 }
 void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The UnsafeCASObject intrinsic does not always work when heap
+  // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+  // off temporarily as a quick fix.
+  // TODO(rpl): Fix it and turn it back on.
+  if (kPoisonHeapReferences) {
+    return;
+  }
+
   CreateIntIntIntIntIntToInt(arena_, invoke);
 }
 
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 56c4177..0ab0b80 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -272,7 +272,9 @@
   GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
 }
 
-static void GenNumberOfLeadingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfLeadingZeroes(LocationSummary* locations,
+                                     bool is64bit,
+                                     Mips64Assembler* assembler) {
   GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
 
@@ -301,7 +303,9 @@
   GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler());
 }
 
-static void GenNumberOfTrailingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfTrailingZeroes(LocationSummary* locations,
+                                      bool is64bit,
+                                      Mips64Assembler* assembler) {
   Location in = locations->InAt(0);
   Location out = locations->Out();
 
@@ -383,7 +387,7 @@
   GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
 }
 
-// int java.lang.Long.rotateRight(long i, int distance)
+// long java.lang.Long.rotateRight(long i, int distance)
 void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
@@ -446,7 +450,7 @@
   GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
 }
 
-// int java.lang.Long.rotateLeft(long i, int distance)
+// long java.lang.Long.rotateLeft(long i, int distance)
 void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
@@ -754,17 +758,19 @@
   __ SqrtD(out, in);
 }
 
-static void CreateFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFP(ArenaAllocator* arena,
+                         HInvoke* invoke,
+                         Location::OutputOverlap overlaps = Location::kOutputOverlap) {
   LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
   locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+  locations->SetOut(Location::RequiresFpuRegister(), overlaps);
 }
 
 // double java.lang.Math.rint(double)
 void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
-  CreateFPToFP(arena_, invoke);
+  CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
@@ -788,15 +794,22 @@
                                              kQuietNaN |
                                              kSignalingNaN;
 
-void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
+enum FloatRoundingMode {
+  kFloor,
+  kCeil,
+};
+
+static void GenRoundingMode(LocationSummary* locations,
+                            FloatRoundingMode mode,
+                            Mips64Assembler* assembler) {
   FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
   FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
 
+  DCHECK_NE(in, out);
+
   Label done;
 
-  // double floor(double in) {
+  // double floor/ceil(double in) {
   //     if in.isNaN || in.isInfinite || in.isZero {
   //         return in;
   //     }
@@ -806,19 +819,23 @@
   __ MovD(out, in);
   __ Bnezc(AT, &done);
 
-  //     Long outLong = floor(in);
+  //     Long outLong = floor/ceil(in);
   //     if outLong == Long.MAX_VALUE {
-  //         // floor() has almost certainly returned a value which
-  //         // can't be successfully represented as a signed 64-bit
-  //         // number.  Java expects that the input value will be
-  //         // returned in these cases.
-  //         // There is also a small probability that floor(in)
-  //         // correctly truncates the input value to Long.MAX_VALUE.  In
-  //         // that case, this exception handling code still does the
-  //         // correct thing.
+  //         // floor()/ceil() has almost certainly returned a value
+  //         // which can't be successfully represented as a signed
+  //         // 64-bit number.  Java expects that the input value will
+  //         // be returned in these cases.
+  //         // There is also a small probability that floor(in)/ceil(in)
+  //         // correctly truncates/rounds up the input value to
+  //         // Long.MAX_VALUE.  In that case, this exception handling
+  //         // code still does the correct thing.
   //         return in;
   //     }
-  __ FloorLD(out, in);
+  if (mode == kFloor) {
+    __ FloorLD(out, in);
+  } else if (mode == kCeil) {
+    __ CeilLD(out, in);
+  }
   __ Dmfc1(AT, out);
   __ MovD(out, in);
   __ LoadConst64(TMP, kPrimLongMax);
@@ -832,53 +849,17 @@
   // }
 }
 
+void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
+  GenRoundingMode(invoke->GetLocations(), kFloor, GetAssembler());
+}
+
 // double java.lang.Math.ceil(double)
 void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
   CreateFPToFP(arena_, invoke);
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  Label done;
-
-  // double ceil(double in) {
-  //     if in.isNaN || in.isInfinite || in.isZero {
-  //         return in;
-  //     }
-  __ ClassD(out, in);
-  __ Dmfc1(AT, out);
-  __ Andi(AT, AT, kFPLeaveUnchanged);   // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
-  __ MovD(out, in);
-  __ Bnezc(AT, &done);
-
-  //     Long outLong = ceil(in);
-  //     if outLong == Long.MAX_VALUE {
-  //         // ceil() has almost certainly returned a value which
-  //         // can't be successfully represented as a signed 64-bit
-  //         // number.  Java expects that the input value will be
-  //         // returned in these cases.
-  //         // There is also a small probability that ceil(in)
-  //         // correctly rounds up the input value to Long.MAX_VALUE.  In
-  //         // that case, this exception handling code still does the
-  //         // correct thing.
-  //         return in;
-  //     }
-  __ CeilLD(out, in);
-  __ Dmfc1(AT, out);
-  __ MovD(out, in);
-  __ LoadConst64(TMP, kPrimLongMax);
-  __ Beqc(AT, TMP, &done);
-
-  //     double out = outLong;
-  //     return out;
-  __ Dmtc1(AT, out);
-  __ Cvtdl(out, out);
-  __ Bind(&done);
-  // }
+  GenRoundingMode(invoke->GetLocations(), kCeil, GetAssembler());
 }
 
 // byte libcore.io.Memory.peekByte(long address)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8a7aded..040bf6a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -45,7 +45,7 @@
 
 
 X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
-  return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  return down_cast<X86Assembler*>(codegen_->GetAssembler());
 }
 
 ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
@@ -1728,7 +1728,7 @@
                          Primitive::Type type,
                          bool is_volatile,
                          CodeGeneratorX86* codegen) {
-  X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
   Register base = locations->InAt(1).AsRegister<Register>();
   Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
   Location value_loc = locations->InAt(3);
@@ -1822,7 +1822,7 @@
   locations->SetOut(Location::RegisterLocation(EAX));
   if (type == Primitive::kPrimNot) {
     // Need temp registers for card-marking.
-    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     // Need a byte register for marking.
     locations->AddTemp(Location::RegisterLocation(ECX));
   }
@@ -1837,20 +1837,11 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic does not always work when heap
-  // poisoning is enabled (it breaks several libcore tests); turn it
-  // off temporarily as a quick fix.
-  // TODO(rpl): Fix it and turn it back on.
-  if (kPoisonHeapReferences) {
-    return;
-  }
-
   CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
 }
 
 static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
-  X86Assembler* assembler =
-    reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register base = locations->InAt(1).AsRegister<Register>();
@@ -1858,47 +1849,92 @@
   Location out = locations->Out();
   DCHECK_EQ(out.AsRegister<Register>(), EAX);
 
-  if (type == Primitive::kPrimLong) {
-    DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
-    DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
-    DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
-    DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
-    __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
-  } else {
-    // Integer or object.
+  if (type == Primitive::kPrimNot) {
     Register expected = locations->InAt(3).AsRegister<Register>();
+    // Ensure `expected` is in EAX (required by the CMPXCHG instruction).
     DCHECK_EQ(expected, EAX);
     Register value = locations->InAt(4).AsRegister<Register>();
-    if (type == Primitive::kPrimNot) {
-      // Mark card for object assuming new value is stored.
-      bool value_can_be_null = true;  // TODO: Worth finding out this information?
-      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
-                          locations->GetTemp(1).AsRegister<Register>(),
-                          base,
-                          value,
-                          value_can_be_null);
 
-      if (kPoisonHeapReferences) {
-        __ PoisonHeapReference(expected);
-        __ PoisonHeapReference(value);
+    // Mark card for object assuming new value is stored.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
+                        locations->GetTemp(1).AsRegister<Register>(),
+                        base,
+                        value,
+                        value_can_be_null);
+
+    bool base_equals_value = (base == value);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value` to a temporary register.  This way, poisoning
+        // `value` won't invalidate `base`.
+        value = locations->GetTemp(0).AsRegister<Register>();
+        __ movl(value, base);
       }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (EAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value, expected);
+      DCHECK_NE(base, expected);
+
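+      // The heap holds poisoned references, so poison `expected` and `value`
+      // to make the raw CMPXCHG comparison and store operate on the same
+      // representation as the in-heap value.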
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(value);
     }
 
     __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
-  }
 
-  // locked cmpxchg has full barrier semantics, and we don't need scheduling
-  // barriers at this time.
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
 
-  // Convert ZF into the boolean result.
-  __ setb(kZero, out.AsRegister<Register>());
-  __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
+    // Convert ZF into the boolean result.
+    __ setb(kZero, out.AsRegister<Register>());
+    __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
 
-  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    Register value = locations->InAt(4).AsRegister<Register>();
-    __ UnpoisonHeapReference(value);
-    // Do not unpoison the reference contained in register `expected`,
-    // as it is the same as register `out`.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value` has been moved to a temporary register, no need to
+        // unpoison it.
+      } else {
+        // Ensure `value` is different from `out`, so that unpoisoning
+        // the former does not invalidate the latter.
+        DCHECK_NE(value, out.AsRegister<Register>());
+        __ UnpoisonHeapReference(value);
+      }
+      // Do not unpoison the reference contained in register
+      // `expected`, as it is the same as register `out` (EAX).
+    }
+  } else {
+    if (type == Primitive::kPrimInt) {
+      // Ensure the expected value is in EAX (required by the CMPXCHG
+      // instruction).
+      DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
+      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0),
+                      locations->InAt(4).AsRegister<Register>());
+    } else if (type == Primitive::kPrimLong) {
+      // Ensure the expected value is in EAX:EDX and that the new
+      // value is in EBX:ECX (required by the CMPXCHG8B instruction).
+      DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
+      DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
+      DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
+      DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
+      __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
+    } else {
+      LOG(FATAL) << "Unexpected CAS type " << type;
+    }
+
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
+
+    // Convert ZF into the boolean result.
+    __ setb(kZero, out.AsRegister<Register>());
+    __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
   }
 }
 
@@ -1936,8 +1972,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
-  X86Assembler* assembler =
-    reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg = locations->InAt(0).AsRegister<Register>();
@@ -1968,8 +2003,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
-  X86Assembler* assembler =
-    reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7a1d92d..14c65c9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@
 
 
 X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
-  return reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
 }
 
 ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
@@ -1822,7 +1822,7 @@
 // memory model.
 static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool is_volatile,
                          CodeGeneratorX86_64* codegen) {
-  X86_64Assembler* assembler = reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
   CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
   CpuRegister value = locations->InAt(3).AsRegister<CpuRegister>();
@@ -1895,7 +1895,7 @@
   locations->SetOut(Location::RequiresRegister());
   if (type == Primitive::kPrimNot) {
     // Need temp registers for card-marking.
-    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
   }
 }
@@ -1909,61 +1909,95 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic does not always work when heap
-  // poisoning is enabled (it breaks several libcore tests); turn it
-  // off temporarily as a quick fix.
-  // TODO(rpl): Fix it and turn it back on.
-  if (kPoisonHeapReferences) {
-    return;
-  }
-
   CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
 }
 
 static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
-  X86_64Assembler* assembler =
-    reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
   CpuRegister expected = locations->InAt(3).AsRegister<CpuRegister>();
+  // Ensure `expected` is in RAX (required by the CMPXCHG instruction).
   DCHECK_EQ(expected.AsRegister(), RAX);
   CpuRegister value = locations->InAt(4).AsRegister<CpuRegister>();
   CpuRegister out = locations->Out().AsRegister<CpuRegister>();
 
-  if (type == Primitive::kPrimLong) {
-    __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
-  } else {
-    // Integer or object.
-    if (type == Primitive::kPrimNot) {
-      // Mark card for object assuming new value is stored.
-      bool value_can_be_null = true;  // TODO: Worth finding out this information?
-      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
-                          locations->GetTemp(1).AsRegister<CpuRegister>(),
-                          base,
-                          value,
-                          value_can_be_null);
+  if (type == Primitive::kPrimNot) {
+    // Mark card for object assuming new value is stored.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
+                        locations->GetTemp(1).AsRegister<CpuRegister>(),
+                        base,
+                        value,
+                        value_can_be_null);
 
-      if (kPoisonHeapReferences) {
-        __ PoisonHeapReference(expected);
-        __ PoisonHeapReference(value);
+    bool base_equals_value = (base.AsRegister() == value.AsRegister());
+    Register value_reg = value.AsRegister();
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value_reg` to a temporary register.  This way, poisoning
+        // `value_reg` won't invalidate `base`.
+        value_reg = locations->GetTemp(0).AsRegister<CpuRegister>().AsRegister();
+        __ movl(CpuRegister(value_reg), base);
       }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (RAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value_reg, expected.AsRegister());
+      DCHECK_NE(base.AsRegister(), expected.AsRegister());
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(CpuRegister(value_reg));
     }
 
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
-  }
+    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
 
-  // locked cmpxchg has full barrier semantics, and we don't need scheduling
-  // barriers at this time.
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
 
-  // Convert ZF into the boolean result.
-  __ setcc(kZero, out);
-  __ movzxb(out, out);
+    // Convert ZF into the boolean result.
+    __ setcc(kZero, out);
+    __ movzxb(out, out);
 
-  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    __ UnpoisonHeapReference(value);
-    __ UnpoisonHeapReference(expected);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value_reg` has been moved to a temporary register, no need
+        // to unpoison it.
+      } else {
+        // Ensure `value` is different from `out`, so that unpoisoning
+        // the former does not invalidate the latter.
+        DCHECK_NE(value_reg, out.AsRegister());
+        __ UnpoisonHeapReference(CpuRegister(value_reg));
+      }
+      // Ensure `expected` is different from `out`, so that unpoisoning
+      // the former does not invalidate the latter.
+      DCHECK_NE(expected.AsRegister(), out.AsRegister());
+      __ UnpoisonHeapReference(expected);
+    }
+  } else {
+    if (type == Primitive::kPrimInt) {
+      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
+    } else if (type == Primitive::kPrimLong) {
+      __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
+    } else {
+      LOG(FATAL) << "Unexpected CAS type " << type;
+    }
+
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
+
+    // Convert ZF into the boolean result.
+    __ setcc(kZero, out);
+    __ movzxb(out, out);
   }
 }
 
@@ -2001,8 +2035,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerReverse(HInvoke* invoke) {
-  X86_64Assembler* assembler =
-    reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
@@ -2046,8 +2079,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) {
-  X86_64Assembler* assembler =
-    reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index a036bd5..47457de 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -104,13 +104,19 @@
 
   // Populate the loop with instructions: set/get field with different types.
   NullHandle<mirror::DexCache> dex_cache;
-  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
-      parameter_, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+                                                                Primitive::kPrimLong,
+                                                                MemberOffset(10),
+                                                                false,
+                                                                kUnknownFieldIndex,
+                                                                kUnknownClassDefIndex,
+                                                                graph_->GetDexFile(),
+                                                                dex_cache,
+                                                                0);
   loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
   HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
       parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+      false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
   loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
 
   EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -125,13 +131,26 @@
 
   // Populate the loop with instructions: set/get field with same types.
   NullHandle<mirror::DexCache> dex_cache;
-  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
-      parameter_, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+                                                                Primitive::kPrimLong,
+                                                                MemberOffset(10),
+                                                                false,
+                                                                kUnknownFieldIndex,
+                                                                kUnknownClassDefIndex,
+                                                                graph_->GetDexFile(),
+                                                                dex_cache,
+                                                                0);
   loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
-  HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
-      parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+  HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
+                                                                get_field,
+                                                                Primitive::kPrimLong,
+                                                                MemberOffset(10),
+                                                                false,
+                                                                kUnknownFieldIndex,
+                                                                kUnknownClassDefIndex,
+                                                                graph_->GetDexFile(),
+                                                                dex_cache,
+                                                                0);
   loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
 
   EXPECT_EQ(get_field->GetBlock(), loop_body_);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
new file mode 100644
index 0000000..90f28e5
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -0,0 +1,913 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "load_store_elimination.h"
+#include "side_effects_analysis.h"
+
+#include <iostream>
+
+namespace art {
+
+class ReferenceInfo;
+
+// A cap for the number of heap locations to prevent pathological time/space consumption.
+// The number of heap locations for most methods stays below this threshold.
+constexpr size_t kMaxNumberOfHeapLocations = 32;
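+// With 32 locations, the pair-wise aliasing matrix needs at most 32*31/2 = 496 bits.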
+
+// A ReferenceInfo contains additional info about a reference such as
+// whether it's a singleton, returned, etc.
+class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
+ public:
+  ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
+    is_singleton_ = true;
+    is_singleton_and_not_returned_ = true;
+    if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
+      // For references not allocated in the method, don't assume anything.
+      is_singleton_ = false;
+      is_singleton_and_not_returned_ = false;
+      return;
+    }
+
+    // Visit all uses to determine if this reference can spread into the heap,
+    // a method call, etc.
+    for (HUseIterator<HInstruction*> use_it(reference_->GetUses());
+         !use_it.Done();
+         use_it.Advance()) {
+      HInstruction* use = use_it.Current()->GetUser();
+      DCHECK(!use->IsNullCheck()) << "NullCheck should have been eliminated";
+      if (use->IsBoundType()) {
+        // BoundType shouldn't normally be necessary for a NewInstance.
+        // Just be conservative for the uncommon cases.
+        is_singleton_ = false;
+        is_singleton_and_not_returned_ = false;
+        return;
+      }
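+      // For the *Set checks below, the reference escapes only when it is the
+      // value being stored (input 1 for field sets, input 2 for array sets),
+      // not when it is the base object of the store.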
+      if (use->IsPhi() || use->IsInvoke() ||
+          (use->IsInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsUnresolvedInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsArraySet() && (reference_ == use->InputAt(2)))) {
+        // reference_ is merged to a phi, passed to a callee, or stored to heap.
+        // reference_ isn't the only name that can refer to its value anymore.
+        is_singleton_ = false;
+        is_singleton_and_not_returned_ = false;
+        return;
+      }
+      if (use->IsReturn()) {
+        is_singleton_and_not_returned_ = false;
+      }
+    }
+  }
+
+  HInstruction* GetReference() const {
+    return reference_;
+  }
+
+  size_t GetPosition() const {
+    return position_;
+  }
+
+  // Returns true if reference_ is the only name that can refer to its value during
+  // the lifetime of the method, so it's guaranteed not to have any alias in
+  // the method (including its callees).
+  bool IsSingleton() const {
+    return is_singleton_;
+  }
+
+  // Returns true if reference_ is a singleton and not returned to the caller.
+  // The allocation and stores into reference_ may be eliminated for such cases.
+  bool IsSingletonAndNotReturned() const {
+    return is_singleton_and_not_returned_;
+  }
+
+ private:
+  HInstruction* const reference_;
+  const size_t position_;     // position in HeapLocationCollector's ref_info_array_.
+  bool is_singleton_;         // can only be referred to by a single name in the method.
+  bool is_singleton_and_not_returned_;  // reference_ is singleton and not returned to caller.
+
+  DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
+};
+
+// A heap location is a reference-offset/index pair that a value can be loaded from
+// or stored to.
+class HeapLocation : public ArenaObject<kArenaAllocMisc> {
+ public:
+  static constexpr size_t kInvalidFieldOffset = -1;
+
+  // TODO: more fine-grained array types.
+  static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
+
+  HeapLocation(ReferenceInfo* ref_info,
+               size_t offset,
+               HInstruction* index,
+               int16_t declaring_class_def_index)
+      : ref_info_(ref_info),
+        offset_(offset),
+        index_(index),
+        declaring_class_def_index_(declaring_class_def_index),
+        may_become_unknown_(true) {
+    DCHECK(ref_info != nullptr);
+    DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
+           (offset != kInvalidFieldOffset && index == nullptr));
+
+    if (ref_info->IsSingletonAndNotReturned()) {
+      // We try to track stores to singletons that aren't returned in order to
+      // eliminate the stores, since values in a singleton's fields cannot be
+      // killed due to aliasing. Those values can still be killed by merging,
+      // since we don't build phis for merging heap values.
+      // SetMayBecomeUnknown(true) may be called later once such a merge becomes possible.
+      may_become_unknown_ = false;
+    }
+  }
+
+  ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
+  size_t GetOffset() const { return offset_; }
+  HInstruction* GetIndex() const { return index_; }
+
+  // Returns the dex index of the declaring class' definition.
+  // It's kDeclaringClassDefIndexForArrays for an array element.
+  int16_t GetDeclaringClassDefIndex() const {
+    return declaring_class_def_index_;
+  }
+
+  bool IsArrayElement() const {
+    return index_ != nullptr;
+  }
+
+  // Returns true if this heap location's value may become unknown after it's
+  // set to a value, due to merge of values, or killed due to aliasing.
+  bool MayBecomeUnknown() const {
+    return may_become_unknown_;
+  }
+  void SetMayBecomeUnknown(bool val) {
+    may_become_unknown_ = val;
+  }
+
+ private:
+  ReferenceInfo* const ref_info_;      // reference for instance/static field or array access.
+  const size_t offset_;                // offset of static/instance field.
+  HInstruction* const index_;          // index of an array element.
+  const int16_t declaring_class_def_index_;  // declaring class's def's dex index.
+  bool may_become_unknown_;            // value may become kUnknownHeapValue.
+
+  DISALLOW_COPY_AND_ASSIGN(HeapLocation);
+};
+
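+// Strip NullCheck/BoundType decorations to get at the reference that actually
+// identifies the heap location.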
+static HInstruction* HuntForOriginalReference(HInstruction* ref) {
+  DCHECK(ref != nullptr);
+  while (ref->IsNullCheck() || ref->IsBoundType()) {
+    ref = ref->InputAt(0);
+  }
+  return ref;
+}
+
+// A HeapLocationCollector collects all relevant heap locations and keeps
+// an aliasing matrix for all locations.
+class HeapLocationCollector : public HGraphVisitor {
+ public:
+  static constexpr size_t kHeapLocationNotFound = -1;
+  // Start with a single uint32_t word. That's enough bits for pair-wise
+  // aliasing matrix of 8 heap locations.
+  static constexpr uint32_t kInitialAliasingMatrixBitVectorSize = 32;
+
+  explicit HeapLocationCollector(HGraph* graph)
+      : HGraphVisitor(graph),
+        ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        aliasing_matrix_(graph->GetArena(), kInitialAliasingMatrixBitVectorSize, true),
+        has_heap_stores_(false),
+        has_volatile_(false),
+        has_monitor_operations_(false),
+        may_deoptimize_(false) {}
+
+  size_t GetNumberOfHeapLocations() const {
+    return heap_locations_.size();
+  }
+
+  HeapLocation* GetHeapLocation(size_t index) const {
+    return heap_locations_[index];
+  }
+
+  ReferenceInfo* FindReferenceInfoOf(HInstruction* ref) const {
+    for (size_t i = 0; i < ref_info_array_.size(); i++) {
+      ReferenceInfo* ref_info = ref_info_array_[i];
+      if (ref_info->GetReference() == ref) {
+        DCHECK_EQ(i, ref_info->GetPosition());
+        return ref_info;
+      }
+    }
+    return nullptr;
+  }
+
+  bool HasHeapStores() const {
+    return has_heap_stores_;
+  }
+
+  bool HasVolatile() const {
+    return has_volatile_;
+  }
+
+  bool HasMonitorOps() const {
+    return has_monitor_operations_;
+  }
+
+  // Returns whether this method may be deoptimized.
+  // Currently we don't have metadata support for deoptimizing
+  // a method in which allocations/stores were eliminated.
+  bool MayDeoptimize() const {
+    return may_deoptimize_;
+  }
+
+  // Find and return the heap location index in heap_locations_.
+  size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
+                               size_t offset,
+                               HInstruction* index,
+                               int16_t declaring_class_def_index) const {
+    for (size_t i = 0; i < heap_locations_.size(); i++) {
+      HeapLocation* loc = heap_locations_[i];
+      if (loc->GetReferenceInfo() == ref_info &&
+          loc->GetOffset() == offset &&
+          loc->GetIndex() == index &&
+          loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
+        return i;
+      }
+    }
+    return kHeapLocationNotFound;
+  }
+
+  // Returns true if heap_locations_[index1] and heap_locations_[index2] may alias.
+  bool MayAlias(size_t index1, size_t index2) const {
+    if (index1 < index2) {
+      return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index1, index2));
+    } else if (index1 > index2) {
+      return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index2, index1));
+    } else {
+      DCHECK(false) << "index1 and index2 are expected to be different";
+      return true;
+    }
+  }
+
+  void BuildAliasingMatrix() {
+    const size_t number_of_locations = heap_locations_.size();
+    if (number_of_locations == 0) {
+      return;
+    }
+    size_t pos = 0;
+    // Compute aliasing info between every pair of different heap locations.
+    // Save the result in a matrix represented as a BitVector.
+    for (size_t i = 0; i < number_of_locations - 1; i++) {
+      for (size_t j = i + 1; j < number_of_locations; j++) {
+        if (ComputeMayAlias(i, j)) {
+          aliasing_matrix_.SetBit(CheckedAliasingMatrixPosition(i, j, pos));
+        }
+        pos++;
+      }
+    }
+  }
+
+ private:
+  // An allocation cannot alias with a name which already exists at the point
+  // of the allocation, such as a parameter or a load happening before the allocation.
+  bool MayAliasWithPreexistenceChecking(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+    if (ref_info1->GetReference()->IsNewInstance() || ref_info1->GetReference()->IsNewArray()) {
+      // Any reference that can alias with the allocation must appear after it in the block/in
+      // the block's successors. In reverse post order, those instructions will be visited after
+      // the allocation.
+      return ref_info2->GetPosition() >= ref_info1->GetPosition();
+    }
+    return true;
+  }
+
+  bool CanReferencesAlias(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+    if (ref_info1 == ref_info2) {
+      return true;
+    } else if (ref_info1->IsSingleton()) {
+      return false;
+    } else if (ref_info2->IsSingleton()) {
+      return false;
+    } else if (!MayAliasWithPreexistenceChecking(ref_info1, ref_info2) ||
+        !MayAliasWithPreexistenceChecking(ref_info2, ref_info1)) {
+      return false;
+    }
+    return true;
+  }
+
+  // `index1` and `index2` are indices in the array of collected heap locations.
+  // Returns the position in the bit vector that tracks whether the two heap
+  // locations may alias.
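+  // For example, with 4 heap locations the pairs map to bits
+  //   (0,1)->0 (0,2)->1 (0,3)->2 (1,2)->3 (1,3)->4 (2,3)->5,
+  // e.g. AliasingMatrixPosition(1, 3) = 4*1 - 2*1/2 + (3-1-1) = 4.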
+  size_t AliasingMatrixPosition(size_t index1, size_t index2) const {
+    DCHECK(index2 > index1);
+    const size_t number_of_locations = heap_locations_.size();
+    // It's (num_of_locations - 1) + ... + (num_of_locations - index1) + (index2 - index1 - 1).
+    return (number_of_locations * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1));
+  }
+
+  // An additional position is passed in to make sure the calculated position is correct.
+  size_t CheckedAliasingMatrixPosition(size_t index1, size_t index2, size_t position) {
+    size_t calculated_position = AliasingMatrixPosition(index1, index2);
+    DCHECK_EQ(calculated_position, position);
+    return calculated_position;
+  }
+
+  // Compute if two locations may alias to each other.
+  bool ComputeMayAlias(size_t index1, size_t index2) const {
+    HeapLocation* loc1 = heap_locations_[index1];
+    HeapLocation* loc2 = heap_locations_[index2];
+    if (loc1->GetOffset() != loc2->GetOffset()) {
+      // Either two different instance fields, or one is an instance
+      // field and the other is an array element.
+      return false;
+    }
+    if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
+      // Different types.
+      return false;
+    }
+    if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
+      return false;
+    }
+    if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
+      HInstruction* array_index1 = loc1->GetIndex();
+      HInstruction* array_index2 = loc2->GetIndex();
+      DCHECK(array_index1 != nullptr);
+      DCHECK(array_index2 != nullptr);
+      if (array_index1->IsIntConstant() &&
+          array_index2->IsIntConstant() &&
+          array_index1->AsIntConstant()->GetValue() != array_index2->AsIntConstant()->GetValue()) {
+        // Different constant indices do not alias.
+        return false;
+      }
+    }
+    return true;
+  }
+
+  ReferenceInfo* GetOrCreateReferenceInfo(HInstruction* ref) {
+    ReferenceInfo* ref_info = FindReferenceInfoOf(ref);
+    if (ref_info == nullptr) {
+      size_t pos = ref_info_array_.size();
+      ref_info = new (GetGraph()->GetArena()) ReferenceInfo(ref, pos);
+      ref_info_array_.push_back(ref_info);
+    }
+    return ref_info;
+  }
+
+  HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
+                                        size_t offset,
+                                        HInstruction* index,
+                                        int16_t declaring_class_def_index) {
+    HInstruction* original_ref = HuntForOriginalReference(ref);
+    ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
+    size_t heap_location_idx = FindHeapLocationIndex(
+        ref_info, offset, index, declaring_class_def_index);
+    if (heap_location_idx == kHeapLocationNotFound) {
+      HeapLocation* heap_loc = new (GetGraph()->GetArena())
+          HeapLocation(ref_info, offset, index, declaring_class_def_index);
+      heap_locations_.push_back(heap_loc);
+      return heap_loc;
+    }
+    return heap_locations_[heap_location_idx];
+  }
+
+  void VisitFieldAccess(HInstruction* field_access,
+                        HInstruction* ref,
+                        const FieldInfo& field_info,
+                        bool is_store) {
+    if (field_info.IsVolatile()) {
+      has_volatile_ = true;
+    }
+    const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
+    const size_t offset = field_info.GetFieldOffset().SizeValue();
+    HeapLocation* location =
+        GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
+    // A store of a value may be eliminated if all future loads of that value can be eliminated.
+    // For a value that's stored into a singleton field, the value will not be killed due to
+    // aliasing. However, if the value is set in a block that doesn't post-dominate the
+    // definition, the value may be killed due to merging later. Until we have post-dominance
+    // info, we conservatively check that the store is in the same block as the definition.
+    if (is_store &&
+        location->GetReferenceInfo()->IsSingletonAndNotReturned() &&
+        field_access->GetBlock() != ref->GetBlock()) {
+      location->SetMayBecomeUnknown(true);
+    }
+  }
+
+  void VisitArrayAccess(HInstruction* array, HInstruction* index) {
+    GetOrCreateHeapLocation(array,
+                            HeapLocation::kInvalidFieldOffset,
+                            index,
+                            HeapLocation::kDeclaringClassDefIndexForArrays);
+  }
+
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), false);
+  }
+
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), true);
+    has_heap_stores_ = true;
+  }
+
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), false);
+  }
+
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), true);
+    has_heap_stores_ = true;
+  }
+
+  // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
+  // since we cannot accurately track the fields.
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+    VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+  }
+
+  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+    VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+    has_heap_stores_ = true;
+  }
+
+  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+    // Any references appearing in the ref_info_array_ so far cannot alias with new_instance.
+    GetOrCreateReferenceInfo(new_instance);
+  }
+
+  void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
+    may_deoptimize_ = true;
+  }
+
+  void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+    has_monitor_operations_ = true;
+  }
+
+  ArenaVector<ReferenceInfo*> ref_info_array_;   // All references used for heap accesses.
+  ArenaVector<HeapLocation*> heap_locations_;    // All heap locations.
+  ArenaBitVector aliasing_matrix_;    // aliasing info between each pair of locations.
+  bool has_heap_stores_;    // If there are no heap stores, LSE acts as GVN with better
+                            // alias analysis and won't be as effective.
+  bool has_volatile_;       // If there are volatile field accesses.
+  bool has_monitor_operations_;    // If there are monitor operations.
+  bool may_deoptimize_;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
+};
+
+// An unknown heap value. Loads with such a value in the heap location cannot be eliminated.
+static HInstruction* const kUnknownHeapValue =
+    reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-1));
+// Default heap value after an allocation.
+static HInstruction* const kDefaultHeapValue =
+    reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-2));
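+// These sentinels are only ever compared by pointer identity; they are never
+// dereferenced.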
+
+class LSEVisitor : public HGraphVisitor {
+ public:
+  LSEVisitor(HGraph* graph,
+             const HeapLocationCollector& heap_locations_collector,
+             const SideEffectsAnalysis& side_effects)
+      : HGraphVisitor(graph),
+        heap_location_collector_(heap_locations_collector),
+        side_effects_(side_effects),
+        heap_values_for_(graph->GetBlocks().size(),
+                         ArenaVector<HInstruction*>(
+                             heap_locations_collector.GetNumberOfHeapLocations(),
+                             kUnknownHeapValue,
+                             graph->GetArena()->Adapter(kArenaAllocLSE)),
+                         graph->GetArena()->Adapter(kArenaAllocLSE)),
+        removed_instructions_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        substitute_instructions_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+  }
+
+  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+    int block_id = block->GetBlockId();
+    ArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+    // TODO: try to reuse the heap_values array from one predecessor if possible.
+    if (block->IsLoopHeader()) {
+      // We do a single pass in reverse post order. For loops, use the side effects as a hint
+      // to see if the heap values should be killed.
+      if (side_effects_.GetLoopEffects(block).DoesAnyWrite()) {
+        // Leave all values as kUnknownHeapValue.
+      } else {
+        // Inherit the values from pre-header.
+        HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+        ArenaVector<HInstruction*>& pre_header_heap_values =
+            heap_values_for_[pre_header->GetBlockId()];
+        for (size_t i = 0; i < heap_values.size(); i++) {
+          heap_values[i] = pre_header_heap_values[i];
+        }
+      }
+    } else {
+      MergePredecessorValues(block);
+    }
+    HGraphVisitor::VisitBasicBlock(block);
+  }
+
+  // Remove recorded instructions that should be eliminated.
+  void RemoveInstructions() {
+    size_t size = removed_instructions_.size();
+    DCHECK_EQ(size, substitute_instructions_.size());
+    for (size_t i = 0; i < size; i++) {
+      HInstruction* instruction = removed_instructions_[i];
+      DCHECK(instruction != nullptr);
+      HInstruction* substitute = substitute_instructions_[i];
+      if (substitute != nullptr) {
+        // Keep tracing the substitute chain until we reach an instruction that's not removed.
+        HInstruction* sub_sub = FindSubstitute(substitute);
+        while (sub_sub != substitute) {
+          substitute = sub_sub;
+          sub_sub = FindSubstitute(substitute);
+        }
+        instruction->ReplaceWith(substitute);
+      }
+      instruction->GetBlock()->RemoveInstruction(instruction);
+    }
+    // TODO: remove unnecessary allocations.
+    // Eliminate instructions in singleton_new_instances_ that:
+    // - don't have uses,
+    // - don't have finalizers,
+    // - are instantiable and accessible,
+    // - have no/separate clinit check.
+  }
+
+ private:
+  void MergePredecessorValues(HBasicBlock* block) {
+    const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+    if (predecessors.size() == 0) {
+      return;
+    }
+    ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
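+    // A heap value survives the merge only if all predecessors agree on it;
+    // any disagreement degrades it to kUnknownHeapValue.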
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      HInstruction* value = heap_values_for_[predecessors[0]->GetBlockId()][i];
+      if (value != kUnknownHeapValue) {
+        for (size_t j = 1; j < predecessors.size(); j++) {
+          if (heap_values_for_[predecessors[j]->GetBlockId()][i] != value) {
+            value = kUnknownHeapValue;
+            break;
+          }
+        }
+      }
+      heap_values[i] = value;
+    }
+  }
+
+  // `instruction` is being removed. Try to see if the null check on it
+  // can be removed. This can happen if the same value is set in two branches
+  // but not in dominators. Such as:
+  //   int[] a = foo();
+  //   if () {
+  //     a[0] = 2;
+  //   } else {
+  //     a[0] = 2;
+  //   }
+  //   // a[0] can now be replaced with constant 2, and the null check on it can be removed.
+  void TryRemovingNullCheck(HInstruction* instruction) {
+    HInstruction* prev = instruction->GetPrevious();
+    if ((prev != nullptr) && prev->IsNullCheck() && (prev == instruction->InputAt(0))) {
+      // Previous instruction is a null check for this instruction. Remove the null check.
+      prev->ReplaceWith(prev->InputAt(0));
+      prev->GetBlock()->RemoveInstruction(prev);
+    }
+  }
+
+  HInstruction* GetDefaultValue(Primitive::Type type) {
+    switch (type) {
+      case Primitive::kPrimNot:
+        return GetGraph()->GetNullConstant();
+      case Primitive::kPrimBoolean:
+      case Primitive::kPrimByte:
+      case Primitive::kPrimChar:
+      case Primitive::kPrimShort:
+      case Primitive::kPrimInt:
+        return GetGraph()->GetIntConstant(0);
+      case Primitive::kPrimLong:
+        return GetGraph()->GetLongConstant(0);
+      case Primitive::kPrimFloat:
+        return GetGraph()->GetFloatConstant(0);
+      case Primitive::kPrimDouble:
+        return GetGraph()->GetDoubleConstant(0);
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  void VisitGetLocation(HInstruction* instruction,
+                        HInstruction* ref,
+                        size_t offset,
+                        HInstruction* index,
+                        int16_t declaring_class_def_index) {
+    HInstruction* original_ref = HuntForOriginalReference(ref);
+    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
+    size_t idx = heap_location_collector_.FindHeapLocationIndex(
+        ref_info, offset, index, declaring_class_def_index);
+    DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[instruction->GetBlock()->GetBlockId()];
+    HInstruction* heap_value = heap_values[idx];
+    if (heap_value == kDefaultHeapValue) {
+      HInstruction* constant = GetDefaultValue(instruction->GetType());
+      removed_instructions_.push_back(instruction);
+      substitute_instructions_.push_back(constant);
+      heap_values[idx] = constant;
+      return;
+    }
+    if ((heap_value != kUnknownHeapValue) &&
+        // Keep the load due to possible I/F, J/D array aliasing.
+        // See b/22538329 for details.
+        (heap_value->GetType() == instruction->GetType())) {
+      removed_instructions_.push_back(instruction);
+      substitute_instructions_.push_back(heap_value);
+      TryRemovingNullCheck(instruction);
+      return;
+    }
+
+    if (heap_value == kUnknownHeapValue) {
+      // Put the load as the value into the HeapLocation.
+      // This acts like GVN but with better aliasing analysis.
+      heap_values[idx] = instruction;
+    }
+  }
+
+  bool Equal(HInstruction* heap_value, HInstruction* value) {
+    if (heap_value == value) {
+      return true;
+    }
+    if (heap_value == kDefaultHeapValue && GetDefaultValue(value->GetType()) == value) {
+      return true;
+    }
+    return false;
+  }
+
+  void VisitSetLocation(HInstruction* instruction,
+                        HInstruction* ref,
+                        size_t offset,
+                        HInstruction* index,
+                        int16_t declaring_class_def_index,
+                        HInstruction* value) {
+    HInstruction* original_ref = HuntForOriginalReference(ref);
+    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
+    size_t idx = heap_location_collector_.FindHeapLocationIndex(
+        ref_info, offset, index, declaring_class_def_index);
+    DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[instruction->GetBlock()->GetBlockId()];
+    HInstruction* heap_value = heap_values[idx];
+    bool redundant_store = false;
+    if (Equal(heap_value, value)) {
+      // Store into the heap location with the same value.
+      redundant_store = true;
+    } else if (index != nullptr) {
+      // For array elements, don't eliminate stores since they can easily be
+      // aliased with a non-constant index.
+    } else if (!heap_location_collector_.MayDeoptimize() &&
+               ref_info->IsSingletonAndNotReturned() &&
+               !heap_location_collector_.GetHeapLocation(idx)->MayBecomeUnknown()) {
+      // Store into a field of a singleton that's not returned, whose value cannot
+      // be killed due to a merge. It's redundant since future loads will get the
+      // value set by this instruction.
+      Primitive::Type type = Primitive::kPrimVoid;
+      if (instruction->IsInstanceFieldSet()) {
+        type = instruction->AsInstanceFieldSet()->GetFieldInfo().GetFieldType();
+      } else if (instruction->IsStaticFieldSet()) {
+        type = instruction->AsStaticFieldSet()->GetFieldInfo().GetFieldType();
+      } else {
+        DCHECK(false) << "Must be an instance/static field set instruction.";
+      }
+      if (value->GetType() != type) {
+        // I/F, J/D aliasing should not happen for fields.
+        DCHECK(Primitive::IsIntegralType(value->GetType()));
+        DCHECK(!Primitive::Is64BitType(value->GetType()));
+        DCHECK(Primitive::IsIntegralType(type));
+        DCHECK(!Primitive::Is64BitType(type));
+        // Keep the store since the corresponding load isn't eliminated due to different types.
+        // TODO: handle the different int types so that we can eliminate this store.
+        redundant_store = false;
+      } else {
+        redundant_store = true;
+      }
+      // TODO: eliminate the store if the singleton object is not finalizable.
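+      // Note: the assignment below currently disables singleton-store elimination
+      // until finalizers are handled.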
+      redundant_store = false;
+    }
+    if (redundant_store) {
+      removed_instructions_.push_back(instruction);
+      substitute_instructions_.push_back(nullptr);
+      TryRemovingNullCheck(instruction);
+    }
+
+    heap_values[idx] = value;
+    // This store may kill values in other heap locations due to aliasing.
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      if (heap_values[i] == value) {
+        // Same value should be kept even if aliasing happens.
+        continue;
+      }
+      if (heap_values[i] == kUnknownHeapValue) {
+        // Value is already unknown, no need for aliasing check.
+        continue;
+      }
+      if (heap_location_collector_.MayAlias(i, idx)) {
+        // Kill heap locations that may alias.
+        heap_values[i] = kUnknownHeapValue;
+      }
+    }
+  }
+
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+    HInstruction* obj = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    VisitGetLocation(instruction, obj, offset, nullptr, declaring_class_def_index);
+  }
+
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+    HInstruction* obj = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    HInstruction* value = instruction->InputAt(1);
+    VisitSetLocation(instruction, obj, offset, nullptr, declaring_class_def_index, value);
+  }
+
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+    HInstruction* cls = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    VisitGetLocation(instruction, cls, offset, nullptr, declaring_class_def_index);
+  }
+
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+    HInstruction* cls = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    HInstruction* value = instruction->InputAt(1);
+    VisitSetLocation(instruction, cls, offset, nullptr, declaring_class_def_index, value);
+  }
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+    HInstruction* array = instruction->InputAt(0);
+    HInstruction* index = instruction->InputAt(1);
+    VisitGetLocation(instruction,
+                     array,
+                     HeapLocation::kInvalidFieldOffset,
+                     index,
+                     HeapLocation::kDeclaringClassDefIndexForArrays);
+  }
+
+  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+    HInstruction* array = instruction->InputAt(0);
+    HInstruction* index = instruction->InputAt(1);
+    HInstruction* value = instruction->InputAt(2);
+    VisitSetLocation(instruction,
+                     array,
+                     HeapLocation::kInvalidFieldOffset,
+                     index,
+                     HeapLocation::kDeclaringClassDefIndexForArrays,
+                     value);
+  }
+
+  void HandleInvoke(HInstruction* invoke) {
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[invoke->GetBlock()->GetBlockId()];
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+      if (ref_info->IsSingleton()) {
+        // Singleton references cannot be seen by the callee.
+      } else {
+        heap_values[i] = kUnknownHeapValue;
+      }
+    }
+  }
+
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+    HandleInvoke(clinit);
+  }
+
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
+    if (ref_info == nullptr) {
+      // new_instance isn't used for field accesses. No need to process it.
+      return;
+    }
+    if (!heap_location_collector_.MayDeoptimize() &&
+        ref_info->IsSingletonAndNotReturned()) {
+      // The allocation might be eliminated.
+      singleton_new_instances_.push_back(new_instance);
+    }
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[new_instance->GetBlock()->GetBlockId()];
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      HInstruction* ref =
+          heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo()->GetReference();
+      size_t offset = heap_location_collector_.GetHeapLocation(i)->GetOffset();
+      if (ref == new_instance && offset >= mirror::kObjectHeaderSize) {
+        // Instance fields except the header fields are set to default heap values.
+        heap_values[i] = kDefaultHeapValue;
+      }
+    }
+  }
+
+  // Find an instruction's substitute if it should be removed.
+  // Return the same instruction if it should not be removed.
+  HInstruction* FindSubstitute(HInstruction* instruction) {
+    size_t size = removed_instructions_.size();
+    for (size_t i = 0; i < size; i++) {
+      if (removed_instructions_[i] == instruction) {
+        return substitute_instructions_[i];
+      }
+    }
+    return instruction;
+  }
+
+  const HeapLocationCollector& heap_location_collector_;
+  const SideEffectsAnalysis& side_effects_;
+
+  // One array of heap values for each block.
+  ArenaVector<ArenaVector<HInstruction*>> heap_values_for_;
+
+  // We record the instructions that should be eliminated but may be
+  // used by heap locations. They'll be removed in the end.
+  ArenaVector<HInstruction*> removed_instructions_;
+  ArenaVector<HInstruction*> substitute_instructions_;
+  ArenaVector<HInstruction*> singleton_new_instances_;
+
+  DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
+};
+
+void LoadStoreElimination::Run() {
+  if (graph_->IsDebuggable()) {
+    // The debugger may set heap values or trigger deoptimization of callers.
+    // Skip this optimization.
+    return;
+  }
+  HeapLocationCollector heap_location_collector(graph_);
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    heap_location_collector.VisitBasicBlock(it.Current());
+  }
+  if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
+    // Bail out if there are too many heap locations to deal with.
+    return;
+  }
+  if (!heap_location_collector.HasHeapStores()) {
+    // Without heap stores, this pass would act mostly as GVN on heap accesses.
+    return;
+  }
+  if (heap_location_collector.HasVolatile() || heap_location_collector.HasMonitorOps()) {
+    // For now, don't do load/store elimination if the method has volatile field
+    // accesses or monitor operations.
+    // TODO: handle volatile fields and monitor operations correctly.
+    return;
+  }
+  heap_location_collector.BuildAliasingMatrix();
+  LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    lse_visitor.VisitBasicBlock(it.Current());
+  }
+  lse_visitor.RemoveInstructions();
+}
+
+}  // namespace art
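
Aside (reviewer note, not part of the patch): a minimal standalone C++ sketch of the idea the pass implements, assuming nothing from ART. One value is tracked per heap location; a load from a location whose value is already known is redundant and can be replaced by that value.

    #include <cstdio>
    #include <vector>

    int main() {
      constexpr int kUnknown = -1;
      // One tracked value per heap location, like heap_values_for_ above.
      std::vector<int> heap_values(3, kUnknown);

      heap_values[1] = 42;          // A store sets the tracked value of location 1.
      int loaded = heap_values[1];  // A later load of location 1...
      if (loaded != kUnknown) {
        // ...is redundant: the pass substitutes the known value for the load.
        std::printf("load eliminated, value = %d\n", loaded);
      }
      return 0;
    }
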
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
new file mode 100644
index 0000000..1d9e5c8
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
+#define ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class SideEffectsAnalysis;
+
+class LoadStoreElimination : public HOptimization {
+ public:
+  LoadStoreElimination(HGraph* graph, const SideEffectsAnalysis& side_effects)
+      : HOptimization(graph, kLoadStoreEliminationPassName),
+        side_effects_(side_effects) {}
+
+  void Run() OVERRIDE;
+
+  static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
+
+ private:
+  const SideEffectsAnalysis& side_effects_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadStoreElimination);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 98c3096..3480265 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1911,8 +1911,8 @@
   return !opt.GetDoesNotNeedEnvironment();
 }
 
-bool HInvokeStaticOrDirect::NeedsDexCache() const {
-  if (IsRecursive() || IsStringInit()) {
+bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
+  if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
     return false;
   }
   if (!IsIntrinsic()) {
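
Aside (reviewer note, not part of the patch): the rewritten check keys off the method load kind instead of enumerating the kinds that don't need the dex cache. A simplified standalone analogue follows; the real method additionally special-cases intrinsics, and the enum here mirrors only a subset of HInvokeStaticOrDirect::MethodLoadKind.

    #include <cstdio>

    enum class MethodLoadKind { kRecursive, kStringInit, kDirectAddress, kDexCacheViaMethod };

    bool NeedsDexCacheOfDeclaringClass(MethodLoadKind kind) {
      // Only a kDexCacheViaMethod dispatch loads the method through the
      // declaring class's dex cache; every other load kind can skip it.
      return kind == MethodLoadKind::kDexCacheViaMethod;
    }

    int main() {
      std::printf("%d %d\n",
                  NeedsDexCacheOfDeclaringClass(MethodLoadKind::kRecursive),           // 0
                  NeedsDexCacheOfDeclaringClass(MethodLoadKind::kDexCacheViaMethod));  // 1
      return 0;
    }
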
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 0db1ba2..6028d4b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -21,6 +21,7 @@
 #include <array>
 #include <type_traits>
 
+#include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/stl_util.h"
@@ -34,7 +35,6 @@
 #include "mirror/class.h"
 #include "offsets.h"
 #include "primitive.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
@@ -75,6 +75,7 @@
 static constexpr uint64_t kMaxLongShiftValue = 0x3f;
 
 static constexpr uint32_t kUnknownFieldIndex = static_cast<uint32_t>(-1);
+static constexpr uint16_t kUnknownClassDefIndex = static_cast<uint16_t>(-1);
 
 static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
 
@@ -1079,14 +1080,25 @@
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
 
+#ifndef ART_ENABLE_CODEGEN_arm64
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                          \
+  M(Arm64IntermediateAddress, Instruction)
+#endif
+
+#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
 
+#ifndef ART_ENABLE_CODEGEN_x86
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#else
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                            \
   M(X86ComputeBaseMethodAddress, Instruction)                           \
   M(X86LoadFromConstantTable, Instruction)                              \
   M(X86PackedSwitch, Instruction)
+#endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
 
@@ -1094,6 +1106,7 @@
   FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)                                  \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                                \
+  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)                                 \
   FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                                  \
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1370,6 +1383,10 @@
     return SideEffects(flags_ & ~other.flags_);
   }
 
+  void Add(SideEffects other) {
+    flags_ |= other.flags_;
+  }
+
   bool Includes(SideEffects other) const {
     return (other.flags_ & flags_) == other.flags_;
   }
@@ -1943,6 +1960,7 @@
   }
 
   SideEffects GetSideEffects() const { return side_effects_; }
+  void AddSideEffects(SideEffects other) { side_effects_.Add(other); }
 
   size_t GetLifetimePosition() const { return lifetime_position_; }
   void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
@@ -1962,7 +1980,9 @@
     return NeedsEnvironment() || IsLoadClass() || IsLoadString();
   }
 
-  virtual bool NeedsDexCache() const { return false; }
+  // Returns whether code generation for this instruction will require access to the
+  // dex cache of the current method's declaring class, reached via the current method.
+  virtual bool NeedsDexCacheOfDeclaringClass() const { return false; }
 
   // Does this instruction have any use in an environment before
   // control flow hits 'other'?
@@ -2012,7 +2032,7 @@
   // order of blocks where this instruction's live interval start.
   size_t lifetime_position_;
 
-  const SideEffects side_effects_;
+  SideEffects side_effects_;
 
   // TODO: for primitive types this should be marked as invalid.
   ReferenceTypeInfo reference_type_info_;
@@ -3350,15 +3370,15 @@
   };
 
   struct DispatchInfo {
-    const MethodLoadKind method_load_kind;
-    const CodePtrLocation code_ptr_location;
+    MethodLoadKind method_load_kind;
+    CodePtrLocation code_ptr_location;
     // The method load data holds
     //   - thread entrypoint offset for kStringInit method if this is a string init invoke.
     //     Note that there are multiple string init methods, each having its own offset.
     //   - the method address for kDirectAddress
     //   - the dex cache arrays offset for kDexCachePcRel.
-    const uint64_t method_load_data;
-    const uint64_t direct_code_ptr;
+    uint64_t method_load_data;
+    uint64_t direct_code_ptr;
   };
 
   HInvokeStaticOrDirect(ArenaAllocator* arena,
@@ -3387,6 +3407,10 @@
         target_method_(target_method),
         dispatch_info_(dispatch_info) {}
 
+  void SetDispatchInfo(const DispatchInfo& dispatch_info) {
+    dispatch_info_ = dispatch_info;
+  }
+
   bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
     // We access the method via the dex cache so we can't do an implicit null check.
     // TODO: for intrinsics we can generate implicit null checks.
@@ -3401,11 +3425,13 @@
   MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
   CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
   bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
-  bool NeedsDexCache() const OVERRIDE;
+  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
   bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
   uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
   bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
-  bool HasPcRelDexCache() const { return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative; }
+  bool HasPcRelDexCache() const {
+    return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
+  }
   bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; }
   MethodReference GetTargetMethod() const { return target_method_; }
 
@@ -4165,7 +4191,7 @@
   Primitive::Type GetInputType() const { return GetInput()->GetType(); }
   Primitive::Type GetResultType() const { return GetType(); }
 
-  // Required by the x86 and ARM code generators when producing calls
+  // Required by the x86, ARM, MIPS and MIPS64 code generators when producing calls
   // to the runtime.
 
   bool CanBeMoved() const OVERRIDE { return true; }
@@ -4316,18 +4342,21 @@
             Primitive::Type field_type,
             bool is_volatile,
             uint32_t index,
+            uint16_t declaring_class_def_index,
             const DexFile& dex_file,
             Handle<mirror::DexCache> dex_cache)
       : field_offset_(field_offset),
         field_type_(field_type),
         is_volatile_(is_volatile),
         index_(index),
+        declaring_class_def_index_(declaring_class_def_index),
         dex_file_(dex_file),
         dex_cache_(dex_cache) {}
 
   MemberOffset GetFieldOffset() const { return field_offset_; }
   Primitive::Type GetFieldType() const { return field_type_; }
   uint32_t GetFieldIndex() const { return index_; }
+  uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
   bool IsVolatile() const { return is_volatile_; }
   Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
@@ -4337,6 +4366,7 @@
   const Primitive::Type field_type_;
   const bool is_volatile_;
   const uint32_t index_;
+  const uint16_t declaring_class_def_index_;
   const DexFile& dex_file_;
   const Handle<mirror::DexCache> dex_cache_;
 };
@@ -4348,13 +4378,20 @@
                     MemberOffset field_offset,
                     bool is_volatile,
                     uint32_t field_idx,
+                    uint16_t declaring_class_def_index,
                     const DexFile& dex_file,
                     Handle<mirror::DexCache> dex_cache,
                     uint32_t dex_pc)
-      : HExpression(
-            field_type,
-            SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
+      : HExpression(field_type,
+                    SideEffects::FieldReadOfType(field_type, is_volatile),
+                    dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache) {
     SetRawInputAt(0, value);
   }
 
@@ -4394,12 +4431,19 @@
                     MemberOffset field_offset,
                     bool is_volatile,
                     uint32_t field_idx,
+                    uint16_t declaring_class_def_index,
                     const DexFile& dex_file,
                     Handle<mirror::DexCache> dex_cache,
                     uint32_t dex_pc)
-      : HTemplateInstruction(
-          SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
+      : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+                             dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache),
         value_can_be_null_(true) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, value);
@@ -4431,8 +4475,11 @@
   HArrayGet(HInstruction* array,
             HInstruction* index,
             Primitive::Type type,
-            uint32_t dex_pc)
-      : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+            uint32_t dex_pc,
+            SideEffects additional_side_effects = SideEffects::None())
+      : HExpression(type,
+                    SideEffects::ArrayReadOfType(type).Union(additional_side_effects),
+                    dex_pc) {
     SetRawInputAt(0, array);
     SetRawInputAt(1, index);
   }
@@ -4467,10 +4514,13 @@
             HInstruction* index,
             HInstruction* value,
             Primitive::Type expected_component_type,
-            uint32_t dex_pc)
+            uint32_t dex_pc,
+            SideEffects additional_side_effects = SideEffects::None())
       : HTemplateInstruction(
             SideEffects::ArrayWriteOfType(expected_component_type).Union(
-                SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
+                SideEffectsForArchRuntimeCalls(value->GetType())).Union(
+                    additional_side_effects),
+            dex_pc),
         expected_component_type_(expected_component_type),
         needs_type_check_(value->GetType() == Primitive::kPrimNot),
         value_can_be_null_(true),
@@ -4525,6 +4575,10 @@
         : expected_component_type_;
   }
 
+  Primitive::Type GetRawExpectedComponentType() const {
+    return expected_component_type_;
+  }
+
   static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
     return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None();
   }
@@ -4583,6 +4637,7 @@
 
   bool CanThrow() const OVERRIDE { return true; }
 
+  HInstruction* GetIndex() const { return InputAt(0); }
 
   DECLARE_INSTRUCTION(BoundsCheck);
 
@@ -4718,7 +4773,7 @@
 
   const DexFile& GetDexFile() { return dex_file_; }
 
-  bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
+  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !is_referrers_class_; }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
     return SideEffects::CanTriggerGC();
@@ -4760,7 +4815,7 @@
 
   // TODO: Can we deopt or debug when we resolve a string?
   bool NeedsEnvironment() const OVERRIDE { return false; }
-  bool NeedsDexCache() const OVERRIDE { return true; }
+  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return true; }
   bool CanBeNull() const OVERRIDE { return false; }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
@@ -4814,13 +4869,20 @@
                   MemberOffset field_offset,
                   bool is_volatile,
                   uint32_t field_idx,
+                  uint16_t declaring_class_def_index,
                   const DexFile& dex_file,
                   Handle<mirror::DexCache> dex_cache,
                   uint32_t dex_pc)
-      : HExpression(
-            field_type,
-            SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
+      : HExpression(field_type,
+                    SideEffects::FieldReadOfType(field_type, is_volatile),
+                    dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache) {
     SetRawInputAt(0, cls);
   }
 
@@ -4857,12 +4919,19 @@
                   MemberOffset field_offset,
                   bool is_volatile,
                   uint32_t field_idx,
+                  uint16_t declaring_class_def_index,
                   const DexFile& dex_file,
                   Handle<mirror::DexCache> dex_cache,
                   uint32_t dex_pc)
-      : HTemplateInstruction(
-          SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
+      : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+                             dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache),
         value_can_be_null_(true) {
     SetRawInputAt(0, cls);
     SetRawInputAt(1, value);
@@ -5396,6 +5465,9 @@
 
 }  // namespace art
 
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "nodes_arm64.h"
+#endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "nodes_x86.h"
 #endif
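
Aside (reviewer note, not part of the patch): the new SideEffects::Add() and HInstruction::AddSideEffects() OR flag bits in place, which is why side_effects_ loses its const qualifier above. A standalone sketch of the flag semantics, with made-up bit values:

    #include <cstdint>
    #include <cstdio>

    struct SideEffects {
      explicit SideEffects(uint64_t f) : flags(f) {}
      void Add(SideEffects other) { flags |= other.flags; }  // in-place union
      bool Includes(SideEffects other) const { return (other.flags & flags) == other.flags; }
      uint64_t flags;
    };

    int main() {
      SideEffects effects(0x1), extra(0x2), both(0x3);
      effects.Add(extra);                           // instruction picks up extra effects
      std::printf("%d\n", effects.Includes(both));  // prints 1: both bits now set
      return 0;
    }
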
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
new file mode 100644
index 0000000..885d3a2
--- /dev/null
+++ b/compiler/optimizing/nodes_arm64.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+
+namespace art {
+
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// resulting pointer cannot be handled by the GC, so extra care is taken to ensure that this
+// value is never live across anything that can trigger a GC.
+class HArm64IntermediateAddress : public HExpression<2> {
+ public:
+  HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+      : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+    SetRawInputAt(0, base_address);
+    SetRawInputAt(1, offset);
+  }
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+  HInstruction* GetBaseAddress() const { return InputAt(0); }
+  HInstruction* GetOffset() const { return InputAt(1); }
+
+  DECLARE_INSTRUCTION(Arm64IntermediateAddress);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
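
Aside (reviewer note, not part of the patch): the point of HArm64IntermediateAddress is to factor `base + data_offset` out of several accesses to the same array, so each access only adds the scaled index. A plain-integer sketch; the 16-byte data offset and int32 element size are made up for illustration.

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t memory[64] = {};
      memory[16 + 4 * 2] = 7;  // element 2 of an int32 "array" whose data starts at offset 16
      uintptr_t base = reinterpret_cast<uintptr_t>(memory);
      uintptr_t intermediate = base + 16;  // computed once, reusable by every access
      uint8_t* elem2 = reinterpret_cast<uint8_t*>(intermediate + 4 * 2);
      std::printf("%d\n", *elem2);  // prints 7
      return 0;
    }
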
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5404e56..6632f95 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -58,10 +58,12 @@
 #include "intrinsics.h"
 #include "licm.h"
 #include "jni/quick/jni_compiler.h"
+#include "load_store_elimination.h"
 #include "nodes.h"
 #include "prepare_for_register_allocation.h"
 #include "reference_type_propagation.h"
 #include "register_allocator.h"
+#include "sharpening.h"
 #include "side_effects_analysis.h"
 #include "ssa_builder.h"
 #include "ssa_phi_elimination.h"
@@ -361,6 +363,7 @@
   return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
       || instruction_set == kArm64
       || (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
+      || instruction_set == kMips
       || instruction_set == kMips64
       || instruction_set == kX86
       || instruction_set == kX86_64;
@@ -376,6 +379,7 @@
 }
 
 static void MaybeRunInliner(HGraph* graph,
+                            CodeGenerator* codegen,
                             CompilerDriver* driver,
                             OptimizingCompilerStats* stats,
                             const DexCompilationUnit& dex_compilation_unit,
@@ -390,7 +394,7 @@
 
   ArenaAllocator* arena = graph->GetArena();
   HInliner* inliner = new (arena) HInliner(
-    graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+    graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
   ReferenceTypePropagation* type_propagation =
     new (arena) ReferenceTypePropagation(graph, handles,
         "reference_type_propagation_after_inlining");
@@ -443,6 +447,7 @@
 }
 
 static void RunOptimizations(HGraph* graph,
+                             CodeGenerator* codegen,
                              CompilerDriver* driver,
                              OptimizingCompilerStats* stats,
                              const DexCompilationUnit& dex_compilation_unit,
@@ -460,10 +465,12 @@
   SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
   GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
   LICM* licm = new (arena) LICM(graph, *side_effects);
+  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
   HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
   BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
   ReferenceTypePropagation* type_propagation =
       new (arena) ReferenceTypePropagation(graph, handles);
+  HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
   InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
       graph, stats, "instruction_simplifier_after_types");
   InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
@@ -478,6 +485,7 @@
     fold1,
     simplify1,
     type_propagation,
+    sharpening,
     dce1,
     simplify2
   };
@@ -499,7 +507,7 @@
 
     RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
   } else {
-    MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+    MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
 
     HOptimization* optimizations2[] = {
       // BooleanSimplifier depends on the InstructionSimplifier removing
@@ -512,6 +520,7 @@
       induction,
       bce,
       simplify3,
+      lse,
       dce2,
       // The codegen has a few assumptions that only the instruction simplifier
       // can satisfy. For example, the code generator does not expect to see a
@@ -573,8 +582,13 @@
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScopeCollection handles(soa.Self());
   soa.Self()->TransitionFromRunnableToSuspended(kNative);
-  RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
-                   dex_compilation_unit, pass_observer, &handles);
+  RunOptimizations(graph,
+                   codegen,
+                   compiler_driver,
+                   compilation_stats_.get(),
+                   dex_compilation_unit,
+                   pass_observer,
+                   &handles);
 
   AllocateRegisters(graph, codegen, pass_observer);
 
@@ -603,7 +617,7 @@
       codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
       codegen->GetCoreSpillMask(),
       codegen->GetFpuSpillMask(),
-      &src_mapping_table,
+      ArrayRef<const SrcMapElem>(src_mapping_table),
       ArrayRef<const uint8_t>(),  // mapping_table.
       ArrayRef<const uint8_t>(stack_map),
       ArrayRef<const uint8_t>(),  // native_gc_map.
@@ -648,7 +662,7 @@
       codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
       codegen->GetCoreSpillMask(),
       codegen->GetFpuSpillMask(),
-      &src_mapping_table,
+      ArrayRef<const SrcMapElem>(src_mapping_table),
       AlignVectorSize(mapping_table),
       AlignVectorSize(vmap_table),
       AlignVectorSize(gc_map),
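
Aside (reviewer note, not part of the patch): RunOptimizations executes passes strictly in array order, which is what places lse after bounds check elimination and before the final dead code elimination. A standalone sketch of the pattern, with abbreviated pass names:

    #include <cstddef>
    #include <cstdio>

    struct Pass { const char* name; };

    // Passes run strictly in array order, as in optimizations2[] above.
    void RunOptimizations(const Pass* passes, size_t count) {
      for (size_t i = 0; i != count; ++i) {
        std::printf("running %s\n", passes[i].name);
      }
    }

    int main() {
      const Pass passes[] = {{"bce"}, {"instruction_simplifier"}, {"lse"}, {"dce"}};
      RunOptimizations(passes, sizeof(passes) / sizeof(passes[0]));
      return 0;
    }
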
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 26a05da..659da06 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -373,12 +373,18 @@
   if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) {
     // Calls to String.<init> are replaced with a StringFactory.
     if (kIsDebugBuild) {
-      ScopedObjectAccess soa(Thread::Current());
+      HInvoke* invoke = instr->AsInvoke();
       ClassLinker* cl = Runtime::Current()->GetClassLinker();
-      mirror::DexCache* dex_cache = cl->FindDexCache(
-          soa.Self(), instr->AsInvoke()->GetDexFile(), false);
-      ArtMethod* method = dex_cache->GetResolvedMethod(
-          instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize());
+      ScopedObjectAccess soa(Thread::Current());
+      StackHandleScope<2> hs(soa.Self());
+      Handle<mirror::DexCache> dex_cache(
+          hs.NewHandle(cl->FindDexCache(soa.Self(), invoke->GetDexFile(), false)));
+      // Use a null loader. We should probably use the compiling method's class loader,
+      // but then we would need to pass it to RTPVisitor just for this debug check. Since
+      // the method is from the String class, the null loader is good enough.
+      Handle<mirror::ClassLoader> loader;
+      ArtMethod* method = cl->ResolveMethod(
+          invoke->GetDexFile(), invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
       DCHECK(method != nullptr);
       mirror::Class* declaring_class = method->GetDeclaringClass();
       DCHECK(declaring_class != nullptr);
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6fc7772..ef22c81 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -85,12 +85,13 @@
 
 bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
                                                 InstructionSet instruction_set) {
-  return instruction_set == kArm64
-      || instruction_set == kX86_64
+  return instruction_set == kArm
+      || instruction_set == kArm64
+      || instruction_set == kMips
       || instruction_set == kMips64
-      || instruction_set == kArm
+      || instruction_set == kThumb2
       || instruction_set == kX86
-      || instruction_set == kThumb2;
+      || instruction_set == kX86_64;
 }
 
 static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index ed5419e..080f970 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -488,6 +488,7 @@
                                                          MemberOffset(22),
                                                          false,
                                                          kUnknownFieldIndex,
+                                                         kUnknownClassDefIndex,
                                                          graph->GetDexFile(),
                                                          dex_cache,
                                                          0);
@@ -514,6 +515,7 @@
                                               MemberOffset(42),
                                               false,
                                               kUnknownFieldIndex,
+                                              kUnknownClassDefIndex,
                                               graph->GetDexFile(),
                                               dex_cache,
                                               0);
@@ -522,6 +524,7 @@
                                             MemberOffset(42),
                                             false,
                                             kUnknownFieldIndex,
+                                            kUnknownClassDefIndex,
                                             graph->GetDexFile(),
                                             dex_cache,
                                             0);
@@ -638,6 +641,7 @@
                                              MemberOffset(42),
                                              false,
                                              kUnknownFieldIndex,
+                                             kUnknownClassDefIndex,
                                              graph->GetDexFile(),
                                              dex_cache,
                                              0);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
new file mode 100644
index 0000000..6494964
--- /dev/null
+++ b/compiler/optimizing/sharpening.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sharpening.h"
+
+#include "code_generator.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+#include "driver/compiler_driver.h"
+#include "nodes.h"
+
+namespace art {
+
+void HSharpening::Run() {
+  // We don't care about the order of the blocks here.
+  for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      if (instruction->IsInvokeStaticOrDirect()) {
+        ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
+      }
+      // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
+      //       here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
+      //       because we know the type better when inlining.
+      // TODO: HLoadClass, HLoadString - select PC relative dex cache array access if
+      //       available.
+    }
+  }
+}
+
+void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+  if (invoke->IsStringInit()) {
+    // Not using the dex cache arrays. But we could still try to use a better dispatch...
+    // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
+    return;
+  }
+
+  // TODO: Avoid CompilerDriver.
+  InvokeType invoke_type = invoke->GetOriginalInvokeType();
+  MethodReference target_method(&graph_->GetDexFile(), invoke->GetDexMethodIndex());
+  int vtable_idx;
+  uintptr_t direct_code, direct_method;
+  bool success = compiler_driver_->ComputeInvokeInfo(
+      &compilation_unit_,
+      invoke->GetDexPc(),
+      false /* update_stats: already updated in builder */,
+      true /* enable_devirtualization */,
+      &invoke_type,
+      &target_method,
+      &vtable_idx,
+      &direct_code,
+      &direct_method);
+  DCHECK(success);
+  DCHECK_EQ(invoke_type, invoke->GetInvokeType());
+  DCHECK_EQ(target_method.dex_file, invoke->GetTargetMethod().dex_file);
+  DCHECK_EQ(target_method.dex_method_index, invoke->GetTargetMethod().dex_method_index);
+
+  HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
+  HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
+  uint64_t method_load_data = 0u;
+  uint64_t direct_code_ptr = 0u;
+
+  HGraph* outer_graph = codegen_->GetGraph();
+  if (target_method.dex_file == &outer_graph->GetDexFile() &&
+      target_method.dex_method_index == outer_graph->GetMethodIdx()) {
+    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
+  } else {
+    if (direct_method != 0u) {  // Should we use a direct pointer to the method?
+      if (direct_method != static_cast<uintptr_t>(-1)) {  // Is the method pointer known now?
+        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
+        method_load_data = direct_method;
+      } else {  // The direct pointer will be known at link time.
+        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
+      }
+    } else {  // Use dex cache.
+      DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
+      DexCacheArraysLayout layout =
+          compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
+      if (layout.Valid()) {  // Can we use PC-relative access to the dex cache arrays?
+        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
+        method_load_data = layout.MethodOffset(target_method.dex_method_index);
+      } else {  // We must go through the ArtMethod's pointer to resolved methods.
+        method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
+      }
+    }
+    if (direct_code != 0u) {  // Should we use a direct pointer to the code?
+      if (direct_code != static_cast<uintptr_t>(-1)) {  // Is the code pointer known now?
+        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
+        direct_code_ptr = direct_code;
+      } else if (compiler_driver_->IsImage() ||
+          target_method.dex_file == &graph_->GetDexFile()) {
+        // Use PC-relative calls for invokes within a multi-dex oat file.
+        // TODO: Recognize when the target dex file is within the current oat file for
+        // app compilation. At the moment we recognize only the boot image as multi-dex.
+        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
+      } else {  // The direct pointer will be known at link time.
+        // NOTE: This is used for app->boot calls when compiling an app against
+        // a relocatable but not yet relocated image.
+        code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
+      }
+    } else {  // We must use the code pointer from the ArtMethod.
+      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    }
+  }
+
+  if (graph_->IsDebuggable()) {
+    // For debuggable apps, always use the code pointer from ArtMethod
+    // so that we don't circumvent instrumentation stubs if installed.
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+  }
+
+  HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
+      method_load_kind, code_ptr_location, method_load_data, direct_code_ptr
+  };
+  HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+      codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info,
+                                                         invoke->GetTargetMethod());
+  invoke->SetDispatchInfo(dispatch_info);
+}
+
+}  // namespace art
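
Aside (reviewer note, not part of the patch): the heart of the dispatch selection above is the three-way split on direct_method: a usable pointer, the -1 "known only at link time" sentinel, or zero meaning "go through the dex cache". A simplified standalone analogue; the real code additionally prefers PC-relative dex cache access when the arrays layout is valid.

    #include <cstdint>
    #include <cstdio>

    enum class MethodLoadKind { kDirectAddress, kDirectAddressWithFixup, kDexCacheViaMethod };

    MethodLoadKind Select(uintptr_t direct_method) {
      if (direct_method != 0u) {  // Should we use a direct pointer to the method?
        return (direct_method != static_cast<uintptr_t>(-1))
            ? MethodLoadKind::kDirectAddress            // pointer known now
            : MethodLoadKind::kDirectAddressWithFixup;  // known at link time
      }
      return MethodLoadKind::kDexCacheViaMethod;  // fall back to the dex cache
    }

    int main() {
      std::printf("%d %d %d\n",
                  static_cast<int>(Select(0x1000)),                      // 0
                  static_cast<int>(Select(static_cast<uintptr_t>(-1))),  // 1
                  static_cast<int>(Select(0)));                          // 2
      return 0;
    }
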
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
new file mode 100644
index 0000000..adae700
--- /dev/null
+++ b/compiler/optimizing/sharpening.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
+#define ART_COMPILER_OPTIMIZING_SHARPENING_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HInvokeStaticOrDirect;
+
+// Optimization that tries to improve the way we dispatch methods and access types,
+// fields, etc. Besides actual method sharpening based on receiver type (for example
+// virtual->direct), this includes selecting the best available dispatch for
+// invoke-static/-direct based on code generator support.
+class HSharpening : public HOptimization {
+ public:
+  HSharpening(HGraph* graph,
+              CodeGenerator* codegen,
+              const DexCompilationUnit& compilation_unit,
+              CompilerDriver* compiler_driver)
+      : HOptimization(graph, kSharpeningPassName),
+        codegen_(codegen),
+        compilation_unit_(compilation_unit),
+        compiler_driver_(compiler_driver) { }
+
+  void Run() OVERRIDE;
+
+  static constexpr const char* kSharpeningPassName = "sharpening";
+
+ private:
+  void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
+
+  CodeGenerator* codegen_;
+  const DexCompilationUnit& compilation_unit_;
+  CompilerDriver* compiler_driver_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_SHARPENING_H_
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index c4a3b28..560502f 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -15,8 +15,9 @@
  */
 
 #include "stack_map.h"
+
+#include "base/arena_bit_vector.h"
 #include "stack_map_stream.h"
-#include "utils/arena_bit_vector.h"
 
 #include "gtest/gtest.h"
 
diff --git a/compiler/utils/arena_allocator_test.cc b/compiler/utils/arena_allocator_test.cc
index 7065527..7f67ef1 100644
--- a/compiler/utils/arena_allocator_test.cc
+++ b/compiler/utils/arena_allocator_test.cc
@@ -15,8 +15,8 @@
  */
 
 #include "base/arena_allocator.h"
+#include "base/arena_bit_vector.h"
 #include "gtest/gtest.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h
new file mode 100644
index 0000000..ac54813
--- /dev/null
+++ b/compiler/utils/dedupe_set-inl.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DEDUPE_SET_INL_H_
+#define ART_COMPILER_UTILS_DEDUPE_SET_INL_H_
+
+#include "dedupe_set.h"
+
+#include <algorithm>
+#include <inttypes.h>
+#include <unordered_map>
+
+#include "base/mutex.h"
+#include "base/hash_set.h"
+#include "base/stl_util.h"
+#include "base/stringprintf.h"
+#include "base/time_utils.h"
+
+namespace art {
+
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
+          HashType kShard>
+struct DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Stats {
+  size_t collision_sum = 0u;
+  size_t collision_max = 0u;
+  size_t total_probe_distance = 0u;
+  size_t total_size = 0u;
+};
+
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
+          HashType kShard>
+class DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Shard {
+ public:
+  Shard(const Alloc& alloc, const std::string& lock_name)
+      : alloc_(alloc),
+        lock_name_(lock_name),
+        lock_(lock_name_.c_str()),
+        keys_() {
+  }
+
+  ~Shard() {
+    for (const HashedKey<StoreKey>& key : keys_) {
+      DCHECK(key.Key() != nullptr);
+      alloc_.Destroy(key.Key());
+    }
+  }
+
+  const StoreKey* Add(Thread* self, size_t hash, const InKey& in_key) REQUIRES(!lock_) {
+    MutexLock lock(self, lock_);
+    HashedKey<InKey> hashed_in_key(hash, &in_key);
+    auto it = keys_.Find(hashed_in_key);
+    if (it != keys_.end()) {
+      DCHECK(it->Key() != nullptr);
+      return it->Key();
+    }
+    const StoreKey* store_key = alloc_.Copy(in_key);
+    keys_.Insert(HashedKey<StoreKey> { hash, store_key });
+    return store_key;
+  }
+
+  void UpdateStats(Thread* self, Stats* global_stats) REQUIRES(!lock_) {
+    // HashSet<> doesn't keep entries ordered by hash, so we actually allocate memory
+    // for bookkeeping while collecting the stats.
+    std::unordered_map<HashType, size_t> stats;
+    {
+      MutexLock lock(self, lock_);
+      // Note: The total_probe_distance will be updated with the current state.
+      // It may have been higher before a re-hash.
+      global_stats->total_probe_distance += keys_.TotalProbeDistance();
+      global_stats->total_size += keys_.Size();
+      for (const HashedKey<StoreKey>& key : keys_) {
+        auto it = stats.find(key.Hash());
+        if (it == stats.end()) {
+          stats.insert({key.Hash(), 1u});
+        } else {
+          ++it->second;
+        }
+      }
+    }
+    for (const auto& entry : stats) {
+      size_t number_of_entries = entry.second;
+      if (number_of_entries > 1u) {
+        global_stats->collision_sum += number_of_entries - 1u;
+        global_stats->collision_max = std::max(global_stats->collision_max, number_of_entries);
+      }
+    }
+  }
+
+ private:
+  template <typename T>
+  class HashedKey {
+   public:
+    HashedKey() : hash_(0u), key_(nullptr) { }
+    HashedKey(size_t hash, const T* key) : hash_(hash), key_(key) { }
+
+    size_t Hash() const {
+      return hash_;
+    }
+
+    const T* Key() const {
+      return key_;
+    }
+
+    bool IsEmpty() const {
+      return Key() == nullptr;
+    }
+
+    void MakeEmpty() {
+      key_ = nullptr;
+    }
+
+   private:
+    size_t hash_;
+    const T* key_;
+  };
+
+  class ShardEmptyFn {
+   public:
+    bool IsEmpty(const HashedKey<StoreKey>& key) const {
+      return key.IsEmpty();
+    }
+
+    void MakeEmpty(HashedKey<StoreKey>& key) {
+      key.MakeEmpty();
+    }
+  };
+
+  struct ShardHashFn {
+    template <typename T>
+    size_t operator()(const HashedKey<T>& key) const {
+      return key.Hash();
+    }
+  };
+
+  struct ShardPred {
+    typename std::enable_if<!std::is_same<StoreKey, InKey>::value, bool>::type
+    operator()(const HashedKey<StoreKey>& lhs, const HashedKey<StoreKey>& rhs) const {
+      DCHECK(lhs.Key() != nullptr);
+      DCHECK(rhs.Key() != nullptr);
+      // Rehashing: stored keys are already deduplicated, so we can simply compare key pointers.
+      return lhs.Key() == rhs.Key();
+    }
+
+    template <typename LeftT, typename RightT>
+    bool operator()(const HashedKey<LeftT>& lhs, const HashedKey<RightT>& rhs) const {
+      DCHECK(lhs.Key() != nullptr);
+      DCHECK(rhs.Key() != nullptr);
+      return lhs.Hash() == rhs.Hash() &&
+          lhs.Key()->size() == rhs.Key()->size() &&
+          std::equal(lhs.Key()->begin(), lhs.Key()->end(), rhs.Key()->begin());
+    }
+  };
+
+  Alloc alloc_;
+  const std::string lock_name_;
+  Mutex lock_;
+  HashSet<HashedKey<StoreKey>, ShardEmptyFn, ShardHashFn, ShardPred> keys_ GUARDED_BY(lock_);
+};
+
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
+          HashType kShard>
+const StoreKey* DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Add(
+    Thread* self, const InKey& key) {
+  uint64_t hash_start;
+  if (kIsDebugBuild) {
+    hash_start = NanoTime();
+  }
+  HashType raw_hash = HashFunc()(key);
+  if (kIsDebugBuild) {
+    uint64_t hash_end = NanoTime();
+    hash_time_ += hash_end - hash_start;
+  }
+  HashType shard_hash = raw_hash / kShard;
+  HashType shard_bin = raw_hash % kShard;
+  return shards_[shard_bin]->Add(self, shard_hash, key);
+}
+
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
+          HashType kShard>
+DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::DedupeSet(const char* set_name,
+                                                                         const Alloc& alloc)
+    : hash_time_(0) {
+  for (HashType i = 0; i < kShard; ++i) {
+    std::ostringstream oss;
+    oss << set_name << " lock " << i;
+    shards_[i].reset(new Shard(alloc, oss.str()));
+  }
+}
+
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
+          HashType kShard>
+DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::~DedupeSet() {
+  // Everything done by member destructors.
+}
+
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
+          HashType kShard>
+std::string DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::DumpStats(
+    Thread* self) const {
+  Stats stats;
+  for (HashType shard = 0; shard < kShard; ++shard) {
+    shards_[shard]->UpdateStats(self, &stats);
+  }
+  return StringPrintf("%zu collisions, %zu max hash collisions, "
+                      "%zu/%zu probe distance, %" PRIu64 " ns hash time",
+                      stats.collision_sum,
+                      stats.collision_max,
+                      stats.total_probe_distance,
+                      stats.total_size,
+                      hash_time_);
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_DEDUPE_SET_INL_H_
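
Aside (reviewer note, not part of the patch): the sharding arithmetic in Add() splits the raw hash so no bits are wasted. The remainder picks the shard (and its lock), while the quotient is kept as the hash used inside that shard's HashSet:

    #include <cstdio>

    int main() {
      constexpr unsigned kShard = 4;
      unsigned raw_hash = 0x12345678u;
      unsigned shard_bin = raw_hash % kShard;   // which shard's lock/HashSet to use
      unsigned shard_hash = raw_hash / kShard;  // hash stored within that shard
      std::printf("bin=%u hash=%u\n", shard_bin, shard_hash);
      return 0;
    }
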
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index 2c4a689..b62f216 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -17,151 +17,41 @@
 #ifndef ART_COMPILER_UTILS_DEDUPE_SET_H_
 #define ART_COMPILER_UTILS_DEDUPE_SET_H_
 
-#include <algorithm>
-#include <inttypes.h>
 #include <memory>
-#include <set>
+#include <stdint.h>
 #include <string>
 
-#include "base/mutex.h"
-#include "base/stl_util.h"
-#include "base/stringprintf.h"
-#include "base/time_utils.h"
-#include "utils/swap_space.h"
+#include "base/macros.h"
 
 namespace art {
 
+class Thread;
+
 // A set of Keys that support a HashFunc returning HashType. Used to find duplicates of Key in the
 // Add method. The data structure is thread-safe through the use of internal locks; it also
 // supports sharding of the lock.
-template <typename InKey, typename StoreKey, typename HashType, typename HashFunc,
+template <typename InKey,
+          typename StoreKey,
+          typename Alloc,
+          typename HashType,
+          typename HashFunc,
           HashType kShard = 1>
 class DedupeSet {
-  typedef std::pair<HashType, const InKey*> HashedInKey;
-  struct HashedKey {
-    StoreKey* store_ptr;
-    union {
-      HashType store_hash;        // Valid if store_ptr != null.
-      const HashedInKey* in_key;  // Valid if store_ptr == null.
-    };
-  };
-
-  class Comparator {
-   public:
-    bool operator()(const HashedKey& a, const HashedKey& b) const {
-      HashType a_hash = (a.store_ptr != nullptr) ? a.store_hash : a.in_key->first;
-      HashType b_hash = (b.store_ptr != nullptr) ? b.store_hash : b.in_key->first;
-      if (a_hash != b_hash) {
-        return a_hash < b_hash;
-      }
-      if (a.store_ptr != nullptr && b.store_ptr != nullptr) {
-        return std::lexicographical_compare(a.store_ptr->begin(), a.store_ptr->end(),
-                                            b.store_ptr->begin(), b.store_ptr->end());
-      } else if (a.store_ptr != nullptr && b.store_ptr == nullptr) {
-        return std::lexicographical_compare(a.store_ptr->begin(), a.store_ptr->end(),
-                                            b.in_key->second->begin(), b.in_key->second->end());
-      } else if (a.store_ptr == nullptr && b.store_ptr != nullptr) {
-        return std::lexicographical_compare(a.in_key->second->begin(), a.in_key->second->end(),
-                                            b.store_ptr->begin(), b.store_ptr->end());
-      } else {
-        return std::lexicographical_compare(a.in_key->second->begin(), a.in_key->second->end(),
-                                            b.in_key->second->begin(), b.in_key->second->end());
-      }
-    }
-  };
-
  public:
-  StoreKey* Add(Thread* self, const InKey& key) {
-    uint64_t hash_start;
-    if (kIsDebugBuild) {
-      hash_start = NanoTime();
-    }
-    HashType raw_hash = HashFunc()(key);
-    if (kIsDebugBuild) {
-      uint64_t hash_end = NanoTime();
-      hash_time_ += hash_end - hash_start;
-    }
-    HashType shard_hash = raw_hash / kShard;
-    HashType shard_bin = raw_hash % kShard;
-    HashedInKey hashed_in_key(shard_hash, &key);
-    HashedKey hashed_key;
-    hashed_key.store_ptr = nullptr;
-    hashed_key.in_key = &hashed_in_key;
-    MutexLock lock(self, *lock_[shard_bin]);
-    auto it = keys_[shard_bin].find(hashed_key);
-    if (it != keys_[shard_bin].end()) {
-      DCHECK(it->store_ptr != nullptr);
-      return it->store_ptr;
-    }
-    hashed_key.store_ptr = CreateStoreKey(key);
-    hashed_key.store_hash = shard_hash;
-    keys_[shard_bin].insert(hashed_key);
-    return hashed_key.store_ptr;
-  }
+  // Add a new key to the dedupe set if not present. Return the equivalent deduplicated stored key.
+  const StoreKey* Add(Thread* self, const InKey& key);
 
-  DedupeSet(const char* set_name, SwapAllocator<void>& alloc)
-      : allocator_(alloc), hash_time_(0) {
-    for (HashType i = 0; i < kShard; ++i) {
-      std::ostringstream oss;
-      oss << set_name << " lock " << i;
-      lock_name_[i] = oss.str();
-      lock_[i].reset(new Mutex(lock_name_[i].c_str()));
-    }
-  }
+  DedupeSet(const char* set_name, const Alloc& alloc);
 
-  ~DedupeSet() {
-    // Have to manually free all pointers.
-    for (auto& shard : keys_) {
-      for (const auto& hashed_key : shard) {
-        DCHECK(hashed_key.store_ptr != nullptr);
-        DeleteStoreKey(hashed_key.store_ptr);
-      }
-    }
-  }
+  ~DedupeSet();
 
-  std::string DumpStats() const {
-    size_t collision_sum = 0;
-    size_t collision_max = 0;
-    for (HashType shard = 0; shard < kShard; ++shard) {
-      HashType last_hash = 0;
-      size_t collision_cur_max = 0;
-      for (const HashedKey& key : keys_[shard]) {
-        DCHECK(key.store_ptr != nullptr);
-        if (key.store_hash == last_hash) {
-          collision_cur_max++;
-          if (collision_cur_max > 1) {
-            collision_sum++;
-            if (collision_cur_max > collision_max) {
-              collision_max = collision_cur_max;
-            }
-          }
-        } else {
-          collision_cur_max = 1;
-          last_hash = key.store_hash;
-        }
-      }
-    }
-    return StringPrintf("%zu collisions, %zu max bucket size, %" PRIu64 " ns hash time",
-                        collision_sum, collision_max, hash_time_);
-  }
+  std::string DumpStats(Thread* self) const;
 
  private:
-  StoreKey* CreateStoreKey(const InKey& key) {
-    StoreKey* ret = allocator_.allocate(1);
-    allocator_.construct(ret, key.begin(), key.end(), allocator_);
-    return ret;
-  }
+  struct Stats;
+  class Shard;
 
-  void DeleteStoreKey(StoreKey* key) {
-    SwapAllocator<StoreKey> alloc(allocator_);
-    alloc.destroy(key);
-    alloc.deallocate(key, 1);
-  }
-
-  std::string lock_name_[kShard];
-  std::unique_ptr<Mutex> lock_[kShard];
-  std::set<HashedKey, Comparator> keys_[kShard];
-  SwapAllocator<StoreKey> allocator_;
+  std::unique_ptr<Shard> shards_[kShard];
   uint64_t hash_time_;
 
   DISALLOW_COPY_AND_ASSIGN(DedupeSet);
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 637964e..60a891d 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -18,15 +18,18 @@
 
 #include <algorithm>
 #include <cstdio>
+#include <vector>
 
+#include "dedupe_set-inl.h"
 #include "gtest/gtest.h"
 #include "thread-inl.h"
+#include "utils/array_ref.h"
 
 namespace art {
 
-class DedupeHashFunc {
+class DedupeSetTestHashFunc {
  public:
-  size_t operator()(const std::vector<uint8_t>& array) const {
+  size_t operator()(const ArrayRef<const uint8_t>& array) const {
     size_t hash = 0;
     for (uint8_t c : array) {
       hash += c;
@@ -36,46 +39,52 @@
     return hash;
   }
 };
+
+class DedupeSetTestAlloc {
+ public:
+  const std::vector<uint8_t>* Copy(const ArrayRef<const uint8_t>& src) {
+    return new std::vector<uint8_t>(src.begin(), src.end());
+  }
+
+  void Destroy(const std::vector<uint8_t>* key) {
+    delete key;
+  }
+};
+
 TEST(DedupeSetTest, Test) {
   Thread* self = Thread::Current();
-  typedef std::vector<uint8_t> ByteArray;
-  SwapAllocator<void> swap(nullptr);
-  DedupeSet<ByteArray, SwapVector<uint8_t>, size_t, DedupeHashFunc> deduplicator("test", swap);
-  SwapVector<uint8_t>* array1;
+  DedupeSetTestAlloc alloc;
+  DedupeSet<ArrayRef<const uint8_t>,
+            std::vector<uint8_t>,
+            DedupeSetTestAlloc,
+            size_t,
+            DedupeSetTestHashFunc> deduplicator("test", alloc);
+  const std::vector<uint8_t>* array1;
   {
-    ByteArray test1;
-    test1.push_back(10);
-    test1.push_back(20);
-    test1.push_back(30);
-    test1.push_back(45);
-
+    uint8_t raw_test1[] = { 10u, 20u, 30u, 45u };
+    ArrayRef<const uint8_t> test1(raw_test1);
     array1 = deduplicator.Add(self, test1);
     ASSERT_NE(array1, nullptr);
     ASSERT_TRUE(std::equal(test1.begin(), test1.end(), array1->begin()));
   }
 
-  SwapVector<uint8_t>* array2;
+  const std::vector<uint8_t>* array2;
   {
-    ByteArray test1;
-    test1.push_back(10);
-    test1.push_back(20);
-    test1.push_back(30);
-    test1.push_back(45);
-    array2 = deduplicator.Add(self, test1);
+    uint8_t raw_test2[] = { 10u, 20u, 30u, 45u };
+    ArrayRef<const uint8_t> test2(raw_test2);
+    array2 = deduplicator.Add(self, test2);
     ASSERT_EQ(array2, array1);
-    ASSERT_TRUE(std::equal(test1.begin(), test1.end(), array2->begin()));
+    ASSERT_TRUE(std::equal(test2.begin(), test2.end(), array2->begin()));
   }
 
-  SwapVector<uint8_t>* array3;
+  const std::vector<uint8_t>* array3;
   {
-    ByteArray test1;
-    test1.push_back(10);
-    test1.push_back(22);
-    test1.push_back(30);
-    test1.push_back(47);
-    array3 = deduplicator.Add(self, test1);
+    uint8_t raw_test3[] = { 10u, 22u, 30u, 47u };
+    ArrayRef<const uint8_t> test3(raw_test3);
+    array3 = deduplicator.Add(self, test3);
     ASSERT_NE(array3, nullptr);
-    ASSERT_TRUE(std::equal(test1.begin(), test1.end(), array3->begin()));
+    ASSERT_NE(array3, array1);
+    ASSERT_TRUE(std::equal(test3.begin(), test3.end(), array3->begin()));
   }
 }
 
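
Aside (reviewer note, not part of the patch): the property this test verifies, reduced to a toy single-threaded form: adding equal content returns the same stored pointer, while different content yields a new one.

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <unordered_map>

    int main() {
      std::unordered_map<std::string, std::unique_ptr<std::string>> storage;
      auto add = [&](const std::string& key) -> const std::string* {
        auto& slot = storage[key];
        if (slot == nullptr) slot = std::make_unique<std::string>(key);  // first sighting: copy
        return slot.get();  // otherwise reuse the deduplicated copy
      };
      const std::string* a = add("\x0a\x14\x1e\x2d");
      const std::string* b = add("\x0a\x14\x1e\x2d");  // same bytes as a
      const std::string* c = add("\x0a\x16\x1e\x2f");  // different bytes
      std::printf("%d %d\n", a == b, a != c);  // prints 1 1
      return 0;
    }
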
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16f29b0..4413906 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -391,10 +391,30 @@
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
 }
 
+TEST_F(AssemblerMIPS64Test, Rotr) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
+}
+
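
Aside (reviewer note, not part of the patch): the rotate-right semantics that the relocated Rotr test above and the new Rotrv/Drotr tests exercise, as a standalone function:

    #include <cstdint>
    #include <cstdio>

    // 32-bit rotate right; masking the shift amounts avoids undefined
    // behavior when amount is 0 or a multiple of 32.
    uint32_t Rotr(uint32_t value, unsigned amount) {
      amount &= 31u;
      return (value >> amount) | (value << ((32u - amount) & 31u));
    }

    int main() {
      std::printf("0x%08x\n", Rotr(0x80000001u, 1));  // prints 0xc0000000
      return 0;
    }
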
 TEST_F(AssemblerMIPS64Test, Sra) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
 }
 
+TEST_F(AssemblerMIPS64Test, Sllv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "sllv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srlv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "srlv");
+}
+
+TEST_F(AssemblerMIPS64Test, Rotrv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srav) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "srav");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsll) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
 }
@@ -403,20 +423,33 @@
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
 }
 
+TEST_F(AssemblerMIPS64Test, Drotr) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr, 5, "drotr ${reg1}, ${reg2}, {imm}"),
+            "drotr");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsra) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsll32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"),
+            "dsll32");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsrl32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"),
+            "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Drotr32) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr32, 5, "drotr32 ${reg1}, ${reg2}, {imm}"),
+            "drotr32");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsra32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"),
+            "dsra32");
 }
 
 TEST_F(AssemblerMIPS64Test, Sc) {
@@ -435,10 +468,6 @@
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lld, -9, "lld ${reg1}, {imm}(${reg2})"), "lld");
 }
 
-TEST_F(AssemblerMIPS64Test, Rotr) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
-}
-
 TEST_F(AssemblerMIPS64Test, Seleqz) {
   DriverStr(RepeatRRR(&mips64::Mips64Assembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
             "seleqz");
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 282db5d..52e6c02 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -775,7 +775,7 @@
     // case Instruction::k35ms:       // [opt] invoke-virtual+super
     // case Instruction::k35mi:       // [opt] inline invoke
       {
-        u4 arg[5];
+        u4 arg[Instruction::kMaxVarArgRegs];
         pDecInsn->GetVarArgs(arg);
         fputs(" {", gOutFile);
         for (int i = 0, n = pDecInsn->VRegA(); i < n; i++) {
@@ -788,6 +788,21 @@
         fprintf(gOutFile, "}, %s", indexBuf);
       }
       break;
+    case Instruction::k25x:        // op vC, {vD, vE, vF, vG} (B: count)
+      {
+        u4 arg[Instruction::kMaxVarArgRegs25x];
+        pDecInsn->GetAllArgs25x(arg);
+        fprintf(gOutFile, " v%d, {", arg[0]);
+        for (int i = 0, n = pDecInsn->VRegB(); i < n; i++) {
+          if (i == 0) {
+            fprintf(gOutFile, "v%d", arg[Instruction::kLambdaVirtualRegisterWidth + i]);
+          } else {
+            fprintf(gOutFile, ", v%d", arg[Instruction::kLambdaVirtualRegisterWidth + i]);
+          }
+        }  // for
+        fputc('}', gOutFile);
+      }
+      break;
     case Instruction::k3rc:        // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
     // NOT SUPPORTED:
     // case Instruction::k3rms:       // [opt] invoke-virtual+super/range
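
Note: the k25x case prints the closure register (arg[0]) followed by the loose arguments, which start at index kLambdaVirtualRegisterWidth because the lambda closure occupies two virtual registers. The same formatting in isolation, as a sketch with a hypothetical helper:

    #include <cstdint>
    #include <cstdio>

    // Mirrors the k25x printing above: one closure register, then the
    // loose arguments, which start after the two-register lambda slot.
    void PrintInvokeLambdaArgs(const uint32_t (&arg)[6], int loose_arg_count) {
      std::printf(" v%u, {", arg[0]);
      for (int i = 0; i < loose_arg_count; ++i) {
        std::printf(i == 0 ? "v%u" : ", v%u", arg[2 + i]);  // 2 == lambda width.
      }
      std::printf("}\n");
    }

    int main() {
      uint32_t arg[6] = {4, 5, 6, 7, 0, 0};
      PrintInvokeLambdaArgs(arg, 2);  // Prints: " v4, {v6, v7}"
      return 0;
    }
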
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 9236ffb..09d7311 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -25,6 +25,7 @@
   barrier.cc \
   base/allocator.cc \
   base/arena_allocator.cc \
+  base/arena_bit_vector.cc \
   base/bit_vector.cc \
   base/hex_dump.cc \
   base/logging.cc \
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 4dedb33..375a03a 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -28,11 +28,11 @@
   std::fill_n(gprs_, arraysize(gprs_), nullptr);
   std::fill_n(fprs_, arraysize(fprs_), nullptr);
   gprs_[SP] = &sp_;
-  gprs_[RA] = &ra_;
+  gprs_[T9] = &t9_;
   gprs_[A0] = &arg0_;
   // Initialize registers with easy to spot debug values.
   sp_ = MipsContext::kBadGprBase + SP;
-  ra_ = MipsContext::kBadGprBase + RA;
+  t9_ = MipsContext::kBadGprBase + T9;
   arg0_ = 0;
 }
 
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index f1e2905..7dcff63 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -41,7 +41,7 @@
   }
 
   void SetPC(uintptr_t new_pc) OVERRIDE {
-    SetGPR(RA, new_pc);
+    SetGPR(T9, new_pc);
   }
 
   bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
@@ -86,9 +86,10 @@
   // Pointers to registers in the stack, initialized to null except for the special cases below.
   uintptr_t* gprs_[kNumberOfCoreRegisters];
   uint32_t* fprs_[kNumberOfFRegisters];
-  // Hold values for sp and ra (return address) if they are not located within a stack frame, as
-  // well as the first argument.
-  uintptr_t sp_, ra_, arg0_;
+  // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
+  // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
+  // also need the first argument for single-frame deopt.
+  uintptr_t sp_, t9_, arg0_;
 };
 }  // namespace mips
 }  // namespace art
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ba58c3f..0691f2a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -374,7 +374,7 @@
     lw      $ra, 124($a0)
     lw      $a0, 16($a0)
     move    $v0, $zero          # clear result registers r0 and r1
-    jalr    $zero, $ra          # do long jump
+    jalr    $zero, $t9          # do long jump
     move    $v1, $zero
 END art_quick_do_long_jump
 
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f5befdf..fe0afa6 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -361,19 +361,6 @@
   return true;
 }
 
-ProfilingInfo* ArtMethod::CreateProfilingInfo() {
-  DCHECK(!Runtime::Current()->IsAotCompiler());
-  ProfilingInfo* info = ProfilingInfo::Create(this);
-  MemberOffset offset = ArtMethod::EntryPointFromJniOffset(sizeof(void*));
-  uintptr_t pointer = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
-  if (!reinterpret_cast<Atomic<ProfilingInfo*>*>(pointer)->
-          CompareExchangeStrongSequentiallyConsistent(nullptr, info)) {
-    return GetProfilingInfo(sizeof(void*));
-  } else {
-    return info;
-  }
-}
-
 const uint8_t* ArtMethod::GetQuickenedInfo() {
   bool found = false;
   OatFile::OatMethod oat_method =
@@ -390,21 +377,76 @@
   }
 
   Runtime* runtime = Runtime::Current();
-  const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
-  DCHECK(code != nullptr);
+  const void* existing_entry_point = GetEntryPointFromQuickCompiledCode();
+  DCHECK(existing_entry_point != nullptr);
+  ClassLinker* class_linker = runtime->GetClassLinker();
 
-  if (runtime->GetClassLinker()->IsQuickGenericJniStub(code)) {
+  if (class_linker->IsQuickGenericJniStub(existing_entry_point)) {
     // The generic JNI does not have any method header.
     return nullptr;
   }
 
-  code = EntryPointToCodePointer(code);
-  OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
-      reinterpret_cast<uintptr_t>(code) - sizeof(OatQuickMethodHeader));
+  // Check whether the current entry point contains this pc.
+  if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
+      !class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
+    OatQuickMethodHeader* method_header =
+        OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
 
-  // TODO(ngeoffray): validate the pc. Note that unit tests can give unrelated pcs (for
-  // example arch_test).
-  UNUSED(pc);
+    if (method_header->Contains(pc)) {
+      return method_header;
+    }
+  }
+
+  // Check whether the pc is in the JIT code cache.
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr) {
+    jit::JitCodeCache* code_cache = jit->GetCodeCache();
+    OatQuickMethodHeader* method_header = code_cache->LookupMethodHeader(pc, this);
+    if (method_header != nullptr) {
+      DCHECK(method_header->Contains(pc));
+      return method_header;
+    } else {
+      DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) << std::hex << pc;
+    }
+  }
+
+  // The code has to be in an oat file.
+  bool found;
+  OatFile::OatMethod oat_method = class_linker->FindOatMethodFor(this, &found);
+  if (!found) {
+    if (class_linker->IsQuickResolutionStub(existing_entry_point)) {
+      // We are running the generic jni stub, but the entry point of the method has not
+      // been updated yet.
+      DCHECK(IsNative());
+      return nullptr;
+    }
+    // Only for unit tests.
+    // TODO(ngeoffray): Update these tests to pass the right pc?
+    return OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+  }
+  const void* oat_entry_point = oat_method.GetQuickCode();
+  if (oat_entry_point == nullptr || class_linker->IsQuickGenericJniStub(oat_entry_point)) {
+    DCHECK(IsNative());
+    return nullptr;
+  }
+
+  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(oat_entry_point);
+  if (pc == 0) {
+    // This is a downcall, it can only happen for a native method.
+    DCHECK(IsNative());
+    return method_header;
+  }
+
+  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+    // If we're instrumenting, just return the compiled OAT code.
+    // TODO(ngeoffray): Avoid this call path.
+    return method_header;
+  }
+
+  DCHECK(method_header->Contains(pc))
+      << PrettyMethod(this)
+      << std::hex << pc << " " << oat_entry_point
+      << " " << (uintptr_t)(method_header->code_ + method_header->code_size_);
   return method_header;
 }
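
Note: the rewritten lookup leans on one layout invariant: an OatQuickMethodHeader sits immediately before the compiled code it describes, so FromEntryPoint/FromCodePointer reduce to pointer arithmetic. A minimal sketch of that layout (hypothetical types, not the ART definitions):

    #include <cassert>
    #include <cstdint>

    // Header placed directly before the code it describes.
    struct FakeMethodHeader {
      uint32_t code_size;
      // Code bytes follow the header in memory.
      bool Contains(uintptr_t pc, const uint8_t* code) const {
        uintptr_t start = reinterpret_cast<uintptr_t>(code);
        return start <= pc && pc < start + code_size;
      }
      static const FakeMethodHeader* FromCodePointer(const uint8_t* code) {
        return reinterpret_cast<const FakeMethodHeader*>(code) - 1;
      }
    };

    int main() {
      alignas(FakeMethodHeader) uint8_t region[sizeof(FakeMethodHeader) + 16];
      auto* header = reinterpret_cast<FakeMethodHeader*>(region);
      header->code_size = 16;
      const uint8_t* code = region + sizeof(FakeMethodHeader);
      assert(FakeMethodHeader::FromCodePointer(code) == header);
      assert(header->Contains(reinterpret_cast<uintptr_t>(code) + 4, code));
      return 0;
    }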
 
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 9f1495c..551989d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -305,12 +305,18 @@
         PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
   }
 
-  ProfilingInfo* CreateProfilingInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
   ProfilingInfo* GetProfilingInfo(size_t pointer_size) {
     return reinterpret_cast<ProfilingInfo*>(GetEntryPointFromJniPtrSize(pointer_size));
   }
 
+  ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
+    SetEntryPointFromJniPtrSize(info, sizeof(void*));
+  }
+
+  static MemberOffset ProfilingInfoOffset() {
+    return EntryPointFromJniOffset(sizeof(void*));
+  }
+
   void* GetEntryPointFromJni() {
     return GetEntryPointFromJniPtrSize(sizeof(void*));
   }
@@ -433,6 +439,10 @@
     return ++hotness_count_;
   }
 
+  void ClearCounter() {
+    hotness_count_ = 0;
+  }
+
   const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns the method header for the compiled code containing 'pc'. Note that runtime
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index f9960ac..969f5b9 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -19,8 +19,10 @@
 
 #include <map>
 #include <set>
+#include <unordered_map>
 
 #include "atomic.h"
+#include "base/hash_map.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/type_static_if.h"
@@ -150,20 +152,33 @@
 template<class T, AllocatorTag kTag>
 // C++ doesn't allow template typedefs. This is a workaround template typedef which is
 // TrackingAllocatorImpl<T> if kEnableTrackingAllocator is true, std::allocator<T> otherwise.
-class TrackingAllocator : public TypeStaticIf<kEnableTrackingAllocator,
-                                              TrackingAllocatorImpl<T, kTag>,
-                                              std::allocator<T>>::type {
-};
+using TrackingAllocator = typename TypeStaticIf<kEnableTrackingAllocator,
+                                                TrackingAllocatorImpl<T, kTag>,
+                                                std::allocator<T>>::type;
 
 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
-class AllocationTrackingMultiMap : public std::multimap<
-    Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
-};
+using AllocationTrackingMultiMap = std::multimap<
+    Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>>;
 
 template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
-class AllocationTrackingSet : public std::set<Key, Compare, TrackingAllocator<Key, kTag>> {
-};
+using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
 
+template<class Key,
+         class T,
+         AllocatorTag kTag,
+         class Hash = std::hash<Key>,
+         class Pred = std::equal_to<Key>>
+using AllocationTrackingUnorderedMap = std::unordered_map<
+    Key, T, Hash, Pred, TrackingAllocator<std::pair<const Key, T>, kTag>>;
+
+template<class Key,
+         class T,
+         class EmptyFn,
+         AllocatorTag kTag,
+         class Hash = std::hash<Key>,
+         class Pred = std::equal_to<Key>>
+using AllocationTrackingHashMap = HashMap<
+    Key, T, EmptyFn, Hash, Pred, TrackingAllocator<std::pair<Key, T>, kTag>>;
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_ALLOCATOR_H_
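
Note: turning the derived classes into alias templates is more than cosmetic. A class inheriting from std::multimap does not inherit the base's constructors, while a `using` alias simply is the underlying container type and keeps them all. A small illustration with hypothetical names:

    #include <map>
    #include <string>

    // Old pattern: a distinct type; constructors would need re-declaring by hand.
    template <class K, class V>
    class DerivedMap : public std::multimap<K, V> {};

    // New pattern: a true template typedef; all base constructors stay usable.
    template <class K, class V>
    using AliasMap = std::multimap<K, V>;

    int main() {
      AliasMap<int, std::string> alias{{1, "one"}, {1, "uno"}};  // list-init works
      DerivedMap<int, std::string> derived;  // list-init above would not compile here
      derived.emplace(2, "two");
      return (alias.count(1) == 2 && derived.size() == 1) ? 0 : 1;
    }
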
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 1704688..71afa0f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -94,6 +94,8 @@
   "CodeGen      ",
   "ParallelMove ",
   "GraphChecker ",
+  "LSE          ",
+  "Verifier     ",
 };
 
 template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 4e9282f..ace6c38 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -106,6 +106,8 @@
   kArenaAllocCodeGenerator,
   kArenaAllocParallelMoveResolver,
   kArenaAllocGraphChecker,
+  kArenaAllocLSE,
+  kArenaAllocVerifier,
   kNumArenaAllocKinds
 };
 
diff --git a/compiler/utils/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
similarity index 100%
rename from compiler/utils/arena_bit_vector.cc
rename to runtime/base/arena_bit_vector.cc
diff --git a/compiler/utils/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
similarity index 92%
rename from compiler/utils/arena_bit_vector.h
rename to runtime/base/arena_bit_vector.h
index f2a7452..d606166 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
-#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#ifndef ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
+#define ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
 
 #include "base/arena_object.h"
 #include "base/bit_vector.h"
@@ -65,4 +65,4 @@
 
 }  // namespace art
 
-#endif  // ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#endif  // ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
diff --git a/runtime/base/dchecked_vector.h b/runtime/base/dchecked_vector.h
index 6ec573a..2bd12df 100644
--- a/runtime/base/dchecked_vector.h
+++ b/runtime/base/dchecked_vector.h
@@ -59,8 +59,10 @@
       : Base() { }
   explicit dchecked_vector(const allocator_type& alloc)
       : Base(alloc) { }
+  // Note that we cannot forward to std::vector(size_type, const allocator_type&) because it is
+  // not available in C++11, which is the latest standard GCC supports. http://b/25022512
   explicit dchecked_vector(size_type n, const allocator_type& alloc = allocator_type())
-      : Base(n, alloc) { }
+      : Base(alloc) { resize(n); }
   dchecked_vector(size_type n,
                   const value_type& value,
                   const allocator_type& alloc = allocator_type())
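
Note: the dchecked_vector change works around the std::vector(size_type, const allocator_type&) constructor being a C++14 addition: construct with the allocator only, then resize(n) to value-initialize the elements. The same pattern in isolation, on a hypothetical wrapper:

    #include <cstddef>
    #include <vector>

    // Same workaround in isolation: construct with the allocator only,
    // then value-initialize n elements via resize().
    template <typename T, typename Alloc = std::allocator<T>>
    struct SizedVector : std::vector<T, Alloc> {
      explicit SizedVector(size_t n, const Alloc& alloc = Alloc())
          : std::vector<T, Alloc>(alloc) {
        this->resize(n);  // Equivalent to the missing (n, alloc) constructor.
      }
    };

    int main() {
      SizedVector<int> v(4);
      return (v.size() == 4 && v[0] == 0) ? 0 : 1;
    }
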
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 4819f06..95baa82 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -420,6 +420,19 @@
     Resize(Size() / max_load_factor_);
   }
 
+  // Reserve enough room to insert until Size() == num_elements without requiring the hash set
+  // to grow. No-op if the hash set is already large enough for this.
+  void Reserve(size_t num_elements) {
+    size_t num_buckets = num_elements / max_load_factor_;
+    // Deal with rounding errors. Add one for rounding.
+    while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
+      ++num_buckets;
+    }
+    if (num_buckets > NumBuckets()) {
+      Resize(num_buckets);
+    }
+  }
+
   // Total distance that inserted elements were probed. Used for measuring how good hash
   // functions are.
   size_t TotalProbeDistance() const {
@@ -488,6 +501,15 @@
     }
   }
 
+  // The hash set expands when Size() reaches ElementsUntilExpand().
+  size_t ElementsUntilExpand() const {
+    return elements_until_expand_;
+  }
+
+  size_t NumBuckets() const {
+    return num_buckets_;
+  }
+
  private:
   T& ElementForIndex(size_t index) {
     DCHECK_LT(index, NumBuckets());
@@ -543,10 +565,6 @@
     return emptyfn_.IsEmpty(ElementForIndex(index));
   }
 
-  size_t NumBuckets() const {
-    return num_buckets_;
-  }
-
   // Allocate a number of buckets.
   void AllocateStorage(size_t num_buckets) {
     num_buckets_ = num_buckets;
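
Note: Reserve() above has to round carefully, because the expansion threshold is num_buckets * max_load_factor_ truncated to an integer; the loop bumps the bucket count until that threshold safely exceeds the request. The arithmetic alone, as a sketch:

    #include <cassert>
    #include <cstddef>

    // Compute a bucket count whose expansion threshold covers num_elements,
    // guarding against float-to-integer truncation as in Reserve() above.
    size_t BucketsFor(size_t num_elements, double max_load_factor) {
      size_t num_buckets = static_cast<size_t>(num_elements / max_load_factor);
      while (static_cast<size_t>(num_buckets * max_load_factor) <= num_elements + 1u) {
        ++num_buckets;
      }
      return num_buckets;
    }

    int main() {
      for (size_t n : {1u, 10u, 55u, 1024u}) {
        size_t buckets = BucketsFor(n, 0.7);
        // The threshold implied by the bucket count covers the request.
        assert(static_cast<size_t>(buckets * 0.7) > n + 1u);
      }
      return 0;
    }
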
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 743e98e..8254063 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -333,4 +333,25 @@
   ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
 }
 
+TEST_F(HashSetTest, TestReserve) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
+  for (size_t size : sizes) {
+    hash_set.Reserve(size);
+    const size_t buckets_before = hash_set.NumBuckets();
+    // Check that we expanded enough.
+    CHECK_GE(hash_set.ElementsUntilExpand(), size);
+    // Try inserting elements until we are at our reserve size and ensure the hash set did not
+    // expand.
+    while (hash_set.Size() < size) {
+      hash_set.Insert(std::to_string(hash_set.Size()));
+    }
+    CHECK_EQ(hash_set.NumBuckets(), buckets_before);
+  }
+  // Check the behaviour for shrinking; Reserve() does not necessarily resize down.
+  constexpr size_t size = 100;
+  hash_set.Reserve(size);
+  CHECK_GE(hash_set.ElementsUntilExpand(), size);
+}
+
 }  // namespace art
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 2554fb0..a30c73d 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -31,6 +31,16 @@
 template <typename T>
 class ScopedArenaAllocatorAdapter;
 
+// Tag associated with each allocation to help prevent double free.
+enum class ArenaFreeTag : uint8_t {
+  // Allocation is used and has not yet been destroyed.
+  kUsed,
+  // Allocation has been destroyed.
+  kFree,
+};
+
+static constexpr size_t kArenaAlignment = 8;
+
 // Holds a list of Arenas for use by ScopedArenaAllocator stack.
 class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
  public:
@@ -50,6 +60,12 @@
 
   MemStats GetPeakStats() const;
 
+  // Return the arena tag associated with a pointer.
+  static ArenaFreeTag& ArenaTagForAllocation(void* ptr) {
+    DCHECK(kIsDebugBuild) << "Only debug builds have tags";
+    return *(reinterpret_cast<ArenaFreeTag*>(ptr) - 1);
+  }
+
  private:
   struct Peak;
   struct Current;
@@ -72,13 +88,18 @@
     if (UNLIKELY(IsRunningOnMemoryTool())) {
       return AllocWithMemoryTool(bytes, kind);
     }
-    size_t rounded_bytes = RoundUp(bytes, 8);
+    // Add kArenaAlignment for the free or used tag. Required to preserve alignment.
+    size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kArenaAlignment : 0u), kArenaAlignment);
     uint8_t* ptr = top_ptr_;
     if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
       ptr = AllocateFromNextArena(rounded_bytes);
     }
     CurrentStats()->RecordAlloc(bytes, kind);
     top_ptr_ = ptr + rounded_bytes;
+    if (kIsDebugBuild) {
+      ptr += kArenaAlignment;
+      ArenaTagForAllocation(ptr) = ArenaFreeTag::kUsed;
+    }
     return ptr;
   }
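
Note: the debug-tagging scheme reserves one extra alignment unit per allocation and returns a pointer just past it, so the tag for any pointer always lives in the slot right before it and alignment is preserved. A standalone sketch, with a fixed buffer standing in for the arena:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    enum class Tag : uint8_t { kUsed, kFree };
    constexpr size_t kAlign = 8;

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    // Tag stored in the alignment slot just before the returned pointer.
    Tag& TagFor(void* ptr) { return *(reinterpret_cast<Tag*>(ptr) - 1); }

    uint8_t* Alloc(uint8_t*& top, size_t bytes) {
      size_t rounded = RoundUp(bytes + kAlign, kAlign);  // + kAlign for the tag.
      uint8_t* ptr = top;
      top += rounded;
      ptr += kAlign;            // Skip the tag slot; alignment is preserved.
      TagFor(ptr) = Tag::kUsed;
      return ptr;
    }

    int main() {
      alignas(kAlign) uint8_t arena[256];
      uint8_t* top = arena;
      uint8_t* p = Alloc(top, 13);
      assert(reinterpret_cast<uintptr_t>(p) % kAlign == 0);
      assert(TagFor(p) == Tag::kUsed);
      TagFor(p) = Tag::kFree;  // What a destroying deleter would record.
      return 0;
    }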
 
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 562c2bf..1236585 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -20,6 +20,7 @@
 #include <deque>
 #include <queue>
 #include <set>
+#include <type_traits>
 #include <unordered_map>
 #include <utility>
 
@@ -196,6 +197,47 @@
   return ScopedArenaAllocatorAdapter<void>(this, kind);
 }
 
+// Special deleter that only calls the destructor. Also checks for double-free errors.
+template <typename T>
+class ArenaDelete {
+  static constexpr uint8_t kMagicFill = 0xCE;
+ public:
+  void operator()(T* ptr) const {
+    ptr->~T();
+    if (RUNNING_ON_MEMORY_TOOL > 0) {
+      // Writing to the memory will fail if we already destroyed the pointer with
+      // DestroyOnlyDelete, since we make it no-access.
+      memset(ptr, kMagicFill, sizeof(T));
+      MEMORY_TOOL_MAKE_NOACCESS(ptr, sizeof(T));
+    } else if (kIsDebugBuild) {
+      CHECK(ArenaStack::ArenaTagForAllocation(reinterpret_cast<void*>(ptr)) == ArenaFreeTag::kUsed)
+          << "Freeing invalid object " << ptr;
+      ArenaStack::ArenaTagForAllocation(reinterpret_cast<void*>(ptr)) = ArenaFreeTag::kFree;
+      // Write a magic value to try to catch use-after-free errors.
+      memset(ptr, kMagicFill, sizeof(T));
+    }
+  }
+};
+
+// In general we lack support for arrays. We would need to call the destructor on each element,
+// which requires access to the array size. Support for that is future work.
+//
+// However, we can support trivially destructible component types, as then a destructor doesn't
+// need to be called.
+template <typename T>
+class ArenaDelete<T[]> {
+ public:
+  void operator()(T* ptr ATTRIBUTE_UNUSED) const {
+    static_assert(std::is_trivially_destructible<T>::value,
+                  "ArenaUniquePtr does not support non-trivially-destructible arrays.");
+    // TODO: Implement debug checks, and MEMORY_TOOL support.
+  }
+};
+
+// Arena unique ptr that only calls the destructor of the element.
+template <typename T>
+using ArenaUniquePtr = std::unique_ptr<T, ArenaDelete<T>>;
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_SCOPED_ARENA_CONTAINERS_H_
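
Note: ArenaDelete runs the destructor but intentionally returns no memory, since arena storage is reclaimed wholesale when the arena goes away; ArenaUniquePtr just wires that deleter into std::unique_ptr. A usage-shaped sketch with a stand-in deleter (hypothetical, no real arena):

    #include <memory>
    #include <new>
    #include <string>

    // Deleter that destroys the object but leaves the memory to the arena.
    template <typename T>
    struct DestroyOnly {
      void operator()(T* ptr) const { ptr->~T(); }
    };

    template <typename T>
    using FakeArenaUniquePtr = std::unique_ptr<T, DestroyOnly<T>>;

    int main() {
      alignas(std::string) unsigned char slot[sizeof(std::string)];
      // Placement-new into arena-like storage; the unique_ptr only destroys.
      FakeArenaUniquePtr<std::string> s(new (slot) std::string("hello"));
      return s->size() == 5 ? 0 : 1;
    }
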
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index 9c83cf5..46743e9 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -165,7 +165,7 @@
   // Pointer to char data, not necessarily zero terminated.
   const char* ptr_;
   // Length of data.
-  size_type   length_;
+  size_type length_;
 };
 
 // This large function is defined inline so that in a fairly common case where
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 81622e1..69d0799 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1174,15 +1174,26 @@
   mirror::LongArray::ResetArrayClass();
   mirror::ShortArray::ResetArrayClass();
   Thread* const self = Thread::Current();
-  JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
   for (const ClassLoaderData& data : class_loaders_) {
-    vm->DeleteWeakGlobalRef(self, data.weak_root);
-    delete data.allocator;
-    delete data.class_table;
+    DeleteClassLoader(self, data);
   }
   class_loaders_.clear();
 }
 
+void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data) {
+  Runtime* const runtime = Runtime::Current();
+  JavaVMExt* const vm = runtime->GetJavaVM();
+  vm->DeleteWeakGlobalRef(self, data.weak_root);
+  if (runtime->GetJit() != nullptr) {
+    jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
+    if (code_cache != nullptr) {
+      code_cache->RemoveMethodsIn(self, *data.allocator);
+    }
+  }
+  delete data.allocator;
+  delete data.class_table;
+}
+
 mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
   return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ?
       static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) :
@@ -1833,13 +1844,6 @@
       return code;
     }
   }
-  jit::Jit* const jit = Runtime::Current()->GetJit();
-  if (jit != nullptr) {
-    auto* code = jit->GetCodeCache()->GetCodeFor(method);
-    if (code != nullptr) {
-      return code;
-    }
-  }
   if (method->IsNative()) {
     // No code and native? Use generic trampoline.
     return GetQuickGenericJniStub();
@@ -1856,13 +1860,6 @@
   if (found) {
     return oat_method.GetQuickCode();
   }
-  jit::Jit* jit = Runtime::Current()->GetJit();
-  if (jit != nullptr) {
-    auto* code = jit->GetCodeCache()->GetCodeFor(method);
-    if (code != nullptr) {
-      return code;
-    }
-  }
   return nullptr;
 }
 
@@ -5325,7 +5322,7 @@
     ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
     if (virtuals != old_virtuals) {
       // Maps from heap allocated miranda method to linear alloc miranda method.
-      StrideIterator<ArtMethod> out = virtuals->Begin(method_size, method_alignment);
+      StrideIterator<ArtMethod> out = virtuals->begin(method_size, method_alignment);
       // Copy over the old methods + miranda methods.
       for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
         move_table.emplace(&m, &*out);
@@ -5335,7 +5332,7 @@
         ++out;
       }
     }
-    StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment)
+    StrideIterator<ArtMethod> out(virtuals->begin(method_size, method_alignment)
                                       + old_method_count);
     // Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
     // we want the roots of the miranda methods to get visited.
@@ -5367,7 +5364,7 @@
       move_table.emplace(def_method, &new_method);
       ++out;
     }
-    virtuals->SetLength(new_method_count);
+    virtuals->SetSize(new_method_count);
     UpdateClassVirtualMethods(klass.Get(), virtuals);
     // Done copying methods, they are all roots in the class now, so we can end the no thread
     // suspension assert.
@@ -5382,7 +5379,7 @@
       self->AssertPendingOOMException();
       return false;
     }
-    out = virtuals->Begin(method_size, method_alignment) + old_method_count;
+    out = virtuals->begin(method_size, method_alignment) + old_method_count;
     size_t vtable_pos = old_vtable_count;
     for (size_t i = old_method_count; i < new_method_count; ++i) {
       // Leave the declaring class alone as type indices are relative to it
@@ -6387,7 +6384,6 @@
 void ClassLinker::CleanupClassLoaders() {
   Thread* const self = Thread::Current();
   WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
-  JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
   for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
     const ClassLoaderData& data = *it;
     // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
@@ -6395,10 +6391,7 @@
     if (class_loader != nullptr) {
       ++it;
     } else {
-      // Weak reference was cleared, delete the data associated with this class loader.
-      delete data.class_table;
-      delete data.allocator;
-      vm->DeleteWeakGlobalRef(self, data.weak_root);
+      DeleteClassLoader(self, data);
       it = class_loaders_.erase(it);
     }
   }
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a2d38ac..392efd2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -551,6 +551,10 @@
     LinearAlloc* allocator;
   };
 
+  static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
+      REQUIRES(Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   void VisitClassLoaders(ClassLoaderVisitor* visitor) const
       SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
 
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index a5f9d09..440d696 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -1416,7 +1416,12 @@
     }
 
     if (IsDataSectionType(type)) {
-      offset_to_type_map_.Put(aligned_offset, type);
+      if (aligned_offset == 0u) {
+        ErrorStringPrintf("Item %d offset is 0", i);
+        return false;
+      }
+      DCHECK(offset_to_type_map_.Find(aligned_offset) == offset_to_type_map_.end());
+      offset_to_type_map_.Insert(std::pair<uint32_t, uint16_t>(aligned_offset, type));
     }
 
     aligned_offset = ptr_ - begin_;
@@ -1589,7 +1594,8 @@
 }
 
 bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
-  auto it = offset_to_type_map_.find(offset);
+  DCHECK_NE(offset, 0u);
+  auto it = offset_to_type_map_.Find(offset);
   if (UNLIKELY(it == offset_to_type_map_.end())) {
     ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
     return false;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 4f15357..6c63749 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -175,7 +175,35 @@
   const char* const location_;
   const DexFile::Header* const header_;
 
-  AllocationTrackingSafeMap<uint32_t, uint16_t, kAllocatorTagDexFileVerifier> offset_to_type_map_;
+  struct OffsetTypeMapEmptyFn {
+    // Make a hash map slot empty by setting the offset to 0. Offset 0 is technically a valid dex
+    // file offset, but it falls within the dex file header, and we only store data section items
+    // in the map, which all come after the header.
+    void MakeEmpty(std::pair<uint32_t, uint16_t>& pair) const {
+      pair.first = 0u;
+    }
+    // Check if a hash map slot is empty.
+    bool IsEmpty(const std::pair<uint32_t, uint16_t>& pair) const {
+      return pair.first == 0;
+    }
+  };
+  struct OffsetTypeMapHashCompareFn {
+    // Hash function for offset.
+    size_t operator()(const uint32_t key) const {
+      return key;
+    }
+    // std::equal function for offset.
+    bool operator()(const uint32_t a, const uint32_t b) const {
+      return a == b;
+    }
+  };
+  // Map from offset to dex file type, HashMap for performance reasons.
+  AllocationTrackingHashMap<uint32_t,
+                            uint16_t,
+                            OffsetTypeMapEmptyFn,
+                            kAllocatorTagDexFileVerifier,
+                            OffsetTypeMapHashCompareFn,
+                            OffsetTypeMapHashCompareFn> offset_to_type_map_;
   const uint8_t* ptr_;
   const void* previous_item_;
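
Note: the open-addressing HashMap needs an in-place way to mark a slot empty, which is why the verifier reserves key 0: offset 0 lands inside the dex header and can never name a data-section item. A sketch of the sentinel-slot idea on a flat open-addressed table (hypothetical, fixed size, no growth):

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Flat open-addressed map; key 0 marks an empty slot (the sentinel).
    class OffsetTypeMap {
     public:
      explicit OffsetTypeMap(size_t buckets) : slots_(buckets, {0u, 0u}) {}

      void Insert(uint32_t offset, uint16_t type) {
        assert(offset != 0u);  // 0 is reserved as the empty marker.
        size_t i = offset % slots_.size();
        while (slots_[i].first != 0u) i = (i + 1) % slots_.size();  // linear probe
        slots_[i] = {offset, type};
      }

      const uint16_t* Find(uint32_t offset) const {
        size_t i = offset % slots_.size();
        while (slots_[i].first != 0u) {
          if (slots_[i].first == offset) return &slots_[i].second;
          i = (i + 1) % slots_.size();
        }
        return nullptr;  // Hit an empty (sentinel) slot: not present.
      }

     private:
      std::vector<std::pair<uint32_t, uint16_t>> slots_;
    };

    int main() {
      OffsetTypeMap map(16);
      map.Insert(0x70, 1);  // e.g. a data section item past the header
      assert(map.Find(0x70) != nullptr && *map.Find(0x70) == 1);
      assert(map.Find(0x99) == nullptr);
      return 0;
    }
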
 
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 48a12e5..035230e 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -185,6 +185,7 @@
 
   static constexpr uint32_t kMaxVarArgRegs = 5;
   static constexpr uint32_t kMaxVarArgRegs25x = 6;  // lambdas are 2 registers.
+  static constexpr uint32_t kLambdaVirtualRegisterWidth = 2;
 
   // Returns the size (in 2 byte code units) of this instruction.
   size_t SizeInCodeUnits() const {
@@ -248,7 +249,7 @@
 
   // VRegA
   bool HasVRegA() const;
-  int32_t VRegA() const;
+  ALWAYS_INLINE int32_t VRegA() const;
 
   int8_t VRegA_10t() const {
     return VRegA_10t(Fetch16(0));
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 4de8a8e..18ccd08 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -92,10 +92,25 @@
     fake_header_code_and_maps_.insert(fake_header_code_and_maps_.end(),
                                       fake_code_.begin(), fake_code_.end());
 
-    // NOTE: Don't align the code (it will not be executed) but check that the Thumb2
-    // adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
-    CHECK_ALIGNED(mapping_table_offset, 2);
-    const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
+    // Align the code.
+    const size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+    fake_header_code_and_maps_.reserve(fake_header_code_and_maps_.size() + alignment);
+    const void* unaligned_code_ptr =
+        fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size);
+    size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr));
+    size_t padding = RoundUp(offset, alignment) - offset;
+    // Make sure no resizing takes place.
+    CHECK_GE(fake_header_code_and_maps_.capacity(), fake_header_code_and_maps_.size() + padding);
+    fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), padding, 0);
+    const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding;
+    CHECK_EQ(code_ptr,
+             static_cast<const void*>(fake_header_code_and_maps_.data() +
+                                          (fake_header_code_and_maps_.size() - code_size)));
+
+    if (kRuntimeISA == kArm) {
+      // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
+      CHECK_ALIGNED(mapping_table_offset, 2);
+    }
 
     method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*));
     ASSERT_TRUE(method_f_ != nullptr);
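
Note: the alignment fix computes the padding as RoundUp(offset, alignment) - offset and inserts that many bytes at the front, after reserving capacity so the buffer does not reallocate under the computed pointer. The padding arithmetic in isolation (a sketch; the alignment must be a power of two):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t n) {
      return (x + n - 1) & ~(n - 1);  // n must be a power of two.
    }

    // Padding that moves `addr` up to the next `alignment` boundary.
    constexpr uintptr_t PaddingFor(uintptr_t addr, uintptr_t alignment) {
      return RoundUp(addr, alignment) - addr;
    }

    int main() {
      assert(PaddingFor(0x1000, 16) == 0);   // already aligned
      assert(PaddingFor(0x1001, 16) == 15);  // one byte past the boundary
      assert((0x1007 + PaddingFor(0x1007, 8)) % 8 == 0);
      return 0;
    }
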
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index fdded02..380cb8e 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -18,6 +18,7 @@
 
 #include "base/bit_utils.h"
 #include "card_table.h"
+#include "jit/jit_code_cache.h"
 #include "mem_map.h"
 
 namespace art {
@@ -91,6 +92,7 @@
 }
 
 template class MemoryRangeBitmap<CardTable::kCardSize>;
+template class MemoryRangeBitmap<jit::kJitCodeAlignment>;
 
 }  // namespace accounting
 }  // namespace gc
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index d2d12af..e433b8d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -36,13 +36,16 @@
 namespace gc {
 namespace collector {
 
+static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
+
 ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") +
                        "concurrent copying + mark sweep"),
       region_space_(nullptr), gc_barrier_(new Barrier(0)),
       gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
-                                                     2 * MB, 2 * MB)),
+                                                     kDefaultGcMarkStackSize,
+                                                     kDefaultGcMarkStackSize)),
       mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
       thread_running_gc_(nullptr),
       is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
@@ -577,6 +580,18 @@
   Locks::mutator_lock_->SharedLock(self);
 }
 
+void ConcurrentCopying::ExpandGcMarkStack() {
+  DCHECK(gc_mark_stack_->IsFull());
+  const size_t new_size = gc_mark_stack_->Capacity() * 2;
+  std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
+                                                   gc_mark_stack_->End());
+  gc_mark_stack_->Resize(new_size);
+  for (auto& ref : temp) {
+    gc_mark_stack_->PushBack(ref.AsMirrorPtr());
+  }
+  DCHECK(!gc_mark_stack_->IsFull());
+}
+
 void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
   CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
       << " " << to_ref << " " << PrettyTypeOf(to_ref);
@@ -587,7 +602,9 @@
     if (self == thread_running_gc_) {
       // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
       CHECK(self->GetThreadLocalMarkStack() == nullptr);
-      CHECK(!gc_mark_stack_->IsFull());
+      if (UNLIKELY(gc_mark_stack_->IsFull())) {
+        ExpandGcMarkStack();
+      }
       gc_mark_stack_->PushBack(to_ref);
     } else {
       // Otherwise, use a thread-local mark stack.
@@ -621,7 +638,9 @@
   } else if (mark_stack_mode == kMarkStackModeShared) {
     // Access the shared GC mark stack with a lock.
     MutexLock mu(self, mark_stack_lock_);
-    CHECK(!gc_mark_stack_->IsFull());
+    if (UNLIKELY(gc_mark_stack_->IsFull())) {
+      ExpandGcMarkStack();
+    }
     gc_mark_stack_->PushBack(to_ref);
   } else {
     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
@@ -633,7 +652,9 @@
         << "Only GC-running thread should access the mark stack "
         << "in the GC exclusive mark stack mode";
     // Access the GC mark stack without a lock.
-    CHECK(!gc_mark_stack_->IsFull());
+    if (UNLIKELY(gc_mark_stack_->IsFull())) {
+      ExpandGcMarkStack();
+    }
     gc_mark_stack_->PushBack(to_ref);
   }
 }
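
Note: ExpandGcMarkStack() grows the stack with the classic copy-resize-replay move, since the fixed-capacity stack is replaced wholesale rather than grown in place while it holds live references. A container-level sketch of the same move, with a plain vector standing in for the atomic stack:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // A fixed-capacity stack, standing in for the GC mark stack.
    struct FixedStack {
      std::vector<int> data;
      size_t capacity;
      explicit FixedStack(size_t cap) : capacity(cap) { data.reserve(cap); }
      bool IsFull() const { return data.size() == capacity; }
      void PushBack(int v) { assert(!IsFull()); data.push_back(v); }
    };

    // Grow by copying out, doubling, and replaying, mirroring ExpandGcMarkStack().
    void Expand(FixedStack& stack) {
      std::vector<int> temp(stack.data.begin(), stack.data.end());  // copy out
      stack.capacity *= 2;                                          // resize
      stack.data.clear();
      stack.data.reserve(stack.capacity);
      for (int v : temp) stack.PushBack(v);                         // replay
    }

    int main() {
      FixedStack stack(2);
      stack.PushBack(1);
      stack.PushBack(2);
      if (stack.IsFull()) Expand(stack);
      stack.PushBack(3);
      assert(stack.data.size() == 3 && stack.capacity == 4);
      return 0;
    }
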
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 8efad73..c32b19e 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -182,6 +182,7 @@
   void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
   void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
   void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
+  void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
 
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 657fcb5..1d38525 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -26,6 +26,7 @@
 
 #include "art_field-inl.h"
 #include "base/allocator.h"
+#include "base/arena_allocator.h"
 #include "base/dumpable.h"
 #include "base/histogram-inl.h"
 #include "base/stl_util.h"
@@ -1258,11 +1259,11 @@
 }
 
 void Heap::Trim(Thread* self) {
+  Runtime* const runtime = Runtime::Current();
   if (!CareAboutPauseTimes()) {
     ATRACE_BEGIN("Deflating monitors");
     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
     // about pauses.
-    Runtime* runtime = Runtime::Current();
     {
       ScopedSuspendAll ssa(__FUNCTION__);
       uint64_t start_time = NanoTime();
@@ -1274,6 +1275,10 @@
   }
   TrimIndirectReferenceTables(self);
   TrimSpaces(self);
+  // Trim arenas that may have been used by JIT or verifier.
+  ATRACE_BEGIN("Trimming arena maps");
+  runtime->GetArenaPool()->TrimMaps();
+  ATRACE_END();
 }
 
 class TrimIndirectReferenceTableClosure : public Closure {
diff --git a/runtime/image.cc b/runtime/image.cc
index 192371f..1bc19ff 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -150,10 +150,10 @@
 void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
   for (size_t pos = 0; pos < Size(); ) {
     auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos);
-    for (size_t i = 0; i < array->Length(); ++i) {
+    for (size_t i = 0; i < array->size(); ++i) {
       visitor->Visit(&array->At(i, sizeof(ArtField)));
     }
-    pos += array->ComputeSize(array->Length());
+    pos += array->ComputeSize(array->size());
   }
 }
 
@@ -164,10 +164,10 @@
   const size_t method_size = ArtMethod::Size(pointer_size);
   for (size_t pos = 0; pos < Size(); ) {
     auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
-    for (size_t i = 0; i < array->Length(); ++i) {
+    for (size_t i = 0; i < array->size(); ++i) {
       visitor->Visit(&array->At(i, method_size, method_alignment));
     }
-    pos += array->ComputeSize(array->Length(), method_size, method_alignment);
+    pos += array->ComputeSize(array->size(), method_size, method_alignment);
   }
 }
 
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ed64d7e..4db37e6 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -97,16 +97,6 @@
 
 static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  Runtime* const runtime = Runtime::Current();
-  jit::Jit* jit = runtime->GetJit();
-  if (jit != nullptr) {
-    const void* old_code_ptr = method->GetEntryPointFromQuickCompiledCode();
-    jit::JitCodeCache* code_cache = jit->GetCodeCache();
-    if (code_cache->ContainsCodePtr(old_code_ptr)) {
-      // Save the old compiled code since we need it to implement ClassLinker::GetQuickOatCodeFor.
-      code_cache->SaveCompiledCode(method, old_code_ptr);
-    }
-  }
   method->SetEntryPointFromQuickCompiledCode(quick_code);
 }
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 0607493..5afd28e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -49,7 +49,7 @@
 void Jit::DumpInfo(std::ostream& os) {
   os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
      << " data cache size=" << PrettySize(code_cache_->DataCacheSize())
-     << " num methods=" << code_cache_->NumMethods()
+     << " number of compiled code=" << code_cache_->NumberOfCompiledCode()
      << "\n";
   cumulative_timings_.Dump(os);
 }
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e73ba82..1f89f9b 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -43,7 +43,7 @@
 class Jit {
  public:
   static constexpr bool kStressMode = kIsDebugBuild;
-  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 1000;
+  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 500;
   static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2;
 
   virtual ~Jit();
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4187358..60568b2 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,8 +19,13 @@
 #include <sstream>
 
 #include "art_method-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "gc/accounting/bitmap-inl.h"
+#include "jit/profiling_info.h"
+#include "linear_alloc.h"
 #include "mem_map.h"
 #include "oat_file-inl.h"
+#include "thread_list.h"
 
 namespace art {
 namespace jit {
@@ -52,9 +57,9 @@
     return nullptr;
   }
 
-  // Data cache is 1 / 4 of the map.
+  // Data cache is 1 / 2 of the map.
   // TODO: Make this variable?
-  size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+  size_t data_size = RoundUp(data_map->Size() / 2, kPageSize);
   size_t code_size = data_map->Size() - data_size;
   uint8_t* divider = data_map->Begin() + data_size;
 
@@ -74,14 +79,10 @@
 
 JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
     : lock_("Jit code cache", kJitCodeCacheLock),
+      lock_cond_("Jit code cache variable", lock_),
+      collection_in_progress_(false),
       code_map_(code_map),
-      data_map_(data_map),
-      num_methods_(0) {
-
-  VLOG(jit) << "Created jit code cache: data size="
-            << PrettySize(data_map_->Size())
-            << ", code size="
-            << PrettySize(code_map_->Size());
+      data_map_(data_map) {
 
   code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
   data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
@@ -96,13 +97,22 @@
 
   CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
   CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
+
+  live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
+                                             reinterpret_cast<uintptr_t>(code_map_->Begin()),
+                                             reinterpret_cast<uintptr_t>(code_map_->End())));
+
+  if (live_bitmap_.get() == nullptr) {
+    PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
+  }
+
+  VLOG(jit) << "Created jit code cache: data size="
+            << PrettySize(data_map_->Size())
+            << ", code size="
+            << PrettySize(code_map_->Size());
 }
 
-bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
-  return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
-}
-
-bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
+bool JitCodeCache::ContainsPc(const void* ptr) const {
   return code_map_->Begin() <= ptr && ptr < code_map_->End();
 }
 
@@ -121,6 +131,7 @@
 };
 
 uint8_t* JitCodeCache::CommitCode(Thread* self,
+                                  ArtMethod* method,
                                   const uint8_t* mapping_table,
                                   const uint8_t* vmap_table,
                                   const uint8_t* gc_map,
@@ -129,6 +140,106 @@
                                   size_t fp_spill_mask,
                                   const uint8_t* code,
                                   size_t code_size) {
+  uint8_t* result = CommitCodeInternal(self,
+                                       method,
+                                       mapping_table,
+                                       vmap_table,
+                                       gc_map,
+                                       frame_size_in_bytes,
+                                       core_spill_mask,
+                                       fp_spill_mask,
+                                       code,
+                                       code_size);
+  if (result == nullptr) {
+    // Retry.
+    GarbageCollectCache(self);
+    result = CommitCodeInternal(self,
+                                method,
+                                mapping_table,
+                                vmap_table,
+                                gc_map,
+                                frame_size_in_bytes,
+                                core_spill_mask,
+                                fp_spill_mask,
+                                code,
+                                code_size);
+  }
+  return result;
+}
+
+bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
+  bool in_collection = false;
+  while (collection_in_progress_) {
+    in_collection = true;
+    lock_cond_.Wait(self);
+  }
+  return in_collection;
+}
+
+static uintptr_t FromCodeToAllocation(const void* code) {
+  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+  return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
+}
+
+void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
+  uintptr_t allocation = FromCodeToAllocation(code_ptr);
+  const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+  const uint8_t* data = method_header->GetNativeGcMap();
+  if (data != nullptr) {
+    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+  }
+  data = method_header->GetMappingTable();
+  if (data != nullptr) {
+    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+  }
+  // Use the offset directly to avoid the sanity check that the method is
+  // compiled with optimizing.
+  // TODO(ngeoffray): Clean up.
+  if (method_header->vmap_table_offset_ != 0) {
+    data = method_header->code_ - method_header->vmap_table_offset_;
+    mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+  }
+  mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
+}
+
+void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
+  MutexLock mu(self, lock_);
+  // We do not check if a code cache GC is in progress, as this method comes
+  // with the classlinker_classes_lock_ held, and suspending ourselves could
+  // lead to a deadlock.
+  {
+    ScopedCodeCacheWrite scc(code_map_.get());
+    for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+      if (alloc.ContainsUnsafe(it->second)) {
+        FreeCode(it->first, it->second);
+        it = method_code_map_.erase(it);
+      } else {
+        ++it;
+      }
+    }
+  }
+  for (auto it = profiling_infos_.begin(); it != profiling_infos_.end();) {
+    ProfilingInfo* info = *it;
+    if (alloc.ContainsUnsafe(info->GetMethod())) {
+      info->GetMethod()->SetProfilingInfo(nullptr);
+      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
+      it = profiling_infos_.erase(it);
+    } else {
+      ++it;
+    }
+  }
+}
+
+uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+                                          ArtMethod* method,
+                                          const uint8_t* mapping_table,
+                                          const uint8_t* vmap_table,
+                                          const uint8_t* gc_map,
+                                          size_t frame_size_in_bytes,
+                                          size_t core_spill_mask,
+                                          size_t fp_spill_mask,
+                                          const uint8_t* code,
+                                          size_t code_size) {
   size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   // Ensure the header ends up at expected instruction alignment.
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -137,7 +248,9 @@
   OatQuickMethodHeader* method_header = nullptr;
   uint8_t* code_ptr = nullptr;
 
+  ScopedThreadSuspension sts(self, kSuspended);
   MutexLock mu(self, lock_);
+  WaitForPotentialCollectionToComplete(self);
   {
     ScopedCodeCacheWrite scc(code_map_.get());
     uint8_t* result = reinterpret_cast<uint8_t*>(
@@ -149,7 +262,7 @@
     DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
 
     std::copy(code, code + code_size, code_ptr);
-    method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
     new (method_header) OatQuickMethodHeader(
         (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
         (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
@@ -162,8 +275,12 @@
 
   __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                           reinterpret_cast<char*>(code_ptr + code_size));
-
-  ++num_methods_;  // TODO: This is hacky but works since each method has exactly one code region.
+  method_code_map_.Put(code_ptr, method);
+  // We have checked earlier that no collection was in progress. If one were, setting
+  // the entry point of a method would be unsafe, as the collection could delete it.
+  DCHECK(!collection_in_progress_);
+  method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
   return reinterpret_cast<uint8_t*>(method_header);
 }
 
@@ -181,10 +298,32 @@
   return bytes_allocated;
 }
 
+size_t JitCodeCache::NumberOfCompiledCode() {
+  MutexLock mu(Thread::Current(), lock_);
+  return method_code_map_.size();
+}
+
 uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
   size = RoundUp(size, sizeof(void*));
-  MutexLock mu(self, lock_);
-  return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+  uint8_t* result = nullptr;
+
+  {
+    ScopedThreadSuspension sts(self, kSuspended);
+    MutexLock mu(self, lock_);
+    WaitForPotentialCollectionToComplete(self);
+    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+  }
+
+  if (result == nullptr) {
+    // Retry.
+    GarbageCollectCache(self);
+    ScopedThreadSuspension sts(self, kSuspended);
+    MutexLock mu(self, lock_);
+    WaitForPotentialCollectionToComplete(self);
+    result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+  }
+
+  return result;
 }
 
 uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
@@ -196,29 +335,195 @@
   return result;
 }
 
-const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
-  const void* code = method->GetEntryPointFromQuickCompiledCode();
-  if (ContainsCodePtr(code)) {
-    return code;
+class MarkCodeVisitor FINAL : public StackVisitor {
+ public:
+  MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
+      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+        code_cache_(code_cache_in),
+        bitmap_(code_cache_->GetLiveBitmap()) {}
+
+  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+    if (method_header == nullptr) {
+      return true;
+    }
+    const void* code = method_header->GetCode();
+    if (code_cache_->ContainsPc(code)) {
+      // Use the atomic set version, as multiple threads are executing this code.
+      bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+    }
+    return true;
   }
-  MutexLock mu(Thread::Current(), lock_);
-  auto it = method_code_map_.find(method);
-  if (it != method_code_map_.end()) {
-    return it->second;
+
+ private:
+  JitCodeCache* const code_cache_;
+  CodeCacheBitmap* const bitmap_;
+};
+
+class MarkCodeClosure FINAL : public Closure {
+ public:
+  MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
+      : code_cache_(code_cache), barrier_(barrier) {}
+
+  void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(thread == Thread::Current() || thread->IsSuspended());
+    MarkCodeVisitor visitor(thread, code_cache_);
+    visitor.WalkStack();
+    if (thread->GetState() == kRunnable) {
+      barrier_->Pass(Thread::Current());
+    }
   }
-  return nullptr;
+
+ private:
+  JitCodeCache* const code_cache_;
+  Barrier* const barrier_;
+};
+
+void JitCodeCache::GarbageCollectCache(Thread* self) {
+  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+    LOG(INFO) << "Clearing code cache, code="
+              << PrettySize(CodeCacheSize())
+              << ", data=" << PrettySize(DataCacheSize());
+  }
+
+  size_t map_size = 0;
+  ScopedThreadSuspension sts(self, kSuspended);
+
+  // Walk over all compiled methods and set the entry points of these
+  // methods to interpreter.
+  {
+    MutexLock mu(self, lock_);
+    if (WaitForPotentialCollectionToComplete(self)) {
+      return;
+    }
+    collection_in_progress_ = true;
+    map_size = method_code_map_.size();
+    for (auto& it : method_code_map_) {
+      it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+    }
+    for (ProfilingInfo* info : profiling_infos_) {
+      info->GetMethod()->SetProfilingInfo(nullptr);
+    }
+  }
+
+  // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+  {
+    Barrier barrier(0);
+    MarkCodeClosure closure(this, &barrier);
+    size_t threads_running_checkpoint =
+        Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+    if (threads_running_checkpoint != 0) {
+      barrier.Increment(self, threads_running_checkpoint);
+    }
+  }
+
+  {
+    MutexLock mu(self, lock_);
+    DCHECK_EQ(map_size, method_code_map_.size());
+    // Free unused compiled code, and restore the entry point of used compiled code.
+    {
+      ScopedCodeCacheWrite scc(code_map_.get());
+      for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+        const void* code_ptr = it->first;
+        ArtMethod* method = it->second;
+        uintptr_t allocation = FromCodeToAllocation(code_ptr);
+        const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+        if (GetLiveBitmap()->Test(allocation)) {
+          method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+          ++it;
+        } else {
+          method->ClearCounter();
+          DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
+          FreeCode(code_ptr, method);
+          it = method_code_map_.erase(it);
+        }
+      }
+    }
+    GetLiveBitmap()->Bitmap::Clear();
+
+    // Free all profiling info.
+    for (ProfilingInfo* info : profiling_infos_) {
+      DCHECK(info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr);
+      mspace_free(data_mspace_, reinterpret_cast<uint8_t*>(info));
+    }
+    profiling_infos_.clear();
+
+    collection_in_progress_ = false;
+    lock_cond_.Broadcast(self);
+  }
+
+  if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+    LOG(INFO) << "After clearing code cache, code="
+              << PrettySize(CodeCacheSize())
+              << ", data=" << PrettySize(DataCacheSize());
+  }
 }
 
-void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
-  DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
-  DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
-      << old_code_ptr;
-  MutexLock mu(Thread::Current(), lock_);
-  auto it = method_code_map_.find(method);
-  if (it != method_code_map_.end()) {
-    return;
+
+OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
+  static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+  if (kRuntimeISA == kArm) {
+    // On Thumb-2, the pc is offset by one.
+    --pc;
   }
-  method_code_map_.Put(method, old_code_ptr);
+  if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
+    return nullptr;
+  }
+
+  MutexLock mu(Thread::Current(), lock_);
+  if (method_code_map_.empty()) {
+    return nullptr;
+  }
+  auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
+  --it;
+
+  const void* code_ptr = it->first;
+  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+  if (!method_header->Contains(pc)) {
+    return nullptr;
+  }
+  DCHECK_EQ(it->second, method)
+      << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
+  return method_header;
+}
+
+ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
+                                              ArtMethod* method,
+                                              const std::vector<uint32_t>& entries,
+                                              bool retry_allocation) {
+  ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);
+
+  if (info == nullptr && retry_allocation) {
+    GarbageCollectCache(self);
+    info = AddProfilingInfoInternal(self, method, entries);
+  }
+  return info;
+}
+
+ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self,
+                                                      ArtMethod* method,
+                                                      const std::vector<uint32_t>& entries) {
+  size_t profile_info_size = RoundUp(
+      sizeof(ProfilingInfo) + sizeof(ProfilingInfo::InlineCache) * entries.size(),
+      sizeof(void*));
+  ScopedThreadSuspension sts(self, kSuspended);
+  MutexLock mu(self, lock_);
+  WaitForPotentialCollectionToComplete(self);
+
+  // Check whether some other thread has concurrently created it.
+  ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*));
+  if (info != nullptr) {
+    return info;
+  }
+
+  uint8_t* data = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, profile_info_size));
+  if (data == nullptr) {
+    return nullptr;
+  }
+  info = new (data) ProfilingInfo(method, entries);
+  method->SetProfilingInfo(info);
+  profiling_infos_.push_back(info);
+  return info;
 }
 
 }  // namespace jit
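Aside on the lookup above: LookupMethodHeader keys method_code_map_ by code start address, so finding the owner of a pc is a sorted-map search plus one step back. A standalone sketch of that idea, with invented types in place of ART's SafeMap and OatQuickMethodHeader (upper_bound is used here so the back-step also covers a pc equal to a region's start; the patch's lower_bound + --it assumes the pc points strictly inside the code):

    // Hypothetical stand-ins; not ART code.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    struct FakeMethod {
      std::string name;
      uintptr_t code_size;  // Code occupies [start, start + code_size).
    };

    const FakeMethod* Lookup(const std::map<uintptr_t, FakeMethod>& code_map,
                             uintptr_t pc) {
      auto it = code_map.upper_bound(pc);  // First region starting after pc.
      if (it == code_map.begin()) {
        return nullptr;  // pc lies below every compiled region.
      }
      --it;  // Candidate owner: the region starting at or before pc.
      return pc < it->first + it->second.code_size ? &it->second : nullptr;
    }

    int main() {
      std::map<uintptr_t, FakeMethod> code_map = {
          {0x1000, {"foo", 0x80}},
          {0x2000, {"bar", 0x40}},
      };
      std::cout << Lookup(code_map, 0x2010)->name << "\n";               // bar
      std::cout << (Lookup(code_map, 0x1f00) ? "hit" : "miss") << "\n";  // miss
      return 0;
    }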
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fa90c18..e10f962 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
 #include "atomic.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "gc/accounting/bitmap.h"
 #include "gc/allocator/dlmalloc.h"
 #include "gc_root.h"
 #include "jni.h"
@@ -33,32 +34,41 @@
 namespace art {
 
 class ArtMethod;
-class CompiledMethod;
-class CompilerCallbacks;
+class LinearAlloc;
+class ProfilingInfo;
 
 namespace jit {
 
 class JitInstrumentationCache;
 
+// Alignment that will suit all architectures.
+static constexpr int kJitCodeAlignment = 16;
+using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
+
 class JitCodeCache {
  public:
   static constexpr size_t kMaxCapacity = 1 * GB;
-  static constexpr size_t kDefaultCapacity = 2 * MB;
+  // Set the default to a very low amount for debug builds to stress the code cache
+  // collection.
+  static constexpr size_t kDefaultCapacity = kIsDebugBuild ? 20 * KB : 2 * MB;
 
   // Create the code cache with a code + data capacity equal to "capacity", error message is passed
   // in the out arg error_msg.
   static JitCodeCache* Create(size_t capacity, std::string* error_msg);
 
-  size_t NumMethods() const {
-    return num_methods_;
-  }
-
+  // Number of bytes allocated in the code cache.
   size_t CodeCacheSize() REQUIRES(!lock_);
 
+  // Number of bytes allocated in the data cache.
   size_t DataCacheSize() REQUIRES(!lock_);
 
+  // Number of compiled code entries in the code cache. Note that this is not
+  // the number of methods that got JIT compiled, as some may have been collected.
+  size_t NumberOfCompiledCode() REQUIRES(!lock_);
+
   // Allocate and write code and its metadata to the code cache.
   uint8_t* CommitCode(Thread* self,
+                      ArtMethod* method,
                       const uint8_t* mapping_table,
                       const uint8_t* vmap_table,
                       const uint8_t* gc_map,
@@ -67,51 +77,107 @@
                       size_t fp_spill_mask,
                       const uint8_t* code,
                       size_t code_size)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
-  // Return true if the code cache contains the code pointer which si the entrypoint of the method.
-  bool ContainsMethod(ArtMethod* method) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Return true if the code cache contains a code ptr.
-  bool ContainsCodePtr(const void* ptr) const;
+  // Return true if the code cache contains this pc.
+  bool ContainsPc(const void* pc) const;
 
   // Reserve a region of data of size at least "size". Returns null if there is no more room.
-  uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
+  uint8_t* ReserveData(Thread* self, size_t size)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!lock_);
 
   // Add a data array of size (end - begin) with the associated contents, returns null if there
   // is no more room.
   uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
+      SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!lock_);
 
-  // Get code for a method, returns null if it is not in the jit cache.
-  const void* GetCodeFor(ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+  CodeCacheBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
+  }
 
-  // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
-  // entrypoint isn't within the cache.
-  void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
-      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+  // Perform a collection on the code cache.
+  void GarbageCollectCache(Thread* self)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Given the 'pc', try to find the JIT compiled code associated with it.
+  // Return null if 'pc' is not in the code cache. 'method' is passed for
+  // a sanity check.
+  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Remove all methods in our cache that were allocated by 'alloc'.
+  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
+      REQUIRES(!lock_)
+      REQUIRES(Locks::classlinker_classes_lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
+  // collect and retry if the first allocation is unsuccessful.
+  ProfilingInfo* AddProfilingInfo(Thread* self,
+                                  ArtMethod* method,
+                                  const std::vector<uint32_t>& entries,
+                                  bool retry_allocation)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
-  // Takes ownership of code_mem_map.
+  // Take ownership of code_mem_map.
   JitCodeCache(MemMap* code_map, MemMap* data_map);
 
-  // Lock which guards.
+  // Internal version of 'CommitCode' that does not retry if the
+  // allocation fails. Return null on failure.
+  uint8_t* CommitCodeInternal(Thread* self,
+                              ArtMethod* method,
+                              const uint8_t* mapping_table,
+                              const uint8_t* vmap_table,
+                              const uint8_t* gc_map,
+                              size_t frame_size_in_bytes,
+                              size_t core_spill_mask,
+                              size_t fp_spill_mask,
+                              const uint8_t* code,
+                              size_t code_size)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
+                                          ArtMethod* method,
+                                          const std::vector<uint32_t>& entries)
+      REQUIRES(!lock_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // If a collection is in progress, wait for it to finish. Return
+  // whether the thread actually waited.
+  bool WaitForPotentialCollectionToComplete(Thread* self)
+      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+
+  // Free the mspace allocations taken by 'method'.
+  void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+
+  // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
+  // Condition to wait on during collection.
+  ConditionVariable lock_cond_ GUARDED_BY(lock_);
+  // Whether there is a code cache collection in progress.
+  bool collection_in_progress_ GUARDED_BY(lock_);
   // Mem map which holds code.
   std::unique_ptr<MemMap> code_map_;
   // Mem map which holds data (stack maps and profiling info).
   std::unique_ptr<MemMap> data_map_;
   // The opaque mspace for allocating code.
-  void* code_mspace_;
+  void* code_mspace_ GUARDED_BY(lock_);
   // The opaque mspace for allocating data.
-  void* data_mspace_;
-  // Number of compiled methods.
-  size_t num_methods_;
-  // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
-  // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
-  SafeMap<ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
+  void* data_mspace_ GUARDED_BY(lock_);
+  // Bitmap for collecting code and data.
+  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+  // This map holds compiled code associated with the ArtMethod.
+  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
+  // ProfilingInfo objects we have allocated.
+  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
 };
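The live_bitmap_ introduced here is a MemoryRangeBitmap with one bit per kJitCodeAlignment-sized slot, which is what lets the collection checkpoint mark code allocations cheaply. A rough standalone sketch of that shape (invented class backed by std::vector<bool>; the real template lives in gc/accounting/bitmap.h):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    class RangeBitmap {
     public:
      static constexpr uintptr_t kAlignment = 16;  // Mirrors kJitCodeAlignment.

      RangeBitmap(uintptr_t begin, uintptr_t end)
          : begin_(begin), bits_((end - begin) / kAlignment, false) {
        assert(begin % kAlignment == 0 && end % kAlignment == 0 && begin < end);
      }

      void Set(uintptr_t addr) { bits_.at(Index(addr)) = true; }
      bool Test(uintptr_t addr) const { return bits_.at(Index(addr)); }
      void Clear() { bits_.assign(bits_.size(), false); }

     private:
      size_t Index(uintptr_t addr) const {
        assert(addr % kAlignment == 0);  // Only aligned allocation starts are tracked.
        return (addr - begin_) / kAlignment;
      }

      uintptr_t begin_;
      std::vector<bool> bits_;
    };

    int main() {
      RangeBitmap live(0x1000, 0x2000);
      live.Set(0x1050);                        // 0x1050 is 16-byte aligned.
      std::cout << live.Test(0x1050) << "\n";  // 1
      live.Clear();
      std::cout << live.Test(0x1050) << "\n";  // 0
      return 0;
    }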
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 9b9c5d2..8aaa5fa 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -26,7 +26,12 @@
 
 class JitCompileTask FINAL : public Task {
  public:
-  explicit JitCompileTask(ArtMethod* method) : method_(method) {
+  enum TaskKind {
+    kAllocateProfile,
+    kCompile
+  };
+
+  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind) {
     ScopedObjectAccess soa(Thread::Current());
     // Add a global ref to the class to prevent class unloading until compilation is done.
     klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
@@ -40,9 +45,16 @@
 
   void Run(Thread* self) OVERRIDE {
     ScopedObjectAccess soa(self);
-    VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
-    if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
-      VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
+    if (kind_ == kCompile) {
+      VLOG(jit) << "JitCompileTask compiling method " << PrettyMethod(method_);
+      if (!Runtime::Current()->GetJit()->CompileMethod(method_, self)) {
+        VLOG(jit) << "Failed to compile method " << PrettyMethod(method_);
+      }
+    } else {
+      DCHECK(kind_ == kAllocateProfile);
+      if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) {
+        VLOG(jit) << "Start profiling " << PrettyMethod(method_);
+      }
     }
   }
 
@@ -52,6 +64,7 @@
 
  private:
   ArtMethod* const method_;
+  const TaskKind kind_;
   jobject klass_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
@@ -73,11 +86,9 @@
 }
 
 void JitInstrumentationCache::AddSamples(Thread* self, ArtMethod* method, size_t) {
-  ScopedObjectAccessUnchecked soa(self);
   // Since we don't have on-stack replacement, some methods can remain in the interpreter longer
   // than we want resulting in samples even after the method is compiled.
-  if (method->IsClassInitializer() || method->IsNative() ||
-      Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+  if (method->IsClassInitializer() || method->IsNative()) {
     return;
   }
   if (thread_pool_.get() == nullptr) {
@@ -86,14 +97,20 @@
   }
   uint16_t sample_count = method->IncrementCounter();
   if (sample_count == warm_method_threshold_) {
-    ProfilingInfo* info = method->CreateProfilingInfo();
-    if (info != nullptr) {
+    if (ProfilingInfo::Create(self, method, /* retry_allocation */ false)) {
       VLOG(jit) << "Start profiling " << PrettyMethod(method);
+    } else {
+      // The allocation failed. Instead of doing the collection on the Java thread, we push
+      // an allocation task to a compiler thread, which will do the collection.
+      thread_pool_->AddTask(self, new JitCompileTask(
+          method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kAllocateProfile));
+      thread_pool_->StartWorkers(self);
     }
   }
+
   if (sample_count == hot_method_threshold_) {
     thread_pool_->AddTask(self, new JitCompileTask(
-        method->GetInterfaceMethodIfProxy(sizeof(void*))));
+        method->GetInterfaceMethodIfProxy(sizeof(void*)), JitCompileTask::kCompile));
     thread_pool_->StartWorkers(self);
   }
 }
@@ -108,14 +125,18 @@
                                                           ArtMethod* caller,
                                                           uint32_t dex_pc,
                                                           ArtMethod* callee ATTRIBUTE_UNUSED) {
+  instrumentation_cache_->AddSamples(thread, caller, 1);
+  // We make sure we cannot be suspended, as the profiling info can be concurrently deleted.
+  thread->StartAssertNoThreadSuspension("Instrumenting invoke");
   DCHECK(this_object != nullptr);
   ProfilingInfo* info = caller->GetProfilingInfo(sizeof(void*));
   if (info != nullptr) {
     // Since the instrumentation is marked from the declaring class we need to mark the card so
     // that mod-union tables and card rescanning know about the update.
     Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass());
-    info->AddInvokeInfo(thread, dex_pc, this_object->GetClass());
+    info->AddInvokeInfo(dex_pc, this_object->GetClass());
   }
+  thread->EndAssertNoThreadSuspension(nullptr);
 }
 
 void JitInstrumentationCache::WaitForCompilationToFinish(Thread* self) {
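AddSamples above fires each action exactly once because it compares the incremented counter for equality against the thresholds. A toy model of that warm/hot policy (the threshold values and the booleans standing in for ProfilingInfo::Create and the compile task queue are invented):

    #include <cstdint>
    #include <iostream>

    struct Method {
      uint16_t counter = 0;
      bool has_profile = false;
      bool compiled = false;
    };

    constexpr uint16_t kWarmThreshold = 500;    // Hypothetical value.
    constexpr uint16_t kHotThreshold  = 10000;  // Hypothetical value.

    void AddSample(Method& m) {
      uint16_t count = ++m.counter;
      if (count == kWarmThreshold) {
        m.has_profile = true;  // Stand-in for ProfilingInfo::Create.
      }
      if (count == kHotThreshold) {
        m.compiled = true;  // Stand-in for queueing a JitCompileTask.
      }
    }

    int main() {
      Method m;
      for (int i = 0; i < kHotThreshold; ++i) {
        AddSample(m);
      }
      std::cout << "profile=" << m.has_profile << " compiled=" << m.compiled << "\n";
      return 0;
    }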
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 0c039f2..2e52b1b 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -25,18 +25,13 @@
 
 namespace art {
 
-ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
+bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
   // Walk over the dex instructions of the method and keep track of
   // instructions we are interested in profiling.
-  const uint16_t* code_ptr = nullptr;
-  const uint16_t* code_end = nullptr;
-  {
-    ScopedObjectAccess soa(Thread::Current());
-    DCHECK(!method->IsNative());
-    const DexFile::CodeItem& code_item = *method->GetCodeItem();
-    code_ptr = code_item.insns_;
-    code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
-  }
+  DCHECK(!method->IsNative());
+  const DexFile::CodeItem& code_item = *method->GetCodeItem();
+  const uint16_t* code_ptr = code_item.insns_;
+  const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
 
   uint32_t dex_pc = 0;
   std::vector<uint32_t> entries;
@@ -62,23 +57,15 @@
   // If there is no instruction we are interested in, no need to create a `ProfilingInfo`
   // object, it will never be filled.
   if (entries.empty()) {
-    return nullptr;
+    return true;
   }
 
   // Allocate the `ProfilingInfo` object in the JIT's data space.
   jit::JitCodeCache* code_cache = Runtime::Current()->GetJit()->GetCodeCache();
-  size_t profile_info_size = sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size();
-  uint8_t* data = code_cache->ReserveData(Thread::Current(), profile_info_size);
-
-  if (data == nullptr) {
-    VLOG(jit) << "Cannot allocate profiling info anymore";
-    return nullptr;
-  }
-
-  return new (data) ProfilingInfo(entries);
+  return code_cache->AddProfilingInfo(self, method, entries, retry_allocation) != nullptr;
 }
 
-void ProfilingInfo::AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls) {
+void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
   InlineCache* cache = nullptr;
   // TODO: binary search if array is too long.
   for (size_t i = 0; i < number_of_inline_caches_; ++i) {
@@ -89,9 +76,8 @@
   }
   DCHECK(cache != nullptr);
 
-  ScopedObjectAccess soa(self);
   for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
-    mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+    mirror::Class* existing = cache->classes_[i].Read();
     if (existing == cls) {
       // Receiver type is already in the cache, nothing else to do.
       return;
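The loop above is the whole inline cache policy: return early if the receiver class is already recorded, claim the first empty slot otherwise, and silently drop the update once every slot is taken (the call site is then megamorphic). A self-contained model of that update, with ints standing in for mirror::Class* and an invented slot count:

    #include <array>
    #include <cstddef>
    #include <iostream>

    using ClassId = int;  // Stand-in for mirror::Class*.
    constexpr ClassId kEmpty = 0;

    struct InlineCache {
      static constexpr size_t kSlots = 5;     // Invented capacity.
      std::array<ClassId, kSlots> classes{};  // Zero-initialized == all empty.

      void AddReceiver(ClassId cls) {
        for (ClassId& slot : classes) {
          if (slot == cls) {
            return;  // Receiver type already recorded.
          }
          if (slot == kEmpty) {
            slot = cls;  // First free slot wins.
            return;
          }
        }
        // All slots hold other classes: megamorphic, drop the update.
      }
    };

    int main() {
      InlineCache cache;
      for (ClassId c : {7, 8, 7, 9, 10, 11, 12}) {  // 12 is dropped: cache is full.
        cache.AddReceiver(c);
      }
      for (ClassId c : cache.classes) {
        std::cout << c << " ";  // 7 8 9 10 11
      }
      std::cout << "\n";
      return 0;
    }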
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 73ca41a..b13a315 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -26,6 +26,10 @@
 
 class ArtMethod;
 
+namespace jit {
+class JitCodeCache;
+}
+
 namespace mirror {
 class Class;
 }
@@ -36,10 +40,17 @@
  */
 class ProfilingInfo {
  public:
-  static ProfilingInfo* Create(ArtMethod* method);
+  // Create a ProfilingInfo for 'method'. Return true if it succeeded, or if one
+  // is not needed because the method has no virtual/interface invocations.
+  static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Add information from an executed INVOKE instruction to the profile.
-  void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);
+  void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
+      // Method should not be interruptible, as it manipulates the ProfilingInfo
+      // which can be concurrently collected.
+      REQUIRES(Roles::uninterruptible_)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
   template<typename RootVisitorType>
@@ -52,6 +63,10 @@
     }
   }
 
+  ArtMethod* GetMethod() const {
+    return method_;
+  }
+
  private:
   // Structure to store the classes seen at runtime for a specific instruction.
   // Once the classes_ array is full, we consider the INVOKE to be megamorphic.
@@ -84,8 +99,9 @@
     GcRoot<mirror::Class> classes_[kIndividualCacheSize];
   };
 
-  explicit ProfilingInfo(const std::vector<uint32_t>& entries)
-      : number_of_inline_caches_(entries.size()) {
+  ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
+      : number_of_inline_caches_(entries.size()),
+        method_(method) {
     memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
     for (size_t i = 0; i < number_of_inline_caches_; ++i) {
       cache_[i].dex_pc = entries[i];
@@ -95,9 +111,14 @@
   // Number of instructions we are profiling in the ArtMethod.
   const uint32_t number_of_inline_caches_;
 
+  // Method this profiling info is for.
+  ArtMethod* const method_;
+
   // Dynamically allocated array of size `number_of_inline_caches_`.
   InlineCache cache_[0];
 
+  friend class jit::JitCodeCache;
+
   DISALLOW_COPY_AND_ASSIGN(ProfilingInfo);
 };
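cache_[0] above is a trailing flexible array: the cache entries live directly after the ProfilingInfo fields, which is why AddProfilingInfoInternal sizes the mspace allocation as sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(), rounded up to pointer alignment, and then placement-constructs the object. A compilable sketch of the same layout trick (hypothetical types; zero-length arrays are a GCC/Clang extension, as in the patch):

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <new>
    #include <vector>

    struct Entry {
      uint32_t dex_pc;
    };

    struct Profile {
      explicit Profile(const std::vector<uint32_t>& pcs)
          : count(static_cast<uint32_t>(pcs.size())) {
        for (uint32_t i = 0; i < count; ++i) {
          entries[i].dex_pc = pcs[i];
        }
      }
      uint32_t count;
      Entry entries[0];  // Trailing storage, like cache_[0] above.
    };

    size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    int main() {
      std::vector<uint32_t> pcs = {3, 17, 42};
      // One block holds the header and the trailing array, pointer-aligned.
      size_t size = RoundUp(sizeof(Profile) + sizeof(Entry) * pcs.size(), sizeof(void*));
      void* data = std::malloc(size);
      Profile* p = new (data) Profile(pcs);  // Placement new into the raw block.
      std::cout << p->entries[2].dex_pc << "\n";  // 42
      std::free(data);
      return 0;
    }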
 
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index 0ff6d7a..e01b6cc 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -30,19 +30,34 @@
 class LengthPrefixedArray {
  public:
   explicit LengthPrefixedArray(size_t length)
-      : length_(dchecked_integral_cast<uint32_t>(length)) {}
+      : size_(dchecked_integral_cast<uint32_t>(length)) {}
 
   T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
-    DCHECK_LT(index, length_);
+    DCHECK_LT(index, size_);
     return AtUnchecked(index, element_size, alignment);
   }
 
-  StrideIterator<T> Begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+  const T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) const {
+    DCHECK_LT(index, size_);
+    return AtUnchecked(index, element_size, alignment);
+  }
+
+  StrideIterator<T> begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
     return StrideIterator<T>(&AtUnchecked(0, element_size, alignment), element_size);
   }
 
-  StrideIterator<T> End(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
-    return StrideIterator<T>(&AtUnchecked(length_, element_size, alignment), element_size);
+  StrideIterator<const T> begin(size_t element_size = sizeof(T),
+                                size_t alignment = alignof(T)) const {
+    return StrideIterator<const T>(&AtUnchecked(0, element_size, alignment), element_size);
+  }
+
+  StrideIterator<T> end(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+    return StrideIterator<T>(&AtUnchecked(size_, element_size, alignment), element_size);
+  }
+
+  StrideIterator<const T> end(size_t element_size = sizeof(T),
+                              size_t alignment = alignof(T)) const {
+    return StrideIterator<const T>(&AtUnchecked(size_, element_size, alignment), element_size);
   }
 
   static size_t OffsetOfElement(size_t index,
@@ -60,13 +75,13 @@
     return result;
   }
 
-  uint64_t Length() const {
-    return length_;
+  size_t size() const {
+    return size_;
   }
 
   // Updates the length but does not reallocate storage.
-  void SetLength(size_t length) {
-    length_ = dchecked_integral_cast<uint32_t>(length);
+  void SetSize(size_t length) {
+    size_ = dchecked_integral_cast<uint32_t>(length);
   }
 
  private:
@@ -75,7 +90,12 @@
         reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
   }
 
-  uint32_t length_;
+  const T& AtUnchecked(size_t index, size_t element_size, size_t alignment) const {
+    return *reinterpret_cast<T*>(
+        reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
+  }
+
+  uint32_t size_;
   uint8_t data[0];
 };
 
@@ -84,7 +104,7 @@
 IterationRange<StrideIterator<T>> MakeIterationRangeFromLengthPrefixedArray(
     LengthPrefixedArray<T>* arr, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
   return arr != nullptr ?
-      MakeIterationRange(arr->Begin(element_size, alignment), arr->End(element_size, alignment)) :
+      MakeIterationRange(arr->begin(element_size, alignment), arr->end(element_size, alignment)) :
       MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
 }
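begin()/end() take the element size and alignment at runtime because the stride between ArtField/ArtMethod elements can depend on the image pointer size, not just sizeof(T). A minimal iterator in the same spirit (invented class, not ART's StrideIterator):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    template <typename T>
    class StrideIter {
     public:
      StrideIter(T* ptr, size_t stride)
          : ptr_(reinterpret_cast<uintptr_t>(ptr)), stride_(stride) {}
      T& operator*() const { return *reinterpret_cast<T*>(ptr_); }
      StrideIter& operator++() { ptr_ += stride_; return *this; }
      bool operator!=(const StrideIter& other) const { return ptr_ != other.ptr_; }

     private:
      uintptr_t ptr_;  // Current element address.
      size_t stride_;  // Runtime distance between elements, >= sizeof(T).
    };

    int main() {
      // Three 8-byte records in a packed buffer, each starting with a uint32_t.
      alignas(8) unsigned char buf[24] = {};
      for (int i = 0; i < 3; ++i) {
        *reinterpret_cast<uint32_t*>(buf + i * 8) = 10 * (i + 1);
      }
      StrideIter<uint32_t> it(reinterpret_cast<uint32_t*>(buf), 8);
      StrideIter<uint32_t> end(reinterpret_cast<uint32_t*>(buf + 24), 8);
      for (; it != end; ++it) {
        std::cout << *it << " ";  // 10 20 30
      }
      std::cout << "\n";
      return 0;
    }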
 
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index 43e81d9..f91b0ed 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -48,4 +48,8 @@
   return allocator_.Contains(ptr);
 }
 
+bool LinearAlloc::ContainsUnsafe(void* ptr) const {
+  return allocator_.Contains(ptr);
+}
+
 }  // namespace art
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 1b21527..df7f17d 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -47,6 +47,10 @@
   // Return true if the linear alloc contains an address.
   bool Contains(void* ptr) const REQUIRES(!lock_);
 
+  // Unsafe version of 'Contains' only to be used when the allocator is going
+  // to be deleted.
+  bool ContainsUnsafe(void* ptr) const NO_THREAD_SAFETY_ANALYSIS;
+
  private:
   mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   ArenaAllocator allocator_ GUARDED_BY(lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index a528c3b..19ee7f4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -928,22 +928,22 @@
 
 inline uint32_t Class::NumDirectMethods() {
   LengthPrefixedArray<ArtMethod>* arr = GetDirectMethodsPtrUnchecked();
-  return arr != nullptr ? arr->Length() : 0u;
+  return arr != nullptr ? arr->size() : 0u;
 }
 
 inline uint32_t Class::NumVirtualMethods() {
   LengthPrefixedArray<ArtMethod>* arr = GetVirtualMethodsPtrUnchecked();
-  return arr != nullptr ? arr->Length() : 0u;
+  return arr != nullptr ? arr->size() : 0u;
 }
 
 inline uint32_t Class::NumInstanceFields() {
   LengthPrefixedArray<ArtField>* arr = GetIFieldsPtrUnchecked();
-  return arr != nullptr ? arr->Length() : 0u;
+  return arr != nullptr ? arr->size() : 0u;
 }
 
 inline uint32_t Class::NumStaticFields() {
   LengthPrefixedArray<ArtField>* arr = GetSFieldsPtrUnchecked();
-  return arr != nullptr ? arr->Length() : 0u;
+  return arr != nullptr ? arr->size() : 0u;
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 53fedab..9d01a1d 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -574,7 +574,7 @@
     return nullptr;
   }
   size_t low = 0;
-  size_t high = fields->Length();
+  size_t high = fields->size();
   ArtField* ret = nullptr;
   while (low < high) {
     size_t mid = (low + high) / 2;
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 3a73900..5e42392 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -190,7 +190,7 @@
     return nullptr;
   }
   size_t low = 0;
-  size_t high = fields->Length();
+  size_t high = fields->size();
   const uint16_t* const data = name->GetValue();
   const size_t length = name->GetLength();
   while (low < high) {
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 6eadd87..03cad08 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -21,6 +21,7 @@
 #include "base/macros.h"
 #include "quick/quick_method_frame_info.h"
 #include "stack_map.h"
+#include "utils.h"
 
 namespace art {
 
@@ -39,6 +40,18 @@
 
   ~OatQuickMethodHeader();
 
+  static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
+    uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
+    uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
+    DCHECK(IsAlignedParam(code, GetInstructionSetAlignment(kRuntimeISA)) ||
+           IsAlignedParam(header, GetInstructionSetAlignment(kRuntimeISA)));
+    return reinterpret_cast<OatQuickMethodHeader*>(header);
+  }
+
+  static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
+    return FromCodePointer(EntryPointToCodePointer(entry_point));
+  }
+
   OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
 
   uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
@@ -74,6 +87,11 @@
 
   bool Contains(uintptr_t pc) const {
     uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
+    static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+    if (kRuntimeISA == kArm) {
+      // On Thumb-2, the pc is offset by one.
+      code_start++;
+    }
     return code_start <= pc && pc <= (code_start + code_size_);
   }
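FromCodePointer works because the method header is laid out immediately before its code_ member: subtracting OFFSETOF_MEMBER(OatQuickMethodHeader, code_) from the code address lands on the header (callers such as LookupMethodHeader first clear the low Thumb-2 pc bit, as above). A standalone sketch with a simplified header:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    struct Header {
      uint32_t code_size;
      uint8_t code[0];  // Code begins right after the header fields.
    };

    Header* FromCodePointer(const void* code_ptr) {
      uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
      // code_ptr is the address of the 'code' member, so subtracting its
      // offset within Header recovers the enclosing header.
      return reinterpret_cast<Header*>(code - offsetof(Header, code));
    }

    int main() {
      alignas(4) unsigned char storage[sizeof(Header) + 16] = {};
      Header* h = reinterpret_cast<Header*>(storage);
      h->code_size = 16;
      const void* code_ptr = h->code;
      std::cout << (FromCodePointer(code_ptr) == h) << "\n";      // 1
      std::cout << FromCodePointer(code_ptr)->code_size << "\n";  // 16
      return 0;
    }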
 
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index bc9ba37..57472ad 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -216,10 +216,10 @@
 
   LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr();
   ASSERT_TRUE(static_fields0 != nullptr);
-  ASSERT_EQ(2u, static_fields0->Length());
+  ASSERT_EQ(2u, static_fields0->size());
   LengthPrefixedArray<ArtField>* static_fields1 = proxyClass1->GetSFieldsPtr();
   ASSERT_TRUE(static_fields1 != nullptr);
-  ASSERT_EQ(2u, static_fields1->Length());
+  ASSERT_EQ(2u, static_fields1->size());
 
   EXPECT_EQ(static_fields0->At(0).GetDeclaringClass(), proxyClass0.Get());
   EXPECT_EQ(static_fields0->At(1).GetDeclaringClass(), proxyClass0.Get());
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 99e262e..6554394 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -73,7 +73,6 @@
 bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
                                              InlineMethod* method) {
   DCHECK(verifier != nullptr);
-  DCHECK_EQ(Runtime::Current()->IsCompiler(), method != nullptr);
   if (!Runtime::Current()->UseJit()) {
     DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
   }
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index d05f909..53b4f3a 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -315,7 +315,43 @@
       CHECK_EQ(GetFrameDepth(), 1U);
       return true;
     } else {
-      HandleDeoptimization(method);
+      // Check if a shadow frame already exists for the debugger's set-local-value purpose.
+      const size_t frame_id = GetFrameId();
+      ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
+      const bool* updated_vregs;
+      const size_t num_regs = method->GetCodeItem()->registers_size_;
+      if (new_frame == nullptr) {
+        new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, method, GetDexPc());
+        updated_vregs = nullptr;
+      } else {
+        updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
+        DCHECK(updated_vregs != nullptr);
+      }
+      if (GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+        HandleOptimizingDeoptimization(method, new_frame, updated_vregs);
+      } else {
+        HandleQuickDeoptimization(method, new_frame, updated_vregs);
+      }
+      if (updated_vregs != nullptr) {
+        // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
+        // array so this must come after we processed the frame.
+        GetThread()->RemoveDebuggerShadowFrameMapping(frame_id);
+        DCHECK(GetThread()->FindDebuggerShadowFrame(frame_id) == nullptr);
+      }
+      if (prev_shadow_frame_ != nullptr) {
+        prev_shadow_frame_->SetLink(new_frame);
+      } else {
+        // Will be popped after the long jump after DeoptimizeStack(),
+        // right before interpreter::EnterInterpreterFromDeoptimize().
+        stacked_shadow_frame_pushed_ = true;
+        GetThread()->PushStackedShadowFrame(
+            new_frame,
+            single_frame_deopt_
+                ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
+                : StackedShadowFrameType::kDeoptimizationShadowFrame);
+      }
+      prev_shadow_frame_ = new_frame;
+
       if (single_frame_deopt_ && !IsInInlinedFrame()) {
         // Single-frame deopt ends at the first non-inlined frame and needs to store that method.
         exception_handler_->SetHandlerQuickArg0(reinterpret_cast<uintptr_t>(method));
@@ -326,16 +362,103 @@
   }
 
  private:
+  void HandleOptimizingDeoptimization(ArtMethod* m,
+                                      ShadowFrame* new_frame,
+                                      const bool* updated_vregs)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+    CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+    uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
+    StackMapEncoding encoding = code_info.ExtractEncoding();
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+    const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
+    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+    MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+    uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+
+    for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+      if (updated_vregs != nullptr && updated_vregs[vreg]) {
+        // Keep the value set by debugger.
+        continue;
+      }
+
+      DexRegisterLocation::Kind location =
+          vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+      static constexpr uint32_t kDeadValue = 0xEBADDE09;
+      uint32_t value = kDeadValue;
+      bool is_reference = false;
+
+      switch (location) {
+        case DexRegisterLocation::Kind::kInStack: {
+          const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
+                                                                number_of_vregs,
+                                                                code_info,
+                                                                encoding);
+          const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
+          value = *reinterpret_cast<const uint32_t*>(addr);
+          uint32_t bit = (offset >> 2);
+          if (stack_mask.size_in_bits() > bit && stack_mask.LoadBit(bit)) {
+            is_reference = true;
+          }
+          break;
+        }
+        case DexRegisterLocation::Kind::kInRegister:
+        case DexRegisterLocation::Kind::kInRegisterHigh:
+        case DexRegisterLocation::Kind::kInFpuRegister:
+        case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
+          uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+          bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
+          CHECK(result);
+          if (location == DexRegisterLocation::Kind::kInRegister) {
+            if (((1u << reg) & register_mask) != 0) {
+              is_reference = true;
+            }
+          }
+          break;
+        }
+        case DexRegisterLocation::Kind::kConstant: {
+          value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+          if (value == 0) {
+            // Make it a reference for extra safety.
+            is_reference = true;
+          }
+          break;
+        }
+        case DexRegisterLocation::Kind::kNone: {
+          break;
+        }
+        default: {
+          LOG(FATAL)
+              << "Unexpected location kind"
+              << DexRegisterLocation::PrettyDescriptor(
+                    vreg_map.GetLocationInternalKind(vreg,
+                                                     number_of_vregs,
+                                                     code_info,
+                                                     encoding));
+          UNREACHABLE();
+        }
+      }
+      if (is_reference) {
+        new_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(value));
+      } else {
+        new_frame->SetVReg(vreg, value);
+      }
+    }
+  }
+
   static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
     return static_cast<VRegKind>(kinds.at(reg * 2));
   }
 
-  void HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void HandleQuickDeoptimization(ArtMethod* m,
+                                 ShadowFrame* new_frame,
+                                 const bool* updated_vregs)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     CHECK(code_item != nullptr) << "No code item for " << PrettyMethod(m);
     uint16_t num_regs = code_item->registers_size_;
     uint32_t dex_pc = GetDexPc();
-    StackHandleScope<2> hs(GetThread());  // Dex cache, class loader and method.
+    StackHandleScope<2> hs(GetThread());  // Dex cache and class loader.
     mirror::Class* declaring_class = m->GetDeclaringClass();
     Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -345,17 +468,6 @@
                                       true, true);
     bool verifier_success = verifier.Verify();
     CHECK(verifier_success) << PrettyMethod(m);
-    // Check if a shadow frame already exists for debugger's set-local-value purpose.
-    const size_t frame_id = GetFrameId();
-    ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
-    const bool* updated_vregs;
-    if (new_frame == nullptr) {
-      new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
-      updated_vregs = nullptr;
-    } else {
-      updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
-      DCHECK(updated_vregs != nullptr);
-    }
     {
       ScopedStackedShadowFramePusher pusher(GetThread(), new_frame,
                                             StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -462,25 +574,6 @@
         }
       }
     }
-    if (updated_vregs != nullptr) {
-      // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
-      // array so this must come after we processed the frame.
-      GetThread()->RemoveDebuggerShadowFrameMapping(frame_id);
-      DCHECK(GetThread()->FindDebuggerShadowFrame(frame_id) == nullptr);
-    }
-    if (prev_shadow_frame_ != nullptr) {
-      prev_shadow_frame_->SetLink(new_frame);
-    } else {
-      // Will be popped after the long jump after DeoptimizeStack(),
-      // right before interpreter::EnterInterpreterFromDeoptimize().
-      stacked_shadow_frame_pushed_ = true;
-      GetThread()->PushStackedShadowFrame(
-          new_frame,
-          single_frame_deopt_
-              ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
-              : StackedShadowFrameType::kDeoptimizationShadowFrame);
-    }
-    prev_shadow_frame_ = new_frame;
   }
 
   QuickExceptionHandler* const exception_handler_;
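For the kInStack case above, the vreg value is read from the quick frame at the recorded byte offset, and bit (offset >> 2) of the stack mask says whether that 4-byte slot holds a reference. A simplified decode of just that case (invented layout; the real code also handles register, constant, and kNone locations):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct DecodedVReg {
      uint32_t value;
      bool is_reference;
    };

    DecodedVReg ReadStackVReg(const uint8_t* frame,
                              int32_t offset_in_bytes,
                              const std::vector<bool>& stack_mask) {
      DecodedVReg result;
      result.value = *reinterpret_cast<const uint32_t*>(frame + offset_in_bytes);
      uint32_t bit = static_cast<uint32_t>(offset_in_bytes) >> 2;  // One bit per slot.
      result.is_reference = bit < stack_mask.size() && stack_mask[bit];
      return result;
    }

    int main() {
      alignas(4) uint8_t frame[16] = {};
      *reinterpret_cast<uint32_t*>(frame + 8) = 0xCAFE;
      std::vector<bool> stack_mask = {false, false, true, false};  // Slot 2 is a reference.
      DecodedVReg v = ReadStackVReg(frame, 8, stack_mask);
      std::cout << std::hex << v.value << " ref=" << v.is_reference << "\n";  // cafe ref=1
      return 0;
    }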
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6c459a3..556ba56 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -326,7 +326,7 @@
     if (self == nullptr) {
       os << "(Aborting thread was not attached to runtime!)\n";
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
+      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
     } else {
       os << "Aborting thread:\n";
       if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index f0b3c4e..122dcb1 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@
  public:
   explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
   void Dump(std::ostream& os) const {
-    DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_);
+    DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
   }
  private:
   // Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 9359d27..b0727da 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -856,13 +856,11 @@
   // If we are the JIT then we may have just compiled the method after the
   // IsQuickToInterpreterBridge check.
   jit::Jit* const jit = Runtime::Current()->GetJit();
-  if (jit != nullptr &&
-      jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+  if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
     return;
   }
 
-  uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
-      EntryPointToCodePointer(code))[-1].code_size_;
+  uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->code_size_;
   uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
   CHECK(code_start <= pc && pc <= (code_start + code_size))
       << PrettyMethod(method)
diff --git a/runtime/stack.h b/runtime/stack.h
index 1f477b6..1276b24 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -533,6 +533,9 @@
   StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
  public:
   virtual ~StackVisitor() {}
 
@@ -767,8 +770,6 @@
   bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                 uint32_t* val) const
       SHARED_REQUIRES(Locks::mutator_lock_);
-  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                           uint64_t* val) const
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
index a9da51b..ac04c3b 100644
--- a/runtime/stride_iterator.h
+++ b/runtime/stride_iterator.h
@@ -19,6 +19,8 @@
 
 #include <iterator>
 
+#include "base/logging.h"
+
 namespace art {
 
 template<typename T>
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 114e0f6..b0cf418 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -919,9 +919,9 @@
      << "]";
 }
 
-void Thread::Dump(std::ostream& os) const {
+void Thread::Dump(std::ostream& os, BacktraceMap* backtrace_map) const {
   DumpState(os);
-  DumpStack(os);
+  DumpStack(os, backtrace_map);
 }
 
 mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
@@ -1480,7 +1480,7 @@
   }
 }
 
-void Thread::DumpStack(std::ostream& os) const {
+void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map) const {
   // TODO: we call this code when dying but may not have suspended the thread ourself. The
   //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
   //       the race with the thread_suspend_count_lock_).
@@ -1496,7 +1496,7 @@
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
-      DumpNativeStack(os, GetTid(), "  native: ", method);
+      DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
     }
     DumpJavaStack(os);
   } else {
diff --git a/runtime/thread.h b/runtime/thread.h
index 8f3461a..138c143 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -42,6 +42,8 @@
 #include "stack.h"
 #include "thread_state.h"
 
+class BacktraceMap;
+
 namespace art {
 
 namespace gc {
@@ -184,7 +186,7 @@
   void ShortDump(std::ostream& os) const;
 
   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
-  void Dump(std::ostream& os) const
+  void Dump(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -1042,7 +1044,7 @@
   void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
-  void DumpStack(std::ostream& os) const
+  void DumpStack(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 6176acd..bdd5d10 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -18,6 +18,7 @@
 
 #define ATRACE_TAG ATRACE_TAG_DALVIK
 
+#include <backtrace/BacktraceMap.h>
 #include <cutils/trace.h>
 #include <dirent.h>
 #include <ScopedLocalRef.h>
@@ -109,9 +110,10 @@
 
 void ThreadList::DumpNativeStacks(std::ostream& os) {
   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
   for (const auto& thread : list_) {
     os << "DUMPING THREAD " << thread->GetTid() << "\n";
-    DumpNativeStack(os, thread->GetTid(), "\t");
+    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
     os << "\n";
   }
 }
@@ -138,7 +140,7 @@
   // TODO: Reenable this when the native code in system_server can handle it.
   // Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
   if (false) {
-    DumpNativeStack(os, tid, "  native: ");
+    DumpNativeStack(os, tid, nullptr, "  native: ");
   }
   os << "\n";
 }
@@ -175,7 +177,8 @@
 // A closure used by Thread::Dump.
 class DumpCheckpoint FINAL : public Closure {
  public:
-  explicit DumpCheckpoint(std::ostream* os) : os_(os), barrier_(0) {}
+  explicit DumpCheckpoint(std::ostream* os)
+      : os_(os), barrier_(0), backtrace_map_(BacktraceMap::Create(GetTid())) {}
 
   void Run(Thread* thread) OVERRIDE {
     // Note thread and self may not be equal if thread was already suspended at the point of the
@@ -184,7 +187,7 @@
     std::ostringstream local_os;
     {
       ScopedObjectAccess soa(self);
-      thread->Dump(local_os);
+      thread->Dump(local_os, backtrace_map_.get());
     }
     local_os << "\n";
     {
@@ -213,6 +216,8 @@
   std::ostream* const os_;
   // The barrier to be passed through and for the requestor to wait upon.
   Barrier barrier_;
+  // A backtrace map shared by all threads, so they don't each reacquire/parse the map separately.
+  std::unique_ptr<BacktraceMap> backtrace_map_;
 };
 
 void ThreadList::Dump(std::ostream& os) {
@@ -1217,7 +1222,7 @@
       std::string thread_name;
       self->GetThreadName(thread_name);
       std::ostringstream os;
-      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
+      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
       LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
       break;
     } else {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 62af380..dee4f9c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -46,7 +46,9 @@
 #include <sys/syscall.h>
 #endif
 
-#include <backtrace/Backtrace.h>  // For DumpNativeStack.
+// For DumpNativeStack.
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
 
 #if defined(__linux__)
 #include <linux/unistd.h>
@@ -1102,7 +1104,7 @@
 }
 #endif
 
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
+void DumpNativeStack(std::ostream& os, pid_t tid, BacktraceMap* existing_map, const char* prefix,
     ArtMethod* current_method, void* ucontext_ptr) {
 #if __linux__
   // b/18119146
@@ -1110,7 +1112,13 @@
     return;
   }
 
-  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
+  BacktraceMap* map = existing_map;
+  std::unique_ptr<BacktraceMap> tmp_map;
+  if (map == nullptr) {
+    tmp_map.reset(BacktraceMap::Create(tid));
+    map = tmp_map.get();
+  }
+  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
   if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
     os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
     return;
@@ -1174,7 +1182,7 @@
     }
   }
 #else
-  UNUSED(os, tid, prefix, current_method, ucontext_ptr);
+  UNUSED(os, tid, existing_map, prefix, current_method, ucontext_ptr);
 #endif
 }
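DumpNativeStack now accepts an optional pre-built BacktraceMap and only creates a temporary one when none was supplied, which lets ThreadList::Dump parse the process maps once and share them across every thread's dump. The borrow-or-create pattern in isolation (hypothetical Map type standing in for BacktraceMap):

    #include <iostream>
    #include <memory>
    #include <string>

    struct Map {
      std::string source;
    };

    void Dump(Map* existing_map) {
      Map* map = existing_map;
      std::unique_ptr<Map> tmp_map;  // Owns the map only if we created it here.
      if (map == nullptr) {
        tmp_map.reset(new Map{"freshly parsed"});
        map = tmp_map.get();
      }
      std::cout << "using map: " << map->source << "\n";
    }  // tmp_map, if any, is freed here; a borrowed map is untouched.

    int main() {
      Map shared{"shared across threads"};
      Dump(&shared);  // using map: shared across threads
      Dump(nullptr);  // using map: freshly parsed
      return 0;
    }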
 
diff --git a/runtime/utils.h b/runtime/utils.h
index 79502c7..bd52b68 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -31,6 +31,8 @@
 #include "globals.h"
 #include "primitive.h"
 
+class BacktraceMap;
+
 namespace art {
 
 class ArtCode;
@@ -221,12 +223,19 @@
 void SetThreadName(const char* thread_name);
 
 // Dumps the native stack for thread 'tid' to 'os'.
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
-    ArtMethod* current_method = nullptr, void* ucontext = nullptr)
+void DumpNativeStack(std::ostream& os,
+                     pid_t tid,
+                     BacktraceMap* map = nullptr,
+                     const char* prefix = "",
+                     ArtMethod* current_method = nullptr,
+                     void* ucontext = nullptr)
     NO_THREAD_SAFETY_ANALYSIS;
 
 // Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
-void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix = "", bool include_count = true);
+void DumpKernelStack(std::ostream& os,
+                     pid_t tid,
+                     const char* prefix = "",
+                     bool include_count = true);
 
 // Find $ANDROID_ROOT, /system, or abort.
 const char* GetAndroidRoot();
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 2d9fd53..f52d011 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -38,6 +38,10 @@
   return insn_flags_[index];
 }
 
+inline InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) {
+  return insn_flags_[index];
+}
+
 inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
   return class_loader_.Get();
 }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4051a1c..e1d4160 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -58,12 +58,14 @@
 // On VLOG(verifier), should we dump the whole state when we run into a hard failure?
 static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
 
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
+    : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+
 void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
                                  uint32_t insns_size, uint16_t registers_size,
                                  MethodVerifier* verifier) {
   DCHECK_GT(insns_size, 0U);
-  register_lines_.reset(new RegisterLine*[insns_size]());
-  size_ = insns_size;
+  register_lines_.resize(insns_size);
   for (uint32_t i = 0; i < insns_size; i++) {
     bool interesting = false;
     switch (mode) {
@@ -80,19 +82,12 @@
         break;
     }
     if (interesting) {
-      register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+      register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
     }
   }
 }
 
-PcToRegisterLineTable::~PcToRegisterLineTable() {
-  for (size_t i = 0; i < size_; i++) {
-    delete register_lines_[i];
-    if (kIsDebugBuild) {
-      register_lines_[i] = nullptr;
-    }
-  }
-}
+PcToRegisterLineTable::~PcToRegisterLineTable() {}
 
 // Note: returns true on failure.
 ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
@@ -398,7 +393,10 @@
                                bool need_precise_constants, bool verify_to_dump,
                                bool allow_thread_suspension)
     : self_(self),
-      reg_types_(can_load_classes),
+      arena_stack_(Runtime::Current()->GetArenaPool()),
+      arena_(&arena_stack_),
+      reg_types_(can_load_classes, arena_),
+      reg_table_(arena_),
       work_insn_idx_(DexFile::kDexNoIndex),
       dex_method_idx_(dex_method_idx),
       mirror_method_(method),
@@ -702,7 +700,11 @@
   }
 
   // Allocate and initialize an array to hold instruction data.
-  insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
+  insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+  DCHECK(insn_flags_ != nullptr);
+  std::uninitialized_fill_n(insn_flags_.get(),
+                            code_item_->insns_size_in_code_units_,
+                            InstructionFlags());
   // Run through the instructions and see if the width checks out.
   bool result = ComputeWidthsAndCountOps();
   // Flag instructions guarded by a "try" block and check exception handlers.
@@ -848,7 +850,7 @@
         break;
     }
     size_t inst_size = inst->SizeInCodeUnits();
-    insn_flags_[dex_pc].SetIsOpcode();
+    GetInstructionFlags(dex_pc).SetIsOpcode();
     dex_pc += inst_size;
     inst = inst->RelativeAt(inst_size);
   }
@@ -881,7 +883,7 @@
                                         << " endAddr=" << end << " (size=" << insns_size << ")";
       return false;
     }
-    if (!insn_flags_[start].IsOpcode()) {
+    if (!GetInstructionFlags(start).IsOpcode()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD)
           << "'try' block starts inside an instruction (" << start << ")";
       return false;
@@ -889,7 +891,7 @@
     uint32_t dex_pc = start;
     const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
     while (dex_pc < end) {
-      insn_flags_[dex_pc].SetInTry();
+      GetInstructionFlags(dex_pc).SetInTry();
       size_t insn_size = inst->SizeInCodeUnits();
       dex_pc += insn_size;
       inst = inst->RelativeAt(insn_size);
@@ -903,7 +905,7 @@
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       uint32_t dex_pc= iterator.GetHandlerAddress();
-      if (!insn_flags_[dex_pc].IsOpcode()) {
+      if (!GetInstructionFlags(dex_pc).IsOpcode()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD)
             << "exception handler starts at bad address (" << dex_pc << ")";
         return false;
@@ -913,7 +915,7 @@
             << "exception handler begins with move-result* (" << dex_pc << ")";
         return false;
       }
-      insn_flags_[dex_pc].SetBranchTarget();
+      GetInstructionFlags(dex_pc).SetBranchTarget();
       // Ensure exception types are resolved so that they don't need resolution to be delivered,
       // unresolved exception types will be ignored by exception delivery
       if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
@@ -935,8 +937,8 @@
   const Instruction* inst = Instruction::At(code_item_->insns_);
 
   /* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
-  insn_flags_[0].SetBranchTarget();
-  insn_flags_[0].SetCompileTimeInfoPoint();
+  GetInstructionFlags(0).SetBranchTarget();
+  GetInstructionFlags(0).SetCompileTimeInfoPoint();
 
   uint32_t insns_size = code_item_->insns_size_in_code_units_;
   for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
@@ -948,18 +950,18 @@
     // All invoke points are marked as "Throw" points already.
     // We are relying on this to also count all the invokes as interesting.
     if (inst->IsBranch()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
       // The compiler also needs safepoints for fall-through to loop heads.
       // Such a loop head must be a target of a branch.
       int32_t offset = 0;
       bool cond, self_ok;
       bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
       DCHECK(target_ok);
-      insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
     } else if (inst->IsSwitch() || inst->IsThrow()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
     } else if (inst->IsReturn()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
     }
     dex_pc += inst->SizeInCodeUnits();
     inst = inst->Next();
@@ -1202,7 +1204,7 @@
   }
   // Make sure the array-data is marked as an opcode. This ensures that it was reached when
   // traversing the code item linearly. It is an approximation for a by-spec padding value.
-  if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+  if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
                                       << ", data offset " << array_data_offset
                                       << " not correctly visited, probably bad padding.";
@@ -1245,13 +1247,13 @@
   int32_t abs_offset = cur_offset + offset;
   if (abs_offset < 0 ||
       (uint32_t) abs_offset >= insn_count ||
-      !insn_flags_[abs_offset].IsOpcode()) {
+      !GetInstructionFlags(abs_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
                                       << reinterpret_cast<void*>(abs_offset) << ") at "
                                       << reinterpret_cast<void*>(cur_offset);
     return false;
   }
-  insn_flags_[abs_offset].SetBranchTarget();
+  GetInstructionFlags(abs_offset).SetBranchTarget();
   return true;
 }
 
@@ -1315,7 +1317,7 @@
   }
   // Make sure the switch data is marked as an opcode. This ensures that it was reached when
   // traversing the code item linearly. It is an approximation for a by-spec padding value.
-  if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) {
+  if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
                                       << ", switch offset " << switch_offset
                                       << " not correctly visited, probably bad padding.";
@@ -1387,14 +1389,14 @@
     int32_t abs_offset = cur_offset + offset;
     if (abs_offset < 0 ||
         abs_offset >= static_cast<int32_t>(insn_count) ||
-        !insn_flags_[abs_offset].IsOpcode()) {
+        !GetInstructionFlags(abs_offset).IsOpcode()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
                                         << " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
                                         << reinterpret_cast<void*>(cur_offset)
                                         << "[" << targ << "]";
       return false;
     }
-    insn_flags_[abs_offset].SetBranchTarget();
+    GetInstructionFlags(abs_offset).SetBranchTarget();
   }
   return true;
 }
@@ -1435,7 +1437,6 @@
                   registers_size,
                   this);
 
-
   work_line_.reset(RegisterLine::Create(registers_size, this));
   saved_line_.reset(RegisterLine::Create(registers_size, this));
 
@@ -1491,7 +1492,7 @@
       vios->Stream() << reg_line->Dump(this) << "\n";
     }
     vios->Stream()
-        << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+        << StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
     const bool kDumpHexOfInstruction = false;
     if (kDumpHexOfInstruction) {
       vios->Stream() << inst->DumpHex(5) << " ";
@@ -1677,7 +1678,7 @@
   const uint32_t insns_size = code_item_->insns_size_in_code_units_;
 
   /* Begin by marking the first instruction as "changed". */
-  insn_flags_[0].SetChanged();
+  GetInstructionFlags(0).SetChanged();
   uint32_t start_guess = 0;
 
   /* Continue until no instructions are marked "changed". */
@@ -1688,7 +1689,7 @@
     // Find the first marked one. Use "start_guess" as a way to find one quickly.
     uint32_t insn_idx = start_guess;
     for (; insn_idx < insns_size; insn_idx++) {
-      if (insn_flags_[insn_idx].IsChanged())
+      if (GetInstructionFlags(insn_idx).IsChanged())
         break;
     }
     if (insn_idx == insns_size) {
@@ -1708,7 +1709,7 @@
     // situation where we have a stray "changed" flag set on an instruction that isn't a branch
     // target.
     work_insn_idx_ = insn_idx;
-    if (insn_flags_[insn_idx].IsBranchTarget()) {
+    if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
       work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
     } else if (kIsDebugBuild) {
       /*
@@ -1734,8 +1735,8 @@
       return false;
     }
     /* Clear "changed" and mark as visited. */
-    insn_flags_[insn_idx].SetVisited();
-    insn_flags_[insn_idx].ClearChanged();
+    GetInstructionFlags(insn_idx).SetVisited();
+    GetInstructionFlags(insn_idx).ClearChanged();
   }
 
   if (gDebugVerify) {
@@ -1762,10 +1763,10 @@
            (insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
             insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
             insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
-        insn_flags_[insn_idx].SetVisited();
+        GetInstructionFlags(insn_idx).SetVisited();
       }
 
-      if (!insn_flags_[insn_idx].IsVisited()) {
+      if (!GetInstructionFlags(insn_idx).IsVisited()) {
         if (dead_start < 0)
           dead_start = insn_idx;
       } else if (dead_start >= 0) {
@@ -1895,8 +1896,8 @@
   // We need to ensure the work line is consistent while performing validation. When we spot a
   // peephole pattern we compute a new line for either the fallthrough instruction or the
   // branch target.
-  std::unique_ptr<RegisterLine> branch_line;
-  std::unique_ptr<RegisterLine> fallthrough_line;
+  ArenaUniquePtr<RegisterLine> branch_line;
+  ArenaUniquePtr<RegisterLine> fallthrough_line;
 
   switch (inst->Opcode()) {
     case Instruction::NOP:
@@ -2144,9 +2145,9 @@
       work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
       // Check whether the previous instruction is a move-object with vAA as a source, creating
       // untracked lock aliasing.
-      if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+      if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
         uint32_t prev_idx = work_insn_idx_ - 1;
-        while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+        while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
           prev_idx--;
         }
         const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
@@ -2427,10 +2428,10 @@
       uint32_t instance_of_idx = 0;
       if (0 != work_insn_idx_) {
         instance_of_idx = work_insn_idx_ - 1;
-        while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+        while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
           instance_of_idx--;
         }
-        if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+        if (FailOrAbort(this, GetInstructionFlags(instance_of_idx).IsOpcode(),
                         "Unable to get previous instruction of if-eqz/if-nez for work index ",
                         work_insn_idx_)) {
           break;
@@ -2486,15 +2487,15 @@
           update_line->SetRegisterType<LockOp::kKeep>(this,
                                                       instance_of_inst->VRegB_22c(),
                                                       cast_type);
-          if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+          if (!GetInstructionFlags(instance_of_idx).IsBranchTarget() && 0 != instance_of_idx) {
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
             // of the move-object.
             uint32_t move_idx = instance_of_idx - 1;
-            while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+            while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
               move_idx--;
             }
-            if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+            if (FailOrAbort(this, GetInstructionFlags(move_idx).IsOpcode(),
                             "Unable to get previous instruction of if-eqz/if-nez for work index ",
                             work_insn_idx_)) {
               break;
@@ -2786,8 +2787,7 @@
         work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
       }
       if (return_type == nullptr) {
-        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
-                                                 false);
+        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
       }
       if (!return_type->IsLowHalf()) {
         work_line_->SetResultRegisterType(this, *return_type);
@@ -2860,7 +2860,7 @@
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
-        descriptor =  dex_file_->StringByTypeIdx(return_type_idx);
+        descriptor = dex_file_->StringByTypeIdx(return_type_idx);
       } else {
         descriptor = abs_method->GetReturnTypeDescriptor();
       }
@@ -3309,7 +3309,7 @@
       return false;
     }
     /* update branch target, set "changed" if appropriate */
-    if (nullptr != branch_line.get()) {
+    if (nullptr != branch_line) {
       if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
         return false;
       }
@@ -3364,7 +3364,7 @@
    * Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
    * "try" block when they throw, control transfers out of the method.)
    */
-  if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+  if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
     bool has_catch_all_handler = false;
     CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
 
@@ -3434,11 +3434,11 @@
     if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
       return false;
     }
-    if (nullptr != fallthrough_line.get()) {
+    if (nullptr != fallthrough_line) {
       // Make workline consistent with fallthrough computed from peephole optimization.
       work_line_->CopyFromLine(fallthrough_line.get());
     }
-    if (insn_flags_[next_insn_idx].IsReturn()) {
+    if (GetInstructionFlags(next_insn_idx).IsReturn()) {
       // For returns we only care about the operand to the return, all other registers are dead.
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
       AdjustReturnLine(this, ret_inst, work_line_.get());
@@ -3456,7 +3456,7 @@
        * We're not recording register data for the next instruction, so we don't know what the
        * prior state was. We have to assume that something has changed and re-evaluate it.
        */
-      insn_flags_[next_insn_idx].SetChanged();
+      GetInstructionFlags(next_insn_idx).SetChanged();
     }
   }
 
@@ -3480,7 +3480,7 @@
   }
 
   DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
-  DCHECK(insn_flags_[*start_guess].IsOpcode());
+  DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
 
   if (have_pending_runtime_throw_failure_) {
     have_any_pending_runtime_throw_failure_ = true;
@@ -3491,30 +3491,55 @@
   return true;
 }  // NOLINT(readability/fn_size)
 
+void MethodVerifier::UninstantiableError(const char* descriptor) {
+  Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+                                           << "non-instantiable klass " << descriptor;
+}
+
+inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
+  return klass->IsInstantiable() || klass->IsPrimitive();
+}
+
 const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
-  const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
-  const RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
-  const RegType& result = klass != nullptr ?
-      FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
-      reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
-  if (result.IsConflict()) {
-    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
-        << "' in " << referrer;
-    return result;
+  const RegType* result = nullptr;
+  if (klass != nullptr) {
+    bool precise = klass->CannotBeAssignedFromOtherTypes();
+    if (precise && !IsInstantiableOrPrimitive(klass)) {
+      const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+      UninstantiableError(descriptor);
+      precise = false;
+    }
+    result = reg_types_.FindClass(klass, precise);
+    if (result == nullptr) {
+      const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+      result = reg_types_.InsertClass(descriptor, klass, precise);
+    }
+  } else {
+    const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+    result = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   }
-  if (klass == nullptr && !result.IsUnresolvedTypes()) {
-    dex_cache_->SetResolvedType(class_idx, result.GetClass());
+  DCHECK(result != nullptr);
+  if (result->IsConflict()) {
+    const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
+        << "' in " << GetDeclaringClass();
+    return *result;
+  }
+  if (klass == nullptr && !result->IsUnresolvedTypes()) {
+    dex_cache_->SetResolvedType(class_idx, result->GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
   // check at runtime if access is allowed and so pass here. If result is
   // primitive, skip the access check.
-  if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
-      !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
-    Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
-                                    << referrer << "' -> '" << result << "'";
+  if (result->IsNonZeroReferenceTypes() && !result->IsUnresolvedTypes()) {
+    const RegType& referrer = GetDeclaringClass();
+    if (!referrer.IsUnresolvedTypes() && !referrer.CanAccess(*result)) {
+      Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+                                      << referrer << "' -> '" << *result << "'";
+    }
   }
-  return result;
+  return *result;
 }
 
 const RegType& MethodVerifier::GetCaughtExceptionType() {
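
The rewritten ResolveClassAndCheckAccess above splits resolution into a read-only probe and a mutating insert: FindClass scans the cache without allocating, and only on a miss does InsertClass build a new entry in the verifier's arena. It also demotes a precise reference to imprecise (after reporting UninstantiableError) when the class cannot be instantiated, instead of caching a broken precise type. Stripped to its core, the flow is:

    // Sketch of the lookup-then-insert pattern; 'descriptor' and 'precise' as computed above.
    const RegType* result = reg_types_.FindClass(klass, precise);  // no allocation on a hit
    if (result == nullptr) {
      result = reg_types_.InsertClass(descriptor, klass, precise);  // arena-allocates on a miss
    }
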
@@ -3720,9 +3745,10 @@
       } else {
         const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
-        res_method_class = &reg_types_.FromDescriptor(GetClassLoader(),
-                                                      dex_file_->StringByTypeIdx(class_idx),
-                                                      false);
+        res_method_class = &reg_types_.FromDescriptor(
+            GetClassLoader(),
+            dex_file_->StringByTypeIdx(class_idx),
+            false);
       }
       if (!res_method_class->IsAssignableFrom(actual_arg_type)) {
         Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS:
@@ -4476,14 +4502,16 @@
         field->GetType<false>();
 
     if (field_type_class != nullptr) {
-      field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+      field_type = &FromClass(field->GetTypeDescriptor(),
+                              field_type_class,
                               field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
       Thread* self = Thread::Current();
       DCHECK(!can_load_classes_ || self->IsExceptionPending());
       self->ClearException();
       field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
-                                              field->GetTypeDescriptor(), false);
+                                              field->GetTypeDescriptor(),
+                                              false);
     }
     if (field_type == nullptr) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
@@ -4604,14 +4632,14 @@
                                      bool update_merge_line) {
   bool changed = true;
   RegisterLine* target_line = reg_table_.GetLine(next_insn);
-  if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+  if (!GetInstructionFlags(next_insn).IsVisitedOrChanged()) {
     /*
      * We haven't processed this instruction before, and we haven't touched the registers here, so
      * there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
      * only way a register can transition out of "unknown", so this is not just an optimization.)
      */
     target_line->CopyFromLine(merge_line);
-    if (insn_flags_[next_insn].IsReturn()) {
+    if (GetInstructionFlags(next_insn).IsReturn()) {
       // Verify that the monitor stack is empty on return.
       merge_line->VerifyMonitorStackEmpty(this);
 
@@ -4621,10 +4649,9 @@
       AdjustReturnLine(this, ret_inst, target_line);
     }
   } else {
-    std::unique_ptr<RegisterLine> copy(gDebugVerify ?
-                                 RegisterLine::Create(target_line->NumRegs(), this) :
-                                 nullptr);
+    ArenaUniquePtr<RegisterLine> copy;
     if (gDebugVerify) {
+      copy.reset(RegisterLine::Create(target_line->NumRegs(), this));
       copy->CopyFromLine(target_line);
     }
     changed = target_line->MergeRegisters(this, merge_line);
@@ -4643,13 +4670,13 @@
     }
   }
   if (changed) {
-    insn_flags_[next_insn].SetChanged();
+    GetInstructionFlags(next_insn).SetChanged();
   }
   return true;
 }
 
 InstructionFlags* MethodVerifier::CurrentInsnFlags() {
-  return &insn_flags_[work_insn_idx_];
+  return &GetInstructionFlags(work_insn_idx_);
 }
 
 const RegType& MethodVerifier::GetMethodReturnType() {
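
UpdateRegisters now creates the debug-only snapshot lazily: the ArenaUniquePtr starts empty and is reset only when gDebugVerify is set, rather than evaluating a Create-or-nullptr ternary on every call. Conceptually, an ArenaUniquePtr is a std::unique_ptr whose deleter runs the destructor but releases no memory, since the arena reclaims all storage in bulk. A hypothetical sketch of that idea (names invented; this is not ART's actual definition):

    #include <memory>

    // Destructor-only deleter for arena-owned objects: the arena frees the bytes later, in bulk.
    template <typename T>
    struct ArenaDeleteSketch {
      void operator()(T* ptr) const {
        if (ptr != nullptr) {
          ptr->~T();
        }
      }
    };
    template <typename T>
    using ArenaUniquePtrSketch = std::unique_ptr<T, ArenaDeleteSketch<T>>;
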
@@ -4685,8 +4712,7 @@
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
     if (mirror_method_ != nullptr) {
       mirror::Class* klass = mirror_method_->GetDeclaringClass();
-      declaring_class_ = &FromClass(descriptor, klass,
-                                    klass->CannotBeAssignedFromOtherTypes());
+      declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
     } else {
       declaring_class_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     }
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index ba694b7..7b51d6e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -21,7 +21,10 @@
 #include <sstream>
 #include <vector>
 
+#include "base/arena_allocator.h"
 #include "base/macros.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
 #include "dex_file.h"
 #include "handle.h"
 #include "instruction_flags.h"
@@ -107,7 +110,7 @@
 // execution of that instruction.
 class PcToRegisterLineTable {
  public:
-  PcToRegisterLineTable() : size_(0) {}
+  explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
   ~PcToRegisterLineTable();
 
   // Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -116,14 +119,12 @@
   void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
             uint16_t registers_size, MethodVerifier* verifier);
 
-  RegisterLine* GetLine(size_t idx) {
-    DCHECK_LT(idx, size_);
-    return register_lines_[idx];
+  RegisterLine* GetLine(size_t idx) const {
+    return register_lines_[idx].get();
   }
 
  private:
-  std::unique_ptr<RegisterLine*[]> register_lines_;
-  size_t size_;
+  ScopedArenaVector<ArenaUniquePtr<RegisterLine>> register_lines_;
 
   DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
 };
@@ -240,7 +241,8 @@
   // Accessors used by the compiler via CompilerCallback
   const DexFile::CodeItem* CodeItem() const;
   RegisterLine* GetRegLine(uint32_t dex_pc);
-  const InstructionFlags& GetInstructionFlags(size_t index) const;
+  ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
+  ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
   mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
   MethodReference GetMethodReference() const;
@@ -275,7 +277,14 @@
     return IsConstructor() && !IsStatic();
   }
 
+  ScopedArenaAllocator& GetArena() {
+    return arena_;
+  }
+
  private:
+  void UninstantiableError(const char* descriptor);
+  static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Is the method being verified a constructor? See the comment on the field.
   bool IsConstructor() const {
     return is_constructor_;
@@ -687,19 +696,23 @@
   // The thread we're verifying on.
   Thread* const self_;
 
+  // Arena allocator.
+  ArenaStack arena_stack_;
+  ScopedArenaAllocator arena_;
+
   RegTypeCache reg_types_;
 
   PcToRegisterLineTable reg_table_;
 
   // Storage for the register status we're currently working on.
-  std::unique_ptr<RegisterLine> work_line_;
+  ArenaUniquePtr<RegisterLine> work_line_;
 
   // The address of the instruction we're currently working on, note that this is in 2 byte
   // quantities
   uint32_t work_insn_idx_;
 
   // Storage for the register status we're saving for later.
-  std::unique_ptr<RegisterLine> saved_line_;
+  ArenaUniquePtr<RegisterLine> saved_line_;
 
   const uint32_t dex_method_idx_;  // The method we're working on.
   // Its object representation if known.
@@ -715,7 +728,8 @@
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
   const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
   // Instruction widths and flags, one entry per code unit.
-  std::unique_ptr<InstructionFlags[]> insn_flags_;
+  // Owned, but not by std::unique_ptr, since insn_flags_ is allocated in the arena.
+  ArenaUniquePtr<InstructionFlags[]> insn_flags_;
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
   uint32_t interesting_dex_pc_;
   // The container into which FindLocksAtDexPc should write the registers containing held locks,
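
Member order matters here: arena_stack_ is declared before arena_, so the ArenaStack is constructed before the ScopedArenaAllocator that draws from it and destroyed after it. Everything downstream (reg_types_, reg_table_, the work/saved RegisterLines, insn_flags_) then borrows from the same arena and goes away with the verifier. Only the declarations appear in this diff; the initializer list below is a sketch of the dependency, not the committed constructor:

    // Sketch: members initialize in declaration order, so the stack must precede the allocator.
    MethodVerifier::MethodVerifier(Thread* self /* , ... */)
        : self_(self),
          arena_stack_(Runtime::Current()->GetArenaPool()),  // assumed pool source
          arena_(&arena_stack_),
          reg_types_(/* can_load_classes */ true, arena_),
          reg_table_(arena_) /* , ... */ {}
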
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index f445132..11a53e5 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -20,6 +20,7 @@
 #include "reg_type.h"
 
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "mirror/class.h"
 
 namespace art {
@@ -180,6 +181,10 @@
   return instance_;
 }
 
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
+  return arena->Alloc(size, kArenaAllocMisc);
+}
+
 }  // namespace verifier
 }  // namespace art
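
This placement overload is what the `new (&arena_) SomeRegType(...)` expressions later in reg_type_cache.cc resolve to: operator new merely carves size bytes out of the scoped arena, and the constructor then runs in place. The same pattern in a self-contained sketch (BumpArena is hypothetical, and alignment is ignored for brevity):

    #include <cstddef>
    #include <new>

    // Hypothetical bump allocator standing in for ScopedArenaAllocator.
    struct BumpArena {
      char buffer[1024];
      std::size_t used = 0;
      void* Alloc(std::size_t size) {
        void* p = buffer + used;
        used += size;  // No per-object free; the whole arena is released at once.
        return p;
      }
    };

    struct Node {
      explicit Node(int v) : value(v) {}
      int value;
      // Placement operator new: take storage from the arena instead of the heap.
      static void* operator new(std::size_t size, BumpArena* arena) {
        return arena->Alloc(size);
      }
    };

    // Usage: Node* n = new (&arena) Node(42);  // never individually deleted
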
 
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index b86a4c8..16cab03 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -16,6 +16,7 @@
 
 #include "reg_type-inl.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/casts.h"
 #include "class_linker-inl.h"
@@ -46,20 +47,17 @@
 const DoubleHiType* DoubleHiType::instance_ = nullptr;
 const IntegerType* IntegerType::instance_ = nullptr;
 
-PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
-Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
-Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
@@ -121,11 +119,11 @@
 }
 
 std::string IntegerType::Dump() const {
-    return "Integer";
+  return "Integer";
 }
 
 const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -140,7 +138,7 @@
 }
 
 const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -154,14 +152,14 @@
   }
 }
 
-const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
-const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -182,7 +180,7 @@
   }
 }
 
-const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new FloatType(klass, descriptor, cache_id);
@@ -196,7 +194,7 @@
   }
 }
 
-const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new CharType(klass, descriptor, cache_id);
@@ -210,7 +208,7 @@
   }
 }
 
-const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ShortType(klass, descriptor, cache_id);
@@ -224,7 +222,7 @@
   }
 }
 
-const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ByteType(klass, descriptor, cache_id);
@@ -238,7 +236,7 @@
   }
 }
 
-const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                                uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new IntegerType(klass, descriptor, cache_id);
@@ -253,7 +251,7 @@
 }
 
 const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ConflictType(klass, descriptor, cache_id);
@@ -267,7 +265,7 @@
   }
 }
 
-const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(BooleanType::instance_ == nullptr);
   instance_ = new BooleanType(klass, descriptor, cache_id);
@@ -286,7 +284,7 @@
 }
 
 const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
-                                                   const std::string& descriptor,
+                                                   const StringPiece& descriptor,
                                                    uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new UndefinedType(klass, descriptor, cache_id);
@@ -300,7 +298,7 @@
   }
 }
 
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
@@ -335,14 +333,14 @@
 
 std::string UnresolvedReferenceType::Dump() const {
   std::stringstream result;
-  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
+  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
   return result.str();
 }
 
 std::string UnresolvedUninitializedRefType::Dump() const {
   std::stringstream result;
   result << "Unresolved And Uninitialized Reference" << ": "
-      << PrettyDescriptor(GetDescriptor().c_str())
+      << PrettyDescriptor(GetDescriptor().as_string().c_str())
       << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
@@ -350,7 +348,7 @@
 std::string UnresolvedUninitializedThisRefType::Dump() const {
   std::stringstream result;
   result << "Unresolved And Uninitialized This Reference"
-      << PrettyDescriptor(GetDescriptor().c_str());
+      << PrettyDescriptor(GetDescriptor().as_string().c_str());
   return result.str();
 }
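
With descriptor_ now a StringPiece over arena-owned bytes instead of an owning std::string, the Dump methods above must materialize a temporary via as_string() before calling c_str(): a StringPiece is a (pointer, length) view with no NUL-terminator guarantee. The hazard being avoided, in miniature:

    StringPiece piece = GetDescriptor();
    // PrettyDescriptor(piece.data());      // unsafe: piece need not be NUL-terminated
    std::string owned = piece.as_string();  // copies exactly piece.length() bytes
    PrettyDescriptor(owned.c_str());        // safe: std::string is NUL-terminated
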
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 2834a9a..80b751c 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -22,9 +22,11 @@
 #include <set>
 #include <string>
 
+#include "base/arena_object.h"
 #include "base/bit_vector.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/stringpiece.h"
 #include "gc_root.h"
 #include "handle_scope.h"
 #include "object_callbacks.h"
@@ -35,6 +37,9 @@
 class Class;
 }  // namespace mirror
 
+class ArenaBitVector;
+class ScopedArenaAllocator;
+
 namespace verifier {
 
 class RegTypeCache;
@@ -173,7 +178,7 @@
   bool IsJavaLangObjectArray() const
       SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
-  const std::string& GetDescriptor() const {
+  const StringPiece& GetDescriptor() const {
     DCHECK(HasClass() ||
            (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
             !IsUnresolvedSuperClass()));
@@ -274,10 +279,20 @@
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  static void* operator new(size_t size) noexcept {
+    return ::operator new(size);
+  }
+
+  static void* operator new(size_t size, ArenaAllocator* arena) = delete;
+  static void* operator new(size_t size, ScopedArenaAllocator* arena);
+
  protected:
-  RegType(mirror::Class* klass, const std::string& descriptor,
+  RegType(mirror::Class* klass,
+          const StringPiece& descriptor,
           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
-      : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+      : descriptor_(descriptor),
+        klass_(klass),
+        cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -285,9 +300,8 @@
 
   void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const std::string descriptor_;
-  mutable GcRoot<mirror::Class>
-      klass_;  // Non-const only due to moving classes.
+  const StringPiece descriptor_;
+  mutable GcRoot<mirror::Class> klass_;  // Non-const only due to moving classes.
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
@@ -311,7 +325,7 @@
 
   // Create the singleton instance.
   static const ConflictType* CreateInstance(mirror::Class* klass,
-                                            const std::string& descriptor,
+                                            const StringPiece& descriptor,
                                             uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -319,7 +333,7 @@
   static void Destroy();
 
  private:
-  ConflictType(mirror::Class* klass, const std::string& descriptor,
+  ConflictType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -340,7 +354,7 @@
 
   // Create the singleton instance.
   static const UndefinedType* CreateInstance(mirror::Class* klass,
-                                             const std::string& descriptor,
+                                             const StringPiece& descriptor,
                                              uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -348,7 +362,7 @@
   static void Destroy();
 
  private:
-  UndefinedType(mirror::Class* klass, const std::string& descriptor,
+  UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -357,7 +371,7 @@
 
 class PrimitiveType : public RegType {
  public:
-  PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+  PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasClassVirtual() const OVERRIDE { return true; }
@@ -365,7 +379,7 @@
 
 class Cat1Type : public PrimitiveType {
  public:
-  Cat1Type(mirror::Class* klass, const std::string& descriptor,
+  Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
@@ -374,14 +388,14 @@
   bool IsInteger() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(mirror::Class* klass,
-                                           const std::string& descriptor,
+                                           const StringPiece& descriptor,
                                            uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  IntegerType(mirror::Class* klass, const std::string& descriptor,
+  IntegerType(mirror::Class* klass, const StringPiece& descriptor,
               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const IntegerType* instance_;
@@ -392,14 +406,14 @@
   bool IsBoolean() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(mirror::Class* klass,
-                                           const std::string& descriptor,
+                                           const StringPiece& descriptor,
                                            uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  BooleanType(mirror::Class* klass, const std::string& descriptor,
+  BooleanType(mirror::Class* klass, const StringPiece& descriptor,
               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
 
@@ -411,14 +425,14 @@
   bool IsByte() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* CreateInstance(mirror::Class* klass,
-                                        const std::string& descriptor,
+                                        const StringPiece& descriptor,
                                         uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  ByteType(mirror::Class* klass, const std::string& descriptor,
+  ByteType(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ByteType* instance_;
@@ -429,14 +443,14 @@
   bool IsShort() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* CreateInstance(mirror::Class* klass,
-                                         const std::string& descriptor,
+                                         const StringPiece& descriptor,
                                          uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  ShortType(mirror::Class* klass, const std::string& descriptor,
+  ShortType(mirror::Class* klass, const StringPiece& descriptor,
             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ShortType* instance_;
@@ -447,14 +461,14 @@
   bool IsChar() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* CreateInstance(mirror::Class* klass,
-                                        const std::string& descriptor,
+                                        const StringPiece& descriptor,
                                         uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  CharType(mirror::Class* klass, const std::string& descriptor,
+  CharType(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const CharType* instance_;
@@ -465,14 +479,14 @@
   bool IsFloat() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* CreateInstance(mirror::Class* klass,
-                                         const std::string& descriptor,
+                                         const StringPiece& descriptor,
                                          uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  FloatType(mirror::Class* klass, const std::string& descriptor,
+  FloatType(mirror::Class* klass, const StringPiece& descriptor,
             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const FloatType* instance_;
@@ -480,7 +494,7 @@
 
 class Cat2Type : public PrimitiveType {
  public:
-  Cat2Type(mirror::Class* klass, const std::string& descriptor,
+  Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
@@ -490,14 +504,14 @@
   bool IsLongLo() const OVERRIDE { return true; }
   bool IsLong() const OVERRIDE { return true; }
   static const LongLoType* CreateInstance(mirror::Class* klass,
-                                          const std::string& descriptor,
+                                          const StringPiece& descriptor,
                                           uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  LongLoType(mirror::Class* klass, const std::string& descriptor,
+  LongLoType(mirror::Class* klass, const StringPiece& descriptor,
              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongLoType* instance_;
@@ -508,14 +522,14 @@
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsLongHi() const OVERRIDE { return true; }
   static const LongHiType* CreateInstance(mirror::Class* klass,
-                                          const std::string& descriptor,
+                                          const StringPiece& descriptor,
                                           uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  LongHiType(mirror::Class* klass, const std::string& descriptor,
+  LongHiType(mirror::Class* klass, const StringPiece& descriptor,
              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongHiType* instance_;
@@ -527,14 +541,14 @@
   bool IsDoubleLo() const OVERRIDE { return true; }
   bool IsDouble() const OVERRIDE { return true; }
   static const DoubleLoType* CreateInstance(mirror::Class* klass,
-                                            const std::string& descriptor,
+                                            const StringPiece& descriptor,
                                             uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+  DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleLoType* instance_;
@@ -545,14 +559,14 @@
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   virtual bool IsDoubleHi() const OVERRIDE { return true; }
   static const DoubleHiType* CreateInstance(mirror::Class* klass,
-                                      const std::string& descriptor,
+                                      const StringPiece& descriptor,
                                       uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+  DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleHiType* instance_;
@@ -677,7 +691,7 @@
 // instructions and must be passed to a constructor.
 class UninitializedType : public RegType {
  public:
-  UninitializedType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedType(mirror::Class* klass, const StringPiece& descriptor,
                     uint32_t allocation_pc, uint16_t cache_id)
       : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
@@ -697,7 +711,7 @@
 class UninitializedReferenceType FINAL : public UninitializedType {
  public:
   UninitializedReferenceType(mirror::Class* klass,
-                             const std::string& descriptor,
+                             const StringPiece& descriptor,
                              uint32_t allocation_pc, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
@@ -713,7 +727,7 @@
 // constructor.
 class UnresolvedUninitializedRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedRefType(const std::string& descriptor,
+  UnresolvedUninitializedRefType(const StringPiece& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
@@ -737,7 +751,7 @@
 class UninitializedThisReferenceType FINAL : public UninitializedType {
  public:
   UninitializedThisReferenceType(mirror::Class* klass,
-                                 const std::string& descriptor,
+                                 const StringPiece& descriptor,
                                  uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
@@ -758,7 +772,7 @@
 
 class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedThisRefType(const std::string& descriptor,
+  UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
                                      uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, 0, cache_id) {
@@ -781,7 +795,7 @@
 // sub-class.
 class ReferenceType FINAL : public RegType {
  public:
-  ReferenceType(mirror::Class* klass, const std::string& descriptor,
+  ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -799,7 +813,7 @@
 // type.
 class PreciseReferenceType FINAL : public RegType {
  public:
-  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+  PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                        uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -815,7 +829,7 @@
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
-  UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
@@ -827,7 +841,7 @@
 // of this type must be conservative.
 class UnresolvedReferenceType FINAL : public UnresolvedType {
  public:
-  UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UnresolvedType(descriptor, cache_id) {
     if (kIsDebugBuild) {
@@ -882,8 +896,10 @@
 class UnresolvedMergedType FINAL : public UnresolvedType {
  public:
   // Note: the constructor will copy the unresolved BitVector, not use it directly.
-  UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
-                       const RegTypeCache* reg_type_cache, uint16_t cache_id)
+  UnresolvedMergedType(const RegType& resolved,
+                       const BitVector& unresolved,
+                       const RegTypeCache* reg_type_cache,
+                       uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // The resolved part. See description below.
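
Note the trio of operator new declarations earlier in this header: the plain global form stays (the primitive and small-constant singletons are still heap-allocated and live for the runtime's lifetime), the ScopedArenaAllocator placement form is defined in reg_type-inl.h, and the ArenaAllocator form is explicitly deleted so that picking the wrong allocator fails at compile time rather than at runtime. In miniature:

    #include <cstddef>
    #include <new>

    class ArenaAllocator;        // the disallowed allocator
    class ScopedArenaAllocator;  // the allowed one

    struct OnlyScopedArena {
      static void* operator new(std::size_t size) { return ::operator new(size); }
      static void* operator new(std::size_t, ArenaAllocator*) = delete;          // compile error if used
      static void* operator new(std::size_t size, ScopedArenaAllocator* arena);  // sanctioned placement
    };
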
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index b6f253b..68af62e 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -118,6 +118,18 @@
   }
 }
 
+template <class RegTypeType>
+inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) {
+  DCHECK(new_entry != nullptr);
+  entries_.push_back(new_entry);
+  if (new_entry->HasClass()) {
+    mirror::Class* klass = new_entry->GetClass();
+    DCHECK(!klass->IsPrimitive());
+    klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry));
+  }
+  return *new_entry;
+}
+
 }  // namespace verifier
 }  // namespace art
 #endif  // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
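
AddEntry is the single funnel through which every new RegType reaches the cache: it appends to entries_ (the id-ordered list that cache_id indexes into) and, when the type carries a resolved class, also records a (class, type) pair in klass_entries_ so FindClass can probe by class pointer instead of scanning every entry. Returning *new_entry is what lets callers collapse to one expression, `return AddEntry(new (&arena_) ...);`. The header is not part of this excerpt, so the field shapes below are inferred from the code above:

    // Inferred sketch of the two containers AddEntry keeps in sync.
    ScopedArenaVector<const RegType*> entries_;             // cache_id -> RegType
    ScopedArenaVector<std::pair<GcRoot<mirror::Class>,
                                const RegType*>> klass_entries_;  // class -> RegType (non-primitive only)
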
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index bb756e9..71ed4a2 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,7 +16,9 @@
 
 #include "reg_type_cache-inl.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "base/stl_util.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
@@ -29,9 +31,10 @@
 
 bool RegTypeCache::primitive_initialized_ = false;
 uint16_t RegTypeCache::primitive_count_ = 0;
-const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
+                                                               kMinSmallConstant + 1];
 
-static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
+ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
     // We were or weren't looking for a precise reference and we found what we need.
@@ -67,7 +70,8 @@
   DCHECK_EQ(entries_.size(), primitive_count_);
 }
 
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader,
+                                            const char* descriptor,
                                             bool precise) {
   DCHECK(RegTypeCache::primitive_initialized_);
   if (descriptor[1] == '\0') {
@@ -159,13 +163,20 @@
   return klass;
 }
 
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
+  char* ptr = arena_.AllocArray<char>(string_piece.length());
+  memcpy(ptr, string_piece.data(), string_piece.length());
+  return StringPiece(ptr, string_piece.length());
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader,
+                                  const char* descriptor,
                                   bool precise) {
+  StringPiece sp_descriptor(descriptor);
   // Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
   // operations on the descriptor.
-  StringPiece descriptor_sp(descriptor);
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    if (MatchDescriptor(i, descriptor_sp, precise)) {
+    if (MatchDescriptor(i, sp_descriptor, precise)) {
       return *(entries_[i]);
     }
   }
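
AddString copies a descriptor into the cache's arena and returns a StringPiece over that copy, so every RegType built below holds a non-owning view whose backing bytes share the cache's lifetime. The copy is length-delimited, not NUL-terminated, which is one more reason the descriptor accessors now traffic in StringPiece rather than const char*. Typical usage, as in the hunks that follow:

    // The interned view outlives the RegType: both live in the same arena.
    StringPiece interned = AddString(sp_descriptor);
    RegType* entry = new (&arena_) UnresolvedReferenceType(interned, entries_.size());
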
@@ -186,12 +197,11 @@
     if (klass->CannotBeAssignedFromOtherTypes() || precise) {
       DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
       DCHECK(!klass->IsInterface());
-      entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+      entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
     } else {
-      entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+      entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
     }
-    AddEntry(entry);
-    return *entry;
+    return AddEntry(entry);
   } else {  // Class not resolved.
     // We tried loading the class and failed, this might get an exception raised
     // so we want to clear it before we go on.
@@ -202,9 +212,8 @@
       DCHECK(!Thread::Current()->IsExceptionPending());
     }
     if (IsValidDescriptor(descriptor)) {
-      RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
-      AddEntry(entry);
-      return *entry;
+      return AddEntry(
+          new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
     } else {
      // The descriptor is broken; return the unknown type, as there's nothing sensible that
      // could be done at runtime.
@@ -213,50 +222,65 @@
   }
 }
 
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
   DCHECK(klass != nullptr);
   if (klass->IsPrimitive()) {
     // Note: precise isn't used for primitive classes. A char is assignable to an int. All
     // primitive classes are final.
-    return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
-  } else {
-    // Look for the reference in the list of entries to have.
-    for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      const RegType* cur_entry = entries_[i];
-      if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
-        return *cur_entry;
+    return &RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+  }
+  for (auto& pair : klass_entries_) {
+    mirror::Class* const reg_klass = pair.first.Read();
+    if (reg_klass == klass) {
+      const RegType* reg_type = pair.second;
+      if (MatchingPrecisionForClass(reg_type, precise)) {
+        return reg_type;
       }
     }
-    // No reference to the class was found, create new reference.
-    RegType* entry;
-    if (precise) {
-      entry = new PreciseReferenceType(klass, descriptor, entries_.size());
-    } else {
-      entry = new ReferenceType(klass, descriptor, entries_.size());
-    }
-    AddEntry(entry);
-    return *entry;
   }
+  return nullptr;
 }
 
-RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+                                         mirror::Class* klass,
+                                         bool precise) {
+  // No reference to the class was found, create new reference.
+  DCHECK(FindClass(klass, precise) == nullptr);
+  RegType* const reg_type = precise
+      ? static_cast<RegType*>(
+          new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
+      : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+  return &AddEntry(reg_type);
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+  DCHECK(klass != nullptr);
+  const RegType* reg_type = FindClass(klass, precise);
+  if (reg_type == nullptr) {
+    reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+  }
+  return *reg_type;
+}
+
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
+    : entries_(arena.Adapter(kArenaAllocVerifier)),
+      klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+      can_load_classes_(can_load_classes),
+      arena_(arena) {
   if (kIsDebugBuild) {
     Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
   }
-  entries_.reserve(64);
+  // The klass_entries_ array does not have primitives or small constants.
+  static constexpr size_t kNumReserveEntries = 32;
+  klass_entries_.reserve(kNumReserveEntries);
+  // We want to have room for additional entries after inserting primitives and small
+  // constants.
+  entries_.reserve(kNumReserveEntries + kNumPrimitivesAndSmallConstants);
   FillPrimitiveAndSmallConstantTypes();
 }
 
 RegTypeCache::~RegTypeCache() {
-  CHECK_LE(primitive_count_, entries_.size());
-  // Delete only the non primitive types.
-  if (entries_.size() == kNumPrimitivesAndSmallConstants) {
-    // All entries are from the global pool, nothing to delete.
-    return;
-  }
-  std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
-  std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
-  STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+  DCHECK_LE(primitive_count_, entries_.size());
 }
 
 void RegTypeCache::ShutDown() {
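
With every non-primitive entry arena-allocated, the destructor above shrinks to a DCHECK: there is no STLDeleteContainerPointers pass because nothing in entries_ is individually owned anymore. Entries simply vanish when the ScopedArenaAllocator passed into the constructor unwinds. A sketch of the intended lifetime, assuming the caller owns the stack and allocator the way MethodVerifier now does:

    {
      ArenaStack stack(pool);  // 'pool' obtained from the runtime (assumption)
      ScopedArenaAllocator arena(&stack);
      RegTypeCache cache(/* can_load_classes */ true, arena);
      // ... verification: every cache entry is carved out of 'arena' ...
    }  // arena unwinds; all entries are reclaimed at once, no per-entry delete
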
@@ -318,9 +342,9 @@
 }
 
 const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
-  BitVector types(1,                                    // Allocate at least a word.
-                  true,                                 // Is expandable.
-                  Allocator::GetMallocAllocator());     // TODO: Arenas in the verifier.
+  ArenaBitVector types(&arena_,
+                       kDefaultArenaBitVectorBytes * kBitsPerByte,  // Allocate at least 8 bytes.
+                       true);                                       // Is expandable.
   const RegType* left_resolved;
   if (left.IsUnresolvedMergedReference()) {
     const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
@@ -361,20 +385,15 @@
       const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
       // Use SameBitsSet. "types" is expandable to allow merging in the components, but the
       // BitVector in the final RegType will be made non-expandable.
-      if (&resolved_part == &resolved_parts_merged &&
-              types.SameBitsSet(&unresolved_part)) {
+      if (&resolved_part == &resolved_parts_merged && types.SameBitsSet(&unresolved_part)) {
         return *cur_entry;
       }
     }
   }
-
-  // Create entry.
-  RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
-                                            types,
-                                            this,
-                                            entries_.size());
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
+                                                     types,
+                                                     this,
+                                                     entries_.size()));
 }
 
 const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
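
FromUnresolvedMerge previously pulled its scratch BitVector from the malloc allocator, with a TODO about moving the verifier to arenas; ArenaBitVector discharges that TODO by drawing its storage words from the same arena as everything else while staying expandable for the merge. The call pattern, sketched (the real code above unions the unresolved components' id sets):

    ArenaBitVector types(&arena_,
                         kDefaultArenaBitVectorBytes * kBitsPerByte,  // initial capacity in bits
                         true);                                       // expandable
    types.SetBit(some_unresolved_id);  // hypothetical id; see the merge logic above
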
@@ -391,14 +410,12 @@
       }
     }
   }
-  RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
 }
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
   UninitializedType* entry = nullptr;
-  const std::string& descriptor(type.GetDescriptor());
+  const StringPiece& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -409,7 +426,9 @@
         return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
       }
     }
-    entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+    entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
+                                                         allocation_pc,
+                                                         entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -421,17 +440,19 @@
         return *down_cast<const UninitializedReferenceType*>(cur_entry);
       }
     }
-    entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+    entry = new (&arena_) UninitializedReferenceType(klass,
+                                                     descriptor,
+                                                     allocation_pc,
+                                                     entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
   RegType* entry;
 
   if (uninit_type.IsUnresolvedTypes()) {
-    const std::string& descriptor(uninit_type.GetDescriptor());
+    const StringPiece& descriptor(uninit_type.GetDescriptor());
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedReference() &&
@@ -439,7 +460,7 @@
         return *cur_entry;
       }
     }
-    entry = new UnresolvedReferenceType(descriptor, entries_.size());
+    entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = uninit_type.GetClass();
     if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -450,7 +471,7 @@
           return *cur_entry;
         }
       }
-      entry = new ReferenceType(klass, "", entries_.size());
+      entry = new (&arena_) ReferenceType(klass, "", entries_.size());
     } else if (!klass->IsPrimitive()) {
      // We're uninitialized because of allocation, look up or create a precise type as allocations
       // may only create objects of that type.
@@ -469,18 +490,19 @@
           return *cur_entry;
         }
       }
-      entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+      entry = new (&arena_) PreciseReferenceType(klass,
+                                                 uninit_type.GetDescriptor(),
+                                                 entries_.size());
     } else {
       return Conflict();
     }
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
   UninitializedType* entry;
-  const std::string& descriptor(type.GetDescriptor());
+  const StringPiece& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -489,7 +511,7 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+    entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -498,10 +520,9 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+    entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
@@ -515,12 +536,11 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstType(value, entries_.size());
+    entry = new (&arena_) PreciseConstType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
@@ -533,12 +553,11 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstLoType(value, entries_.size());
+    entry = new (&arena_) PreciseConstLoType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstLoType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
@@ -551,32 +570,30 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstHiType(value, entries_.size());
+    entry = new (&arena_) PreciseConstHiType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstHiType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
   if (!array.IsArrayTypes()) {
     return Conflict();
   } else if (array.IsUnresolvedTypes()) {
-    const std::string& descriptor(array.GetDescriptor());
-    const std::string component(descriptor.substr(1, descriptor.size() - 1));
-    return FromDescriptor(loader, component.c_str(), false);
+    const std::string descriptor(array.GetDescriptor().as_string());
+    return FromDescriptor(loader, descriptor.c_str() + 1, false);
   } else {
     mirror::Class* klass = array.GetClass()->GetComponentType();
     std::string temp;
+    const char* descriptor = klass->GetDescriptor(&temp);
     if (klass->IsErroneous()) {
       // Arrays may have erroneous component types, use unresolved in that case.
       // We assume that the primitive classes are not erroneous, so we know it is a
       // reference type.
-      return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
+      return FromDescriptor(loader, descriptor, false);
     } else {
-      return FromClass(klass->GetDescriptor(&temp), klass,
-                       klass->CannotBeAssignedFromOtherTypes());
+      return FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
     }
   }
 }
@@ -618,10 +635,10 @@
   for (size_t i = primitive_count_; i < entries_.size(); ++i) {
     entries_[i]->VisitRoots(visitor, root_info);
   }
-}
-
-void RegTypeCache::AddEntry(RegType* new_entry) {
-  entries_.push_back(new_entry);
+  for (auto& pair : klass_entries_) {
+    GcRoot<mirror::Class>& root = pair.first;
+    root.VisitRoot(visitor, root_info);
+  }
 }
 
 }  // namespace verifier
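
The extra loop in VisitRoots matters for correctness: klass_entries_ holds GcRoot<mirror::Class> values, and any root the cache fails to report could be missed, or left stale, by a moving collector. A schematic of the visitor handshake, with stand-in types rather than the runtime's real ones:

```cpp
#include <iostream>
#include <utility>
#include <vector>

// Stand-ins for the runtime types involved.
struct Class { const char* name; };
struct GcRoot { Class* klass; };
struct RegType { int id; };

// Hypothetical visitor: the GC calls back for every root so it can mark the
// object, or rewrite the pointer if the object moved.
struct RootVisitor {
  virtual ~RootVisitor() = default;
  virtual void VisitRoot(Class** root) = 0;
};

struct PrintingVisitor : RootVisitor {
  void VisitRoot(Class** root) override {
    std::cout << "visiting root: " << (*root)->name << '\n';
  }
};

// The cache must report every root it holds; forgetting the klass_entries_
// pairs would hide those classes from the GC.
void VisitCacheRoots(std::vector<std::pair<GcRoot, const RegType*>>& klass_entries,
                     RootVisitor* visitor) {
  for (auto& pair : klass_entries) {
    visitor->VisitRoot(&pair.first.klass);
  }
}

int main() {
  Class object_class{"java.lang.Object"};
  Class string_class{"java.lang.String"};
  RegType t0{0}, t1{1};
  std::vector<std::pair<GcRoot, const RegType*>> klass_entries = {
      {{&object_class}, &t0}, {{&string_class}, &t1}};
  PrintingVisitor visitor;
  VisitCacheRoots(klass_entries, &visitor);
}
```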
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 93948a1..6f9a04e 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -19,6 +19,7 @@
 
 #include "base/casts.h"
 #include "base/macros.h"
+#include "base/scoped_arena_containers.h"
 #include "object_callbacks.h"
 #include "reg_type.h"
 #include "runtime.h"
@@ -31,15 +32,19 @@
   class Class;
   class ClassLoader;
 }  // namespace mirror
+class ScopedArenaAllocator;
 class StringPiece;
 
 namespace verifier {
 
 class RegType;
 
+// Use 8 bytes since that is the default arena allocator alignment.
+static constexpr size_t kDefaultArenaBitVectorBytes = 8;
+
 class RegTypeCache {
  public:
-  explicit RegTypeCache(bool can_load_classes);
+  explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
   ~RegTypeCache();
   static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!RegTypeCache::primitive_initialized_) {
@@ -53,6 +58,13 @@
   const art::verifier::RegType& GetFromId(uint16_t id) const;
   const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  // Find a RegType; returns null if not found.
+  const RegType* FindClass(mirror::Class* klass, bool precise) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  // Insert a new class with a specified descriptor, must not already be in the cache.
+  const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  // Get or insert a reg type for a descriptor, klass, and precision.
   const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat1Const(int32_t value, bool precise)
@@ -150,7 +162,13 @@
   const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void AddEntry(RegType* new_entry);
+  // Returns the passed in RegType.
+  template <class RegTypeType>
+  RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Add a string piece to the arena allocator so that it stays live for the lifetime of the
+  // verifier.
+  StringPiece AddString(const StringPiece& string_piece);
 
   template <class Type>
   static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
@@ -160,7 +178,8 @@
   // A quick look up for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
   static constexpr int32_t kMaxSmallConstant = 4;
-  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant -
+                                                          kMinSmallConstant + 1];
 
   static constexpr size_t kNumPrimitivesAndSmallConstants =
       12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -172,11 +191,17 @@
   static uint16_t primitive_count_;
 
   // The actual storage for the RegTypes.
-  std::vector<const RegType*> entries_;
+  ScopedArenaVector<const RegType*> entries_;
+
+  // Fast lookup for quickly finding entries that have a matching class.
+  ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
 
   // Whether or not we're allowed to load classes.
   const bool can_load_classes_;
 
+  // Arena allocator.
+  ScopedArenaAllocator& arena_;
+
   DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
 };
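
klass_entries_ gives FromClass a fast path: instead of scanning all of entries_ and comparing descriptors, FindClass walks a compact (class, type) table and the caller falls back to InsertClass on a miss. A simplified sketch of that lookup split, with hypothetical types (the real lookup also keys on precision, as shown):

```cpp
#include <iostream>
#include <utility>
#include <vector>

// Stand-ins for mirror::Class and verifier::RegType.
struct Class { const char* descriptor; };
struct RegType {
  const Class* klass;
  bool precise;
};

// Linear scan over (class, type) pairs; small-N friendly, and arena-backed
// in the real code. Returns null on a miss, mirroring FindClass.
const RegType* FindClass(
    const std::vector<std::pair<const Class*, const RegType*>>& klass_entries,
    const Class* klass, bool precise) {
  for (const auto& pair : klass_entries) {
    if (pair.first == klass && pair.second->precise == precise) {
      return pair.second;
    }
  }
  return nullptr;
}

int main() {
  Class string_class{"Ljava/lang/String;"};
  RegType precise_string{&string_class, /*precise=*/true};
  std::vector<std::pair<const Class*, const RegType*>> klass_entries;
  klass_entries.emplace_back(&string_class, &precise_string);

  // Hit: same class, same precision.
  std::cout << (FindClass(klass_entries, &string_class, true) != nullptr) << '\n';   // 1
  // Miss: same class, different precision -> fall back to an insert.
  std::cout << (FindClass(klass_entries, &string_class, false) != nullptr) << '\n';  // 0
}
```

Comparing class pointers rather than descriptor strings is what makes this a fast path; the descriptor-based scan remains for entries that have no resolved class.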
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 971b1f5..22ac7e4 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -20,6 +20,7 @@
 
 #include "base/bit_vector.h"
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "common_runtime_test.h"
 #include "reg_type_cache-inl.h"
 #include "reg_type-inl.h"
@@ -29,12 +30,23 @@
 namespace art {
 namespace verifier {
 
-class RegTypeTest : public CommonRuntimeTest {};
+class BaseRegTypeTest : public CommonRuntimeTest {
+ public:
+  void PostRuntimeCreate() OVERRIDE {
+    stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool()));
+    allocator.reset(new ScopedArenaAllocator(stack.get()));
+  }
+
+  std::unique_ptr<ArenaStack> stack;
+  std::unique_ptr<ScopedArenaAllocator> allocator;
+};
+
+class RegTypeTest : public BaseRegTypeTest {};
 
 TEST_F(RegTypeTest, ConstLoHi) {
   // Tests creating primitive types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
@@ -56,7 +68,7 @@
 
 TEST_F(RegTypeTest, Pairs) {
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   int64_t val = static_cast<int32_t>(1234);
   const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
   const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
@@ -80,7 +92,7 @@
 
 TEST_F(RegTypeTest, Primitives) {
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
 
   const RegType& bool_reg_type = cache.Boolean();
   EXPECT_FALSE(bool_reg_type.IsUndefined());
@@ -347,13 +359,13 @@
   EXPECT_TRUE(double_reg_type.HasClass());
 }
 
-class RegTypeReferenceTest : public CommonRuntimeTest {};
+class RegTypeReferenceTest : public BaseRegTypeTest {};
 
 TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
   // Tests matching precisions. A reference type that was created precise doesn't
   // match the one that is imprecise.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& imprecise_obj = cache.JavaLangObject(false);
   const RegType& precise_obj = cache.JavaLangObject(true);
   const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -368,7 +380,7 @@
   // Tests creating unresolved types. Miss the first time we ask the cache,
   // and hit the second time.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
@@ -384,7 +396,7 @@
 TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
   // Tests creating uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
@@ -406,7 +418,7 @@
 TEST_F(RegTypeReferenceTest, Dump) {
   // Tests that types produce the proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
   const RegType& resolved_ref = cache.JavaLangString();
@@ -431,7 +443,7 @@
   // Hit the second time. Then check for the same effect when using
   // the JavaLangString method instead of FromDescriptor. The String class is final.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type = cache.JavaLangString();
   const RegType& ref_type_2 = cache.JavaLangString();
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
@@ -451,7 +463,7 @@
   // Hit the second time. Then check for the same effect when using
   // the JavaLangObject method instead of FromDescriptor. The Object class is not final.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type = cache.JavaLangObject(true);
   const RegType& ref_type_2 = cache.JavaLangObject(true);
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -464,7 +476,7 @@
   // Tests merging logic
   // String and Object; the LUB is Object.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
   const RegType& string = cache_new.JavaLangString();
   const RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
@@ -487,7 +499,7 @@
 TEST_F(RegTypeTest, MergingFloat) {
   // Testing merging logic with float and float constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& float_type = cache_new.Float();
@@ -518,7 +530,7 @@
 TEST_F(RegTypeTest, MergingLong) {
   // Testing merging logic with long and long constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& long_lo_type = cache_new.LongLo();
@@ -572,7 +584,7 @@
 TEST_F(RegTypeTest, MergingDouble) {
   // Testing merging logic with double and double constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& double_lo_type = cache_new.DoubleLo();
@@ -626,7 +638,7 @@
 TEST_F(RegTypeTest, ConstPrecision) {
   // Tests creating constant types with different precisions.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
   const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
   const RegType& precise_const = cache_new.FromCat1Const(10, true);
 
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 1df2428..57fb701 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -182,6 +182,21 @@
   }
 }
 
+inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
+  void* memory = verifier->GetArena().Alloc(OFFSETOF_MEMBER(RegisterLine, line_) +
+                                                (num_regs * sizeof(uint16_t)));
+  return new (memory) RegisterLine(num_regs, verifier);
+}
+
+inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
+    : num_regs_(num_regs),
+      monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
+      reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+      this_initialized_(false) {
+  std::uninitialized_fill_n(line_, num_regs_, 0u);
+  SetResultTypeToUnknown(verifier);
+}
+
 }  // namespace verifier
 }  // namespace art
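
RegisterLine is a variable-length object: the fixed header fields are followed by num_regs uint16_t slots, so Create sizes the allocation with OFFSETOF_MEMBER plus the array and placement-constructs into it. A standalone sketch of the same layout trick, with malloc standing in for the verifier's arena:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <new>

class Line {
 public:
  static Line* Create(size_t num_regs) {
    // Size = everything up to the trailing array, plus the real array.
    // Using offsetof (not sizeof) avoids paying for the declared line_[1].
    void* memory = std::malloc(offsetof(Line, line_) + num_regs * sizeof(uint16_t));
    return new (memory) Line(num_regs);
  }

  uint16_t& operator[](size_t i) { return line_[i]; }
  size_t size() const { return num_regs_; }

 private:
  explicit Line(size_t num_regs) : num_regs_(num_regs) {
    // Fill the raw trailing storage, like std::uninitialized_fill_n in the diff.
    std::uninitialized_fill_n(line_, num_regs_, uint16_t{0});
  }

  const size_t num_regs_;
  // Declared with one element instead of zero: zero-length arrays are a GNU
  // extension, while [1] plus offsetof-based sizing stays portable.
  uint16_t line_[1];
};

int main() {
  Line* line = Line::Create(16);
  (*line)[15] = 42;
  std::cout << line->size() << ' ' << (*line)[15] << '\n';  // 16 42
  std::free(line);  // The real code lets the arena reclaim this instead.
}
```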
 
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f48b1e1..37343b5 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -412,12 +412,9 @@
   }
 }
 
-// Check whether there is another register in the search map that is locked the same way as the
-// register in the src map. This establishes an alias.
-static bool FindLockAliasedRegister(
-    uint32_t src,
-    const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
-    const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+bool FindLockAliasedRegister(uint32_t src,
+                             const RegisterLine::RegToLockDepthsMap& src_map,
+                             const RegisterLine::RegToLockDepthsMap& search_map) {
   auto it = src_map.find(src);
   if (it == src_map.end()) {
     // "Not locked" is trivially aliased.
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 46db1c6..b2f5555 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,6 +20,7 @@
 #include <memory>
 #include <vector>
 
+#include "base/scoped_arena_containers.h"
 #include "safe_map.h"
 
 namespace art {
@@ -58,11 +59,11 @@
 // stack of entered monitors (identified by code unit offset).
 class RegisterLine {
  public:
-  static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
-    void* memory = operator new(sizeof(RegisterLine) + (num_regs * sizeof(uint16_t)));
-    RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
-    return rl;
-  }
+  // A map from register to a bit vector of indices into the monitors_ stack.
+  using RegToLockDepthsMap = ScopedArenaSafeMap<uint32_t, uint32_t>;
+
+  // Create a register line of num_regs registers.
+  static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier);
 
   // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
   void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
@@ -311,11 +312,11 @@
   // Write a bit at each register location that holds a reference.
   void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
 
-  size_t GetMonitorEnterCount() {
+  size_t GetMonitorEnterCount() const {
     return monitors_.size();
   }
 
-  uint32_t GetMonitorEnterDexPc(size_t i) {
+  uint32_t GetMonitorEnterDexPc(size_t i) const {
     return monitors_[i];
   }
 
@@ -375,11 +376,7 @@
     reg_to_lock_depths_.erase(reg);
   }
 
-  RegisterLine(size_t num_regs, MethodVerifier* verifier)
-      : num_regs_(num_regs), this_initialized_(false) {
-    memset(&line_, 0, num_regs_ * sizeof(uint16_t));
-    SetResultTypeToUnknown(verifier);
-  }
+  RegisterLine(size_t num_regs, MethodVerifier* verifier);
 
   // Storage for the result register's type, valid after an invocation.
   uint16_t result_[2];
@@ -388,17 +385,18 @@
   const uint32_t num_regs_;
 
   // A stack of monitor enter locations.
-  std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
+  ScopedArenaVector<uint32_t> monitors_;
+
   // A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
   // stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
   // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
-  AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
+  RegToLockDepthsMap reg_to_lock_depths_;
 
   // Whether "this" initialization (a constructor supercall) has happened.
   bool this_initialized_;
 
   // An array of RegType Ids associated with each dex register.
-  uint16_t line_[0];
+  uint16_t line_[1];
 
   DISALLOW_COPY_AND_ASSIGN(RegisterLine);
 };
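
The RegToLockDepthsMap alias makes the lock-nesting bookkeeping explicit: each register maps to a uint32_t whose set bits are depths in the monitors_ stack. A toy version of the nesting check this enables — a hypothetical shape just to show the bit encoding; the real verifier reports verification errors rather than returning bool:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// monitors_ is a stack of monitor-enter dex pcs; the map value is a bit set
// of indices (depths) into that stack, packed into a uint32_t.
struct LockState {
  std::vector<uint32_t> monitors;              // Stack of enter locations.
  std::map<uint32_t, uint32_t> reg_to_depths;  // vreg -> bit set of depths.

  void MonitorEnter(uint32_t vreg, uint32_t dex_pc) {
    reg_to_depths[vreg] |= 1u << monitors.size();  // Record the current depth.
    monitors.push_back(dex_pc);
  }

  // Returns true if the exit is correctly nested: the register must hold
  // the lock at the top of the monitor stack.
  bool MonitorExit(uint32_t vreg) {
    if (monitors.empty()) return false;
    uint32_t top_depth = static_cast<uint32_t>(monitors.size() - 1);
    auto it = reg_to_depths.find(vreg);
    if (it == reg_to_depths.end() || (it->second & (1u << top_depth)) == 0) {
      return false;  // This register did not take the most recent lock.
    }
    it->second &= ~(1u << top_depth);
    monitors.pop_back();
    return true;
  }
};

int main() {
  LockState state;
  state.MonitorEnter(/*vreg=*/5, /*dex_pc=*/10);
  state.MonitorEnter(/*vreg=*/6, /*dex_pc=*/20);
  std::cout << state.MonitorExit(5) << '\n';  // 0: v6 holds the top lock.
  std::cout << state.MonitorExit(6) << '\n';  // 1: correctly nested.
  std::cout << state.MonitorExit(5) << '\n';  // 1: now v5 is on top.
}
```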
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
new file mode 100755
index 0000000..27c501d
--- /dev/null
+++ b/test/004-ThreadStress/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Be less aggressive than the default debug option for the JIT code cache
+# to avoid timeouts.
+exec ${RUN} "$@" --runtime-option -Xjitcodecachesize:1M
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index c93db50..5b22e88 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -129,13 +129,36 @@
         System.out.println("Unexpectedly not succeeding compareAndSwapLong...");
     }
 
-    if (unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+    // We do not use `null` as argument to sun.misc.Unsafe.compareAndSwapObject
+    // in those tests, as this value is not affected by heap poisoning
+    // (which uses address negation to poison and unpoison heap object
+    // references).  This way, when heap poisoning is enabled, we can
+    // better exercise its implementation within that method.
+    if (unsafe.compareAndSwapObject(t, objectOffset, new Object(), new Object())) {
         System.out.println("Unexpectedly succeeding compareAndSwapObject...");
     }
-    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, null)) {
+    Object objectValue2 = new Object();
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, objectValue2)) {
         System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
     }
-    if (!unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+    Object objectValue3 = new Object();
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue2, objectValue3)) {
+        System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj` and `newValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue3, t)) {
+        System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj`, `expectedValue` and `newValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, t, t)) {
+        System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj` and `expectedValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, t, new Object())) {
         System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
     }
   }
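
The rationale in the new comment is that heap poisoning stores the negation of a reference, and null negates to itself, so passing null would never meaningfully exercise the poison/unpoison paths inside compareAndSwapObject. A tiny numeric illustration of negation-based poisoning — conceptual only, using 32-bit values rather than real heap references:

```cpp
#include <cstdint>
#include <iostream>

// Conceptual poisoning: stored references are the two's-complement negation
// of the real value, so any dereference of an un-poisoned slot fails loudly.
uint32_t Poison(uint32_t ref) { return -ref; }    // Well-defined: unsigned wraps.
uint32_t Unpoison(uint32_t poisoned) { return -poisoned; }

int main() {
  uint32_t object = 0x12345678;  // Pretend compressed heap reference.
  uint32_t null_ref = 0;

  std::cout << std::hex << Poison(object) << '\n';    // edcba988: visibly scrambled.
  std::cout << std::hex << Poison(null_ref) << '\n';  // 0: null is a fixed point,
                                                      // which is why the test swaps
                                                      // real objects instead.
  std::cout << (Unpoison(Poison(object)) == object) << '\n';  // 1: round-trips.
}
```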
diff --git a/test/527-checker-array-access-split/expected.txt b/test/527-checker-array-access-split/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/527-checker-array-access-split/expected.txt
diff --git a/test/527-checker-array-access-split/info.txt b/test/527-checker-array-access-split/info.txt
new file mode 100644
index 0000000..9206804
--- /dev/null
+++ b/test/527-checker-array-access-split/info.txt
@@ -0,0 +1 @@
+Test arm64-specific array access optimization.
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
new file mode 100644
index 0000000..ead9446
--- /dev/null
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /**
+   * Test that HArrayGet with a constant index is not split.
+   */
+
+  /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArrayGet [<<Array>>,<<Index>>]
+
+  /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (after)
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArrayGet [<<Array>>,<<Index>>]
+
+  public static int constantIndexGet(int array[]) {
+    return array[1];
+  }
+
+  /**
+   * Test that HArraySet with a constant index is not split.
+   */
+
+  /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const2:i\d+>>        IntConstant 2
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+  /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (after)
+  /// CHECK:             <<Const2:i\d+>>        IntConstant 2
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+
+  public static void constantIndexSet(int array[]) {
+    array[1] = 2;
+  }
+
+  /**
+   * Test basic splitting of HArrayGet.
+   */
+
+  /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArrayGet [<<Array>>,<<Index>>]
+
+  /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK:             <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArrayGet [<<Address>>,<<Index>>]
+
+  public static int get(int array[], int index) {
+    return array[index];
+  }
+
+  /**
+   * Test basic splitting of HArraySet.
+   */
+
+  /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:                                    ParameterValue
+  /// CHECK:                                    ParameterValue
+  /// CHECK:             <<Arg:i\d+>>           ParameterValue
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Arg>>]
+
+  /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:                                    ParameterValue
+  /// CHECK:                                    ParameterValue
+  /// CHECK:             <<Arg:i\d+>>           ParameterValue
+  /// CHECK:             <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address>>,<<Index>>,<<Arg>>]
+
+  public static void set(int array[], int index, int value) {
+    array[index] = value;
+  }
+
+  /**
+   * Check that the intermediate address can be shared after GVN.
+   */
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:        <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+  public static void getSet(int array[], int index) {
+    array[index] = array[index] + 1;
+  }
+
+  /**
+   * Check that the intermediate address computation is not reordered or merged
+   * across IRs that can trigger GC.
+   */
+
+  /// CHECK-START-ARM64: int[] Main.acrossGC(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    NewArray
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int[] Main.acrossGC(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:        <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    NewArray
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int[] Main.acrossGC(int[], int) GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    NewArray
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                                    ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  public static int[] acrossGC(int array[], int index) {
+    int tmp = array[index] + 1;
+    int[] new_array = new int[1];
+    array[index] = tmp;
+    return new_array;
+  }
+
+  /**
+   * Test that the intermediate address is shared between array accesses after
+   * the bounds check have been removed by BCE.
+   */
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  // By the time we reach the architecture-specific instruction simplifier, BCE
+  // has removed the bounds checks in the loop.
+
+  // Note that we do not care that the `DataOffset` is `12`. But if we do not
+  // specify it and any other `IntConstant` appears before that instruction,
+  // Checker will match the previous `IntConstant`, and we will thus fail the
+  // check.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:        <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+  public static int canMergeAfterBCE1() {
+    int[] array = {0, 1, 2, 3};
+    for (int i = 0; i < array.length; i++) {
+      array[i] = array[i] + 1;
+    }
+    return array[array.length - 1];
+  }
+
+  /**
+   * This test case is similar to `canMergeAfterBCE1`, but with different
+   * indexes for the accesses.
+   */
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK-DAG:         <<Index1:i\d+>>        Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG:         <<ArrayGetI:i\d+>>     ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK-DAG:         <<ArrayGetI1:i\d+>>    ArrayGet [<<Array>>,<<Index1>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index1>>,<<Add>>]
+
+  // Note that we do not care that the `DataOffset` is `12`. But if we do not
+  // specify it and any other `IntConstant` appears before that instruction,
+  // Checker will match the previous `IntConstant`, and we will thus fail the
+  // check.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK-DAG:         <<Index1:i\d+>>        Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG:         <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:         <<ArrayGetI:i\d+>>     ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG:         <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:         <<ArrayGetI1:i\d+>>    ArrayGet [<<Address2>>,<<Index1>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK:             <<Address3:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                                    ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK-DAG:         <<Index1:i\d+>>        Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG:         <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:         <<ArrayGetI:i\d+>>     ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK-DAG:         <<ArrayGetI1:i\d+>>    ArrayGet [<<Address>>,<<Index1>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK:                                    ArraySet [<<Address>>,<<Index1>>,<<Add>>]
+
+  // There should be only one intermediate address computation in the loop.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+  /// CHECK:                                    Arm64IntermediateAddress
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+
+  public static int canMergeAfterBCE2() {
+    int[] array = {0, 1, 2, 3};
+    for (int i = 0; i < array.length - 1; i++) {
+      array[i + 1] = array[i] + array[i + 1];
+    }
+    return array[array.length - 1];
+  }
+
+
+  public static void main(String[] args) {
+    int[] array = {123, 456, 789};
+
+    assertIntEquals(456, constantIndexGet(array));
+
+    constantIndexSet(array);
+    assertIntEquals(2, array[1]);
+
+    assertIntEquals(789, get(array, 2));
+
+    set(array, 1, 456);
+    assertIntEquals(456, array[1]);
+
+    getSet(array, 0);
+    assertIntEquals(124, array[0]);
+
+    acrossGC(array, 0);
+    assertIntEquals(125, array[0]);
+
+    assertIntEquals(4, canMergeAfterBCE1());
+    assertIntEquals(6, canMergeAfterBCE2());
+  }
+}
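
What the Checker patterns above verify, in pointer-arithmetic terms: the arm64 simplifier splits `array[index]` into a reusable intermediate address (base plus data offset) and a scaled access, and GVN then shares that base across accesses in the same block or loop. A sketch of the before/after shape; the 12-byte offset mirrors the `IntConstant 12` the tests match for int[]:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  int32_t backing[7] = {0, 0, 0, 123, 456, 789, 0};
  // Pretend the array object starts here and its elements begin 12 bytes in,
  // standing in for the object header before the element data.
  uint8_t* array = reinterpret_cast<uint8_t*>(backing);
  const size_t kDataOffset = 12;

  // Before the split: every access folds base + offset + index*4 into one
  // addressing computation. After the split: compute the intermediate
  // address once...
  int32_t* data = reinterpret_cast<int32_t*>(array + kDataOffset);

  // ...then every get/set is a plain register-offset access, and GVN can
  // reuse `data` for all of them (one Arm64IntermediateAddress per loop).
  for (int i = 0; i < 3; ++i) {
    data[i] = data[i] + 1;
  }
  std::cout << data[0] << ' ' << data[1] << ' ' << data[2] << '\n';  // 124 457 790
}
```

This also explains the GC caveat in the tests above: the intermediate address points into the middle of an object, so it must not live across an instruction that can trigger a moving collection.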
diff --git a/test/530-checker-lse/expected.txt b/test/530-checker-lse/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/530-checker-lse/expected.txt
diff --git a/test/530-checker-lse/info.txt b/test/530-checker-lse/info.txt
new file mode 100644
index 0000000..5b45e20
--- /dev/null
+++ b/test/530-checker-lse/info.txt
@@ -0,0 +1 @@
+Checker test for load-store elimination.
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
new file mode 100644
index 0000000..c766aaa
--- /dev/null
+++ b/test/530-checker-lse/src/Main.java
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Circle {
+  Circle(double radius) {
+    this.radius = radius;
+  }
+  public double getArea() {
+    return radius * radius * Math.PI;
+  }
+  private double radius;
+};
+
+class TestClass {
+  TestClass() {
+  }
+  TestClass(int i, int j) {
+    this.i = i;
+    this.j = j;
+  }
+  int i;
+  int j;
+  volatile int k;
+  TestClass next;
+  static int si;
+};
+
+class SubTestClass extends TestClass {
+  int k;
+};
+
+class TestClass2 {
+  int i;
+  int j;
+};
+
+public class Main {
+
+  /// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+  static double calcCircleArea(double radius) {
+    return new Circle(radius).getArea();
+  }
+
+  /// CHECK-START: int Main.test1(TestClass, TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test1(TestClass, TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Different fields shouldn't alias.
+  static int test1(TestClass obj1, TestClass obj2) {
+    obj1.i = 1;
+    obj2.j = 2;
+    return obj1.i + obj2.j;
+  }
+
+  /// CHECK-START: int Main.test2(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test2(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Redundant store of the same value.
+  static int test2(TestClass obj) {
+    obj.j = 1;
+    obj.j = 1;
+    return obj.j;
+  }
+
+  /// CHECK-START: int Main.test3(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test3(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+  // A new allocation shouldn't alias with pre-existing values.
+  static int test3(TestClass obj) {
+    obj.i = 1;
+    obj.next.j = 2;
+    TestClass obj2 = new TestClass();
+    obj2.i = 3;
+    obj2.j = 4;
+    return obj.i + obj.next.j + obj2.i + obj2.j;
+  }
+
+  /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  // Set and merge the same value in two branches.
+  static int test4(TestClass obj, boolean b) {
+    if (b) {
+      obj.i = 1;
+    } else {
+      obj.i = 1;
+    }
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  // Set and merge different values in two branches.
+  static int test5(TestClass obj, boolean b) {
+    if (b) {
+      obj.i = 1;
+    } else {
+      obj.i = 2;
+    }
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Setting the same value doesn't clear the value for aliased locations.
+  static int test6(TestClass obj1, TestClass obj2, boolean b) {
+    obj1.i = 1;
+    obj1.j = 2;
+    if (b) {
+      obj2.j = 2;
+    }
+    return obj1.j + obj2.j;
+  }
+
+  /// CHECK-START: int Main.test7(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test7(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Invocation should kill values in non-singleton heap locations.
+  static int test7(TestClass obj) {
+    obj.i = 1;
+    System.out.print("");
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test8() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InvokeVirtual
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test8() load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InvokeVirtual
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Invocation should not kill values in singleton heap locations.
+  static int test8() {
+    TestClass obj = new TestClass();
+    obj.i = 1;
+    System.out.print("");
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test9(TestClass) load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test9(TestClass) load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Invocation should kill values in non-singleton heap locations.
+  static int test9(TestClass obj) {
+    TestClass obj2 = new TestClass();
+    obj2.i = 1;
+    obj.next = obj2;
+    System.out.print("");
+    return obj2.i;
+  }
+
+  /// CHECK-START: int Main.test10(TestClass) load_store_elimination (before)
+  /// CHECK: StaticFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: StaticFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test10(TestClass) load_store_elimination (after)
+  /// CHECK: StaticFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: StaticFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Static fields shouldn't alias with instance fields.
+  static int test10(TestClass obj) {
+    TestClass.si += obj.i;
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test11(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test11(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Loop without heap writes.
+  // obj.i is actually hoisted to the loop pre-header by licm already.
+  static int test11(TestClass obj) {
+    obj.i = 1;
+    int sum = 0;
+    for (int i = 0; i < 10; i++) {
+      sum += obj.i;
+    }
+    return sum;
+  }
+
+  /// CHECK-START: int Main.test12(TestClass, TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: int Main.test12(TestClass, TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+
+  // Loop with heap writes.
+  static int test12(TestClass obj1, TestClass obj2) {
+    obj1.i = 1;
+    int sum = 0;
+    for (int i = 0; i < 10; i++) {
+      sum += obj1.i;
+      obj2.i = sum;
+    }
+    return sum;
+  }
+
+  /// CHECK-START: int Main.test13(TestClass, TestClass2) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test13(TestClass, TestClass2) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Different classes shouldn't alias.
+  static int test13(TestClass obj1, TestClass2 obj2) {
+    obj1.i = 1;
+    obj2.i = 2;
+    return obj1.i + obj2.i;
+  }
+
+  /// CHECK-START: int Main.test14(TestClass, SubTestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test14(TestClass, SubTestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Subclass may alias with super class.
+  static int test14(TestClass obj1, SubTestClass obj2) {
+    obj1.i = 1;
+    obj2.i = 2;
+    return obj1.i;
+  }
+
+  /// CHECK-START: int Main.test15() load_store_elimination (before)
+  /// CHECK: StaticFieldSet
+  /// CHECK: StaticFieldSet
+  /// CHECK: StaticFieldGet
+
+  /// CHECK-START: int Main.test15() load_store_elimination (after)
+  /// CHECK: <<Const2:i\d+>> IntConstant 2
+  /// CHECK: StaticFieldSet
+  /// CHECK: StaticFieldSet
+  /// CHECK-NOT: StaticFieldGet
+  /// CHECK: Return [<<Const2>>]
+
+  // Static field access through the subclass's name.
+  static int test15() {
+    TestClass.si = 1;
+    SubTestClass.si = 2;
+    return TestClass.si;
+  }
+
+  /// CHECK-START: int Main.test16() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test16() load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK-NOT: StaticFieldSet
+  /// CHECK-NOT: StaticFieldGet
+
+  // Test inlined constructor.
+  static int test16() {
+    TestClass obj = new TestClass(1, 2);
+    return obj.i + obj.j;
+  }
+
+  /// CHECK-START: int Main.test17() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test17() load_store_elimination (after)
+  /// CHECK: <<Const0:i\d+>> IntConstant 0
+  /// CHECK: NewInstance
+  /// CHECK-NOT: StaticFieldSet
+  /// CHECK-NOT: StaticFieldGet
+  /// CHECK: Return [<<Const0>>]
+
+  // Test getting default value.
+  static int test17() {
+    TestClass obj = new TestClass();
+    obj.j = 1;
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test18(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test18(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Volatile field load/store shouldn't be eliminated.
+  static int test18(TestClass obj) {
+    obj.k = 1;
+    return obj.k;
+  }
+
+  /// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (before)
+  /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
+  /// CHECK: ArraySet
+  /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+
+  /// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (after)
+  /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
+  /// CHECK: ArraySet
+  /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+
+  // I/F (int/float) and J/D (long/double) aliasing should keep the load/store.
+  static float test19(float[] fa1, float[] fa2) {
+    fa1[0] = fa2[0];
+    return fa1[0];
+  }
+
+  /// CHECK-START: TestClass Main.test20() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: TestClass Main.test20() load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK-NOT: InstanceFieldSet
+
+  // Storing the default heap value is redundant if the heap location
+  // already contains the default heap value.
+  static TestClass test20() {
+    TestClass obj = new TestClass();
+    obj.i = 0;
+    return obj;
+  }
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertFloatEquals(float expected, float result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertDoubleEquals(double expected, double result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void main(String[] args) {
+    assertDoubleEquals(Math.PI * Math.PI * Math.PI, calcCircleArea(Math.PI));
+    assertIntEquals(3, test1(new TestClass(), new TestClass()));
+    assertIntEquals(1, test2(new TestClass()));
+    TestClass obj1 = new TestClass();
+    TestClass obj2 = new TestClass();
+    obj1.next = obj2;
+    assertIntEquals(10, test3(obj1));
+    assertIntEquals(1, test4(new TestClass(), true));
+    assertIntEquals(1, test4(new TestClass(), false));
+    assertIntEquals(1, test5(new TestClass(), true));
+    assertIntEquals(2, test5(new TestClass(), false));
+    assertIntEquals(4, test6(new TestClass(), new TestClass(), true));
+    assertIntEquals(2, test6(new TestClass(), new TestClass(), false));
+    assertIntEquals(1, test7(new TestClass()));
+    assertIntEquals(1, test8());
+    obj1 = new TestClass();
+    obj2 = new TestClass();
+    obj1.next = obj2;
+    assertIntEquals(1, test9(new TestClass()));
+    assertIntEquals(3, test10(new TestClass(3, 4)));
+    assertIntEquals(3, TestClass.si);
+    assertIntEquals(10, test11(new TestClass()));
+    assertIntEquals(10, test12(new TestClass(), new TestClass()));
+    assertIntEquals(3, test13(new TestClass(), new TestClass2()));
+    SubTestClass obj3 = new SubTestClass();
+    assertIntEquals(2, test14(obj3, obj3));
+    assertIntEquals(2, test15());
+    assertIntEquals(3, test16());
+    assertIntEquals(0, test17());
+    assertIntEquals(1, test18(new TestClass()));
+    float[] fa1 = { 0.8f };
+    float[] fa2 = { 1.8f };
+    assertFloatEquals(1.8f, test19(fa1, fa2));
+    assertFloatEquals(0, test20().i);
+  }
+}
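
An aside on the new pass exercised above: the effect of load_store_elimination is easiest to see as a source-level rewrite. Below is a minimal sketch of what the pass effectively does to test13-style code; the Before/After method names are illustrative only, not part of the test.

```java
// Since TestClass and TestClass2 are unrelated types, obj1.i and obj2.i
// cannot be the same heap location, so each load after a store can be
// replaced by the value that was stored.
static int test13Before(TestClass obj1, TestClass2 obj2) {
  obj1.i = 1;
  obj2.i = 2;
  return obj1.i + obj2.i;  // two InstanceFieldGet instructions
}

// What the compiler effectively produces after the pass:
static int test13After(TestClass obj1, TestClass2 obj2) {
  obj1.i = 1;              // stores are kept: obj1/obj2 escape the method
  obj2.i = 2;
  return 1 + 2;            // loads forwarded from the stores
}
```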
diff --git a/test/532-checker-nonnull-arrayset/src/Main.java b/test/532-checker-nonnull-arrayset/src/Main.java
index 7d8fff4..2c701bb 100644
--- a/test/532-checker-nonnull-arrayset/src/Main.java
+++ b/test/532-checker-nonnull-arrayset/src/Main.java
@@ -29,10 +29,10 @@
   /// CHECK-NOT:      test
   /// CHECK:          ReturnVoid
   public static void test() {
-    Object[] array = new Object[1];
+    Object[] array = new Object[2];
     Object nonNull = array[0];
     nonNull.getClass(); // Ensure nonNull has an implicit null check.
-    array[0] = nonNull;
+    array[1] = nonNull;
   }
 
   public static void main(String[] args) {}
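
The edit above is not cosmetic: with the new load_store_elimination pass, writing a just-loaded value back into the same array slot is a redundant store that the compiler may now remove, which would leave the test's ArraySet assertion with nothing to match. A sketch of the distinction, assuming that behavior:

```java
// Same-slot store of a just-loaded value: removable under LSE.
Object[] a = new Object[1];
Object v = a[0];
a[0] = v;         // redundant; the ArraySet may be eliminated

// Store to a different slot: a genuine heap write the pass must keep.
Object[] b = new Object[2];
Object w = b[0];
b[1] = w;         // ArraySet survives, so the checker test stays meaningful
```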
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
index b754680..2fb2f89 100755
--- a/test/955-lambda-smali/run
+++ b/test/955-lambda-smali/run
@@ -15,4 +15,4 @@
 # limitations under the License.
 
 # Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --runtime-option -Xexperimental:lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:lambdas
+${RUN} "$@" --experimental lambdas
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
index c786687..06692f9 100755
--- a/test/960-default-smali/build
+++ b/test/960-default-smali/build
@@ -18,7 +18,7 @@
 set -e
 
 # Generate the smali Main.smali file or fail
-./util-src/generate_smali.py ./smali
+${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali
 
 if [[ $@ == *"--jvm"* ]]; then
   # Build the Java files if we are running a --jvm test
@@ -29,5 +29,5 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx256m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx256m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip "$TEST_NAME.jar" classes.dex
diff --git a/test/960-default-smali/info.txt b/test/960-default-smali/info.txt
index eb596e2..9583abb 100644
--- a/test/960-default-smali/info.txt
+++ b/test/960-default-smali/info.txt
@@ -2,15 +2,16 @@
 
 Obviously needs to run under ART or a Java 8 Language runtime and compiler.
 
-When run a Main.smali file will be generated by the util-src/generate_smali.py
-script. If we run with --jvm we will use the tools/extract-embedded-java script to
-turn the smali into equivalent Java using the embedded Java code.
+When run, a Main.smali file will be generated by the
+test/utils/python/generate_smali_main.py script. If we run with --jvm, we will
+use the tools/extract-embedded-java script to turn the smali into equivalent
+Java using the embedded Java code.
 
 When updating be sure to write the equivalent Java code in comments of the smali
 files.
 
-Care should be taken when updating the generate_smali.py script. It must always
-return equivalent output when run multiple times.
+Care should be taken when updating the generate_smali_main.py script. It must
+always produce equivalent output when run multiple times.
 
 To update the test files do the following steps:
     <Add new classes/interfaces>
diff --git a/test/960-default-smali/run b/test/960-default-smali/run
index e378b06..22f6800 100755
--- a/test/960-default-smali/run
+++ b/test/960-default-smali/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
index 707c17e..5eb851f 100755
--- a/test/961-default-iface-resolution-generated/build
+++ b/test/961-default-iface-resolution-generated/build
@@ -40,7 +40,7 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
 
 # Reset the ulimit back to its initial value
diff --git a/test/961-default-iface-resolution-generated/run b/test/961-default-iface-resolution-generated/run
index e378b06..22f6800 100755
--- a/test/961-default-iface-resolution-generated/run
+++ b/test/961-default-iface-resolution-generated/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
index 5ad82f7..06bb3bd 100755
--- a/test/962-iface-static/build
+++ b/test/962-iface-static/build
@@ -26,5 +26,5 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
diff --git a/test/962-iface-static/run b/test/962-iface-static/run
index e713708..d37737f 100755
--- a/test/962-iface-static/run
+++ b/test/962-iface-static/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
index 5ad82f7..06bb3bd 100755
--- a/test/963-default-range-smali/build
+++ b/test/963-default-range-smali/build
@@ -26,5 +26,5 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
diff --git a/test/963-default-range-smali/run b/test/963-default-range-smali/run
index e713708..d37737f 100755
--- a/test/963-default-range-smali/run
+++ b/test/963-default-range-smali/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
index deef803..b0fbe4b 100755
--- a/test/964-default-iface-init-generated/build
+++ b/test/964-default-iface-init-generated/build
@@ -38,7 +38,7 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
 
 # Reset the ulimit back to its initial value
diff --git a/test/964-default-iface-init-generated/run b/test/964-default-iface-init-generated/run
index e378b06..22f6800 100755
--- a/test/964-default-iface-init-generated/run
+++ b/test/964-default-iface-init-generated/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index b83d4a4..6ce3d94 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -41,8 +41,7 @@
 
 ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
   TEST_ART_RUN_TEST_DEPENDENCIES += \
-    $(JACK_JAR) \
-    $(JACK_LAUNCHER_JAR) \
+    $(JACK) \
     $(JILL_JAR)
 endif
 
@@ -61,15 +60,13 @@
     run_test_options += --build-with-javac-dx
   endif
 $$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
-$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES)
+$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
 	$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
 	$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
 	  SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
 	  DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
 	  JACK=$(abspath $(JACK)) \
-	  JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
 	  JACK_CLASSPATH=$(TARGET_JACK_CLASSPATH) \
-	  JACK_JAR=$(abspath $(JACK_JAR)) \
 	  JILL_JAR=$(abspath $(JILL_JAR)) \
 	  $(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
 	$(hide) touch $$@
@@ -444,55 +441,9 @@
 # Known broken tests for the mips32 optimizing compiler backend.
 TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
     441-checker-inliner \
-    442-checker-constant-folding \
-    444-checker-nce \
-    445-checker-licm \
-    446-checker-inliner2 \
-    447-checker-inliner3 \
-    449-checker-bce \
-    450-checker-types \
-    455-checker-gvn \
-    458-checker-instruction-simplification \
-    462-checker-inlining-across-dex-files \
-    463-checker-boolean-simplifier \
-    464-checker-inline-sharpen-calls \
-    465-checker-clinit-gvn \
-    468-checker-bool-simplifier-regression \
-    473-checker-inliner-constants \
-    474-checker-boolean-input \
-    476-checker-ctor-memory-barrier \
-    477-checker-bound-type \
-    478-checker-clinit-check-pruning \
-    478-checker-inliner-nested-loop \
-    480-checker-dead-blocks \
-    482-checker-loop-back-edge-use \
-    484-checker-register-hints \
-    485-checker-dce-loop-update \
-    485-checker-dce-switch \
-    486-checker-must-do-null-check \
-    487-checker-inline-calls \
-    488-checker-inline-recursive-calls \
-    490-checker-inline \
-    492-checker-inline-invoke-interface \
-    493-checker-inline-invoke-interface \
-    494-checker-instanceof-tests \
-    495-checker-checkcast-tests \
-    496-checker-inlining-and-class-loader \
-    508-checker-disassembly \
     510-checker-try-catch \
-    517-checker-builder-fallthrough \
     521-checker-array-set-null \
-    522-checker-regression-monitor-exit \
-    523-checker-can-throw-regression \
-    525-checker-arrays-and-fields \
-    526-checker-caller-callee-regs \
-    529-checker-unresolved \
-    530-checker-loops \
-    530-checker-regression-reftype-final \
-    532-checker-nonnull-arrayset \
-    534-checker-bce-deoptimization \
     536-checker-intrinsic-optimization \
-    537-checker-debuggable \
 
 ifeq (mips,$(TARGET_ARCH))
   ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -695,13 +646,13 @@
     uc_host_or_target := HOST
     test_groups := ART_RUN_TEST_HOST_RULES
     run_test_options += --host
-    prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+    prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(HOST_JACK_CLASSPATH_DEPENDENCIES)
     jack_classpath := $(HOST_JACK_CLASSPATH)
   else
     ifeq ($(1),target)
       uc_host_or_target := TARGET
       test_groups := ART_RUN_TEST_TARGET_RULES
-      prereq_rule := test-art-target-sync
+      prereq_rule := test-art-target-sync $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
       jack_classpath := $(TARGET_JACK_CLASSPATH)
     else
       $$(error found $(1) expected $(TARGET_TYPES))
@@ -917,9 +868,7 @@
 	    SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
 	    DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
 	    JACK=$(abspath $(JACK)) \
-	    JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
 	    JACK_CLASSPATH=$$(PRIVATE_JACK_CLASSPATH) \
-	    JACK_JAR=$(abspath $(JACK_JAR)) \
 	    JILL_JAR=$(abspath $(JILL_JAR)) \
 	    art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
 	      && $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
diff --git a/test/etc/default-build b/test/etc/default-build
index c92402b..4743216 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -96,7 +96,7 @@
 
 if [ -d smali ]; then
   # Compile Smali classes
-  ${SMALI} -JXmx256m --experimental --api-level 23 --output smali_classes.dex `find smali -name '*.smali'`
+  ${SMALI} -JXmx256m ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
 
   # Don't bother with dexmerger if we provide our own main function in a smali file.
   if [ ${SKIP_DX_MERGER} = "false" ]; then
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index fbefa07..18867fd 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -18,6 +18,7 @@
 DEBUGGER="n"
 DEV_MODE="n"
 DEX2OAT=""
+EXPERIMENTAL=""
 FALSE_BIN="/system/bin/false"
 FLAGS=""
 GDB=""
@@ -196,6 +197,13 @@
         FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
         COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
         shift
+    elif [ "x$1" = "x--experimental" ]; then
+        if [ "$#" -lt 2 ]; then
+            echo "missing --experimental option" 1>&2
+            exit 1
+        fi
+        EXPERIMENTAL="$EXPERIMENTAL $2"
+        shift 2
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
         exit 1
@@ -204,6 +212,13 @@
     fi
 done
 
+if [ "$USE_JVM" = "n" ]; then
+    for feature in ${EXPERIMENTAL}; do
+        FLAGS="${FLAGS} -Xexperimental:${feature} -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:${feature}"
+        COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
+    done
+fi
+
 if [ "x$1" = "x" ] ; then
   MAIN="Main"
 else
diff --git a/test/run-all-tests b/test/run-all-tests
index 76283b7..6d5c28c 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -44,12 +44,45 @@
     elif [ "x$1" = "x--use-java-home" ]; then
         run_args="${run_args} --use-java-home"
         shift
+    elif [ "x$1" = "x--no-image" ]; then
+        run_args="${run_args} --no-image"
+        shift
+    elif [ "x$1" = "x--quick" ]; then
+        run_args="${run_args} --quick"
+        shift
+    elif [ "x$1" = "x--optimizing" ]; then
+        run_args="${run_args} --optimizing"
+        shift
+    elif [ "x$1" = "x--image" ]; then
+        run_args="${run_args} --image"
+        shift
+    elif [ "x$1" = "x--never-clean" ]; then
+        run_args="${run_args} --never-clean"
+        shift
     elif [ "x$1" = "x--jvm" ]; then
         run_args="${run_args} --jvm"
         shift
     elif [ "x$1" = "x--debug" ]; then
         run_args="${run_args} --debug"
         shift
+    elif [ "x$1" = "x--build-only" ]; then
+        run_args="${run_args} --build-only"
+        shift
+    elif [ "x$1" = "x--build-with-jack" ]; then
+        run_args="${run_args} --build-with-jack"
+        shift
+    elif [ "x$1" = "x--build-with-javac-dx" ]; then
+        run_args="${run_args} --build-with-javac-dx"
+        shift
+    elif [ "x$1" = "x--dex2oat-swap" ]; then
+        run_args="${run_args} --dex2oat-swap"
+        shift
+    elif [ "x$1" = "x--dalvik" ]; then
+        run_args="${run_args} --dalvik"
+        shift
+    elif [ "x$1" = "x--debuggable" ]; then
+        run_args="${run_args} --debuggable"
+        shift
     elif [ "x$1" = "x--zygote" ]; then
         run_args="${run_args} --zygote"
         shift
@@ -59,15 +92,15 @@
     elif [ "x$1" = "x--jit" ]; then
         run_args="${run_args} --jit"
         shift
+    elif [ "x$1" = "x--verify-soft-fail" ]; then
+        run_args="${run_args} --verify-soft-fail"
+        shift
     elif [ "x$1" = "x--no-verify" ]; then
         run_args="${run_args} --no-verify"
         shift
     elif [ "x$1" = "x--no-optimize" ]; then
         run_args="${run_args} --no-optimize"
         shift
-    elif [ "x$1" = "x--valgrind" ]; then
-        run_args="${run_args} --valgrind"
-        shift
     elif [ "x$1" = "x--dev" ]; then
         run_args="${run_args} --dev"
         shift
@@ -116,6 +149,15 @@
     elif [ "x$1" = "x--always-clean" ]; then
         run_args="${run_args} --always-clean"
         shift
+    elif [ "x$1" = "x--pic-test" ]; then
+        run_args="${run_args} --pic-test"
+        shift
+    elif [ "x$1" = "x--pic-image" ]; then
+        run_args="${run_args} --pic-image"
+        shift
+    elif [ "x$1" = "x--strace" ]; then
+        run_args="${run_args} --strace"
+        shift
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
         usage="yes"
@@ -134,9 +176,13 @@
         echo "  Options are all passed to run-test; refer to that for " \
              "further documentation:"
         echo "    --debug --dev --host --interpreter --jit --jvm --no-optimize"
-        echo "    --no-verify -O --update --valgrind --zygote --64 --relocate"
-        echo "    --prebuild --always-clean --gcstress --gcverify --trace"
-        echo "    --no-patchoat --no-dex2oat --use-java-home"
+        echo "    --no-verify --verify-soft-fail -O --update --zygote --64"
+        echo "    --relocate --prebuild --always-clean --gcstress --gcverify"
+        echo "    --trace --no-patchoat --no-dex2oat --use-java-home --pic-image"
+        echo "    --pic-test --strace --debuggable --dalvik --dex2oat-swap"
+        echo "    --build-only --build-with-jack --build-with-javac-dx"
+        echo "    --never-clean --image --no-image --quick --optimizing"
+        echo "    --no-relocate --no-prebuild"
         echo "  Specific Runtime Options:"
         echo "    --seq                Run tests one-by-one, avoiding failures caused by busy CPU"
     ) 1>&2
diff --git a/test/run-test b/test/run-test
index 1b71f33..3442fcf 100755
--- a/test/run-test
+++ b/test/run-test
@@ -46,6 +46,7 @@
 export DEX_LOCATION=/data/run-test/${test_dir}
 export NEED_DEX="true"
 export USE_JACK="false"
+export SMALI_ARGS="--experimental --api-level 23"
 
 # If dx was not set by the environment variable, assume it is in the path.
 if [ -z "$DX" ]; then
@@ -82,24 +83,11 @@
     export ANDROID_BUILD_TOP=$oldwd
 fi
 
-# If JACK_VM_COMMAND is not set, assume it launches the prebuilt jack-launcher.
-if [ -z "$JACK_VM_COMMAND" ]; then
-  if [ ! -z "$TMPDIR" ]; then
-    jack_temp_dir="-Djava.io.tmpdir=$TMPDIR"
-  fi
-  export JACK_VM_COMMAND="java -Dfile.encoding=UTF-8 -Xms2560m -XX:+TieredCompilation $jack_temp_dir -jar $ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack-launcher.jar"
-fi
-
 # If JACK_CLASSPATH is not set, assume it only contains core-libart.
 if [ -z "$JACK_CLASSPATH" ]; then
   export JACK_CLASSPATH="$ANDROID_BUILD_TOP/out/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
 fi
 
-# If JACK_JAR is not set, assume it is located in the prebuilts directory.
-if [ -z "$JACK_JAR" ]; then
-  export JACK_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack.jar"
-fi
-
 # If JILL_JAR is not set, assume it is located in the prebuilts directory.
 if [ -z "$JILL_JAR" ]; then
   export JILL_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jill.jar"
@@ -540,6 +528,7 @@
         echo "    --debug               Wait for a debugger to attach."
         echo "    --debuggable          Whether to compile Java code for a debugger."
         echo "    --gdb                 Run under gdb; incompatible with some tests."
+        echo "    --gdb-arg             Pass an option to gdb."
         echo "    --build-only          Build test files only (off by default)."
         echo "    --build-with-javac-dx Build test files with javac and dx (on by default)."
         echo "    --build-with-jack     Build test files with jack and jill (off by default)."
@@ -565,6 +554,8 @@
         echo "                          the image and oat files be relocated to a random"
         echo "                          address before running. (default)"
         echo "    --no-relocate         Force the use of no relocating in the test"
+        echo "    --image               Run the test using a precompiled boot image. (default)"
+        echo "    --no-image            Run the test without a precompiled boot image."
         echo "    --host                Use the host-mode virtual machine."
         echo "    --invoke-with         Pass --invoke-with option to runtime."
         echo "    --dalvik              Use Dalvik (off by default)."
@@ -576,6 +567,7 @@
              "files."
         echo "    --64                  Run the test in 64-bit mode"
         echo "    --trace               Run with method tracing"
+        echo "    --strace              Run with syscall tracing from strace."
         echo "    --stream              Run method tracing in streaming mode (requires --trace)"
         echo "    --gcstress            Run with gc stress testing"
         echo "    --gcverify            Run with gc verification"
@@ -585,6 +577,9 @@
         echo "    --dex2oat-swap        Use a dex2oat swap file."
         echo "    --instruction-set-features [string]"
         echo "                          Set instruction-set-features for compilation."
+        echo "    --pic-image           Use an image compiled with position independent code for the"
+        echo "                          boot class path."
+        echo "    --pic-test            Compile the test code position independent."
     ) 1>&2
     exit 1
 fi
diff --git a/test/960-default-smali/util-src/generate_smali.py b/test/utils/python/generate_smali_main.py
similarity index 98%
rename from test/960-default-smali/util-src/generate_smali.py
rename to test/utils/python/generate_smali_main.py
index b2bf1f0..d796d31 100755
--- a/test/960-default-smali/util-src/generate_smali.py
+++ b/test/utils/python/generate_smali_main.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 """
-Generate Smali Main file for test 960
+Generate Smali Main file from a classes.xml file.
 """
 
 import os
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 1083c2f..5615f8f 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -10,8 +10,6 @@
 
 TODO:
  * Add more tips to the help page.
-   - Note that only 'app' heap matters, not 'zygote' or 'image'.
-   - Say what a dex cache is.
    - Recommend how to start looking at a heap dump.
    - Say how to enable allocation sites.
    - Where to submit feedback, questions, and bug reports.
@@ -24,6 +22,7 @@
  * Show site context and heap and class filter in "Objects" view?
  * Have a menu at the top of an object view with links to the sections?
  * Include ahat version and hprof file in the menu at the top of the page?
+ * Show root types.
  * Heaped Table
    - Make sortable by clicking on headers.
    - Use consistent order for heap columns.
@@ -86,7 +85,6 @@
    index.
  * What's the difference between getId and getUniqueId?
  * I see objects with duplicate references.
- * Don't store stack trace by heap (CL 157252)
  * A way to get overall retained size by heap.
  * A method Instance.isReachable()
 
@@ -97,6 +95,9 @@
  * Computing, for each instance, the other instances it dominates.
 
 Release History:
+ 0.2 Oct 20, 2015
+   Take into account 'count' and 'offset' when displaying strings.
+
  0.1ss Aug 04, 2015
    Enable stack allocations code (using custom modified perflib).
    Sort objects in 'objects/' with default sort.
diff --git a/tools/ahat/src/AhatSnapshot.java b/tools/ahat/src/AhatSnapshot.java
index 3035ef7..43658f3 100644
--- a/tools/ahat/src/AhatSnapshot.java
+++ b/tools/ahat/src/AhatSnapshot.java
@@ -18,14 +18,12 @@
 
 import com.android.tools.perflib.heap.ClassObj;
 import com.android.tools.perflib.heap.Heap;
-import com.android.tools.perflib.heap.HprofParser;
 import com.android.tools.perflib.heap.Instance;
 import com.android.tools.perflib.heap.RootObj;
 import com.android.tools.perflib.heap.Snapshot;
 import com.android.tools.perflib.heap.StackFrame;
 import com.android.tools.perflib.heap.StackTrace;
-import com.android.tools.perflib.heap.io.HprofBuffer;
-import com.android.tools.perflib.heap.io.MemoryMappedFileBuffer;
+import com.android.tools.perflib.captures.MemoryMappedFileBuffer;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import java.io.File;
@@ -56,8 +54,7 @@
    * Create an AhatSnapshot from an hprof file.
    */
   public static AhatSnapshot fromHprof(File hprof) throws IOException {
-    HprofBuffer buffer = new MemoryMappedFileBuffer(hprof);
-    Snapshot snapshot = (new HprofParser(buffer)).parse();
+    Snapshot snapshot = Snapshot.createSnapshot(new MemoryMappedFileBuffer(hprof));
     snapshot.computeDominators();
     return new AhatSnapshot(snapshot);
   }
@@ -185,20 +182,17 @@
 
   // Return the stack where the given instance was allocated.
   private static StackTrace getStack(Instance inst) {
-    // TODO: return inst.getStack() once perflib is fixed.
-    return null;
+    return inst.getStack();
   }
 
   // Return the list of stack frames for a stack trace.
   private static StackFrame[] getStackFrames(StackTrace stack) {
-    // TODO: Use stack.getFrames() once perflib is fixed.
-    return null;
+    return stack.getFrames();
   }
 
   // Return the serial number of the given stack trace.
   private static int getStackTraceSerialNumber(StackTrace stack) {
-    // TODO: Use stack.getSerialNumber() once perflib is fixed.
-    return 0;
+    return stack.getSerialNumber();
   }
 
   // Get the site associated with the given stack id and depth.
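
The three restored accessors above come straight from perflib now that its stack-trace support works. A hedged sketch of how they might be combined when dumping an instance's allocation site (printAllocationStack is hypothetical, not part of ahat):

```java
// Hypothetical helper; Instance, StackTrace and StackFrame are the
// com.android.tools.perflib.heap types already imported by this file.
private static void printAllocationStack(Instance inst) {
  StackTrace stack = inst.getStack();      // previously hard-coded to null
  if (stack == null) {
    return;                                // no allocation info recorded
  }
  System.out.println("stack serial: " + stack.getSerialNumber());
  for (StackFrame frame : stack.getFrames()) {
    System.out.println("  at " + frame);
  }
}
```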
diff --git a/tools/ahat/src/help.html b/tools/ahat/src/help.html
index b48d791..b7ae2ce 100644
--- a/tools/ahat/src/help.html
+++ b/tools/ahat/src/help.html
@@ -54,3 +54,38 @@
     </ul>
   </li>
 </ul>
+
+<h2>Tips:</h2>
+<h3>Heaps</h3>
+<p>
+Android heap dumps contain information for multiple heaps. The <b>app</b> heap
+is the memory used by your application. The <b>zygote</b> and <b>image</b>
+heaps are used by the system. You should ignore everything in the zygote and
+image heaps and look only at the app heap: changes in your application will
+not affect the zygote or image heaps, and because those heaps are shared,
+they do not contribute significantly to your application's PSS.
+</p>
+
+<h3>Bitmaps</h3>
+<p>
+Bitmaps store their data using byte[] arrays. Whenever you see a large
+byte[], check whether it is a bitmap by looking for a single
+android.graphics.Bitmap object referring to it. The byte[] will be marked as a
+root, but it is really being retained by the android.graphics.Bitmap object.
+</p>
+
+<h3>DexCaches</h3>
+<p>
+For each DexFile you load, there will be a corresponding DexCache whose size
+is proportional to the number of strings, fields, methods, and classes in your
+dex file. The DexCache entries may or may not be visible depending on the
+version of the Android platform the heap dump is from.
+</p>
+
+<h3>FinalizerReferences</h3>
+<p>
+A FinalizerReference is allocated for every object on the heap that has a
+non-trivial finalizer. These are stored in a linked list reachable from the
+FinalizerReference class object.
+</p>
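
The FinalizerReferences tip is easy to demonstrate: any class overriding finalize() causes the runtime to allocate one FinalizerReference per instance. A minimal, hypothetical example of a class that will show up this way in an ahat heap dump:

```java
// Each Holder instance gets a matching FinalizerReference, reachable
// from the FinalizerReference class object, until it is finalized.
class Holder {
  private final byte[] payload = new byte[64 * 1024];

  @Override
  protected void finalize() throws Throwable {
    try {
      System.out.println("releasing " + payload.length + " bytes");
    } finally {
      super.finalize();  // non-trivial finalizer triggers the bookkeeping
    }
  }
}
```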
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
index 84bded9..178e0b5 100644
--- a/tools/checker/common/archs.py
+++ b/tools/checker/common/archs.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-archs_list = ['ARM', 'ARM64', 'MIPS64', 'X86', 'X86_64']
+archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index c74508d..3bd62fe 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -86,8 +86,10 @@
       if m:
         enclosing_classes.append(m.group(1))
         continue
-      m = re.compile(r'^\s*\}( .*)?;').search(raw_line)
-      if m:
+
+      # End of class/struct -- be careful not to match "do { ... } while" constructs by accident
+      m = re.compile(r'^\s*\}(\s+)?(while)?(.+)?;').search(raw_line)
+      if m and not m.group(2):
         enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
         continue
 
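The tightened regular expression above is worth a standalone check. A small sketch of the same logic in Java regex form, assuming the goal is to treat `};` as the end of a class or struct while skipping `do { ... } while (...);` tails:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class BraceEndCheck {
  // Same shape as the Python pattern: '}', optional whitespace,
  // optional 'while', optional trailing text, then ';'.
  private static final Pattern END_OF_CLASS =
      Pattern.compile("^\\s*\\}(\\s+)?(while)?(.+)?;");

  static boolean endsClass(String line) {
    Matcher m = END_OF_CLASS.matcher(line);
    return m.find() && m.group(2) == null;  // group(2) set => do/while tail
  }

  public static void main(String[] args) {
    System.out.println(endsClass("};"));                // true
    System.out.println(endsClass("} foo;"));            // true
    System.out.println(endsClass("} while (x < 3);"));  // false
  }
}
```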
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index b4f686f..9a8b462 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -159,10 +159,10 @@
   bug: 22786792
 },
 {
-  description: "Formatting failures",
+  description: "Lack of IPv6 on some buildbot slaves",
   result: EXEC_FAILED,
-  names: ["libcore.java.text.NumberFormatTest#test_currencyFromLocale",
-          "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits"],
-  bug: 25136848
+  names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
+          "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6"],
+  bug: 25178637
 }
 ]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index edec362..de27a6f 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -30,8 +30,6 @@
 
 art="/data/local/tmp/system/bin/art"
 art_debugee="sh /data/local/tmp/system/bin/art"
-# We use Quick's image on target because optimizing's image is not compiled debuggable.
-image="-Ximage:/data/art-test/core.art"
 args=$@
 debuggee_args="-Xcompiler-option --debuggable"
 device_dir="--device-dir=/data/local/tmp"
@@ -41,6 +39,8 @@
 image_compiler_option=""
 debug="no"
 verbose="no"
+image="-Ximage:/data/art-test/core-jit.art"
+vm_args=""
 # By default, we run the whole JDWP test suite.
 test="org.apache.harmony.jpda.tests.share.AllTests"
 
@@ -88,7 +88,11 @@
   fi
 done
 
-vm_args="--vm-arg $image"
+if [[ "$image" != "" ]]; then
+  vm_args="--vm-arg $image"
+fi
+vm_args="$vm_args --vm-arg -Xusejit:true"
+debuggee_args="$debuggee_args -Xusejit:true"
 if [[ $debug == "yes" ]]; then
   art="$art -d"
   art_debugee="$art_debugee -d"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 80f7a37..67a7983 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -102,4 +102,4 @@
 # Run the tests using vogar.
 echo "Running tests for the following test packages:"
 echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
+vogar $vogar_args --vm-arg -Xusejit:true --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}