Merge "ART: Use an iterative way in DoDFSPreOrderSSARename()"
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index a443487..288bddd 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -295,10 +295,6 @@
   art_cflags += -DIMT_SIZE=64
 endif
 
-ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
-  art_cflags += -DART_USE_OPTIMIZING_COMPILER=1
-endif
-
 ifeq ($(ART_HEAP_POISONING),true)
   art_cflags += -DART_HEAP_POISONING=1
   art_asflags += -DART_HEAP_POISONING=1
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 6e17ed3..d71ae29 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -52,18 +52,12 @@
   core_pic_infix :=
   core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
 
-  # With the optimizing compiler, we want to rerun dex2oat whenever there is
-  # a dex2oat change to catch regressions early.
-  ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
-    core_dex2oat_dependency := $(DEX2OAT)
-  endif
-
   ifeq ($(1),default)
     core_compile_options += --compiler-backend=Quick
   endif
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
-    core_dex2oat_dependency := $(DEX2OAT)
+    core_dex2oat_dependency += $(DEX2OAT)
     core_infix := -optimizing
   endif
   ifeq ($(1),interpreter)
@@ -178,18 +172,12 @@
   core_pic_infix :=
   core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
 
-  # With the optimizing compiler, we want to rerun dex2oat whenever there is
-  # a dex2oat change to catch regressions early.
-  ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
-    core_dex2oat_dependency := $(DEX2OAT)
-  endif
-
   ifeq ($(1),default)
     core_compile_options += --compiler-backend=Quick
   endif
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
-    core_dex2oat_dependency := $(DEX2OAT)
+    core_dex2oat_dependency += $(DEX2OAT)
     core_infix := -optimizing
   endif
   ifeq ($(1),interpreter)
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index dc2bc5c..67b4428 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -92,7 +92,7 @@
 
   void UnreserveImageSpace();
 
-  Compiler::Kind compiler_kind_ = kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick;
+  Compiler::Kind compiler_kind_ = Compiler::kOptimizing;
   std::unique_ptr<CompilerOptions> compiler_options_;
   std::unique_ptr<VerificationResults> verification_results_;
   std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1f114cf..3c5c2fe 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -148,7 +148,7 @@
   if (arg1.wide == 0) {
     LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
   } else {
-    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
+    RegStorage r_tmp = TargetReg(kArg2, kWide);
     LoadValueDirectWideFixed(arg1, r_tmp);
   }
   LoadConstant(TargetReg(kArg0, kNotWide), arg0);
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 42b3541..960f4d9 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -410,7 +410,7 @@
  * of an existing value range, NewArray or a loop phi corresponding to an
  * incrementing/decrementing array index (MonotonicValueRange).
  */
-class ValueRange : public ArenaObject<kArenaAllocMisc> {
+class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
  public:
   ValueRange(ArenaAllocator* allocator, ValueBound lower, ValueBound upper)
       : allocator_(allocator), lower_(lower), upper_(upper) {}
@@ -1112,7 +1112,14 @@
 
   BCEVisitor(HGraph* graph, HInductionVarAnalysis* induction_analysis)
       : HGraphVisitor(graph),
-        maps_(graph->GetBlocks().size()),
+        maps_(graph->GetBlocks().size(),
+              ArenaSafeMap<int, ValueRange*>(
+                  std::less<int>(),
+                  graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+              graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
+        first_constant_index_bounds_check_map_(
+            std::less<int>(),
+            graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
         need_to_revisit_block_(false),
         initial_block_size_(graph->GetBlocks().size()),
         induction_range_(induction_analysis) {}
@@ -1137,14 +1144,9 @@
       // Added blocks don't keep value ranges.
       return nullptr;
     }
-    int block_id = basic_block->GetBlockId();
-    if (maps_.at(block_id) == nullptr) {
-      std::unique_ptr<ArenaSafeMap<int, ValueRange*>> map(
-          new ArenaSafeMap<int, ValueRange*>(
-              std::less<int>(), GetGraph()->GetArena()->Adapter()));
-      maps_.at(block_id) = std::move(map);
-    }
-    return maps_.at(block_id).get();
+    uint32_t block_id = basic_block->GetBlockId();
+    DCHECK_LT(block_id, maps_.size());
+    return &maps_[block_id];
   }
 
   // Traverse up the dominator tree to look for value range info.
@@ -1842,11 +1844,11 @@
     }
   }
 
-  std::vector<std::unique_ptr<ArenaSafeMap<int, ValueRange*>>> maps_;
+  ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
 
   // Map an HArrayLength instruction's id to the first HBoundsCheck instruction in
   // a block that checks a constant index against that HArrayLength.
-  SafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;
+  ArenaSafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;
 
   // For the block, there is at least one HArrayLength instruction for which there
   // is more than one bounds check instruction with constant indexing. And it's
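[Reviewer note] With maps_ preallocated to the block count, the map lookup shown above becomes a bounds-checked vector index instead of a lazy heap allocation per block. A minimal standalone sketch of the same shape, using std containers in place of ART's arena-backed ArenaVector/ArenaSafeMap (the class and method names below are illustrative, not ART's):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <vector>

    struct ValueRange;  // opaque payload, stands in for ART's ValueRange

    class BlockRanges {
     public:
      // One (possibly empty) map per basic block, allocated up front.
      explicit BlockRanges(size_t num_blocks) : maps_(num_blocks) {}

      std::map<int, ValueRange*>* ForBlock(uint32_t block_id) {
        assert(block_id < maps_.size());  // mirrors the DCHECK_LT above
        return &maps_[block_id];
      }

     private:
      std::vector<std::map<int, ValueRange*>> maps_;
    };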
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 7ae405a..ebbfb14 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -324,7 +324,7 @@
   // Keep a map of all try blocks and their respective TryItems. We do not use
   // the block's pointer but rather its id to ensure deterministic iteration.
   ArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
-      std::less<uint32_t>(), arena_->Adapter());
+      std::less<uint32_t>(), arena_->Adapter(kArenaAllocGraphBuilder));
 
   // Obtain TryItem information for blocks with throwing instructions, and split
   // blocks which are both try & catch to simplify the graph.
@@ -1186,6 +1186,12 @@
   }
 }
 
+static Primitive::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
+  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+  const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+  return Primitive::GetType(type[0]);
+}
+
 bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
                                              uint32_t dex_pc,
                                              bool is_put) {
@@ -1205,44 +1211,61 @@
   ArtField* resolved_field =
       compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
 
-  if (resolved_field == nullptr) {
-    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
-    return false;
-  }
-
-  Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
 
   HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot, dex_pc);
-  current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
+  HInstruction* null_check = new (arena_) HNullCheck(object, dex_pc);
+  current_block_->AddInstruction(null_check);
+
+  Primitive::Type field_type = (resolved_field == nullptr)
+      ? GetFieldAccessType(*dex_file_, field_index)
+      : resolved_field->GetTypeAsPrimitiveType();
   if (is_put) {
     Temporaries temps(graph_);
-    HInstruction* null_check = current_block_->GetLastInstruction();
     // We need one temporary for the null check.
     temps.Add(null_check);
     HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
-    current_block_->AddInstruction(new (arena_) HInstanceFieldSet(
-        null_check,
-        value,
-        field_type,
-        resolved_field->GetOffset(),
-        resolved_field->IsVolatile(),
-        field_index,
-        *dex_file_,
-        dex_compilation_unit_->GetDexCache(),
-        dex_pc));
+    HInstruction* field_set = nullptr;
+    if (resolved_field == nullptr) {
+      MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+      field_set = new (arena_) HUnresolvedInstanceFieldSet(null_check,
+                                                           value,
+                                                           field_type,
+                                                           field_index,
+                                                           dex_pc);
+    } else {
+      field_set = new (arena_) HInstanceFieldSet(null_check,
+                                                 value,
+                                                 field_type,
+                                                 resolved_field->GetOffset(),
+                                                 resolved_field->IsVolatile(),
+                                                 field_index,
+                                                 *dex_file_,
+                                                 dex_compilation_unit_->GetDexCache(),
+                                                 dex_pc);
+    }
+    current_block_->AddInstruction(field_set);
   } else {
-    current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
-        current_block_->GetLastInstruction(),
-        field_type,
-        resolved_field->GetOffset(),
-        resolved_field->IsVolatile(),
-        field_index,
-        *dex_file_,
-        dex_compilation_unit_->GetDexCache(),
-        dex_pc));
-
-    UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+    HInstruction* field_get = nullptr;
+    if (resolved_field == nullptr) {
+      MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+      field_get = new (arena_) HUnresolvedInstanceFieldGet(null_check,
+                                                           field_type,
+                                                           field_index,
+                                                           dex_pc);
+    } else {
+      field_get = new (arena_) HInstanceFieldGet(null_check,
+                                                 field_type,
+                                                 resolved_field->GetOffset(),
+                                                 resolved_field->IsVolatile(),
+                                                 field_index,
+                                                 *dex_file_,
+                                                 dex_compilation_unit_->GetDexCache(),
+                                                 dex_pc);
+    }
+    current_block_->AddInstruction(field_get);
+    UpdateLocal(source_or_dest_reg, field_get, dex_pc);
   }
+
   return true;
 }
 
@@ -1282,6 +1305,24 @@
   return outer_class.Get() == cls.Get();
 }
 
+void HGraphBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
+                                                     uint32_t dex_pc,
+                                                     bool is_put,
+                                                     Primitive::Type field_type) {
+  uint32_t source_or_dest_reg = instruction.VRegA_21c();
+  uint16_t field_index = instruction.VRegB_21c();
+
+  if (is_put) {
+    HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
+    current_block_->AddInstruction(
+        new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+  } else {
+    current_block_->AddInstruction(
+        new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+    UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+  }
+}
+
 bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
                                            uint32_t dex_pc,
                                            bool is_put) {
@@ -1299,10 +1339,13 @@
       soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
 
   if (resolved_field == nullptr) {
-    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
-    return false;
+    MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+    Primitive::Type field_type = GetFieldAccessType(*dex_file_, field_index);
+    BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+    return true;
   }
 
+  Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
   const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
   Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
       outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file)));
@@ -1317,6 +1360,7 @@
     // The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
     return false;
   } else {
+    // TODO: This is rather expensive. Perf it and cache the results if needed.
     std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
         outer_dex_cache.Get(),
         GetCompilingClass(),
@@ -1325,7 +1369,9 @@
         &storage_index);
     bool can_easily_access = is_put ? pair.second : pair.first;
     if (!can_easily_access) {
-      return false;
+      MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+      BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+      return true;
     }
   }
 
@@ -1346,8 +1392,6 @@
     cls = new (arena_) HClinitCheck(constant, dex_pc);
     current_block_->AddInstruction(cls);
   }
-
-  Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
   if (is_put) {
     // We need to keep the class alive before loading the value.
     Temporaries temps(graph_);
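[Reviewer note] GetFieldAccessType() lets the builder type an access even when the field does not resolve, by reading the first character of the dex field type descriptor. A hedged standalone sketch of that mapping (Primitive::GetType in ART does the equivalent; the enum and function names below are illustrative):

    #include <cassert>

    enum class Type {
      kBoolean, kByte, kChar, kShort, kInt, kLong, kFloat, kDouble,
      kReference  // 'L...;' classes and '[' arrays
    };

    Type TypeFromDescriptor(const char* descriptor) {
      switch (descriptor[0]) {
        case 'Z': return Type::kBoolean;
        case 'B': return Type::kByte;
        case 'C': return Type::kChar;
        case 'S': return Type::kShort;
        case 'I': return Type::kInt;
        case 'J': return Type::kLong;
        case 'F': return Type::kFloat;
        case 'D': return Type::kDouble;
        case 'L':
        case '[': return Type::kReference;
        default:
          assert(false && "malformed field descriptor");
          return Type::kInt;  // unreachable
      }
    }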
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 4c8e3d0..b2dc241 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -187,6 +187,10 @@
   // Builds an instance field access node and returns whether the instruction is supported.
   bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
 
+  void BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
+                                        uint32_t dex_pc,
+                                        bool is_put,
+                                        Primitive::Type field_type);
   // Builds a static field access node and returns whether the instruction is supported.
   bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
 
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index be05691..8254277 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -413,6 +413,130 @@
   InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
 }
 
+void CodeGenerator::CreateUnresolvedFieldLocationSummary(
+    HInstruction* field_access,
+    Primitive::Type field_type,
+    const FieldAccessCallingConvention& calling_convention) {
+  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedInstanceFieldSet();
+  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedStaticFieldGet();
+
+  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+  LocationSummary* locations =
+      new (allocator) LocationSummary(field_access, LocationSummary::kCall);
+
+  locations->AddTemp(calling_convention.GetFieldIndexLocation());
+
+  if (is_instance) {
+    // Add the `this` object for instance field accesses.
+    locations->SetInAt(0, calling_convention.GetObjectLocation());
+  }
+
+  // Note that pSetXXStatic/pGetXXStatic always take/return an int or int64
+  // regardless of the type. Because of that we are forced to special case
+  // the access to floating point values.
+  if (is_get) {
+    if (Primitive::IsFloatingPointType(field_type)) {
+      // The return value will be stored in regular registers while register
+      // allocator expects it in a floating point register.
+      // Note: we don't need to request additional temps because the return
+      // register(s) are already blocked due to the call and they may overlap with
+      // the input or field index.
+      // The transfer between the two will be done at codegen level.
+      locations->SetOut(calling_convention.GetFpuLocation(field_type));
+    } else {
+      locations->SetOut(calling_convention.GetReturnLocation(field_type));
+    }
+  } else {
+    size_t set_index = is_instance ? 1 : 0;
+    if (Primitive::IsFloatingPointType(field_type)) {
+      // The set value comes from a float location while the calling convention
+      // expects it in a regular register location. Allocate a temp for it and
+      // make the transfer at codegen.
+      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
+      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
+    } else {
+      locations->SetInAt(set_index,
+          calling_convention.GetSetValueLocation(field_type, is_instance));
+    }
+  }
+}
+
+void CodeGenerator::GenerateUnresolvedFieldAccess(
+    HInstruction* field_access,
+    Primitive::Type field_type,
+    uint32_t field_index,
+    uint32_t dex_pc,
+    const FieldAccessCallingConvention& calling_convention) {
+  LocationSummary* locations = field_access->GetLocations();
+
+  MoveConstant(locations->GetTemp(0), field_index);
+
+  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedInstanceFieldSet();
+  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedStaticFieldGet();
+
+  if (!is_get && Primitive::IsFloatingPointType(field_type)) {
+    // Copy the float value to be set into the calling convention register.
+    // Note that using directly the temp location is problematic as we don't
+    // support temp register pairs. To avoid boilerplate conversion code, use
+    // the location from the calling convention.
+    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
+                 locations->InAt(is_instance ? 1 : 0),
+                 (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
+  }
+
+  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
+          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
+      break;
+    case Primitive::kPrimByte:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
+          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
+      break;
+    case Primitive::kPrimShort:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
+          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
+      break;
+    case Primitive::kPrimChar:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
+          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
+          : (is_get ? kQuickGet32Static : kQuickSet32Static);
+      break;
+    case Primitive::kPrimNot:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
+          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
+          : (is_get ? kQuickGet64Static : kQuickSet64Static);
+      break;
+    default:
+      LOG(FATAL) << "Invalid type " << field_type;
+  }
+  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
+
+  if (is_get && Primitive::IsFloatingPointType(field_type)) {
+    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
+  }
+}
+
 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
   // The DCHECKS below check that a register is not specified twice in
   // the summary. The out location can overlap with an input, so we need
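[Reviewer note] As the comments in CreateUnresolvedFieldLocationSummary() say, the pGetXX/pSetXX entrypoints traffic in int/int64 regardless of the field's declared type, so float and double values are bit-moved through core registers around the call. A minimal sketch of that convention (the two extern signatures are simplified assumptions; ART's real entrypoints also take referrer/thread arguments):

    #include <cstdint>
    #include <cstring>

    // Hypothetical runtime entrypoints: integer bits in, integer bits out.
    extern "C" void artSet32Static(uint32_t field_index, uint32_t new_value);
    extern "C" uint32_t artGet32Static(uint32_t field_index);

    void SetFloatStatic(uint32_t field_index, float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // the vmov/Fmov analogue
      artSet32Static(field_index, bits);
    }

    float GetFloatStatic(uint32_t field_index) {
      uint32_t bits = artGet32Static(field_index);
      float value;
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }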
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b58a3ff..a3ebc43 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -143,6 +143,22 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
 };
 
+class FieldAccessCallingConvention {
+ public:
+  virtual Location GetObjectLocation() const = 0;
+  virtual Location GetFieldIndexLocation() const = 0;
+  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
+  virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
+  virtual Location GetFpuLocation(Primitive::Type type) const = 0;
+  virtual ~FieldAccessCallingConvention() {}
+
+ protected:
+  FieldAccessCallingConvention() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
+};
+
 class CodeGenerator {
  public:
   // Compiles the graph to executable instructions. Returns whether the compilation
@@ -177,6 +193,9 @@
   virtual void Bind(HBasicBlock* block) = 0;
   virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
   virtual void MoveConstant(Location destination, int32_t value) = 0;
+  virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
+  virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
+
   virtual Assembler* GetAssembler() = 0;
   virtual const Assembler& GetAssembler() const = 0;
   virtual size_t GetWordSize() const = 0;
@@ -385,6 +404,18 @@
 
   void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
 
+  void CreateUnresolvedFieldLocationSummary(
+      HInstruction* field_access,
+      Primitive::Type field_type,
+      const FieldAccessCallingConvention& calling_convention);
+
+  void GenerateUnresolvedFieldAccess(
+      HInstruction* field_access,
+      Primitive::Type field_type,
+      uint32_t field_index,
+      uint32_t dex_pc,
+      const FieldAccessCallingConvention& calling_convention);
+
   void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
   DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
 
@@ -425,9 +456,12 @@
         core_spill_mask_(0),
         fpu_spill_mask_(0),
         first_register_slot_in_slow_path_(0),
-        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers)),
-        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers)),
-        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs)),
+        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
+                                                                    kArenaAllocCodeGenerator)),
+        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
+                                                                   kArenaAllocCodeGenerator)),
+        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs,
+                                                                    kArenaAllocCodeGenerator)),
         number_of_core_registers_(number_of_core_registers),
         number_of_fpu_registers_(number_of_fpu_registers),
         number_of_register_pairs_(number_of_register_pairs),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index da7a675..cf7f5f4 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -361,6 +361,51 @@
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
 };
 
+class ArraySetSlowPathARM : public SlowPathCode {
+ public:
+  explicit ArraySetSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(
+        locations->InAt(0),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimNot,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(1),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(2),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+        Primitive::kPrimNot,
+        nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+    CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+    arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+                               instruction_,
+                               instruction_->GetDexPc(),
+                               this);
+    RestoreLiveRegisters(codegen, locations);
+    __ b(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }
+
+ private:
+  HInstruction* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
+};
+
 #undef __
 #define __ down_cast<ArmAssembler*>(GetAssembler())->
 
@@ -428,8 +473,12 @@
                     kNumberOfRegisterPairs,
                     ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                         arraysize(kCoreCalleeSaves)),
-                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
-                                        arraysize(kFpuCalleeSaves)),
+                    graph->IsDebuggable()
+                        // If the graph is debuggable, we need to save the fpu registers ourselves,
+                        // as the stubs do not do it.
+                        ? 0
+                        : ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+                                              arraysize(kFpuCalleeSaves)),
                     compiler_options,
                     stats),
       block_labels_(nullptr),
@@ -438,9 +487,11 @@
       move_resolver_(graph->GetArena(), this),
       assembler_(),
       isa_features_(isa_features),
-      method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
-      call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
-      relative_call_patches_(graph->GetArena()->Adapter()) {
+      method_patches_(MethodReferenceComparator(),
+                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      call_patches_(MethodReferenceComparator(),
+                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Always save the LR register to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(LR));
 }
@@ -855,6 +906,10 @@
           Primitive::kPrimInt);
     } else if (source.IsFpuRegister()) {
       UNIMPLEMENTED(FATAL);
+    } else if (source.IsFpuRegisterPair()) {
+      __ vmovrrd(destination.AsRegisterPairLow<Register>(),
+                 destination.AsRegisterPairHigh<Register>(),
+                 FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
     } else {
       DCHECK(source.IsDoubleStackSlot());
       DCHECK(ExpectedPairLayout(destination));
@@ -866,6 +921,10 @@
       __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                          SP,
                          source.GetStackIndex());
+    } else if (source.IsRegisterPair()) {
+      __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
+                 source.AsRegisterPairLow<Register>(),
+                 source.AsRegisterPairHigh<Register>());
     } else {
       UNIMPLEMENTED(FATAL);
     }
@@ -987,6 +1046,25 @@
   __ LoadImmediate(location.AsRegister<Register>(), value);
 }
 
+void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+  if (Primitive::Is64BitType(dst_type)) {
+    Move64(dst, src);
+  } else {
+    Move32(dst, src);
+  }
+}
+
+void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                      HInstruction* instruction,
                                      uint32_t dex_pc,
@@ -3554,6 +3632,74 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
       ? LocationSummary::kCallOnSlowPath
@@ -3744,38 +3890,32 @@
 
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool needs_runtime_call = instruction->NeedsTypeCheck();
+  bool may_need_runtime_call = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
-      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
-  if (needs_runtime_call) {
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+      instruction,
+      may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(value_type)) {
+    locations->SetInAt(2, Location::RequiresFpuRegister());
   } else {
-    locations->SetInAt(0, Location::RequiresRegister());
-    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-    if (Primitive::IsFloatingPointType(value_type)) {
-      locations->SetInAt(2, Location::RequiresFpuRegister());
-    } else {
-      locations->SetInAt(2, Location::RequiresRegister());
-    }
+    locations->SetInAt(2, Location::RequiresRegister());
+  }
 
-    if (needs_write_barrier) {
-      // Temporary registers for the write barrier.
-      locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
-      locations->AddTemp(Location::RequiresRegister());
-    }
+  if (needs_write_barrier) {
+    // Temporary registers for the write barrier.
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
+    locations->AddTemp(Location::RequiresRegister());
   }
 }
 
 void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
+  Register array = locations->InAt(0).AsRegister<Register>();
   Location index = locations->InAt(1);
   Primitive::Type value_type = instruction->GetComponentType();
-  bool needs_runtime_call = locations->WillCall();
+  bool may_need_runtime_call = locations->CanCall();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
@@ -3787,9 +3927,9 @@
       if (index.IsConstant()) {
         size_t offset =
             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        __ StoreToOffset(kStoreByte, value, obj, offset);
+        __ StoreToOffset(kStoreByte, value, array, offset);
       } else {
-        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>()));
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>()));
         __ StoreToOffset(kStoreByte, value, IP, data_offset);
       }
       break;
@@ -3802,55 +3942,133 @@
       if (index.IsConstant()) {
         size_t offset =
             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
-        __ StoreToOffset(kStoreHalfword, value, obj, offset);
+        __ StoreToOffset(kStoreHalfword, value, array, offset);
       } else {
-        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_2));
         __ StoreToOffset(kStoreHalfword, value, IP, data_offset);
       }
       break;
     }
 
-    case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      if (!needs_runtime_call) {
-        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-        Register value = locations->InAt(2).AsRegister<Register>();
-        Register source = value;
-        if (kPoisonHeapReferences && needs_write_barrier) {
-          // Note that in the case where `value` is a null reference,
-          // we do not enter this block, as a null reference does not
-          // need poisoning.
-          DCHECK_EQ(value_type, Primitive::kPrimNot);
-          Register temp = locations->GetTemp(0).AsRegister<Register>();
-          __ Mov(temp, value);
-          __ PoisonHeapReference(temp);
-          source = temp;
-        }
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Register value = locations->InAt(2).AsRegister<Register>();
+      Register source = value;
+
+      if (instruction->InputAt(2)->IsNullConstant()) {
+        // Just setting null.
         if (index.IsConstant()) {
           size_t offset =
               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          __ StoreToOffset(kStoreWord, source, obj, offset);
+          __ StoreToOffset(kStoreWord, source, array, offset);
         } else {
           DCHECK(index.IsRegister()) << index;
-          __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+          __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
           __ StoreToOffset(kStoreWord, source, IP, data_offset);
         }
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
-        if (needs_write_barrier) {
-          DCHECK_EQ(value_type, Primitive::kPrimNot);
-          Register temp = locations->GetTemp(0).AsRegister<Register>();
-          Register card = locations->GetTemp(1).AsRegister<Register>();
-          codegen_->MarkGCCard(temp, card, obj, value, instruction->GetValueCanBeNull());
-        }
-      } else {
-        DCHECK_EQ(value_type, Primitive::kPrimNot);
-        // Note: if heap poisoning is enabled, pAputObject takes cares
-        // of poisoning the reference.
-        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
-                                instruction,
-                                instruction->GetDexPc(),
-                                nullptr);
+        break;
       }
+
+      DCHECK(needs_write_barrier);
+      Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+      Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+      Label done;
+      SlowPathCode* slow_path = nullptr;
+
+      if (may_need_runtime_call) {
+        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM(instruction);
+        codegen_->AddSlowPath(slow_path);
+        if (instruction->GetValueCanBeNull()) {
+          Label non_zero;
+          __ CompareAndBranchIfNonZero(value, &non_zero);
+          if (index.IsConstant()) {
+            size_t offset =
+               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+            __ StoreToOffset(kStoreWord, value, array, offset);
+          } else {
+            DCHECK(index.IsRegister()) << index;
+            __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+            __ StoreToOffset(kStoreWord, value, IP, data_offset);
+          }
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          __ b(&done);
+          __ Bind(&non_zero);
+        }
+
+        __ LoadFromOffset(kLoadWord, temp1, array, class_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        __ MaybeUnpoisonHeapReference(temp1);
+        __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+        __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
+        // No need to poison/unpoison, we're comparing two poisoned references.
+        __ cmp(temp1, ShifterOperand(temp2));
+        if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          Label do_put;
+          __ b(&do_put, EQ);
+          __ MaybeUnpoisonHeapReference(temp1);
+          __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+          // No need to poison/unpoison, we're comparing against null.
+          __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+          __ Bind(&do_put);
+        } else {
+          __ b(slow_path->GetEntryLabel(), NE);
+        }
+      }
+
+      if (kPoisonHeapReferences) {
+        // Note that in the case where `value` is a null reference,
+        // we do not enter this block, as a null reference does not
+        // need poisoning.
+        DCHECK_EQ(value_type, Primitive::kPrimNot);
+        __ Mov(temp1, value);
+        __ PoisonHeapReference(temp1);
+        source = temp1;
+      }
+
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        __ StoreToOffset(kStoreWord, source, array, offset);
+      } else {
+        DCHECK(index.IsRegister()) << index;
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+        __ StoreToOffset(kStoreWord, source, IP, data_offset);
+      }
+
+      if (!may_need_runtime_call) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
+
+      codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
+
+      if (done.IsLinked()) {
+        __ Bind(&done);
+      }
+
+      if (slow_path != nullptr) {
+        __ Bind(slow_path->GetExitLabel());
+      }
+
+      break;
+    }
+
+    case Primitive::kPrimInt: {
+      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Register value = locations->InAt(2).AsRegister<Register>();
+      if (index.IsConstant()) {
+        size_t offset =
+            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+        __ StoreToOffset(kStoreWord, value, array, offset);
+      } else {
+        DCHECK(index.IsRegister()) << index;
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+        __ StoreToOffset(kStoreWord, value, IP, data_offset);
+      }
+
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -3860,9 +4078,9 @@
       if (index.IsConstant()) {
         size_t offset =
             (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
+        __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), array, offset);
       } else {
-        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
         __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
       }
       break;
@@ -3874,9 +4092,9 @@
       DCHECK(value.IsFpuRegister());
       if (index.IsConstant()) {
         size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), obj, offset);
+        __ StoreSToOffset(value.AsFpuRegister<SRegister>(), array, offset);
       } else {
-        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4));
         __ StoreSToOffset(value.AsFpuRegister<SRegister>(), IP, data_offset);
       }
       break;
@@ -3888,9 +4106,9 @@
       DCHECK(value.IsFpuRegisterPair());
       if (index.IsConstant()) {
         size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), obj, offset);
+        __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), array, offset);
       } else {
-        __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
+        __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8));
         __ StoreDToOffset(FromLowSToD(value.AsFpuRegisterPairLow<SRegister>()), IP, data_offset);
       }
 
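[Reviewer note] The new reference ArraySet path above replaces the unconditional pAputObject call with an inlined type check plus a slow path for the hard cases. In outline, the generated ARM code decides roughly as follows (simplified object model and illustrative helper, not ART's real headers):

    struct Class { Class* super_class; Class* component_type; };
    struct Object { Class* klass; };

    // True when the store may proceed without calling pAputObject.
    bool ArraySetFastPathOk(const Object* array, const Object* value) {
      if (value == nullptr) {
        return true;  // storing null never needs a type check
      }
      Class* component = array->klass->component_type;
      if (value->klass == component) {
        return true;  // exact component-type match
      }
      // A component type with no superclass is java.lang.Object, so any
      // reference is assignable; everything else goes to the slow path.
      return component->super_class == nullptr;
    }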
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 111112e..16d1d38 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -96,6 +96,38 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
 };
 
+class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionARM() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(R1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(R0);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(R0, R1)
+        : Location::RegisterLocation(R0);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(R2, R3)
+        : (is_instance
+            ? Location::RegisterLocation(R2)
+            : Location::RegisterLocation(R1));
+  }
+  Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::FpuRegisterPairLocation(S0, S1)
+        : Location::FpuRegisterLocation(S0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM);
+};
+
 class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
  public:
   ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
@@ -225,6 +257,9 @@
   void Bind(HBasicBlock* block) OVERRIDE;
   void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 31900d5..af5bbaa 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -19,7 +19,6 @@
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "art_method.h"
 #include "code_generator_utils.h"
-#include "common_arm64.h"
 #include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -119,8 +118,11 @@
 
   CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
       register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
-  CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
-      register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
+  CPURegList fp_list = CPURegList(
+      CPURegister::kFPRegister,
+      kDRegSize,
+      register_set->GetFloatingPointRegisters()
+          & (~(codegen->GetGraph()->IsDebuggable() ? 0 : callee_saved_fp_registers.list())));
 
   MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
   UseScratchRegisterScope temps(masm);
@@ -477,7 +479,7 @@
 class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   explicit DeoptimizationSlowPathARM64(HInstruction* instruction)
-    : instruction_(instruction) {}
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
@@ -496,6 +498,52 @@
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
 };
 
+class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+  explicit ArraySetSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(
+        locations->InAt(0),
+        LocationFrom(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimNot,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(1),
+        LocationFrom(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(2),
+        LocationFrom(calling_convention.GetRegisterAt(2)),
+        Primitive::kPrimNot,
+        nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
+    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
+
+ private:
+  HInstruction* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
+};
+
 #undef __
 
 Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
@@ -534,7 +582,9 @@
                     kNumberOfAllocatableFPRegisters,
                     kNumberOfAllocatableRegisterPairs,
                     callee_saved_core_registers.list(),
-                    callee_saved_fp_registers.list(),
+                    // If the graph is debuggable, we need to save the fpu registers ourselves,
+                    // as the stubs do not do it.
+                    graph->IsDebuggable() ? 0 : callee_saved_fp_registers.list(),
                     compiler_options,
                     stats),
       block_labels_(nullptr),
@@ -542,11 +592,14 @@
       instruction_visitor_(graph, this),
       move_resolver_(graph->GetArena(), this),
       isa_features_(isa_features),
-      uint64_literals_(std::less<uint64_t>(), graph->GetArena()->Adapter()),
-      method_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
-      call_patches_(MethodReferenceComparator(), graph->GetArena()->Adapter()),
-      relative_call_patches_(graph->GetArena()->Adapter()),
-      pc_rel_dex_cache_patches_(graph->GetArena()->Adapter()) {
+      uint64_literals_(std::less<uint64_t>(),
+                       graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      method_patches_(MethodReferenceComparator(),
+                      graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      call_patches_(MethodReferenceComparator(),
+                    graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Save the link register (containing the return address) to mimic Quick.
   AddAllocatedRegister(LocationFrom(lr));
 }
@@ -612,7 +665,7 @@
 void ParallelMoveResolverARM64::EmitMove(size_t index) {
   DCHECK_LT(index, moves_.size());
   MoveOperands* move = moves_[index];
-  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
+  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
 }
 
 void CodeGeneratorARM64::GenerateFrameEntry() {
@@ -696,7 +749,9 @@
   }
 
   if (instruction->IsCurrentMethod()) {
-    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
+    MoveLocation(location,
+                 Location::DoubleStackSlot(kCurrentMethodStackOffset),
+                 Primitive::kPrimVoid);
   } else if (locations != nullptr && locations->Out().Equals(location)) {
     return;
   } else if (instruction->IsIntConstant()
@@ -739,6 +794,14 @@
   __ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
 }
 
+void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
   Primitive::Type type = load->GetType();
 
@@ -889,7 +952,9 @@
          (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
 }
 
-void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
+void CodeGeneratorARM64::MoveLocation(Location destination,
+                                      Location source,
+                                      Primitive::Type dst_type) {
   if (source.Equals(destination)) {
     return;
   }
@@ -898,7 +963,7 @@
   // locations. When moving from and to a register, the argument type can be
   // used to generate 32bit instead of 64bit moves. In debug mode we also
   // checks the coherency of the locations and the type.
-  bool unspecified_type = (type == Primitive::kPrimVoid);
+  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
 
   if (destination.IsRegister() || destination.IsFpuRegister()) {
     if (unspecified_type) {
@@ -908,30 +973,44 @@
                                   || src_cst->IsFloatConstant()
                                   || src_cst->IsNullConstant()))) {
         // For stack slots and 32bit constants, a 64bit type is appropriate.
-        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
       } else {
         // If the source is a double stack slot or a 64bit constant, a 64bit
         // type is appropriate. Else the source is a register, and since the
         // type has not been specified, we chose a 64bit type to force a 64bit
         // move.
-        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
       }
     }
-    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
-           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
-    CPURegister dst = CPURegisterFrom(destination, type);
+    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
+    CPURegister dst = CPURegisterFrom(destination, dst_type);
     if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
       DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
       __ Ldr(dst, StackOperandFrom(source));
     } else if (source.IsConstant()) {
-      DCHECK(CoherentConstantAndType(source, type));
+      DCHECK(CoherentConstantAndType(source, dst_type));
       MoveConstant(dst, source.GetConstant());
-    } else {
+    } else if (source.IsRegister()) {
       if (destination.IsRegister()) {
-        __ Mov(Register(dst), RegisterFrom(source, type));
+        __ Mov(Register(dst), RegisterFrom(source, dst_type));
       } else {
         DCHECK(destination.IsFpuRegister());
-        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
+        Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+            ? Primitive::kPrimLong
+            : Primitive::kPrimInt;
+        __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
+      }
+    } else {
+      DCHECK(source.IsFpuRegister());
+      if (destination.IsRegister()) {
+        Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+            ? Primitive::kPrimDouble
+            : Primitive::kPrimFloat;
+        __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
+      } else {
+        DCHECK(destination.IsFpuRegister());
+        __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
       }
     }
   } else {  // The destination is not a register. It must be a stack slot.
@@ -939,16 +1018,17 @@
     if (source.IsRegister() || source.IsFpuRegister()) {
       if (unspecified_type) {
         if (source.IsRegister()) {
-          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
         } else {
-          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
         }
       }
-      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
-             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
-      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
+      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
+      __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
     } else if (source.IsConstant()) {
-      DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
+      DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
+          << source << " " << dst_type;
       UseScratchRegisterScope temps(GetVIXLAssembler());
       HConstant* src_cst = source.GetConstant();
       CPURegister temp;
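
The new IsRegister()/IsFpuRegister() branches above emit fmov for cross-bank moves, deriving the source register's type from the width of the destination so that w pairs with s and x pairs with d. A standalone sketch of that derivation; Type and Is64Bit stand in for Primitive:

enum class Type { kInt, kLong, kFloat, kDouble };

bool Is64Bit(Type t) { return t == Type::kLong || t == Type::kDouble; }

// GPR -> FPR: the integer source must match the FP destination's width.
Type GprSourceFor(Type fp_dst) { return Is64Bit(fp_dst) ? Type::kLong : Type::kInt; }

// FPR -> GPR: the FP source must match the integer destination's width.
Type FprSourceFor(Type gpr_dst) { return Is64Bit(gpr_dst) ? Type::kDouble : Type::kFloat; }
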
@@ -1552,76 +1632,136 @@
 }
 
 void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
-  if (instruction->NeedsTypeCheck()) {
-    LocationSummary* locations =
-        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
-    locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
-    locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+      instruction,
+      instruction->NeedsTypeCheck() ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+    locations->SetInAt(2, Location::RequiresFpuRegister());
   } else {
-    LocationSummary* locations =
-        new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
-    locations->SetInAt(0, Location::RequiresRegister());
-    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
-      locations->SetInAt(2, Location::RequiresFpuRegister());
-    } else {
-      locations->SetInAt(2, Location::RequiresRegister());
-    }
+    locations->SetInAt(2, Location::RequiresRegister());
   }
 }
 
 void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
   Primitive::Type value_type = instruction->GetComponentType();
   LocationSummary* locations = instruction->GetLocations();
-  bool needs_runtime_call = locations->WillCall();
+  bool may_need_runtime_call = locations->CanCall();
+  bool needs_write_barrier =
+      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
-  if (needs_runtime_call) {
-    // Note: if heap poisoning is enabled, pAputObject takes cares
-    // of poisoning the reference.
-    codegen_->InvokeRuntime(
-        QUICK_ENTRY_POINT(pAputObject), instruction, instruction->GetDexPc(), nullptr);
-    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+  Register array = InputRegisterAt(instruction, 0);
+  CPURegister value = InputCPURegisterAt(instruction, 2);
+  CPURegister source = value;
+  Location index = locations->InAt(1);
+  size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
+  MemOperand destination = HeapOperand(array);
+  MacroAssembler* masm = GetVIXLAssembler();
+  BlockPoolsScope block_pools(masm);
+
+  if (!needs_write_barrier) {
+    DCHECK(!may_need_runtime_call);
+    if (index.IsConstant()) {
+      offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
+      destination = HeapOperand(array, offset);
+    } else {
+      UseScratchRegisterScope temps(masm);
+      Register temp = temps.AcquireSameSizeAs(array);
+      __ Add(temp, array, offset);
+      destination = HeapOperand(temp,
+                                XRegisterFrom(index),
+                                LSL,
+                                Primitive::ComponentSizeShift(value_type));
+    }
+    codegen_->Store(value_type, value, destination);
+    codegen_->MaybeRecordImplicitNullCheck(instruction);
   } else {
-    Register obj = InputRegisterAt(instruction, 0);
-    CPURegister value = InputCPURegisterAt(instruction, 2);
-    CPURegister source = value;
-    Location index = locations->InAt(1);
-    size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
-    MemOperand destination = HeapOperand(obj);
-    MacroAssembler* masm = GetVIXLAssembler();
-    BlockPoolsScope block_pools(masm);
+    DCHECK(needs_write_barrier);
+    vixl::Label done;
+    SlowPathCodeARM64* slow_path = nullptr;
     {
       // We use a block to end the scratch scope before the write barrier, thus
       // freeing the temporary registers so they can be used in `MarkGCCard`.
       UseScratchRegisterScope temps(masm);
-
-      if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
-        DCHECK(value.IsW());
-        Register temp = temps.AcquireW();
-        __ Mov(temp, value.W());
-        GetAssembler()->PoisonHeapReference(temp.W());
-        source = temp;
-      }
-
+      Register temp = temps.AcquireSameSizeAs(array);
       if (index.IsConstant()) {
         offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
-        destination = HeapOperand(obj, offset);
+        destination = HeapOperand(array, offset);
       } else {
-        Register temp = temps.AcquireSameSizeAs(obj);
-        __ Add(temp, obj, offset);
         destination = HeapOperand(temp,
                                   XRegisterFrom(index),
                                   LSL,
                                   Primitive::ComponentSizeShift(value_type));
       }
 
-      codegen_->Store(value_type, source, destination);
-      codegen_->MaybeRecordImplicitNullCheck(instruction);
+      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+
+      if (may_need_runtime_call) {
+        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
+        codegen_->AddSlowPath(slow_path);
+        if (instruction->GetValueCanBeNull()) {
+          vixl::Label non_zero;
+          __ Cbnz(Register(value), &non_zero);
+          if (!index.IsConstant()) {
+            __ Add(temp, array, offset);
+          }
+          __ Str(wzr, destination);
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          __ B(&done);
+          __ Bind(&non_zero);
+        }
+
+        Register temp2 = temps.AcquireSameSizeAs(array);
+        __ Ldr(temp, HeapOperand(array, class_offset));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        GetAssembler()->MaybeUnpoisonHeapReference(temp);
+        __ Ldr(temp, HeapOperand(temp, component_offset));
+        __ Ldr(temp2, HeapOperand(Register(value), class_offset));
+        // No need to poison/unpoison, we're comparing two poisoned references.
+        __ Cmp(temp, temp2);
+        if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          vixl::Label do_put;
+          __ B(eq, &do_put);
+          GetAssembler()->MaybeUnpoisonHeapReference(temp);
+          __ Ldr(temp, HeapOperand(temp, super_offset));
+          // No need to unpoison, we're comparing against null.
+          __ Cbnz(temp, slow_path->GetEntryLabel());
+          __ Bind(&do_put);
+        } else {
+          __ B(ne, slow_path->GetEntryLabel());
+        }
+        temps.Release(temp2);
+      }
+
+      if (kPoisonHeapReferences) {
+        Register temp2 = temps.AcquireSameSizeAs(array);
+        DCHECK(value.IsW());
+        __ Mov(temp2, value.W());
+        GetAssembler()->PoisonHeapReference(temp2);
+        source = temp2;
+      }
+
+      if (!index.IsConstant()) {
+        __ Add(temp, array, offset);
+      }
+      __ Str(source, destination);
+
+      if (!may_need_runtime_call) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
     }
-    if (CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue())) {
-      codegen_->MarkGCCard(obj, value.W(), instruction->GetValueCanBeNull());
+
+    codegen_->MarkGCCard(array, value.W(), instruction->GetValueCanBeNull());
+
+    if (done.IsLinked()) {
+      __ Bind(&done);
+    }
+
+    if (slow_path != nullptr) {
+      __ Bind(slow_path->GetExitLabel());
     }
   }
 }
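
For reference, the assignability test the inline ArraySet sequence above performs before falling back to ArraySetSlowPathARM64, re-expressed as plain C++ under simplified assumptions (two-field Class/Object models, no heap-reference poisoning):

struct Class { Class* super_class; Class* component_type; };
struct Object { Class* klass; };

bool NeedsSlowPath(Object* array, Object* value, bool static_object_array) {
  if (value == nullptr) return false;              // null stores always succeed
  Class* component = array->klass->component_type;
  if (value->klass == component) return false;     // exact component match
  if (static_object_array && component->super_class == nullptr) {
    return false;                                  // component is java.lang.Object
  }
  return true;                                     // defer to pAputObject
}
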
@@ -3394,6 +3534,74 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
   new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
 }
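
All four unresolved-field visitor pairs above are pure delegation: the locations builder lets the codegen derive a runtime-call LocationSummary from the per-architecture FieldAccessCallingConvention, and the instruction visitor forwards field type, index, and dex pc to the shared GenerateUnresolvedFieldAccess. A hedged sketch of how such a shared helper could name its quick entry point; the mapping below is illustrative, not ART's actual table:

#include <string>

enum class FieldOp { kInstanceGet, kInstanceSet, kStaticGet, kStaticSet };

// Produces names shaped like pSet32Instance or pGetObjStatic.
std::string EntryPointFor(FieldOp op, bool is_64bit, bool is_object) {
  std::string width = is_object ? "Obj" : (is_64bit ? "64" : "32");
  bool is_set = (op == FieldOp::kInstanceSet || op == FieldOp::kStaticSet);
  bool is_instance = (op == FieldOp::kInstanceGet || op == FieldOp::kInstanceSet);
  return std::string("p") + (is_set ? "Set" : "Get") + width +
         (is_instance ? "Instance" : "Static");
}
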
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7178081..a068b48 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
 
 #include "code_generator.h"
+#include "common_arm64.h"
 #include "dex/compiler_enums.h"
 #include "driver/compiler_options.h"
 #include "nodes.h"
@@ -141,6 +142,34 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
 };
 
+class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionARM64() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::x1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::x0);
+  }
+  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return helpers::LocationFrom(vixl::x0);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::x2)
+        : (is_instance
+            ? helpers::LocationFrom(vixl::x2)
+            : helpers::LocationFrom(vixl::x1));
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return helpers::LocationFrom(vixl::d0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
+};
+
 class InstructionCodeGeneratorARM64 : public HGraphVisitor {
  public:
   InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
@@ -334,10 +363,9 @@
   // Code generation helpers.
   void MoveConstant(vixl::CPURegister destination, HConstant* constant);
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  // The type is optional. When specified it must be coherent with the
-  // locations, and is used for optimisation and debugging.
-  void MoveLocation(Location destination, Location source,
-                    Primitive::Type type = Primitive::kPrimVoid);
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
   void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
   void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
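
FieldAccessCallingConventionARM64 above pins where the runtime stubs expect each operand: x0 the field index, x1 the object, x2 the set value, except that a 32-bit static set packs its value into x1 because no object argument precedes it. A runnable standalone model of that one irregular case:

#include <cassert>

enum Reg { kX0 = 0, kX1 = 1, kX2 = 2 };

Reg SetValueReg(bool is_64bit, bool is_instance) {
  // 64-bit values and instance sets use x2; a 32-bit static set packs the
  // value into x1, right behind the field index in x0.
  return (is_64bit || is_instance) ? kX2 : kX1;
}

int main() {
  assert(SetValueReg(/*is_64bit=*/false, /*is_instance=*/false) == kX1);
  assert(SetValueReg(/*is_64bit=*/true,  /*is_instance=*/false) == kX2);
  assert(SetValueReg(/*is_64bit=*/false, /*is_instance=*/true)  == kX2);
  return 0;
}
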
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index c9f8493..e95d283 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -40,9 +40,6 @@
 // We need extra temporary/scratch registers (in addition to AT) in some cases.
 static constexpr FpuRegister FTMP = F8;
 
-// ART Thread Register.
-static constexpr GpuRegister TR = S1;
-
 Location Mips64ReturnLocation(Primitive::Type return_type) {
   switch (return_type) {
     case Primitive::kPrimBoolean:
@@ -620,7 +617,7 @@
 
 void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                        Location source,
-                                       Primitive::Type type) {
+                                       Primitive::Type dst_type) {
   if (source.Equals(destination)) {
     return;
   }
@@ -628,7 +625,7 @@
   // A valid move can always be inferred from the destination and source
   // locations. When moving from and to a register, the argument type can be
   // used to generate 32bit instead of 64bit moves.
-  bool unspecified_type = (type == Primitive::kPrimVoid);
+  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
   DCHECK_EQ(unspecified_type, false);
 
   if (destination.IsRegister() || destination.IsFpuRegister()) {
@@ -639,21 +636,21 @@
                                   || src_cst->IsFloatConstant()
                                   || src_cst->IsNullConstant()))) {
         // For stack slots and 32bit constants, a 64bit type is appropriate.
-        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
       } else {
         // If the source is a double stack slot or a 64bit constant, a 64bit
         // type is appropriate. Else the source is a register, and since the
         // type has not been specified, we choose a 64bit type to force a 64bit
         // move.
-        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
       }
     }
-    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
-           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
+    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
     if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
       // Move to GPR/FPR from stack
       LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
-      if (Primitive::IsFloatingPointType(type)) {
+      if (Primitive::IsFloatingPointType(dst_type)) {
         __ LoadFpuFromOffset(load_type,
                              destination.AsFpuRegister<FpuRegister>(),
                              SP,
@@ -668,31 +665,47 @@
     } else if (source.IsConstant()) {
       // Move to GPR/FPR from constant
       GpuRegister gpr = AT;
-      if (!Primitive::IsFloatingPointType(type)) {
+      if (!Primitive::IsFloatingPointType(dst_type)) {
         gpr = destination.AsRegister<GpuRegister>();
       }
-      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
+      if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
         __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
       } else {
         __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
       }
-      if (type == Primitive::kPrimFloat) {
+      if (dst_type == Primitive::kPrimFloat) {
         __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
-      } else if (type == Primitive::kPrimDouble) {
+      } else if (dst_type == Primitive::kPrimDouble) {
         __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
       }
-    } else {
+    } else if (source.IsRegister()) {
       if (destination.IsRegister()) {
         // Move to GPR from GPR
         __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
       } else {
+        DCHECK(destination.IsFpuRegister());
+        if (Primitive::Is64BitType(dst_type)) {
+          __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+        } else {
+          __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+        }
+      }
+    } else if (source.IsFpuRegister()) {
+      if (destination.IsFpuRegister()) {
         // Move to FPR from FPR
-        if (type == Primitive::kPrimFloat) {
+        if (dst_type == Primitive::kPrimFloat) {
           __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
         } else {
-          DCHECK_EQ(type, Primitive::kPrimDouble);
+          DCHECK_EQ(dst_type, Primitive::kPrimDouble);
           __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
         }
+      } else {
+        DCHECK(destination.IsRegister());
+        if (Primitive::Is64BitType(dst_type)) {
+          __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+        } else {
+          __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+        }
       }
     }
   } else {  // The destination is not a register. It must be a stack slot.
@@ -700,13 +713,13 @@
     if (source.IsRegister() || source.IsFpuRegister()) {
       if (unspecified_type) {
         if (source.IsRegister()) {
-          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
         } else {
-          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
         }
       }
-      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
-             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
+      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
       // Move to stack from GPR/FPR
       StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
       if (source.IsRegister()) {
@@ -864,6 +877,14 @@
   __ LoadConst32(location.AsRegister<GpuRegister>(), value);
 }
 
+void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
   Primitive::Type type = load->GetType();
 
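
The MIPS64 MoveLocation hunk mirrors the ARM64 one: the formerly unhandled GPR<->FPR cases now emit mtc1/dmtc1 one way and mfc1/dmfc1 the other, with the d-variants selected for 64-bit types. The selection, as a standalone sketch:

// Illustrative mnemonic selection behind the new MIPS64 branches.
const char* CrossBankMove(bool to_fpu, bool is_64bit) {
  if (to_fpu) {
    return is_64bit ? "dmtc1" : "mtc1";  // GPR -> FPR
  }
  return is_64bit ? "dmfc1" : "mfc1";    // FPR -> GPR
}
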
@@ -3121,6 +3142,74 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
   new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
 }
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 16461d6..5e8f9e7 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -106,6 +106,31 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
 };
 
+class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionMIPS64() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(A1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(A0);
+  }
+  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::RegisterLocation(A0);
+  }
+  Location GetSetValueLocation(
+      Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
+    return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::FpuRegisterLocation(F0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
+};
+
 class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
  public:
   ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
@@ -280,11 +305,12 @@
   void Finalize(CodeAllocator* allocator) OVERRIDE;
 
   // Code generation helpers.
-
-  void MoveLocation(Location destination, Location source, Primitive::Type type);
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
 
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
 
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
 
   // Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 277f6b4..5078456 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -380,6 +380,51 @@
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
 };
 
+class ArraySetSlowPathX86 : public SlowPathCode {
+ public:
+  explicit ArraySetSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(
+        locations->InAt(0),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimNot,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(1),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(2),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+        Primitive::kPrimNot,
+        nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+    CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+    x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+                               instruction_,
+                               instruction_->GetDexPc(),
+                               this);
+    RestoreLiveRegisters(codegen, locations);
+    __ jmp(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }
+
+ private:
+  HInstruction* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
+};
+
 #undef __
 #define __ down_cast<X86Assembler*>(GetAssembler())->
 
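
ArraySetSlowPathX86 above (and its ARM64 and x86-64 counterparts) marshals (array, index, value) into the runtime calling convention through a single HParallelMove rather than three sequential moves, because the argument registers may alias the instruction's current locations. A minimal runnable illustration of why such cycles need a resolver:

#include <cassert>

int main() {
  // Suppose the resolver must realize: arg0 <- r1 and arg1 <- r0,
  // where arg0 is r0 and arg1 is r1 (a two-element cycle).
  int r0 = 10, r1 = 20;
  // Sequential emission would clobber r0 before it is read:
  //   r0 = r1;  // r0 == 20
  //   r1 = r0;  // r1 == 20, the value 10 is lost
  // The parallel-move resolver breaks the cycle with a scratch register:
  int scratch = r0;
  r0 = r1;
  r1 = scratch;
  assert(r0 == 20 && r1 == 10);
  return 0;
}
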
@@ -475,8 +520,8 @@
       instruction_visitor_(graph, this),
       move_resolver_(graph->GetArena(), this),
       isa_features_(isa_features),
-      method_patches_(graph->GetArena()->Adapter()),
-      relative_call_patches_(graph->GetArena()->Adapter()) {
+      method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Use a fake return address register to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
 }
@@ -782,7 +827,10 @@
           Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
           Primitive::kPrimInt);
     } else if (source.IsFpuRegister()) {
-      LOG(FATAL) << "Unimplemented";
+      XmmRegister src_reg = source.AsFpuRegister<XmmRegister>();
+      __ movd(destination.AsRegisterPairLow<Register>(), src_reg);
+      __ psrlq(src_reg, Immediate(32));
+      __ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
     } else {
       // No conflict possible, so just do the moves.
       DCHECK(source.IsDoubleStackSlot());
@@ -795,6 +843,15 @@
       __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
     } else if (source.IsDoubleStackSlot()) {
       __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+    } else if (source.IsRegisterPair()) {
+      size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt);
+      // Create stack space for 2 elements.
+      __ subl(ESP, Immediate(2 * elem_size));
+      __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>());
+      __ movl(Address(ESP, elem_size), source.AsRegisterPairHigh<Register>());
+      __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+      // And remove the temporary stack space we allocated.
+      __ addl(ESP, Immediate(2 * elem_size));
     } else {
       LOG(FATAL) << "Unimplemented";
     }
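
The two new Move64 paths above cover the previously unimplemented XMM<->register-pair transfers: xmm to pair extracts the low word with movd, shifts the register down 32 bits with psrlq (clobbering the dying source), and extracts again; pair to xmm has no two-register form on x86, so it round-trips through eight bytes of stack. The bit movement, modelled in portable C++:

#include <cassert>
#include <cstdint>

// xmm -> (lo, hi): movd, psrlq $32, movd. Note psrlq destroys the source
// register, which is fine because the source dies in this move.
void SplitXmmToPair(uint64_t xmm, uint32_t* lo, uint32_t* hi) {
  *lo = static_cast<uint32_t>(xmm);   // movd
  xmm >>= 32;                         // psrlq $32
  *hi = static_cast<uint32_t>(xmm);   // movd
}

// (lo, hi) -> xmm: the emitted code spills both words to the stack and
// movsd-loads them back; the net effect is this concatenation.
uint64_t JoinPairToXmm(uint32_t lo, uint32_t hi) {
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

int main() {
  uint32_t lo = 0, hi = 0;
  SplitXmmToPair(0x1122334455667788ULL, &lo, &hi);
  assert(lo == 0x55667788u && hi == 0x11223344u);
  assert(JoinPairToXmm(lo, hi) == 0x1122334455667788ULL);
  return 0;
}
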
@@ -921,6 +978,25 @@
   __ movl(location.AsRegister<Register>(), Immediate(value));
 }
 
+void CodeGeneratorX86::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+  if (Primitive::Is64BitType(dst_type)) {
+    Move64(dst, src);
+  } else {
+    Move32(dst, src);
+  }
+}
+
+void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   DCHECK(!successor->IsExitBlock());
 
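
AddLocationAsTemp lets the shared unresolved-field setup reserve the convention's fixed registers as temporaries so the register allocator keeps them free; x86 is the only backend in this change that must split a 64-bit register pair into two single-register temps. In miniature, illustrative only:

#include <vector>

// A pair location contributes two single-register temps, a plain
// register location just one.
std::vector<int> TempsFor(bool is_pair, int low_reg, int high_reg) {
  return is_pair ? std::vector<int>{low_reg, high_reg}
                 : std::vector<int>{low_reg};
}
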
@@ -4040,6 +4116,74 @@
   HandleFieldGet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
       ? LocationSummary::kCallOnSlowPath
@@ -4245,72 +4389,59 @@
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
-  bool needs_runtime_call = instruction->NeedsTypeCheck();
+  bool may_need_runtime_call = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
       instruction,
-      needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
+      may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
 
-  if (needs_runtime_call) {
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  bool is_byte_type = (value_type == Primitive::kPrimBoolean)
+      || (value_type == Primitive::kPrimByte);
+  // We need the inputs to be different from the output in the case of a long operation.
+  // In case of a byte operation, the register allocator does not support multiple
+  // inputs that die at entry with one in a specific register.
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  if (is_byte_type) {
+    // Ensure the value is in a byte register.
+    locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2)));
+  } else if (Primitive::IsFloatingPointType(value_type)) {
+    locations->SetInAt(2, Location::RequiresFpuRegister());
   } else {
-    bool is_byte_type = (value_type == Primitive::kPrimBoolean)
-        || (value_type == Primitive::kPrimByte);
-    // We need the inputs to be different than the output in case of long operation.
-    // In case of a byte operation, the register allocator does not support multiple
-    // inputs that die at entry with one in a specific register.
-    locations->SetInAt(0, Location::RequiresRegister());
-    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-    if (is_byte_type) {
-      // Ensure the value is in a byte register.
-      locations->SetInAt(2, Location::ByteRegisterOrConstant(EAX, instruction->InputAt(2)));
-    } else if (Primitive::IsFloatingPointType(value_type)) {
-      locations->SetInAt(2, Location::RequiresFpuRegister());
-    } else {
-      locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
-    }
-    if (needs_write_barrier) {
-      // Temporary registers for the write barrier.
-      locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
-      // Ensure the card is in a byte register.
-      locations->AddTemp(Location::RegisterLocation(ECX));
-    }
+    locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
+  }
+  if (needs_write_barrier) {
+    // Temporary registers for the write barrier.
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
+    // Ensure the card is in a byte register.
+    locations->AddTemp(Location::RegisterLocation(ECX));
   }
 }
 
 void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
+  Register array = locations->InAt(0).AsRegister<Register>();
   Location index = locations->InAt(1);
   Location value = locations->InAt(2);
   Primitive::Type value_type = instruction->GetComponentType();
-  bool needs_runtime_call = locations->WillCall();
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+  bool may_need_runtime_call = locations->CanCall();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
   switch (value_type) {
     case Primitive::kPrimBoolean:
     case Primitive::kPrimByte: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        if (value.IsRegister()) {
-          __ movb(Address(obj, offset), value.AsRegister<ByteRegister>());
-        } else {
-          __ movb(Address(obj, offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
+          : Address(array, index.AsRegister<Register>(), TIMES_1, offset);
+      if (value.IsRegister()) {
+        __ movb(address, value.AsRegister<ByteRegister>());
       } else {
-        if (value.IsRegister()) {
-          __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
-                  value.AsRegister<ByteRegister>());
-        } else {
-          __ movb(Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+        __ movb(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
       }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
@@ -4318,93 +4449,106 @@
 
     case Primitive::kPrimShort:
     case Primitive::kPrimChar: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
-        if (value.IsRegister()) {
-          __ movw(Address(obj, offset), value.AsRegister<Register>());
-        } else {
-          __ movw(Address(obj, offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
+          : Address(array, index.AsRegister<Register>(), TIMES_2, offset);
+      if (value.IsRegister()) {
+        __ movw(address, value.AsRegister<Register>());
       } else {
-        if (value.IsRegister()) {
-          __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
-                  value.AsRegister<Register>());
-        } else {
-          __ movw(Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+        __ movw(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
       }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
-    case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      if (!needs_runtime_call) {
-        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-        if (index.IsConstant()) {
-          size_t offset =
-              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          if (value.IsRegister()) {
-            if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
-              Register temp = locations->GetTemp(0).AsRegister<Register>();
-              __ movl(temp, value.AsRegister<Register>());
-              __ PoisonHeapReference(temp);
-              __ movl(Address(obj, offset), temp);
-            } else {
-              __ movl(Address(obj, offset), value.AsRegister<Register>());
-            }
-          } else {
-            DCHECK(value.IsConstant()) << value;
-            int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
-            // `value_type == Primitive::kPrimNot` implies `v == 0`.
-            DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
-            // Note: if heap poisoning is enabled, no need to poison
-            // (negate) `v` if it is a reference, as it would be null.
-            __ movl(Address(obj, offset), Immediate(v));
-          }
-        } else {
-          DCHECK(index.IsRegister()) << index;
-          if (value.IsRegister()) {
-            if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
-              Register temp = locations->GetTemp(0).AsRegister<Register>();
-              __ movl(temp, value.AsRegister<Register>());
-              __ PoisonHeapReference(temp);
-              __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), temp);
-            } else {
-              __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
-                      value.AsRegister<Register>());
-            }
-          } else {
-            DCHECK(value.IsConstant()) << value;
-            int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
-            // `value_type == Primitive::kPrimNot` implies `v == 0`.
-            DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
-            // Note: if heap poisoning is enabled, no need to poison
-            // (negate) `v` if it is a reference, as it would be null.
-            __ movl(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset), Immediate(v));
-          }
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+          : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+      if (!value.IsRegister()) {
+        // Just setting null.
+        DCHECK(instruction->InputAt(2)->IsNullConstant());
+        DCHECK(value.IsConstant()) << value;
+        __ movl(address, Immediate(0));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
-
-        if (needs_write_barrier) {
-          Register temp = locations->GetTemp(0).AsRegister<Register>();
-          Register card = locations->GetTemp(1).AsRegister<Register>();
-          codegen_->MarkGCCard(
-              temp, card, obj, value.AsRegister<Register>(), instruction->GetValueCanBeNull());
-        }
-      } else {
-        DCHECK_EQ(value_type, Primitive::kPrimNot);
-        DCHECK(!codegen_->IsLeafMethod());
-        // Note: if heap poisoning is enabled, pAputObject takes cares
-        // of poisoning the reference.
-        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
-                                instruction,
-                                instruction->GetDexPc(),
-                                nullptr);
+        DCHECK(!needs_write_barrier);
+        DCHECK(!may_need_runtime_call);
+        break;
       }
+
+      DCHECK(needs_write_barrier);
+      Register register_value = value.AsRegister<Register>();
+      NearLabel done, not_null, do_put;
+      SlowPathCode* slow_path = nullptr;
+      Register temp = locations->GetTemp(0).AsRegister<Register>();
+      if (may_need_runtime_call) {
+        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86(instruction);
+        codegen_->AddSlowPath(slow_path);
+        if (instruction->GetValueCanBeNull()) {
+          __ testl(register_value, register_value);
+          __ j(kNotEqual, &not_null);
+          __ movl(address, Immediate(0));
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          __ jmp(&done);
+          __ Bind(&not_null);
+        }
+
+        __ movl(temp, Address(array, class_offset));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        __ MaybeUnpoisonHeapReference(temp);
+        __ movl(temp, Address(temp, component_offset));
+        // No need to poison/unpoison, we're comparing two poisoned references.
+        __ cmpl(temp, Address(register_value, class_offset));
+        if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          __ j(kEqual, &do_put);
+          __ MaybeUnpoisonHeapReference(temp);
+          __ movl(temp, Address(temp, super_offset));
+          // No need to unpoison, we're comparing against null.
+          __ testl(temp, temp);
+          __ j(kNotEqual, slow_path->GetEntryLabel());
+          __ Bind(&do_put);
+        } else {
+          __ j(kNotEqual, slow_path->GetEntryLabel());
+        }
+      }
+
+      if (kPoisonHeapReferences) {
+        __ movl(temp, register_value);
+        __ PoisonHeapReference(temp);
+        __ movl(address, temp);
+      } else {
+        __ movl(address, register_value);
+      }
+      if (!may_need_runtime_call) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
+
+      Register card = locations->GetTemp(1).AsRegister<Register>();
+      codegen_->MarkGCCard(
+          temp, card, array, value.AsRegister<Register>(), instruction->GetValueCanBeNull());
+      __ Bind(&done);
+
+      if (slow_path != nullptr) {
+        __ Bind(slow_path->GetExitLabel());
+      }
+
+      break;
+    }
+    case Primitive::kPrimInt: {
+      uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+          : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+      if (value.IsRegister()) {
+        __ movl(address, value.AsRegister<Register>());
+      } else {
+        DCHECK(value.IsConstant()) << value;
+        int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+        __ movl(address, Immediate(v));
+      }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -4413,30 +4557,30 @@
       if (index.IsConstant()) {
         size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
         if (value.IsRegisterPair()) {
-          __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+          __ movl(Address(array, offset), value.AsRegisterPairLow<Register>());
           codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
+          __ movl(Address(array, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
         } else {
           DCHECK(value.IsConstant());
           int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
-          __ movl(Address(obj, offset), Immediate(Low32Bits(val)));
+          __ movl(Address(array, offset), Immediate(Low32Bits(val)));
           codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(obj, offset + kX86WordSize), Immediate(High32Bits(val)));
+          __ movl(Address(array, offset + kX86WordSize), Immediate(High32Bits(val)));
         }
       } else {
         if (value.IsRegisterPair()) {
-          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
+          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
                   value.AsRegisterPairLow<Register>());
           codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
+          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
                   value.AsRegisterPairHigh<Register>());
         } else {
           DCHECK(value.IsConstant());
           int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
-          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
+          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
                   Immediate(Low32Bits(val)));
           codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
+          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
                   Immediate(High32Bits(val)));
         }
       }
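
The kPrimLong case keeps its pre-existing shape: a 64-bit store is two 32-bit movl instructions at offset and offset + kX86WordSize, and MaybeRecordImplicitNullCheck sits after the first movl because only that access can fault on a null array. A portable model of the two halves:

#include <cstdint>

void StoreLongHalves(uint32_t* slot, int64_t val) {
  slot[0] = static_cast<uint32_t>(val);  // movl: low word, may fault first
  // MaybeRecordImplicitNullCheck is recorded here in the generated code.
  slot[1] = static_cast<uint32_t>(static_cast<uint64_t>(val) >> 32);  // movl: high word
}
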
@@ -4444,28 +4588,22 @@
     }
 
     case Primitive::kPrimFloat: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+      uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+          : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
       DCHECK(value.IsFpuRegister());
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        __ movss(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
-      } else {
-        __ movss(Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset),
-                value.AsFpuRegister<XmmRegister>());
-      }
+      __ movss(address, value.AsFpuRegister<XmmRegister>());
       break;
     }
 
     case Primitive::kPrimDouble: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+      uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
+          : Address(array, index.AsRegister<Register>(), TIMES_8, offset);
       DCHECK(value.IsFpuRegister());
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ movsd(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
-      } else {
-        __ movsd(Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset),
-                value.AsFpuRegister<XmmRegister>());
-      }
+      __ movsd(address, value.AsFpuRegister<XmmRegister>());
       break;
     }
 
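
Each case above now computes its Address once with a ternary over index.IsConstant() instead of duplicating the store in both branches; for constant indices the scaled index folds into the displacement. The shared pattern, sketched with an illustrative Addr struct standing in for x86's Address:

#include <cstdint>

// Either base + displacement, or base + (index << shift) + displacement.
struct Addr { uint32_t disp; bool indexed; int shift; };

Addr ElementAddress(bool index_is_constant, int32_t const_index,
                    uint32_t data_offset, int shift) {
  return index_is_constant
      ? Addr{data_offset + (static_cast<uint32_t>(const_index) << shift), false, 0}
      : Addr{data_offset, true, shift};
}
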
@@ -5623,7 +5761,7 @@
 /**
  * Class to handle late fixup of offsets into constant area.
  */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocMisc> {
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
  public:
   RIPFixup(const CodeGeneratorX86& codegen, int offset)
       : codegen_(codegen), offset_into_constant_area_(offset) {}
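
The RIPFixup retag follows the same cleanup as the other allocation-kind changes in this patch: arena-allocated helpers move off the catch-all kArenaAllocMisc onto pass-specific kinds, so arena statistics attribute memory to the code that allocated it. A hedged sketch of kind-tagged accounting; TaggedAlloc and the counters are illustrative, not ART's ArenaAllocator API:

#include <cstddef>
#include <cstdio>
#include <new>

enum AllocKind { kMisc, kCodeGenerator, kNumKinds };
static size_t g_arena_bytes[kNumKinds] = {};

// Count bytes per allocation kind, the way tagged arena kinds let the
// runtime report per-pass memory use.
void* TaggedAlloc(size_t bytes, AllocKind kind) {
  g_arena_bytes[kind] += bytes;
  return ::operator new(bytes);
}

int main() {
  ::operator delete(TaggedAlloc(64, kCodeGenerator));
  std::printf("code generator arena bytes: %zu\n", g_arena_bytes[kCodeGenerator]);
  return 0;
}
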
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2c2fc65..ae2d84f 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -91,6 +91,36 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
 };
 
+class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionX86() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(ECX);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(EAX);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(EAX, EDX)
+        : Location::RegisterLocation(EAX);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(EDX, EBX)
+        : (is_instance
+            ? Location::RegisterLocation(EDX)
+            : Location::RegisterLocation(ECX));
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::FpuRegisterLocation(XMM0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
+};
+
 class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
  public:
   ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
@@ -228,6 +258,9 @@
   void Bind(HBasicBlock* block) OVERRIDE;
   void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 453c6fd..791bb9e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -396,6 +396,51 @@
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64);
 };
 
+class ArraySetSlowPathX86_64 : public SlowPathCode {
+ public:
+  explicit ArraySetSlowPathX86_64(HInstruction* instruction) : instruction_(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+    parallel_move.AddMove(
+        locations->InAt(0),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+        Primitive::kPrimNot,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(1),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+        Primitive::kPrimInt,
+        nullptr);
+    parallel_move.AddMove(
+        locations->InAt(2),
+        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
+        Primitive::kPrimNot,
+        nullptr);
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+    CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+    x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+                               instruction_,
+                               instruction_->GetDexPc(),
+                               this);
+    RestoreLiveRegisters(codegen, locations);
+    __ jmp(GetExitLabel());
+  }
+
+  const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; }
+
+ private:
+  HInstruction* const instruction_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64);
+};
+
 #undef __
 #define __ down_cast<X86_64Assembler*>(GetAssembler())->
 
@@ -626,9 +671,9 @@
         move_resolver_(graph->GetArena(), this),
         isa_features_(isa_features),
         constant_area_start_(0),
-        method_patches_(graph->GetArena()->Adapter()),
-        relative_call_patches_(graph->GetArena()->Adapter()),
-        pc_rel_dex_cache_patches_(graph->GetArena()->Adapter()) {
+        method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+        relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+        pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
 }
 
@@ -945,6 +990,19 @@
   Load64BitValue(location.AsRegister<CpuRegister>(), static_cast<int64_t>(value));
 }
 
+void CodeGeneratorX86_64::MoveLocation(
+    Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) {
+  Move(dst, src);
+}
+
+void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   DCHECK(!successor->IsExitBlock());
 
@@ -3804,6 +3862,74 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
       ? LocationSummary::kCallOnSlowPath
@@ -3992,66 +4118,55 @@
 
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool needs_runtime_call = instruction->NeedsTypeCheck();
+  bool may_need_runtime_call = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
-      instruction, needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
-  if (needs_runtime_call) {
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  } else {
-    locations->SetInAt(0, Location::RequiresRegister());
-    locations->SetInAt(
-        1, Location::RegisterOrConstant(instruction->InputAt(1)));
-    locations->SetInAt(2, Location::RequiresRegister());
-    if (value_type == Primitive::kPrimLong) {
-      locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2)));
-    } else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
-      locations->SetInAt(2, Location::RequiresFpuRegister());
-    } else {
-      locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
-    }
+      instruction,
+      may_need_runtime_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
 
-    if (needs_write_barrier) {
-      // Temporary registers for the write barrier.
-      locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
-      locations->AddTemp(Location::RequiresRegister());
-    }
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(
+      1, Location::RegisterOrConstant(instruction->InputAt(1)));
+  locations->SetInAt(2, Location::RequiresRegister());
+  if (value_type == Primitive::kPrimLong) {
+    locations->SetInAt(2, Location::RegisterOrInt32LongConstant(instruction->InputAt(2)));
+  } else if (value_type == Primitive::kPrimFloat || value_type == Primitive::kPrimDouble) {
+    locations->SetInAt(2, Location::RequiresFpuRegister());
+  } else {
+    locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)));
+  }
+
+  if (needs_write_barrier) {
+    // Temporary registers for the write barrier.
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
+    locations->AddTemp(Location::RequiresRegister());
   }
 }
 
 void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
+  CpuRegister array = locations->InAt(0).AsRegister<CpuRegister>();
   Location index = locations->InAt(1);
   Location value = locations->InAt(2);
   Primitive::Type value_type = instruction->GetComponentType();
-  bool needs_runtime_call = locations->WillCall();
+  bool may_need_runtime_call = locations->CanCall();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
   switch (value_type) {
     case Primitive::kPrimBoolean:
     case Primitive::kPrimByte: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        if (value.IsRegister()) {
-          __ movb(Address(obj, offset), value.AsRegister<CpuRegister>());
-        } else {
-          __ movb(Address(obj, offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_1, offset);
+      if (value.IsRegister()) {
+        __ movb(address, value.AsRegister<CpuRegister>());
       } else {
-        if (value.IsRegister()) {
-          __ movb(Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset),
-                  value.AsRegister<CpuRegister>());
-        } else {
-          __ movb(Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+        __ movb(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
       }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
@@ -4059,154 +4174,145 @@
 
     case Primitive::kPrimShort:
     case Primitive::kPrimChar: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
-        if (value.IsRegister()) {
-          __ movw(Address(obj, offset), value.AsRegister<CpuRegister>());
-        } else {
-          DCHECK(value.IsConstant()) << value;
-          __ movw(Address(obj, offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_2, offset);
+      if (value.IsRegister()) {
+        __ movw(address, value.AsRegister<CpuRegister>());
       } else {
-        DCHECK(index.IsRegister()) << index;
-        if (value.IsRegister()) {
-          __ movw(Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset),
-                  value.AsRegister<CpuRegister>());
-        } else {
-          DCHECK(value.IsConstant()) << value;
-          __ movw(Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset),
-                  Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
-        }
+        DCHECK(value.IsConstant()) << value;
+        __ movw(address, Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
       }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
-    case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      if (!needs_runtime_call) {
-        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-        if (index.IsConstant()) {
-          size_t offset =
-              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          if (value.IsRegister()) {
-            if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
-              CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
-              __ movl(temp, value.AsRegister<CpuRegister>());
-              __ PoisonHeapReference(temp);
-              __ movl(Address(obj, offset), temp);
-            } else {
-              __ movl(Address(obj, offset), value.AsRegister<CpuRegister>());
-            }
-          } else {
-            DCHECK(value.IsConstant()) << value;
-            int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
-            // `value_type == Primitive::kPrimNot` implies `v == 0`.
-            DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
-            // Note: if heap poisoning is enabled, no need to poison
-            // (negate) `v` if it is a reference, as it would be null.
-            __ movl(Address(obj, offset), Immediate(v));
-          }
-        } else {
-          DCHECK(index.IsRegister()) << index;
-          if (value.IsRegister()) {
-            if (kPoisonHeapReferences && value_type == Primitive::kPrimNot) {
-              CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
-              __ movl(temp, value.AsRegister<CpuRegister>());
-              __ PoisonHeapReference(temp);
-              __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset), temp);
-            } else {
-              __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
-                      value.AsRegister<CpuRegister>());
-            }
-          } else {
-            DCHECK(value.IsConstant()) << value;
-            int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
-            // `value_type == Primitive::kPrimNot` implies `v == 0`.
-            DCHECK((value_type != Primitive::kPrimNot) || (v == 0));
-            // Note: if heap poisoning is enabled, no need to poison
-            // (negate) `v` if it is a reference, as it would be null.
-            __ movl(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
-                    Immediate(v));
-          }
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+      if (!value.IsRegister()) {
+        // Just setting null.
+        DCHECK(instruction->InputAt(2)->IsNullConstant());
+        DCHECK(value.IsConstant()) << value;
+        __ movl(address, Immediate(0));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
-        if (needs_write_barrier) {
-          DCHECK_EQ(value_type, Primitive::kPrimNot);
-          CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
-          CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
-          codegen_->MarkGCCard(
-              temp, card, obj, value.AsRegister<CpuRegister>(), instruction->GetValueCanBeNull());
-        }
-      } else {
-        DCHECK_EQ(value_type, Primitive::kPrimNot);
-        // Note: if heap poisoning is enabled, pAputObject takes cares
-        // of poisoning the reference.
-        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
-                                instruction,
-                                instruction->GetDexPc(),
-                                nullptr);
-        DCHECK(!codegen_->IsLeafMethod());
+        DCHECK(!needs_write_barrier);
+        DCHECK(!may_need_runtime_call);
+        break;
       }
+
+      DCHECK(needs_write_barrier);
+      CpuRegister register_value = value.AsRegister<CpuRegister>();
+      NearLabel done, not_null, do_put;
+      SlowPathCode* slow_path = nullptr;
+      CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
+      if (may_need_runtime_call) {
+        slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathX86_64(instruction);
+        codegen_->AddSlowPath(slow_path);
+        if (instruction->GetValueCanBeNull()) {
+          __ testl(register_value, register_value);
+          __ j(kNotEqual, &not_null);
+          __ movl(address, Immediate(0));
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          __ jmp(&done);
+          __ Bind(&not_null);
+        }
+
+        __ movl(temp, Address(array, class_offset));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        __ MaybeUnpoisonHeapReference(temp);
+        __ movl(temp, Address(temp, component_offset));
+        // No need to poison/unpoison, we're comparing two poisoned references.
+        __ cmpl(temp, Address(register_value, class_offset));
+        if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          __ j(kEqual, &do_put);
+          __ MaybeUnpoisonHeapReference(temp);
+          __ movl(temp, Address(temp, super_offset));
+          // No need to unpoison the result, we're comparing against null.
+          __ testl(temp, temp);
+          __ j(kNotEqual, slow_path->GetEntryLabel());
+          __ Bind(&do_put);
+        } else {
+          __ j(kNotEqual, slow_path->GetEntryLabel());
+        }
+      }
+
+      if (kPoisonHeapReferences) {
+        __ movl(temp, register_value);
+        __ PoisonHeapReference(temp);
+        __ movl(address, temp);
+      } else {
+        __ movl(address, register_value);
+      }
+      if (!may_need_runtime_call) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
+
+      CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
+      codegen_->MarkGCCard(
+          temp, card, array, value.AsRegister<CpuRegister>(), instruction->GetValueCanBeNull());
+      __ Bind(&done);
+
+      if (slow_path != nullptr) {
+        __ Bind(slow_path->GetExitLabel());
+      }
+
+      break;
+    }
+    case Primitive::kPrimInt: {
+      uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+      if (value.IsRegister()) {
+        __ movl(address, value.AsRegister<CpuRegister>());
+      } else {
+        DCHECK(value.IsConstant()) << value;
+        int32_t v = CodeGenerator::GetInt32ValueOf(value.GetConstant());
+        __ movl(address, Immediate(v));
+      }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case Primitive::kPrimLong: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        if (value.IsRegister()) {
-          __ movq(Address(obj, offset), value.AsRegister<CpuRegister>());
-        } else {
-          int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
-          DCHECK(IsInt<32>(v));
-          int32_t v_32 = v;
-          __ movq(Address(obj, offset), Immediate(v_32));
-        }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+      if (value.IsRegister()) {
+        __ movq(address, value.AsRegister<CpuRegister>());
       } else {
-        if (value.IsRegister()) {
-          __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
-                  value.AsRegister<CpuRegister>());
-        } else {
-          int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
-          DCHECK(IsInt<32>(v));
-          int32_t v_32 = v;
-          __ movq(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
-                  Immediate(v_32));
-        }
+        int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
+        DCHECK(IsInt<32>(v));
+        int32_t v_32 = v;
+        __ movq(address, Immediate(v_32));
       }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case Primitive::kPrimFloat: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        DCHECK(value.IsFpuRegister());
-        __ movss(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
-      } else {
-        DCHECK(value.IsFpuRegister());
-        __ movss(Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset),
-                value.AsFpuRegister<XmmRegister>());
-      }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+      DCHECK(value.IsFpuRegister());
+      __ movss(address, value.AsFpuRegister<XmmRegister>());
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case Primitive::kPrimDouble: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        DCHECK(value.IsFpuRegister());
-        __ movsd(Address(obj, offset), value.AsFpuRegister<XmmRegister>());
-      } else {
-        DCHECK(value.IsFpuRegister());
-        __ movsd(Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset),
-                value.AsFpuRegister<XmmRegister>());
-      }
+      uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+      Address address = index.IsConstant()
+          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
+          : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+      DCHECK(value.IsFpuRegister());
+      __ movsd(address, value.AsFpuRegister<XmmRegister>());
       codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
@@ -4250,7 +4356,7 @@
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCode* slow_path =
-    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -5279,7 +5385,7 @@
 /**
  * Class to handle late fixup of offsets into constant area.
  */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocMisc> {
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
   public:
     RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
       : codegen_(codegen), offset_into_constant_area_(offset) {}
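
The rewritten kPrimNot case above replaces the unconditional pAputObject call
with an inline type check that only escapes to the new ArraySetSlowPathX86_64
when the check fails. A minimal sketch of the decision the emitted compare
sequence makes, written over hypothetical mirror types (plain C++, not ART's
real mirror classes):

    // Any false result corresponds to jumping to the slow path, which invokes
    // the pAputObject entrypoint shown in ArraySetSlowPathX86_64.
    struct Class { const Class* super_class; const Class* component_type; };
    struct Object { const Class* klass; };

    bool CanStoreInline(const Object* array, const Object* value,
                        bool static_type_is_object_array) {
      if (value == nullptr) {
        return true;  // storing null never needs a type check
      }
      const Class* component = array->klass->component_type;
      if (component == value->klass) {
        return true;  // exact component-type match: the do_put branch
      }
      // When the static type of the array is Object[], a component type whose
      // super class is null can only be java.lang.Object, so any reference is
      // assignable (the super_offset test guarded by
      // StaticTypeOfArrayIsObjectArray).
      return static_type_is_object_array && component->super_class == nullptr;
    }

On the fast path the emitted code compares the two class words while still
poisoned (equality is unaffected) and follows the store with MarkGCCard, as
before.
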
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 197ce63..ecc8630 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -70,6 +70,35 @@
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
 };
 
+class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionX86_64() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(RSI);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(RDI);
+  }
+  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::RegisterLocation(RAX);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterLocation(RDX)
+        : (is_instance
+            ? Location::RegisterLocation(RDX)
+            : Location::RegisterLocation(RSI));
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::FpuRegisterLocation(XMM0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
+};
+
+
 class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
  public:
   InvokeDexCallingConventionVisitorX86_64() {}
@@ -215,6 +244,9 @@
   void Bind(HBasicBlock* block) OVERRIDE;
   void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 20ce110..e0aa4ff 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -226,14 +226,14 @@
 
   // We assume that GVN has run before, so we only perform a pointer
   // comparison.  If for some reason the values are equal but the pointers are
-  // different, we are still correct and only miss an optimisation
+  // different, we are still correct and only miss an optimization
   // opportunity.
   if (instruction->GetLeft() == instruction->GetRight()) {
     // Replace code looking like
     //    SUB dst, src, src
     // with
     //    CONSTANT 0
-    // Note that we cannot optimise `x - x` to `0` for floating-point. It does
+    // Note that we cannot optimize `x - x` to `0` for floating-point. It does
     // not work when `x` is an infinity.
     instruction->ReplaceWith(GetGraph()->GetConstant(type, 0));
     block->RemoveInstruction(instruction);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2c6c3b7..7a83662 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -398,6 +398,22 @@
     StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
   }
 
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+    StartAttributeStream("field_type") << field_access->GetFieldType();
+  }
+
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+    StartAttributeStream("field_type") << field_access->GetFieldType();
+  }
+
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+    StartAttributeStream("field_type") << field_access->GetFieldType();
+  }
+
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+    StartAttributeStream("field_type") << field_access->GetFieldType();
+  }
+
   void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
     StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
   }
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 5050e15..7cf0617 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -39,7 +39,7 @@
   explicit ValueSet(ArenaAllocator* allocator)
       : allocator_(allocator),
         num_buckets_(kMinimumNumberOfBuckets),
-        buckets_(allocator->AllocArray<Node*>(num_buckets_)),
+        buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
         buckets_owned_(allocator, num_buckets_, false),
         num_entries_(0) {
     // ArenaAllocator returns zeroed memory, so no need to set buckets to null.
@@ -52,7 +52,7 @@
   ValueSet(ArenaAllocator* allocator, const ValueSet& to_copy)
       : allocator_(allocator),
         num_buckets_(to_copy.IdealBucketCount()),
-        buckets_(allocator->AllocArray<Node*>(num_buckets_)),
+        buckets_(allocator->AllocArray<Node*>(num_buckets_, kArenaAllocGvn)),
         buckets_owned_(allocator, num_buckets_, false),
         num_entries_(to_copy.num_entries_) {
     // ArenaAllocator returns zeroed memory, so entries of buckets_ and
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 9fb4304..e5123de 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -74,11 +74,14 @@
 HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
     : HOptimization(graph, kInductionPassName),
       global_depth_(0),
-      stack_(graph->GetArena()->Adapter()),
-      scc_(graph->GetArena()->Adapter()),
-      map_(std::less<HInstruction*>(), graph->GetArena()->Adapter()),
-      cycle_(std::less<HInstruction*>(), graph->GetArena()->Adapter()),
-      induction_(std::less<HLoopInformation*>(), graph->GetArena()->Adapter()) {
+      stack_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+      scc_(graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+      map_(std::less<HInstruction*>(),
+           graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+      cycle_(std::less<HInstruction*>(),
+             graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)),
+      induction_(std::less<HLoopInformation*>(),
+                 graph->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)) {
 }
 
 void HInductionVarAnalysis::Run() {
@@ -228,7 +231,7 @@
 
   // Rotate proper entry-phi to front.
   if (size > 1) {
-    ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter());
+    ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis));
     RotateEntryPhiFirst(loop, &scc_, &other);
   }
 
@@ -553,44 +556,33 @@
     }
   } else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
     // Analyze condition with induction at left-hand-side (e.g. i < U).
+    InductionInfo* lower_expr = a->op_b;
+    InductionInfo* upper_expr = b;
     InductionInfo* stride = a->op_a;
-    InductionInfo* lo_val = a->op_b;
-    InductionInfo* hi_val = b;
-    // Analyze stride (may be compound).
-    InductionVarRange::Value v1 = InductionVarRange::GetVal(stride, nullptr, /* is_min */ true);
-    InductionVarRange::Value v2 = InductionVarRange::GetVal(stride, nullptr, /* is_min */ false);
-    if (v1.a_constant != 0 || v2.a_constant != 0 || v1.b_constant != v2.b_constant) {
+    int64_t stride_value = 0;
+    if (!IsIntAndGet(stride, &stride_value)) {
       return;
     }
-    // Rewrite safe condition i != U with unit stride into i < U or i > U
-    // (unit stride guarantees that the end condition is always reached).
-    const int32_t stride_value = v1.b_constant;
-    int64_t lo_value = 0;
-    int64_t hi_value = 0;
-    if (cmp == kCondNE && IsIntAndGet(lo_val, &lo_value) && IsIntAndGet(hi_val, &hi_value)) {
-      if ((stride_value == +1 && lo_value < hi_value) ||
-          (stride_value == -1 && lo_value > hi_value)) {
-        cmp = stride_value > 0 ? kCondLT : kCondGT;
-      }
+    // Rewrite condition i != U into i < U or i > U if the end condition is reached exactly.
+    if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLT)) ||
+                           (stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGT)))) {
+      cmp = stride_value > 0 ? kCondLT : kCondGT;
     }
     // Normalize a linear loop control with a nonzero stride:
     //   stride > 0, either i < U or i <= U
     //   stride < 0, either i > U or i >= U
-    //
-    // TODO: construct conditions for constant/symbolic safety of trip-count
-    //
     if ((stride_value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
         (stride_value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
-      VisitTripCount(loop, lo_val, hi_val, stride, stride_value, type, cmp);
+      VisitTripCount(loop, lower_expr, upper_expr, stride, stride_value, type, cmp);
     }
   }
 }
 
 void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
-                                           InductionInfo* lo_val,
-                                           InductionInfo* hi_val,
+                                           InductionInfo* lower_expr,
+                                           InductionInfo* upper_expr,
                                            InductionInfo* stride,
-                                           int32_t stride_value,
+                                           int64_t stride_value,
                                            Primitive::Type type,
                                            IfCondition cmp) {
   // Any loop of the general form:
@@ -604,30 +596,95 @@
   //    for (n = 0; n < TC; n++) // where TC = (U + S - L) / S
   //      .. L + S * n ..
   //
-  // NOTE: The TC (trip-count) expression is only valid when safe. Otherwise TC is 0
-  //       (or possibly infinite). Also, the expression assumes the loop does not have
-  //       early-exits. Otherwise, TC is an upper bound.
+  // taking the following into consideration:
   //
-  bool cancels = (cmp == kCondLT || cmp == kCondGT) && std::abs(stride_value) == 1;
+  // (1) Using the same precision, the TC (trip-count) expression should be interpreted as
+  //     an unsigned entity, for example, as in the following loop that uses the full range:
+  //     for (int i = INT_MIN; i < INT_MAX; i++) // TC = UINT_MAX
+  // (2) The TC is only valid if the loop is taken, otherwise TC = 0, as in:
+  //     for (int i = 12; i < U; i++) // TC = 0 when U <= 12
+  //     If this cannot be determined at compile-time, the TC is only valid within the
+  //     loop-body proper, not the loop-header, unless enforced with an explicit condition.
+  // (3) The TC is only valid if the loop is finite, otherwise TC has no value, as in:
+  //     for (int i = 0; i <= U; i++) // TC = Inf when U = INT_MAX
+  //     If this cannot be determined at compile-time, the TC is only valid when enforced
+  //     with an explicit condition.
+  // (4) For loops that exit early, the TC forms an upper bound, as in:
+  //     for (int i = 0; i < 10 && ....; i++) // TC <= 10
+  const bool is_taken = IsTaken(lower_expr, upper_expr, cmp);
+  const bool is_finite = IsFinite(upper_expr, stride_value, type, cmp);
+  const bool cancels = (cmp == kCondLT || cmp == kCondGT) && std::abs(stride_value) == 1;
   if (!cancels) {
     // Convert exclusive integral inequality into inclusive integral inequality,
     // viz. condition i < U is i <= U - 1 and condition i > U is i >= U + 1.
     if (cmp == kCondLT) {
-      hi_val = CreateInvariantOp(kSub, hi_val, CreateConstant(1, type));
+      upper_expr = CreateInvariantOp(kSub, upper_expr, CreateConstant(1, type));
     } else if (cmp == kCondGT) {
-      hi_val = CreateInvariantOp(kAdd, hi_val, CreateConstant(1, type));
+      upper_expr = CreateInvariantOp(kAdd, upper_expr, CreateConstant(1, type));
     }
     // Compensate for stride.
-    hi_val = CreateInvariantOp(kAdd, hi_val, stride);
+    upper_expr = CreateInvariantOp(kAdd, upper_expr, stride);
   }
-
+  InductionInfo* trip_count =
+      CreateInvariantOp(kDiv, CreateInvariantOp(kSub, upper_expr, lower_expr), stride);
   // Assign the trip-count expression to the loop control. Clients that use the information
-  // should be aware that the expression is only valid in the loop-body proper (when symbolically
-  // safe), and not yet in the loop-header (unless constant safe). If the loop has any early exits,
-  // the trip-count forms a conservative upper bound on the number of loop iterations.
-  InductionInfo* trip_count =
-      CreateInvariantOp(kDiv, CreateInvariantOp(kSub, hi_val, lo_val), stride);
-  AssignInfo(loop, loop->GetHeader()->GetLastInstruction(), trip_count);
+  // should be aware that the expression is only valid under the conditions listed above.
+  InductionOp tcKind = kTripCountInBodyUnsafe;
+  if (is_taken && is_finite) {
+    tcKind = kTripCountInLoop;
+  } else if (is_finite) {
+    tcKind = kTripCountInBody;
+  } else if (is_taken) {
+    tcKind = kTripCountInLoopUnsafe;
+  }
+  AssignInfo(loop, loop->GetHeader()->GetLastInstruction(), CreateTripCount(tcKind, trip_count));
+}
+
+bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
+                                    InductionInfo* upper_expr,
+                                    IfCondition cmp) {
+  int64_t lower_value;
+  int64_t upper_value;
+  if (IsIntAndGet(lower_expr, &lower_value) && IsIntAndGet(upper_expr, &upper_value)) {
+    switch (cmp) {
+      case kCondLT: return lower_value <  upper_value;
+      case kCondLE: return lower_value <= upper_value;
+      case kCondGT: return lower_value >  upper_value;
+      case kCondGE: return lower_value >= upper_value;
+      case kCondEQ:
+      case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+    }
+  }
+  return false;  // not certain, may be untaken
+}
+
+bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
+                                     int64_t stride_value,
+                                     Primitive::Type type,
+                                     IfCondition cmp) {
+  const int64_t min = type == Primitive::kPrimInt
+      ? std::numeric_limits<int32_t>::min()
+      : std::numeric_limits<int64_t>::min();
+  const int64_t max = type == Primitive::kPrimInt
+      ? std::numeric_limits<int32_t>::max()
+      : std::numeric_limits<int64_t>::max();
+  // Some rules under which it is certain at compile-time that the loop is finite.
+  int64_t value;
+  switch (cmp) {
+    case kCondLT:
+      return stride_value == 1 ||
+          (IsIntAndGet(upper_expr, &value) && value <= (max - stride_value + 1));
+    case kCondLE:
+      return (IsIntAndGet(upper_expr, &value) && value <= (max - stride_value));
+    case kCondGT:
+      return stride_value == -1 ||
+          (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value - 1));
+    case kCondGE:
+      return (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value));
+    case kCondEQ:
+    case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+  }
+  return false;  // not certain, may be infinite
 }
 
 void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
@@ -637,7 +694,8 @@
   if (it == induction_.end()) {
     it = induction_.Put(loop,
                         ArenaSafeMap<HInstruction*, InductionInfo*>(
-                            std::less<HInstruction*>(), graph_->GetArena()->Adapter()));
+                            std::less<HInstruction*>(),
+                            graph_->GetArena()->Adapter(kArenaAllocInductionVarAnalysis)));
   }
   it->second.Put(instruction, info);
 }
@@ -744,13 +802,22 @@
 }
 
 bool HInductionVarAnalysis::IsIntAndGet(InductionInfo* info, int64_t* value) {
-  if (info != nullptr && info->induction_class == kInvariant && info->operation == kFetch) {
-    DCHECK(info->fetch);
-    if (info->fetch->IsIntConstant()) {
-      *value = info->fetch->AsIntConstant()->GetValue();
-      return true;
-    } else if (info->fetch->IsLongConstant()) {
-      *value = info->fetch->AsLongConstant()->GetValue();
+  if (info != nullptr && info->induction_class == kInvariant) {
+    // A direct constant fetch.
+    if (info->operation == kFetch) {
+      DCHECK(info->fetch);
+      if (info->fetch->IsIntConstant()) {
+        *value = info->fetch->AsIntConstant()->GetValue();
+        return true;
+      } else if (info->fetch->IsLongConstant()) {
+        *value = info->fetch->AsLongConstant()->GetValue();
+        return true;
+      }
+    }
+    // Use range analysis to resolve compound values.
+    int32_t range_value;
+    if (InductionVarRange::GetConstant(info, &range_value)) {
+      *value = range_value;
       return true;
     }
   }
@@ -778,6 +845,10 @@
             inv += std::to_string(info->fetch->GetId()) + ":" + info->fetch->DebugName();
           }
           break;
+        case kTripCountInLoop:       inv += "TC-loop:"; break;
+        case kTripCountInBody:       inv += "TC-body:"; break;
+        case kTripCountInLoopUnsafe: inv += "TC-loop-unsafe:"; break;
+        case kTripCountInBodyUnsafe: inv += "TC-body-unsafe:"; break;
       }
       inv += InductionToString(info->op_b);
       return inv + ")";
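
VisitTripCount above first widens an exclusive bound into an inclusive one, then
compensates for the stride, and finally divides: TC = (U' - L) / S. A small
self-checking illustration of that arithmetic (plain C++; the constants are
chosen for the example, and a stride of 3 exercises the compensation step that a
stride of 1 "cancels"):

    #include <cassert>

    int main() {
      const int L = 0, U = 10, S = 3;  // for (int i = L; i < U; i += S)
      int iterations = 0;
      for (int i = L; i < U; i += S) {
        ++iterations;  // i = 0, 3, 6, 9
      }
      // i < U becomes i <= U - 1 (kSub), + S compensates for the stride (kAdd),
      // and the division (kDiv) yields the trip count.
      const int tc = ((U - 1 + S) - L) / S;  // (9 + 3 - 0) / 3 = 4
      assert(tc == iterations);
      return 0;
    }

For these constants both IsTaken (0 < 10) and IsFinite (10 is well below
INT_MAX - S + 1) hold at compile time, so the expression would be tagged
kTripCountInLoop by the tcKind selection above.
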
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 190a0db..7ab80cd 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -56,13 +56,20 @@
   };
 
   enum InductionOp {
-    kNop,  // no-operation: a true induction
+    // No-operation: a true induction.
+    kNop,
+    // Various invariant operations.
     kAdd,
     kSub,
     kNeg,
     kMul,
     kDiv,
-    kFetch
+    kFetch,
+    // Trip counts (valid in full loop or only body proper; unsafe implies loop may be infinite).
+    kTripCountInLoop,
+    kTripCountInBody,
+    kTripCountInLoopUnsafe,
+    kTripCountInBodyUnsafe
   };
 
   /**
@@ -77,8 +84,10 @@
    *         nop: a, then defined by b
    *   (4) periodic
    *         nop: a, then defined by b (repeated when exhausted)
+   *   (5) trip-count:
+   *         tc: defined by b
    */
-  struct InductionInfo : public ArenaObject<kArenaAllocMisc> {
+  struct InductionInfo : public ArenaObject<kArenaAllocInductionVarAnalysis> {
     InductionInfo(InductionClass ic,
                   InductionOp op,
                   InductionInfo* a,
@@ -110,6 +119,10 @@
     return new (graph_->GetArena()) InductionInfo(kInvariant, kFetch, nullptr, nullptr, f);
   }
 
+  InductionInfo* CreateTripCount(InductionOp op, InductionInfo* b) {
+    return new (graph_->GetArena()) InductionInfo(kInvariant, op, nullptr, b, nullptr);
+  }
+
   InductionInfo* CreateInduction(InductionClass ic, InductionInfo* a, InductionInfo* b) {
     DCHECK(a != nullptr && b != nullptr);
     return new (graph_->GetArena()) InductionInfo(ic, kNop, a, b, nullptr);
@@ -151,12 +164,17 @@
                       Primitive::Type type,
                       IfCondition cmp);
   void VisitTripCount(HLoopInformation* loop,
-                      InductionInfo* lo_val,
-                      InductionInfo* hi_val,
+                      InductionInfo* lower_expr,
+                      InductionInfo* upper_expr,
                       InductionInfo* stride,
-                      int32_t stride_value,
+                      int64_t stride_value,
                       Primitive::Type type,
                       IfCondition cmp);
+  bool IsTaken(InductionInfo* lower_expr, InductionInfo* upper_expr, IfCondition cmp);
+  bool IsFinite(InductionInfo* upper_expr,
+                int64_t stride_value,
+                Primitive::Type type,
+                IfCondition cmp);
 
   // Assign and lookup.
   void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index e519e77..20492e7 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -234,7 +234,8 @@
   EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[0], 0).c_str());
 
   // Trip-count.
-  EXPECT_STREQ("(100)", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+  EXPECT_STREQ("(TC-loop:(100))",
+               GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
 }
 
 TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
@@ -543,8 +544,10 @@
   InductionVarRange range(iva_);
   InductionVarRange::Value v_min = range.GetMinInduction(store, store->InputAt(1));
   InductionVarRange::Value v_max = range.GetMaxInduction(store, store->InputAt(1));
+  ASSERT_TRUE(v_min.is_known);
   EXPECT_EQ(0, v_min.a_constant);
   EXPECT_EQ(1, v_min.b_constant);
+  ASSERT_TRUE(v_max.is_known);
   EXPECT_EQ(0, v_max.a_constant);
   EXPECT_EQ(199, v_max.b_constant);
 }
@@ -579,7 +582,8 @@
     }
     EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(increment_[d], d).c_str());
     // Trip-count.
-    EXPECT_STREQ("(100)", GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
+    EXPECT_STREQ("(TC-loop:(100))",
+                 GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
   }
 }
 
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 119a80b..db12819 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -86,51 +86,36 @@
 
 InductionVarRange::Value InductionVarRange::GetMinInduction(HInstruction* context,
                                                             HInstruction* instruction) {
-  HLoopInformation* loop = context->GetBlock()->GetLoopInformation();
-  if (loop != nullptr) {
-    return GetVal(induction_analysis_->LookupInfo(loop, instruction),
-                  GetTripCount(loop, context), /* is_min */ true);
-  }
-  return Value();
+  return GetInduction(context, instruction, /* is_min */ true);
 }
 
 InductionVarRange::Value InductionVarRange::GetMaxInduction(HInstruction* context,
                                                             HInstruction* instruction) {
-  HLoopInformation* loop = context->GetBlock()->GetLoopInformation();
-  if (loop != nullptr) {
-    return SimplifyMax(
-        GetVal(induction_analysis_->LookupInfo(loop, instruction),
-               GetTripCount(loop, context), /* is_min */ false));
-  }
-  return Value();
+  return SimplifyMax(GetInduction(context, instruction, /* is_min */ false));
 }
 
 //
 // Private class methods.
 //
 
-HInductionVarAnalysis::InductionInfo* InductionVarRange::GetTripCount(HLoopInformation* loop,
-                                                                      HInstruction* context) {
-  // The trip-count expression is only valid when the top-test is taken at least once,
-  // that means, when the analyzed context appears outside the loop header itself.
-  // Early-exit loops are okay, since in those cases, the trip-count is conservative.
-  //
-  // TODO: deal with runtime safety issues on TCs
-  //
-  if (context->GetBlock() != loop->GetHeader()) {
-    HInductionVarAnalysis::InductionInfo* trip =
-        induction_analysis_->LookupInfo(loop, loop->GetHeader()->GetLastInstruction());
-    if (trip != nullptr) {
-      // Wrap the trip-count representation in its own unusual NOP node, so that range analysis
-      // is able to determine the [0, TC - 1] interval without having to construct constants.
-      return induction_analysis_->CreateInvariantOp(HInductionVarAnalysis::kNop, trip, trip);
-    }
+InductionVarRange::Value InductionVarRange::GetInduction(HInstruction* context,
+                                                         HInstruction* instruction,
+                                                         bool is_min) {
+  HLoopInformation* loop = context->GetBlock()->GetLoopInformation();  // closest enveloping loop
+  if (loop != nullptr) {
+    HBasicBlock* header = loop->GetHeader();
+    bool in_body = context->GetBlock() != header;
+    return GetVal(induction_analysis_->LookupInfo(loop, instruction),
+                  induction_analysis_->LookupInfo(loop, header->GetLastInstruction()),
+                  in_body,
+                  is_min);
   }
-  return nullptr;
+  return Value();
 }
 
 InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
                                                      HInductionVarAnalysis::InductionInfo* trip,
+                                                     bool in_body,
                                                      bool is_min) {
   // Detect constants and chase the fetch a bit deeper into the HIR tree, so that it becomes
   // more likely range analysis will compare the same instructions as terminal nodes.
@@ -139,13 +124,13 @@
     return Value(value);
   } else if (instruction->IsAdd()) {
     if (IsIntAndGet(instruction->InputAt(0), &value)) {
-      return AddValue(Value(value), GetFetch(instruction->InputAt(1), trip, is_min));
+      return AddValue(Value(value), GetFetch(instruction->InputAt(1), trip, in_body, is_min));
     } else if (IsIntAndGet(instruction->InputAt(1), &value)) {
-      return AddValue(GetFetch(instruction->InputAt(0), trip, is_min), Value(value));
+      return AddValue(GetFetch(instruction->InputAt(0), trip, in_body, is_min), Value(value));
     }
   } else if (is_min) {
-    // Special case for finding minimum: minimum of trip-count is 1.
-    if (trip != nullptr && instruction == trip->op_b->fetch) {
+    // Special case for finding minimum: minimum of trip-count in loop-body is 1.
+    if (trip != nullptr && in_body && instruction == trip->op_b->fetch) {
       return Value(1);
     }
   }
@@ -154,42 +139,53 @@
 
 InductionVarRange::Value InductionVarRange::GetVal(HInductionVarAnalysis::InductionInfo* info,
                                                    HInductionVarAnalysis::InductionInfo* trip,
+                                                   bool in_body,
                                                    bool is_min) {
   if (info != nullptr) {
     switch (info->induction_class) {
       case HInductionVarAnalysis::kInvariant:
         // Invariants.
         switch (info->operation) {
-          case HInductionVarAnalysis::kNop:  // normalized: 0 or TC-1
-            DCHECK_EQ(info->op_a, info->op_b);
-            return is_min ? Value(0)
-                          : SubValue(GetVal(info->op_b, trip, is_min), Value(1));
           case HInductionVarAnalysis::kAdd:
-            return AddValue(GetVal(info->op_a, trip, is_min),
-                            GetVal(info->op_b, trip, is_min));
+            return AddValue(GetVal(info->op_a, trip, in_body, is_min),
+                            GetVal(info->op_b, trip, in_body, is_min));
           case HInductionVarAnalysis::kSub:  // second reversed!
-            return SubValue(GetVal(info->op_a, trip, is_min),
-                            GetVal(info->op_b, trip, !is_min));
+            return SubValue(GetVal(info->op_a, trip, in_body, is_min),
+                            GetVal(info->op_b, trip, in_body, !is_min));
           case HInductionVarAnalysis::kNeg:  // second reversed!
             return SubValue(Value(0),
-                            GetVal(info->op_b, trip, !is_min));
+                            GetVal(info->op_b, trip, in_body, !is_min));
           case HInductionVarAnalysis::kMul:
-            return GetMul(info->op_a, info->op_b, trip, is_min);
+            return GetMul(info->op_a, info->op_b, trip, in_body, is_min);
           case HInductionVarAnalysis::kDiv:
-            return GetDiv(info->op_a, info->op_b, trip, is_min);
+            return GetDiv(info->op_a, info->op_b, trip, in_body, is_min);
           case HInductionVarAnalysis::kFetch:
-            return GetFetch(info->fetch, trip, is_min);
+            return GetFetch(info->fetch, trip, in_body, is_min);
+          case HInductionVarAnalysis::kTripCountInLoop:
+            if (!in_body) {
+              return is_min ? Value(0)
+                            : GetVal(info->op_b, trip, in_body, is_min);   // one extra!
+            }
+            FALLTHROUGH_INTENDED;
+          case HInductionVarAnalysis::kTripCountInBody:
+            if (in_body) {
+              return is_min ? Value(0)
+                            : SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
+            }
+            break;
+          default:
+            break;
         }
         break;
       case HInductionVarAnalysis::kLinear:
         // Linear induction a * i + b, for normalized 0 <= i < TC.
-        return AddValue(GetMul(info->op_a, trip, trip, is_min),
-                        GetVal(info->op_b, trip, is_min));
+        return AddValue(GetMul(info->op_a, trip, trip, in_body, is_min),
+                        GetVal(info->op_b, trip, in_body, is_min));
       case HInductionVarAnalysis::kWrapAround:
       case HInductionVarAnalysis::kPeriodic:
         // Merge values in the wrap-around/periodic.
-        return MergeVal(GetVal(info->op_a, trip, is_min),
-                        GetVal(info->op_b, trip, is_min), is_min);
+        return MergeVal(GetVal(info->op_a, trip, in_body, is_min),
+                        GetVal(info->op_b, trip, in_body, is_min), is_min);
     }
   }
   return Value();
@@ -198,11 +194,12 @@
 InductionVarRange::Value InductionVarRange::GetMul(HInductionVarAnalysis::InductionInfo* info1,
                                                    HInductionVarAnalysis::InductionInfo* info2,
                                                    HInductionVarAnalysis::InductionInfo* trip,
+                                                   bool in_body,
                                                    bool is_min) {
-  Value v1_min = GetVal(info1, trip, /* is_min */ true);
-  Value v1_max = GetVal(info1, trip, /* is_min */ false);
-  Value v2_min = GetVal(info2, trip, /* is_min */ true);
-  Value v2_max = GetVal(info2, trip, /* is_min */ false);
+  Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
+  Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
+  Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
+  Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
   if (v1_min.is_known && v1_min.a_constant == 0 && v1_min.b_constant >= 0) {
     // Positive range vs. positive or negative range.
     if (v2_min.is_known && v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
@@ -228,11 +225,12 @@
 InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::InductionInfo* info1,
                                                    HInductionVarAnalysis::InductionInfo* info2,
                                                    HInductionVarAnalysis::InductionInfo* trip,
+                                                   bool in_body,
                                                    bool is_min) {
-  Value v1_min = GetVal(info1, trip, /* is_min */ true);
-  Value v1_max = GetVal(info1, trip, /* is_min */ false);
-  Value v2_min = GetVal(info2, trip, /* is_min */ true);
-  Value v2_max = GetVal(info2, trip, /* is_min */ false);
+  Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
+  Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
+  Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
+  Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
   if (v1_min.is_known && v1_min.a_constant == 0 && v1_min.b_constant >= 0) {
     // Positive range vs. positive or negative range.
     if (v2_min.is_known && v2_min.a_constant == 0 && v2_min.b_constant >= 0) {
@@ -255,6 +253,16 @@
   return Value();
 }
 
+bool InductionVarRange::GetConstant(HInductionVarAnalysis::InductionInfo* info, int32_t* value) {
+  Value v_min = GetVal(info, nullptr, false, /* is_min */ true);
+  Value v_max = GetVal(info, nullptr, false, /* is_min */ false);
+  if (v_min.a_constant == 0 && v_max.a_constant == 0 && v_min.b_constant == v_max.b_constant) {
+    *value = v_min.b_constant;
+    return true;
+  }
+  return false;
+}
+
 InductionVarRange::Value InductionVarRange::AddValue(Value v1, Value v2) {
   if (v1.is_known && v2.is_known && IsSafeAdd(v1.b_constant, v2.b_constant)) {
     const int32_t b = v1.b_constant + v2.b_constant;
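
[Annotation] To make the new in_body flag concrete: for a loop normalized to 0 <= i < TC, the normalized induction value tops out at TC-1 while control is still inside the body, but equals TC once the loop has exited (the "one extra!" case under kTripCountInLoop above). A minimal standalone sketch of that distinction, with simplified names that are assumptions rather than ART code:

#include <cassert>

struct Range { int min; int max; };

// Mirrors the kTripCountInBody/kTripCountInLoop split for a loop that is
// normalized to "for (int i = 0; i < tc; ++i)".
Range TripCountRange(int tc, bool in_body) {
  if (in_body) {
    return {0, tc - 1};  // inside the body, i never reaches tc
  }
  return {0, tc};        // after the loop, i has taken one extra step
}

int main() {
  assert(TripCountRange(10, /* in_body */ true).max == 9);
  assert(TripCountRange(10, /* in_body */ false).max == 10);
  return 0;
}
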
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 8280c8b..dbdd2ee 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -73,24 +73,29 @@
   // Private helper methods.
   //
 
-  HInductionVarAnalysis::InductionInfo* GetTripCount(HLoopInformation* loop, HInstruction* context);
+  Value GetInduction(HInstruction* context, HInstruction* instruction, bool is_min);
 
   static Value GetFetch(HInstruction* instruction,
                         HInductionVarAnalysis::InductionInfo* trip,
+                        bool in_body,
                         bool is_min);
-
   static Value GetVal(HInductionVarAnalysis::InductionInfo* info,
                       HInductionVarAnalysis::InductionInfo* trip,
+                      bool in_body,
                       bool is_min);
   static Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
                       HInductionVarAnalysis::InductionInfo* info2,
                       HInductionVarAnalysis::InductionInfo* trip,
+                      bool in_body,
                       bool is_min);
   static Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
                       HInductionVarAnalysis::InductionInfo* info2,
                       HInductionVarAnalysis::InductionInfo* trip,
+                      bool in_body,
                       bool is_min);
 
+  static bool GetConstant(HInductionVarAnalysis::InductionInfo* info, int32_t* value);
+
   static Value AddValue(Value v1, Value v2);
   static Value SubValue(Value v1, Value v2);
   static Value MulValue(Value v1, Value v2);
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 5d9a075..4497a88 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -85,8 +85,7 @@
 
   /** Constructs a trip-count. */
   HInductionVarAnalysis::InductionInfo* CreateTripCount(int32_t tc) {
-    HInductionVarAnalysis::InductionInfo* trip = CreateConst(tc);
-    return CreateInvariant('@', trip, trip);
+    return iva_->CreateTripCount(HInductionVarAnalysis::kTripCountInLoop, CreateConst(tc));
   }
 
   /** Constructs a linear a * i + b induction. */
@@ -112,24 +111,28 @@
 
   Value GetMin(HInductionVarAnalysis::InductionInfo* info,
                HInductionVarAnalysis::InductionInfo* induc) {
-    return InductionVarRange::GetVal(info, induc, /* is_min */ true);
+    return InductionVarRange::GetVal(info, induc, /* in_body */ true, /* is_min */ true);
   }
 
   Value GetMax(HInductionVarAnalysis::InductionInfo* info,
                HInductionVarAnalysis::InductionInfo* induc) {
-    return InductionVarRange::GetVal(info, induc, /* is_min */ false);
+    return InductionVarRange::GetVal(info, induc, /* in_body */ true, /* is_min */ false);
   }
 
   Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
                HInductionVarAnalysis::InductionInfo* info2,
                bool is_min) {
-    return InductionVarRange::GetMul(info1, info2, nullptr, is_min);
+    return InductionVarRange::GetMul(info1, info2, nullptr, /* in_body */ true, is_min);
   }
 
   Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
                HInductionVarAnalysis::InductionInfo* info2,
                bool is_min) {
-    return InductionVarRange::GetDiv(info1, info2, nullptr, is_min);
+    return InductionVarRange::GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
+  }
+
+  bool GetConstant(HInductionVarAnalysis::InductionInfo* info, int32_t* value) {
+    return InductionVarRange::GetConstant(info, value);
   }
 
   Value AddValue(Value v1, Value v2) { return InductionVarRange::AddValue(v1, v2); }
@@ -279,6 +282,13 @@
   ExpectEqual(Value(500), GetDiv(CreateRange(-1000, -40), CreateRange(-4, -2), false));
 }
 
+TEST_F(InductionVarRangeTest, GetConstant) {
+  int32_t value;
+  ASSERT_TRUE(GetConstant(CreateConst(12345), &value));
+  EXPECT_EQ(12345, value);
+  EXPECT_FALSE(GetConstant(CreateRange(1, 2), &value));
+}
+
 TEST_F(InductionVarRangeTest, AddValue) {
   ExpectEqual(Value(110), AddValue(Value(10), Value(100)));
   ExpectEqual(Value(-5), AddValue(Value(&x_, 1, -4), Value(&x_, -1, -1)));
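
[Annotation] The GetConstant test exercises exactly the collapse rule added in induction_var_range.cc: a [min, max] pair denotes a constant only when both extremes have no symbolic part and agree. A self-contained sketch of that predicate, with simplified stand-in types:

#include <cstdint>
#include <optional>

struct Value { int32_t a_constant; int32_t b_constant; };  // models a*x + b

std::optional<int32_t> AsConstant(Value v_min, Value v_max) {
  if (v_min.a_constant == 0 && v_max.a_constant == 0 &&
      v_min.b_constant == v_max.b_constant) {
    return v_min.b_constant;  // [c, c] collapses, e.g. CreateConst(12345)
  }
  return std::nullopt;        // e.g. CreateRange(1, 2): no single value
}
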
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 039029a..0b65c56 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -247,12 +247,14 @@
     return false;
   }
 
-  uint16_t class_def_idx = resolved_method->GetDeclaringClass()->GetDexClassDefIndex();
-  if (!compiler_driver_->IsMethodVerifiedWithoutFailures(
-        resolved_method->GetDexMethodIndex(), class_def_idx, *resolved_method->GetDexFile())) {
-    VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
-                   << " couldn't be verified, so it cannot be inlined";
-    return false;
+  if (!resolved_method->GetDeclaringClass()->IsVerified()) {
+    uint16_t class_def_idx = resolved_method->GetDeclaringClass()->GetDexClassDefIndex();
+    if (!compiler_driver_->IsMethodVerifiedWithoutFailures(
+          resolved_method->GetDexMethodIndex(), class_def_idx, *resolved_method->GetDexFile())) {
+      VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
+                     << " couldn't be verified, so it cannot be inlined";
+      return false;
+    }
   }
 
   if (invoke_instruction->IsInvokeStaticOrDirect() &&
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 22bca2f..3287a0a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -431,19 +431,41 @@
   HInstruction* value = instruction->GetValue();
   if (value->GetType() != Primitive::kPrimNot) return;
 
+  if (CanEnsureNotNullAt(value, instruction)) {
+    instruction->ClearValueCanBeNull();
+  }
+
   if (value->IsArrayGet()) {
     if (value->AsArrayGet()->GetArray() == instruction->GetArray()) {
       // If the code is just swapping elements in the array, no need for a type check.
       instruction->ClearNeedsTypeCheck();
+      return;
     }
   }
 
   if (value->IsNullConstant()) {
     instruction->ClearNeedsTypeCheck();
+    return;
   }
 
-  if (CanEnsureNotNullAt(value, instruction)) {
-    instruction->ClearValueCanBeNull();
+  ScopedObjectAccess soa(Thread::Current());
+  ReferenceTypeInfo array_rti = instruction->GetArray()->GetReferenceTypeInfo();
+  ReferenceTypeInfo value_rti = value->GetReferenceTypeInfo();
+  if (!array_rti.IsValid()) {
+    return;
+  }
+
+  if (value_rti.IsValid() && array_rti.CanArrayHold(value_rti)) {
+    instruction->ClearNeedsTypeCheck();
+    return;
+  }
+
+  if (array_rti.IsObjectArray()) {
+    if (array_rti.IsExact()) {
+      instruction->ClearNeedsTypeCheck();
+      return;
+    }
+    instruction->SetStaticTypeOfArrayIsObjectArray();
   }
 }
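
[Annotation] The restructured VisitArraySet above is easiest to read as a priority list of exits. A hedged standalone sketch of that decision order, with simplified types standing in for the real HArraySet/ReferenceTypeInfo API:

enum class Action { kClearTypeCheck, kMarkStaticObjectArray, kKeepTypeCheck };

struct Rti { bool valid; bool exact; bool object_array; };  // simplified RTI

Action SimplifyArraySet(bool same_array_swap, bool value_is_null,
                        Rti array, bool array_can_hold_value) {
  if (same_array_swap) return Action::kClearTypeCheck;  // element shuffle only
  if (value_is_null) return Action::kClearTypeCheck;    // null always stores
  if (!array.valid) return Action::kKeepTypeCheck;      // nothing is known
  if (array_can_hold_value) return Action::kClearTypeCheck;
  if (array.object_array) {
    if (array.exact) return Action::kClearTypeCheck;    // exactly Object[]
    return Action::kMarkStaticObjectArray;              // hint for codegen
  }
  return Action::kKeepTypeCheck;
}
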
 
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 52e2cbe..1b4d161 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -725,6 +725,24 @@
   __ Sd(val, adr, 0);
 }
 
+// Thread java.lang.Thread.currentThread()
+void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kNoCall,
+                                                            kIntrinsified);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
+  Mips64Assembler* assembler = GetAssembler();
+  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
+
+  __ LoadFromOffset(kLoadUnsignedWord,
+                    out,
+                    TR,
+                    Thread::PeerOffset<kMips64PointerSize>().Int32Value());
+}
+
 // Unimplemented intrinsics.
 
 #define UNIMPLEMENTED_INTRINSIC(Name)                                                  \
@@ -736,7 +754,6 @@
 UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
 UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
 
-UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread)
 UNIMPLEMENTED_INTRINSIC(UnsafeGet)
 UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
 UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
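
[Annotation] The new mips64 Thread.currentThread() intrinsic is a single zero-extending load: the managed Thread peer sits at a fixed offset inside the native Thread, whose address is pinned in the thread register (TR = S1, added to registers_mips64.h later in this commit). A C++-level sketch of what the emitted code does, with an assumed simplified layout:

#include <cstdint>

struct NativeThread {
  // ... fields at lower offsets elided ...
  uint32_t peer;  // compressed reference to the java.lang.Thread object
};

// Equivalent of LoadFromOffset(kLoadUnsignedWord, out, TR, PeerOffset):
// one 32-bit load relative to TR, zero-extended into the output register.
inline uint64_t CurrentThreadPeer(const NativeThread* tr) {
  return tr->peer;
}
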
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 76bd595..de4fb7e 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -468,7 +468,7 @@
  * The intent is to have the code for generating the instruction independent of
  * register allocation. A register allocator just has to provide a LocationSummary.
  */
-class LocationSummary : public ArenaObject<kArenaAllocMisc> {
+class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
  public:
   enum CallKind {
     kNoCall,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 486968c..849f876 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1067,6 +1067,10 @@
   M(Shr, BinaryOperation)                                               \
   M(StaticFieldGet, Instruction)                                        \
   M(StaticFieldSet, Instruction)                                        \
+  M(UnresolvedInstanceFieldGet, Instruction)                            \
+  M(UnresolvedInstanceFieldSet, Instruction)                            \
+  M(UnresolvedStaticFieldGet, Instruction)                              \
+  M(UnresolvedStaticFieldSet, Instruction)                              \
   M(StoreLocal, Instruction)                                            \
   M(Sub, BinaryOperation)                                               \
   M(SuspendCheck, Instruction)                                          \
@@ -1644,17 +1648,34 @@
   bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) {
     return IsValidHandle(type_handle_);
   }
+
   bool IsExact() const { return is_exact_; }
 
   bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsObjectClass();
   }
+
+  bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+    DCHECK(IsValid());
+    return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
+  }
+
   bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK(IsValid());
     return GetTypeHandle()->IsInterface();
   }
 
+  bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return GetTypeHandle()->IsArrayClass();
+  }
+
+  bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!IsExact()) return false;
+    if (!IsArrayClass()) return false;
+    return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
+  }
+
   Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
 
   bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -2222,7 +2243,9 @@
  public:
   int32_t GetValue() const { return value_; }
 
-  uint64_t GetValueAsUint64() const OVERRIDE { return static_cast<uint64_t>(value_); }
+  uint64_t GetValueAsUint64() const OVERRIDE {
+    return static_cast<uint64_t>(static_cast<uint32_t>(value_));
+  }
 
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     DCHECK(other->IsIntConstant());
@@ -4312,7 +4335,8 @@
                 SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
         expected_component_type_(expected_component_type),
         needs_type_check_(value->GetType() == Primitive::kPrimNot),
-        value_can_be_null_(true) {
+        value_can_be_null_(true),
+        static_type_of_array_is_object_array_(false) {
     SetRawInputAt(0, array);
     SetRawInputAt(1, index);
     SetRawInputAt(2, value);
@@ -4341,8 +4365,13 @@
     value_can_be_null_ = false;
   }
 
+  void SetStaticTypeOfArrayIsObjectArray() {
+    static_type_of_array_is_object_array_ = true;
+  }
+
   bool GetValueCanBeNull() const { return value_can_be_null_; }
   bool NeedsTypeCheck() const { return needs_type_check_; }
+  bool StaticTypeOfArrayIsObjectArray() const { return static_type_of_array_is_object_array_; }
 
   HInstruction* GetArray() const { return InputAt(0); }
   HInstruction* GetIndex() const { return InputAt(1); }
@@ -4369,6 +4398,9 @@
   const Primitive::Type expected_component_type_;
   bool needs_type_check_;
   bool value_can_be_null_;
+  // Cached information for the reference_type_info_ so that codegen
+  // does not need to inspect the static type.
+  bool static_type_of_array_is_object_array_;
 
   DISALLOW_COPY_AND_ASSIGN(HArraySet);
 };
@@ -4707,6 +4739,112 @@
   DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
 };
 
+class HUnresolvedInstanceFieldGet : public HExpression<1> {
+ public:
+  HUnresolvedInstanceFieldGet(HInstruction* obj,
+                              Primitive::Type field_type,
+                              uint32_t field_index,
+                              uint32_t dex_pc)
+      : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+        field_index_(field_index) {
+    SetRawInputAt(0, obj);
+  }
+
+  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
+
+  Primitive::Type GetFieldType() const { return GetType(); }
+  uint32_t GetFieldIndex() const { return field_index_; }
+
+  DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
+
+ private:
+  const uint32_t field_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
+};
+
+class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> {
+ public:
+  HUnresolvedInstanceFieldSet(HInstruction* obj,
+                              HInstruction* value,
+                              Primitive::Type field_type,
+                              uint32_t field_index,
+                              uint32_t dex_pc)
+      : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+        field_type_(field_type),
+        field_index_(field_index) {
+    DCHECK_EQ(field_type, value->GetType());
+    SetRawInputAt(0, obj);
+    SetRawInputAt(1, value);
+  }
+
+  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
+
+  Primitive::Type GetFieldType() const { return field_type_; }
+  uint32_t GetFieldIndex() const { return field_index_; }
+
+  DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
+
+ private:
+  const Primitive::Type field_type_;
+  const uint32_t field_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
+};
+
+class HUnresolvedStaticFieldGet : public HExpression<0> {
+ public:
+  HUnresolvedStaticFieldGet(Primitive::Type field_type,
+                            uint32_t field_index,
+                            uint32_t dex_pc)
+      : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+        field_index_(field_index) {
+  }
+
+  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
+
+  Primitive::Type GetFieldType() const { return GetType(); }
+  uint32_t GetFieldIndex() const { return field_index_; }
+
+  DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
+
+ private:
+  const uint32_t field_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
+};
+
+class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
+ public:
+  HUnresolvedStaticFieldSet(HInstruction* value,
+                            Primitive::Type field_type,
+                            uint32_t field_index,
+                            uint32_t dex_pc)
+      : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+        field_type_(field_type),
+        field_index_(field_index) {
+    DCHECK_EQ(field_type, value->GetType());
+    SetRawInputAt(0, value);
+  }
+
+  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
+
+  Primitive::Type GetFieldType() const { return field_type_; }
+  uint32_t GetFieldIndex() const { return field_index_; }
+
+  DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+
+ private:
+  const Primitive::Type field_type_;
+  const uint32_t field_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
+};
+
 // Implement the move-exception DEX instruction.
 class HLoadException : public HExpression<0> {
  public:
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a2b6131..3e982dc 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -321,8 +321,7 @@
 OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
     : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
       run_optimizations_(
-          (driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime)
-          && !driver->GetCompilerOptions().GetDebuggable()),
+          driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
       delegate_(Create(driver, Compiler::Kind::kQuick)) {}
 
 void OptimizingCompiler::Init() {
@@ -575,12 +574,6 @@
                                                      CompilerDriver* compiler_driver,
                                                      const DexCompilationUnit& dex_compilation_unit,
                                                      PassObserver* pass_observer) const {
-  if (graph->HasTryCatch() && graph->IsDebuggable()) {
-    // TODO: b/24054676, stop creating catch phis eagerly to avoid special cases like phis without
-    // inputs.
-    return nullptr;
-  }
-
   ScopedObjectAccess soa(Thread::Current());
   StackHandleScopeCollection handles(soa.Self());
   soa.Self()->TransitionFromRunnableToSuspended(kNative);
@@ -836,8 +829,12 @@
   return compiled_method;
 }
 
-static bool HasOnlyUnresolvedFailures(const VerifiedMethod* verified_method) {
-  uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS;
+static bool CanHandleVerificationFailure(const VerifiedMethod* verified_method) {
+  // For access errors the compiler will use the unresolved helpers (e.g. HInvokeUnresolved).
+  uint32_t unresolved_mask = verifier::VerifyError::VERIFY_ERROR_NO_CLASS
+      | verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS
+      | verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD
+      | verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
   return (verified_method->GetEncounteredVerificationFailures() & (~unresolved_mask)) == 0;
 }
 
@@ -854,7 +851,7 @@
   const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
   DCHECK(!verified_method->HasRuntimeThrow());
   if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
-      || HasOnlyUnresolvedFailures(verified_method)) {
+      || CanHandleVerificationFailure(verified_method)) {
      method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
                          method_idx, jclass_loader, dex_file, dex_cache);
   } else {
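
[Annotation] The renamed CanHandleVerificationFailure is a pure mask test: compile only if every recorded failure bit lies inside the tolerated set. A self-contained sketch with stand-in bits (the real values are the verifier::VerifyError constants):

#include <cstdint>

// Stand-ins for the real verifier::VerifyError bits.
constexpr uint32_t kNoClass      = 1u << 0;
constexpr uint32_t kAccessClass  = 1u << 1;
constexpr uint32_t kAccessField  = 1u << 2;
constexpr uint32_t kAccessMethod = 1u << 3;
constexpr uint32_t kHardFailure  = 1u << 4;

constexpr bool CanHandle(uint32_t failures) {
  constexpr uint32_t tolerated =
      kNoClass | kAccessClass | kAccessField | kAccessMethod;
  return (failures & ~tolerated) == 0;
}

static_assert(CanHandle(kNoClass | kAccessField), "only tolerated bits set");
static_assert(!CanHandle(kHardFailure), "anything else blocks compilation");
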
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c7701b7..df45c8e 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -34,6 +34,8 @@
   kInstructionSimplifications,
   kInstructionSimplificationsArch,
   kUnresolvedMethod,
+  kUnresolvedField,
+  kUnresolvedFieldNotAFastAccess,
   kNotCompiledBranchOutsideMethodCode,
   kNotCompiledCannotBuildSSA,
   kNotCompiledCantAccesType,
@@ -45,7 +47,6 @@
   kNotCompiledPathological,
   kNotCompiledSpaceFilter,
   kNotCompiledUnhandledInstruction,
-  kNotCompiledUnresolvedField,
   kNotCompiledUnsupportedIsa,
   kNotCompiledVerifyAtRuntime,
   kNotOptimizedDisabled,
@@ -104,6 +105,8 @@
       case kInstructionSimplifications: return "kInstructionSimplifications";
       case kInstructionSimplificationsArch: return "kInstructionSimplificationsArch";
       case kUnresolvedMethod : return "kUnresolvedMethod";
+      case kUnresolvedField : return "kUnresolvedField";
+      case kUnresolvedFieldNotAFastAccess : return "kUnresolvedFieldNotAFastAccess";
       case kNotCompiledBranchOutsideMethodCode: return "kNotCompiledBranchOutsideMethodCode";
       case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
       case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
@@ -115,7 +118,6 @@
       case kNotCompiledPathological : return "kNotCompiledPathological";
       case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
       case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
-      case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
       case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
       case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
       case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fe837e4..d22f254 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -52,6 +52,8 @@
   void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
   void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
   void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
   void VisitInvoke(HInvoke* instr) OVERRIDE;
   void VisitArrayGet(HArrayGet* instr) OVERRIDE;
   void VisitCheckCast(HCheckCast* instr) OVERRIDE;
@@ -450,6 +452,22 @@
   UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
 }
 
+void RTPVisitor::VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) {
+  // TODO: Use descriptor to get the actual type.
+  if (instr->GetFieldType() == Primitive::kPrimNot) {
+    instr->SetReferenceTypeInfo(
+      ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+  }
+}
+
+void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) {
+  // TODO: Use descriptor to get the actual type.
+  if (instr->GetFieldType() == Primitive::kPrimNot) {
+    instr->SetReferenceTypeInfo(
+      ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+  }
+}
+
 void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
   ScopedObjectAccess soa(Thread::Current());
   mirror::DexCache* dex_cache =
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index c43e58f..9cdb89b 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -187,7 +187,8 @@
   }
 
   number_of_registers_ = codegen_->GetNumberOfCoreRegisters();
-  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
+  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
+                                                    kArenaAllocRegisterAllocator);
   processing_core_registers_ = true;
   unhandled_ = &unhandled_core_intervals_;
   for (LiveInterval* fixed : physical_core_register_intervals_) {
@@ -206,7 +207,8 @@
   handled_.clear();
 
   number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
-  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
+  registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_,
+                                                    kArenaAllocRegisterAllocator);
   processing_core_registers_ = false;
   unhandled_ = &unhandled_fp_intervals_;
   for (LiveInterval* fixed : physical_fp_register_intervals_) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 414cc7d..e4b0999 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -27,7 +27,7 @@
 
 static constexpr int kNoRegister = -1;
 
-class BlockInfo : public ArenaObject<kArenaAllocMisc> {
+class BlockInfo : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
       : block_(block),
@@ -55,7 +55,7 @@
  * A live range contains the start and end of a range where an instruction or a temporary
  * is live.
  */
-class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
+class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
     DCHECK_LT(start, end);
@@ -101,7 +101,7 @@
 /**
  * A use position represents a live interval use at a given position.
  */
-class UsePosition : public ArenaObject<kArenaAllocMisc> {
+class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   UsePosition(HInstruction* user,
               HEnvironment* environment,
@@ -169,7 +169,7 @@
   DISALLOW_COPY_AND_ASSIGN(UsePosition);
 };
 
-class SafepointPosition : public ArenaObject<kArenaAllocMisc> {
+class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   explicit SafepointPosition(HInstruction* instruction)
       : instruction_(instruction),
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cc32da1..680e2d7 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -235,11 +235,7 @@
   UsageError("  --compiler-backend=(Quick|Optimizing): select compiler backend");
   UsageError("      set.");
   UsageError("      Example: --compiler-backend=Optimizing");
-  if (kUseOptimizingCompiler) {
-    UsageError("      Default: Optimizing");
-  } else {
-    UsageError("      Default: Quick");
-  }
+  UsageError("      Default: Optimizing");
   UsageError("");
   UsageError("  --compiler-filter="
                 "(verify-none"
@@ -503,7 +499,7 @@
 class Dex2Oat FINAL {
  public:
   explicit Dex2Oat(TimingLogger* timings) :
-      compiler_kind_(kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick),
+      compiler_kind_(Compiler::kOptimizing),
       instruction_set_(kRuntimeISA),
       // Take the default set of instruction features from the build.
       verification_results_(nullptr),
@@ -752,10 +748,9 @@
 
   void ProcessOptions(ParserOptions* parser_options) {
     image_ = (!image_filename_.empty());
-    if (!parser_options->requested_specific_compiler && !kUseOptimizingCompiler) {
-      // If no specific compiler is requested, the current behavior is
-      // to compile the boot image with Quick, and the rest with Optimizing.
-      compiler_kind_ = image_ ? Compiler::kQuick : Compiler::kOptimizing;
+    if (image_) {
+      // We need the boot image to always be debuggable.
+      parser_options->debuggable = true;
     }
 
     if (oat_filename_.empty() && oat_fd_ == -1) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 1ac7930..82bc8b9 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -109,11 +109,12 @@
   std::string boot_image_location_;
 };
 
-#if defined (ART_TARGET)
+#if defined (ART_TARGET) && !defined(__mips__)
 TEST_F(ImgDiagTest, ImageDiffPidSelf) {
 #else
 // Can't run this test on the host, it will fail when trying to open /proc/kpagestats
 // because it's root read-only.
+// The test also fails on mips. b/24596015.
 TEST_F(ImgDiagTest, DISABLED_ImageDiffPidSelf) {
 #endif
   // Invoke 'img_diag' against the current process.
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index dc1cf8a..d09631b 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -839,13 +839,12 @@
 TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
-     * On entry r0 holds field index, r1:r2 hold new_val
+     * On entry r0 holds field index, r2:r3 hold new_val
      */
     .extern artSet64StaticFromCode
 ENTRY art_quick_set64_static
-    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12   @ save callee saves in case of GC
-    mov    r3, r2                        @ pass one half of wide argument
-    mov    r2, r1                        @ pass other half of wide argument
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r12   @ save callee saves in case of GC
+                                         @ r2:r3 contain the wide argument
     ldr    r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE]  @ pass referrer
     str    r9, [sp, #-16]!               @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
@@ -870,6 +869,7 @@
     .extern artSet64InstanceFromCode
 ENTRY art_quick_set64_instance
     SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr  @ save callee saves in case of GC
+                                         @ r2:r3 contain the wide argument
     ldr    r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE]  @ pass referrer
     str    r9, [sp, #-12]!               @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 12
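
[Annotation] The deleted mov pair becomes unnecessary because the wide value now arrives in the aligned register pair r2:r3, leaving r1 free for the referrer. A hedged C++-level view of the callee the stub forwards to; the parameter order matches the "(field_idx, referrer, new_val, Thread*)" comments used elsewhere in this commit, and the exact types are assumptions:

#include <cstdint>

struct ArtMethod;
struct Thread;

// On 32-bit ARM a 64-bit argument occupies an even/odd register pair, so
// with field_idx in r0 the wide new_val lands in r2:r3; the stub only has
// to load the referrer into r1 and push Thread::Current() on the stack.
extern "C" int artSet64StaticFromCode(uint32_t field_idx,
                                      ArtMethod* referrer,
                                      uint64_t new_val,
                                      Thread* self);
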
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6812178..be5a15e 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1421,9 +1421,8 @@
     .extern artSet64StaticFromCode
 ENTRY art_quick_set64_static
     SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  // save callee saves in case of GC
-    mov    x3, x1                     // Store value
     ldr    x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
-    mov    x2, x3                     // Put value param
+                                      // x2 contains the parameter
     mov    x3, xSELF                  // pass Thread::Current
     bl     artSet64StaticFromCode
     RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
diff --git a/runtime/arch/mips/memcmp16_mips.S b/runtime/arch/mips/memcmp16_mips.S
index aef81af..c8eac9b 100644
--- a/runtime/arch/mips/memcmp16_mips.S
+++ b/runtime/arch/mips/memcmp16_mips.S
@@ -26,7 +26,7 @@
   beqz  $a2,done   /* 0 length string */
   beq $a0,$a1,done    /* strings are identical */
 
-  /* Unoptimised... */
+  /* Unoptimized... */
 1:  lhu $t0,0($a0)
   lhu $t1,0($a1)
   addu  $a1,2
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index ce1b2f3..68156ae 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1244,7 +1244,7 @@
     .extern artSet64StaticFromCode
 ENTRY art_quick_set64_static
     SETUP_REFS_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case of GC
-    move   $a2, $a1                      # pass new_val
+                                         # a2 contains the new val
     ld     $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp)  # pass referrer's Method*
     jal    artSet64StaticFromCode        # (field_idx, referrer, new_val, Thread*)
     move   $a3, rSELF                    # pass Thread::Current
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index cd94d5e..1d07d47 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -52,7 +52,6 @@
   S6   = 22,
   S7   = 23,
   T8   = 24,  // More temporaries.
-  TMP  = T8,  // scratch register (in addition to AT)
   T9   = 25,
   K0   = 26,  // Reserved for trap handler.
   K1   = 27,
@@ -60,6 +59,8 @@
   SP   = 29,  // Stack pointer.
   S8   = 30,  // Saved value/frame pointer.
   RA   = 31,  // Return address.
+  TR   = S1,  // ART Thread Register
+  TMP  = T8,  // scratch register (in addition to AT)
   kNumberOfGpuRegisters = 32,
   kNoGpuRegister = -1  // Signals an illegal register.
 };
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index f10799c..016c664 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -418,6 +418,48 @@
     return result;
   }
 
+  // 64-bit static field sets use a slightly different register order than Invoke3WithReferrer.
+  // TODO: implement for other architectures
+  // TODO: try to merge with Invoke3WithReferrer
+  size_t Invoke64StaticSet(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
+                           ArtMethod* referrer) {
+    // Push a transition back into managed code onto the linked list in thread.
+    ManagedStack fragment;
+    self->PushManagedStackFragment(&fragment);
+
+    size_t result;
+    size_t fpr_result = 0;
+#if defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
+    // Note: Uses the native convention
+    // TODO: Set the thread?
+    __asm__ __volatile__(
+        "pushq %[referrer]\n\t"        // Push referrer
+        "pushq (%%rsp)\n\t"             // & 16B alignment padding
+        ".cfi_adjust_cfa_offset 16\n\t"
+        "call *%%rax\n\t"              // Call the stub
+        "addq $16, %%rsp\n\t"          // Pop referrer and padding
+        ".cfi_adjust_cfa_offset -16\n\t"
+        : "=a" (result)
+          // Use the result from rax
+        : "D"(arg0), "d"(arg1), "S"(arg2), "a"(code), [referrer] "c"(referrer)
+          // This places arg0 into rdi, arg1 into rdx, arg2 into rsi, code into rax,
+          // and referrer into rcx
+        : "rbx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+          "memory");  // clobber all
+    // TODO: Should we clobber the other registers?
+#else
+    UNUSED(arg0, arg1, arg2, code, referrer);
+    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
+    result = 0;
+#endif
+    // Pop transition.
+    self->PopManagedStackFragment(fragment);
+
+    fp_result = fpr_result;
+    EXPECT_EQ(0U, fp_result);
+
+    return result;
+  }
+
   // TODO: Set up a frame according to referrer's specs.
   size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
                                       Thread* self, ArtMethod* referrer, size_t hidden) {
@@ -774,22 +816,6 @@
     return result;
   }
 
-  // Method with 32b arg0, 64b arg1
-  size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
-                              ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
-    defined(__aarch64__)
-    // Just pass through.
-    return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
-#else
-    // Need to split up arguments.
-    uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
-    uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);
-
-    return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
-#endif
-  }
-
   static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
     int32_t offset;
 #ifdef __LP64__
@@ -1974,21 +2000,22 @@
 }
 
 
-// TODO: Complete these tests for 32b architectures.
+// TODO: Complete these tests for 32b architectures
 
 static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
-    defined(__aarch64__)
+// TODO: (defined(__mips__) && defined(__LP64__)) || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__))
   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
 
   for (size_t i = 0; i < arraysize(values); ++i) {
-    test->Invoke3UWithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
-                               values[i],
-                               StubTest::GetEntrypoint(self, kQuickSet64Static),
-                               self,
-                               referrer);
+    test->Invoke64StaticSet(static_cast<size_t>(f->GetDexFieldIndex()),
+                            values[i],
+                            0U,
+                            StubTest::GetEntrypoint(self, kQuickSet64Static),
+                            self,
+                            referrer);
 
     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            0U, 0U,
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index f3b15c9..3afc4d5 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1434,15 +1434,18 @@
 // Call artSet64StaticFromCode with 3 word size arguments plus with the referrer in the 2nd position
 // so that new_val is aligned on even registers were we passing arguments in registers.
 DEFINE_FUNCTION art_quick_set64_static
+    // TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
+    movd %ebx, %xmm0
     SETUP_REFS_ONLY_CALLEE_SAVE_FRAME  ebx, ebx  // save ref containing registers for GC
-    mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx  // get referrer
+    movd %xmm0, %ebx
+    mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx  // get referrer
     subl LITERAL(12), %esp        // alignment padding
     CFI_ADJUST_CFA_OFFSET(12)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
-    PUSH edx                      // pass high half of new_val
-    PUSH ecx                      // pass low half of new_val
-    PUSH ebx                      // pass referrer
+    PUSH ebx                      // pass high half of new_val
+    PUSH edx                      // pass low half of new_val
+    PUSH ecx                      // pass referrer
     PUSH eax                      // pass field_idx
     call SYMBOL(artSet64StaticFromCode)  // (field_idx, referrer, new_val, Thread*)
     addl LITERAL(32), %esp        // pop arguments
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 2f438a3..1133203 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1383,7 +1383,7 @@
 
 // This is singled out as the argument order is different.
 DEFINE_FUNCTION art_quick_set64_static
-    movq %rsi, %rdx                      // pass new_val
+                                         // new_val is already in %rdx
     movq 8(%rsp), %rsi                   // pass referrer
     SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
                                          // field_idx is in rdi
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index d6b2b7e..632a50f 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -35,6 +35,8 @@
 #include "quick/quick_method_frame_info.h"
 #include "read_barrier-inl.h"
 #include "runtime-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
 #include "utils.h"
 
 namespace art {
@@ -75,9 +77,28 @@
           expected_root, desired_root);
 }
 
+// AssertSharedHeld doesn't work in GetAccessFlags, so use a NO_THREAD_SAFETY_ANALYSIS helper.
+// TODO: Figure out why ASSERT_SHARED_CAPABILITY doesn't work.
+ALWAYS_INLINE
+static inline void DoGetAccessFlagsHelper(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
+  CHECK(method->IsRuntimeMethod() || method->GetDeclaringClass()->IsIdxLoaded() ||
+        method->GetDeclaringClass()->IsErroneous());
+}
+
 inline uint32_t ArtMethod::GetAccessFlags() {
-  DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
-         GetDeclaringClass()->IsErroneous());
+  if (kIsDebugBuild) {
+    Thread* self = Thread::Current();
+    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
+      ScopedObjectAccess soa(self);
+      CHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() ||
+            GetDeclaringClass()->IsErroneous());
+    } else {
+      // We cannot use SOA in this case. We might be holding the lock, but may not be in the
+      // runnable state (e.g., during GC).
+      Locks::mutator_lock_->AssertSharedHeld(self);
+      DoGetAccessFlagsHelper(this);
+    }
+  }
   return access_flags_;
 }
 
diff --git a/runtime/art_method.h b/runtime/art_method.h
index f78c827..0315c3a 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -75,7 +75,9 @@
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
-  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+  // Note: GetAccessFlags acquires the mutator lock in debug mode to check that the declaring
+  // class of this method is at least idx-loaded (or erroneous).
+  ALWAYS_INLINE uint32_t GetAccessFlags();
 
   void SetAccessFlags(uint32_t new_access_flags) {
     // Not called within a transaction.
@@ -86,77 +88,78 @@
   InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Returns true if the method is declared public.
-  bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPublic() {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the method is declared private.
-  bool IsPrivate() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPrivate() {
     return (GetAccessFlags() & kAccPrivate) != 0;
   }
 
   // Returns true if the method is declared static.
-  bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsStatic() {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
   // Returns true if the method is a constructor.
-  bool IsConstructor() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsConstructor() {
     return (GetAccessFlags() & kAccConstructor) != 0;
   }
 
   // Returns true if the method is a class initializer.
-  bool IsClassInitializer() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsClassInitializer() {
     return IsConstructor() && IsStatic();
   }
 
   // Returns true if the method is static, private, or a constructor.
-  bool IsDirect() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsDirect() {
     return IsDirect(GetAccessFlags());
   }
 
   static bool IsDirect(uint32_t access_flags) {
-    return (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0;
+    constexpr uint32_t direct = kAccStatic | kAccPrivate | kAccConstructor;
+    return (access_flags & direct) != 0;
   }
 
   // Returns true if the method is declared synchronized.
-  bool IsSynchronized() SHARED_REQUIRES(Locks::mutator_lock_) {
-    uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
+  bool IsSynchronized() {
+    constexpr uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
     return (GetAccessFlags() & synchonized) != 0;
   }
 
-  bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsFinal() {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsMiranda() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsMiranda() {
     return (GetAccessFlags() & kAccMiranda) != 0;
   }
 
-  bool IsNative() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsNative() {
     return (GetAccessFlags() & kAccNative) != 0;
   }
 
-  bool IsFastNative() SHARED_REQUIRES(Locks::mutator_lock_) {
-    uint32_t mask = kAccFastNative | kAccNative;
+  bool IsFastNative() {
+    constexpr uint32_t mask = kAccFastNative | kAccNative;
     return (GetAccessFlags() & mask) == mask;
   }
 
-  bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsAbstract() {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
-  bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsSynthetic() {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
   bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsPreverified() {
     return (GetAccessFlags() & kAccPreverified) != 0;
   }
 
-  void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetPreverified() {
     DCHECK(!IsPreverified());
     SetAccessFlags(GetAccessFlags() | kAccPreverified);
   }
@@ -404,7 +407,7 @@
     return GetNativePointer<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
   }
 
-  void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) {
+  void SetEntryPointFromJni(const void* entrypoint) {
     DCHECK(IsNative());
     SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
   }
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 691b57f..345428c 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -83,6 +83,8 @@
   "BaselineMaps ",
   "Optimization ",
   "GVN          ",
+  "InductionVar ",
+  "BCE          ",
   "SsaLiveness  ",
   "SsaPhiElim   ",
   "RefTypeProp  ",
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 17045c6..b4f19ee 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -93,6 +93,8 @@
   kArenaAllocBaselineMaps,
   kArenaAllocOptimization,
   kArenaAllocGvn,
+  kArenaAllocInductionVarAnalysis,
+  kArenaAllocBoundsCheckElimination,
   kArenaAllocSsaLiveness,
   kArenaAllocSsaPhiElimination,
   kArenaAllocReferenceTypePropagation,
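
[Annotation] The two new enumerators feed the name table in arena_allocator.cc above; each allocation is attributed to a kind so per-pass arena usage can be reported. A minimal sketch of that bookkeeping idea, independent of the real ArenaAllocator:

#include <cstddef>
#include <cstdio>
#include <new>

enum AllocKind { kAllocGvn, kAllocInductionVar, kAllocBce, kNumAllocKinds };
static const char* const kAllocKindNames[kNumAllocKinds] = {
    "GVN          ", "InductionVar ", "BCE          "};

struct TrackingArena {
  size_t bytes[kNumAllocKinds] = {};
  void* Alloc(size_t n, AllocKind kind) {
    bytes[kind] += n;          // attribute the allocation to its pass
    return ::operator new(n);  // a real arena would bump-allocate instead
  }
  void Dump() const {
    for (int k = 0; k < kNumAllocKinds; ++k) {
      std::printf("%s%zu bytes\n", kAllocKindNames[k], bytes[k]);
    }
  }
};
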
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8e42040..dbc5cec 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1500,7 +1500,7 @@
   Thread* const self = Thread::Current();
   JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
   for (const ClassLoaderData& data : class_loaders_) {
-    vm->DecodeWeakGlobalDuringShutdown(self, data.weak_root);
+    vm->DeleteWeakGlobalRef(self, data.weak_root);
     delete data.allocator;
     delete data.class_table;
   }
@@ -4186,6 +4186,8 @@
     data.allocator = Runtime::Current()->CreateLinearAlloc();
     class_loaders_.push_back(data);
     // Don't already have a class table, add it to the class loader.
+    CHECK(class_loader->GetClassTable() == nullptr);
+    CHECK(class_loader->GetAllocator() == nullptr);
     class_loader->SetClassTable(data.class_table);
     class_loader->SetAllocator(data.allocator);
   }
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f705330..739403f 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -560,7 +560,7 @@
 
  private:
   struct ClassLoaderData {
-    jobject weak_root;  // Weak root to enable class unloading.
+    jweak weak_root;  // Weak root to enable class unloading.
     ClassTable* class_table;
     LinearAlloc* allocator;
   };
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 450031a..d24b4fb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -579,15 +579,11 @@
 
   Runtime* runtime = Runtime::Current();
   ScopedSuspendAll ssa(__FUNCTION__);
-  Thread* self = Thread::Current();
-  ThreadState old_state = self->SetStateUnsafe(kRunnable);
-  CHECK_NE(old_state, kRunnable);
   if (RequiresDeoptimization()) {
     runtime->GetInstrumentation()->EnableDeoptimization();
   }
   instrumentation_events_ = 0;
   gDebuggerActive = true;
-  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
   LOG(INFO) << "Debugger is active";
 }
 
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index cc3eefe..8ae0b07 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -71,44 +71,6 @@
       *outer_method->GetDexFile(), method_index, dex_cache, class_loader, nullptr, invoke_type);
 }
 
-inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
-                                            Runtime::CalleeSaveType type,
-                                            bool do_caller_check = false)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
-
-  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
-  auto** caller_sp = reinterpret_cast<ArtMethod**>(
-      reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
-  ArtMethod* outer_method = *caller_sp;
-  ArtMethod* caller = outer_method;
-
-  if ((outer_method != nullptr) && outer_method->IsOptimized(sizeof(void*))) {
-    const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
-    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
-        (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
-    uintptr_t native_pc_offset = outer_method->NativeQuickPcOffset(caller_pc);
-    CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
-    StackMapEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
-    DCHECK(stack_map.IsValid());
-    if (stack_map.HasInlineInfo(encoding)) {
-      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-      caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
-    }
-  }
-
-  if (kIsDebugBuild && do_caller_check) {
-    // Note that do_caller_check is optional, as this method can be called by
-    // stubs, and tests without a proper call stack.
-    NthCallerVisitor visitor(Thread::Current(), 1, true);
-    visitor.WalkStack();
-    CHECK_EQ(caller, visitor.caller);
-  }
-
-  return caller;
-}
-
 inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   return GetCalleeSaveMethodCaller(
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 94aced2..f193999 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -21,11 +21,15 @@
 #include "base/mutex.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "nth_caller_visitor.h"
 #include "reflection.h"
 #include "scoped_thread_state_change.h"
 #include "ScopedLocalRef.h"
@@ -345,4 +349,54 @@
   return true;
 }
 
+ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
+                                     Runtime::CalleeSaveType type,
+                                     bool do_caller_check)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
+
+  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+  auto** caller_sp = reinterpret_cast<ArtMethod**>(
+      reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
+  ArtMethod* outer_method = *caller_sp;
+  ArtMethod* caller = outer_method;
+
+  if ((outer_method != nullptr) && outer_method->IsOptimized(sizeof(void*))) {
+    const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+    uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
+        (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
+    if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
+      uintptr_t native_pc_offset = outer_method->NativeQuickPcOffset(caller_pc);
+      CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+      StackMapEncoding encoding = code_info.ExtractEncoding();
+      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+      DCHECK(stack_map.IsValid());
+      if (stack_map.HasInlineInfo(encoding)) {
+        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+        caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
+      }
+    } else {
+      // We're instrumenting; just use the StackVisitor, which knows how to
+      // handle instrumented frames.
+      NthCallerVisitor visitor(Thread::Current(), 1, true);
+      visitor.WalkStack();
+      caller = visitor.caller;
+      if (kIsDebugBuild) {
+        // Avoid doing the check below.
+        do_caller_check = false;
+      }
+    }
+  }
+
+  if (kIsDebugBuild && do_caller_check) {
+    // Note that do_caller_check is optional, as this method can be called by
+    // stubs, and tests without a proper call stack.
+    NthCallerVisitor visitor(Thread::Current(), 1, true);
+    visitor.WalkStack();
+    CHECK_EQ(caller, visitor.caller);
+  }
+
+  return caller;
+}
+
 }  // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 53f2677..4217cab 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -26,6 +26,7 @@
 #include "gc/allocator_type.h"
 #include "invoke_type.h"
 #include "jvalue.h"
+#include "runtime.h"
 
 namespace art {
 
@@ -179,6 +180,10 @@
 template <typename INT_TYPE, typename FLOAT_TYPE>
 inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
 
+ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
+                                     Runtime::CalleeSaveType type,
+                                     bool do_caller_check = false);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index f69c39e..fc5c52e 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -112,4 +112,61 @@
   return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self);
 }
 
+extern uint64_t GenericJniMethodEnd(Thread* self,
+                                    uint32_t saved_local_ref_cookie,
+                                    jvalue result,
+                                    uint64_t result_f,
+                                    ArtMethod* called,
+                                    HandleScope* handle_scope)
+    // TODO: NO_THREAD_SAFETY_ANALYSIS as GoToRunnable() is NO_THREAD_SAFETY_ANALYSIS
+    NO_THREAD_SAFETY_ANALYSIS {
+  GoToRunnable(self);
+  // We need the mutator lock (i.e., calling GoToRunnable()) before accessing the shorty or the
+  // locked object.
+  jobject locked = called->IsSynchronized() ? handle_scope->GetHandle(0).ToJObject() : nullptr;
+  char return_shorty_char = called->GetShorty()[0];
+  if (return_shorty_char == 'L') {
+    if (locked != nullptr) {
+      UnlockJniSynchronizedMethod(locked, self);
+    }
+    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceHandleResult(
+        result.l, saved_local_ref_cookie, self));
+  } else {
+    if (locked != nullptr) {
+      UnlockJniSynchronizedMethod(locked, self);  // Must decode before pop.
+    }
+    PopLocalReferences(saved_local_ref_cookie, self);
+    switch (return_shorty_char) {
+      case 'F': {
+        if (kRuntimeISA == kX86) {
+          // Convert back the result to float.
+          double d = bit_cast<double, uint64_t>(result_f);
+          return bit_cast<uint32_t, float>(static_cast<float>(d));
+        } else {
+          return result_f;
+        }
+      }
+      case 'D':
+        return result_f;
+      case 'Z':
+        return result.z;
+      case 'B':
+        return result.b;
+      case 'C':
+        return result.c;
+      case 'S':
+        return result.s;
+      case 'I':
+        return result.i;
+      case 'J':
+        return result.j;
+      case 'V':
+        return 0;
+      default:
+        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
+        return 0;
+    }
+  }
+}
+
 }  // namespace art
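
For illustration, GenericJniMethodEnd() normalizes the native return value into a uint64_t keyed on the first character of the method shorty; the 'F' case exists because the x86 native ABI hands floats back widened to double, so the bits must be narrowed again. A minimal standalone sketch of that packing, with plain C++ types standing in for jvalue (the names here are illustrative, not ART's):

    #include <cstdint>
    #include <cstring>

    uint64_t PackReturn(char shorty, int64_t integral, uint64_t raw_fp, bool narrow_x86_float) {
      switch (shorty) {
        case 'F':
          if (narrow_x86_float) {
            double d;  // x86 returns the float widened to double
            std::memcpy(&d, &raw_fp, sizeof(d));
            const float f = static_cast<float>(d);
            uint32_t bits;
            std::memcpy(&bits, &f, sizeof(bits));
            return bits;  // zero-extended float bits
          }
          return raw_fp;
        case 'D': return raw_fp;  // already the full 64-bit FP image
        case 'V': return 0;
        default:  return static_cast<uint64_t>(integral);  // Z/B/C/S/I/J cases
      }
    }
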
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 1e9e4fb..5d3ac73 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1989,62 +1989,27 @@
                                 reinterpret_cast<uintptr_t>(nativeCode));
 }
 
+// Defined in quick_jni_entrypoints.cc.
+extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
+                                    jvalue result, uint64_t result_f, ArtMethod* called,
+                                    HandleScope* handle_scope);
 /*
  * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
  * unlocking.
  */
-extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
+                                                    jvalue result,
+                                                    uint64_t result_f) {
+  // We're here just back from a native call. We don't hold the shared mutator lock at this point
+  // and won't until GoToRunnable() is called later in GenericJniMethodEnd(). Accessing objects or
+  // doing anything else that requires the mutator lock before then would cause problems, as the GC
+  // may hold the exclusive mutator lock and may be moving objects.
   ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   ArtMethod* called = *sp;
   uint32_t cookie = *(sp32 - 1);
-
-  jobject lock = nullptr;
-  if (called->IsSynchronized()) {
-    HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp)
-        + sizeof(*sp));
-    lock = table->GetHandle(0).ToJObject();
-  }
-
-  char return_shorty_char = called->GetShorty()[0];
-
-  if (return_shorty_char == 'L') {
-    return artQuickGenericJniEndJNIRef(self, cookie, result.l, lock);
-  } else {
-    artQuickGenericJniEndJNINonRef(self, cookie, lock);
-
-    switch (return_shorty_char) {
-      case 'F': {
-        if (kRuntimeISA == kX86) {
-          // Convert back the result to float.
-          double d = bit_cast<double, uint64_t>(result_f);
-          return bit_cast<uint32_t, float>(static_cast<float>(d));
-        } else {
-          return result_f;
-        }
-      }
-      case 'D':
-        return result_f;
-      case 'Z':
-        return result.z;
-      case 'B':
-        return result.b;
-      case 'C':
-        return result.c;
-      case 'S':
-        return result.s;
-      case 'I':
-        return result.i;
-      case 'J':
-        return result.j;
-      case 'V':
-        return 0;
-      default:
-        LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
-        return 0;
-    }
-  }
+  HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
+  return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
 }
 
 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
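
For illustration, the end trampoline can no longer assert SHARED_REQUIRES(Locks::mutator_lock_), since the thread is effectively still in native state until GenericJniMethodEnd() calls GoToRunnable(). The only frame poking it still does is locating the HandleScope, which lives immediately after the ArtMethod* slot at the top of the quick frame. A standalone sketch of that layout arithmetic (FakeHandleScope is a hypothetical stand-in):

    #include <cstdint>

    struct FakeHandleScope {};  // hypothetical stand-in for HandleScope

    FakeHandleScope* HandleScopeFromFrame(void** sp) {
      // Skip exactly one pointer-sized slot (the method pointer) to reach the scope.
      return reinterpret_cast<FakeHandleScope*>(
          reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
    }
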
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index 25fdd7c..2510514 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -53,13 +53,7 @@
 }
 
 inline bool RosAlloc::Run::IsFull() {
-  const size_t num_vec = NumberOfBitmapVectors();
-  for (size_t v = 0; v < num_vec; ++v) {
-    if (~alloc_bit_map_[v] != 0) {
-      return false;
-    }
-  }
-  return true;
+  return free_list_.Size() == 0;
 }
 
 inline bool RosAlloc::CanAllocFromThreadLocalRun(Thread* self, size_t size) {
@@ -120,45 +114,14 @@
 }
 
 inline void* RosAlloc::Run::AllocSlot() {
-  const size_t idx = size_bracket_idx_;
-  while (true) {
-    if (kIsDebugBuild) {
-      // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
-      for (size_t i = 0; i < first_search_vec_idx_; ++i) {
-        CHECK_EQ(~alloc_bit_map_[i], 0U);
-      }
-    }
-    uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
-    uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
-    if (LIKELY(ffz1 != 0)) {
-      const uint32_t ffz = ffz1 - 1;
-      const uint32_t slot_idx = ffz +
-          first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
-      const uint32_t mask = 1U << ffz;
-      DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
-      // Found an empty slot. Set the bit.
-      DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
-      *alloc_bitmap_ptr |= mask;
-      DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
-      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) +
-          headerSizes[idx] + slot_idx * bracketSizes[idx];
-      if (kTraceRosAlloc) {
-        LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex
-                  << reinterpret_cast<intptr_t>(slot_addr)
-                  << ", bracket_size=" << std::dec << bracketSizes[idx]
-                  << ", slot_idx=" << slot_idx;
-      }
-      return slot_addr;
-    }
-    const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
-    if (first_search_vec_idx_ + 1 >= num_words) {
-      DCHECK(IsFull());
-      // Already at the last word, return null.
-      return nullptr;
-    }
-    // Increase the index to the next word and try again.
-    ++first_search_vec_idx_;
+  Slot* slot = free_list_.Remove();
+  if (kTraceRosAlloc && slot != nullptr) {
+    const uint8_t idx = size_bracket_idx_;
+    LOG(INFO) << "RosAlloc::Run::AllocSlot() : " << slot
+              << ", bracket_size=" << std::dec << bracketSizes[idx]
+              << ", slot_idx=" << SlotIndex(slot);
   }
+  return slot;
 }
 
 }  // namespace allocator
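
For illustration, the payoff of this change is that AllocSlot() goes from a find-first-zero scan over bitmap words to a constant-time pop of an intrusive LIFO list threaded through the free slots themselves. A minimal standalone sketch of that structure (not the ART SlotFreeList template):

    #include <cstddef>

    struct Slot { Slot* next; };  // the next pointer lives inside the free slot itself

    struct FreeList {
      Slot* head = nullptr;
      size_t size = 0;
      void Push(Slot* s) { s->next = head; head = s; ++size; }
      Slot* Pop() {  // returns nullptr when the run is full
        Slot* s = head;
        if (s != nullptr) {
          head = s->next;
          s->next = nullptr;
          --size;
        }
        return s;
      }
    };
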
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 470bc1c..9c8e4df 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -35,7 +35,7 @@
 namespace gc {
 namespace allocator {
 
-static constexpr bool kUsePrefetchDuringAllocRun = true;
+static constexpr bool kUsePrefetchDuringAllocRun = false;
 static constexpr bool kPrefetchNewRunDataByZeroing = false;
 static constexpr size_t kPrefetchStride = 64;
 
@@ -43,8 +43,6 @@
 size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
 size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
 size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
-size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
-size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
 bool RosAlloc::initialized_ = false;
 size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
 RosAlloc::Run* RosAlloc::dedicated_full_run_ =
@@ -556,9 +554,7 @@
       new_run->magic_num_ = kMagicNum;
     }
     new_run->size_bracket_idx_ = idx;
-    new_run->SetAllocBitMapBitsForInvalidSlots();
     DCHECK(!new_run->IsThreadLocal());
-    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
     DCHECK(!new_run->to_be_bulk_freed_);
     if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
       // Take ownership of the cache lines if we are likely to be thread local run.
@@ -576,6 +572,7 @@
         }
       }
     }
+    new_run->InitFreeList();
   }
   return new_run;
 }
@@ -695,15 +692,11 @@
       MutexLock mu(self, *size_bracket_locks_[idx]);
       bool is_all_free_after_merge;
-      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
+      // This is safe to do for the dedicated_full_run_ since its free lists are empty.
-      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
+      if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) {
         DCHECK_NE(thread_local_run, dedicated_full_run_);
         // Some slot got freed. Keep it.
         DCHECK(!thread_local_run->IsFull());
         DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
-        if (is_all_free_after_merge) {
-          // Check that the bitmap idx is back at 0 if it's all free.
-          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
-        }
       } else {
         // No slots got freed. Try to refill the thread-local run.
         DCHECK(thread_local_run->IsFull());
@@ -792,7 +785,7 @@
     DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
     DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
     DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
-    run->MarkThreadLocalFreeBitMap(ptr);
+    run->AddToThreadLocalFreeList(ptr);
     if (kTraceRosAlloc) {
       LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                 << reinterpret_cast<intptr_t>(run);
@@ -818,7 +811,7 @@
     }
     DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
     DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
-    run->ZeroHeader();
+    run->ZeroHeaderAndSlotHeaders();
     {
       MutexLock lock_mu(self, lock_);
       FreePages(self, run, true);
@@ -853,271 +846,145 @@
   return bracket_size;
 }
 
-std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
-  std::string bit_map_str;
-  for (size_t v = 0; v < num_vec; v++) {
-    uint32_t vec = bit_map_base[v];
-    if (v != num_vec - 1) {
-      bit_map_str.append(StringPrintf("%x-", vec));
+template<bool kUseTail>
+std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) {
+  std::string free_list_str;
+  const uint8_t idx = size_bracket_idx_;
+  const size_t bracket_size = bracketSizes[idx];
+  for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) {
+    bool is_last = slot->Next() == nullptr;
+    uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
+        reinterpret_cast<uintptr_t>(FirstSlot());
+    DCHECK_EQ(slot_offset % bracket_size, 0U);
+    uintptr_t slot_idx = slot_offset / bracket_size;
+    if (!is_last) {
+      free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx)));
     } else {
-      bit_map_str.append(StringPrintf("%x", vec));
+      free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx)));
     }
   }
-  return bit_map_str.c_str();
+  return free_list_str;
 }
 
 std::string RosAlloc::Run::Dump() {
   size_t idx = size_bracket_idx_;
-  size_t num_slots = numOfSlots[idx];
-  size_t num_vec = RoundUp(num_slots, 32) / 32;
   std::ostringstream stream;
   stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
          << "{ magic_num=" << static_cast<int>(magic_num_)
          << " size_bracket_idx=" << idx
          << " is_thread_local=" << static_cast<int>(is_thread_local_)
          << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
-         << " first_search_vec_idx=" << first_search_vec_idx_
-         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
-         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
-         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
+         << " free_list=" << FreeListToStr(&free_list_)
+         << " bulk_free_list=" << FreeListToStr(&bulk_free_list_)
+         << " thread_local_list=" << FreeListToStr(&thread_local_free_list_)
          << " }" << std::endl;
   return stream.str();
 }
 
+inline size_t RosAlloc::Run::SlotIndex(Slot* slot) {
+  const uint8_t idx = size_bracket_idx_;
+  const size_t bracket_size = bracketSizes[idx];
+  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
+      - reinterpret_cast<uint8_t*>(FirstSlot());
+  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
+  size_t slot_idx = offset_from_slot_base / bracket_size;
+  DCHECK_LT(slot_idx, numOfSlots[idx]);
+  return slot_idx;
+}
+
 void RosAlloc::Run::FreeSlot(void* ptr) {
   DCHECK(!IsThreadLocal());
   const uint8_t idx = size_bracket_idx_;
   const size_t bracket_size = bracketSizes[idx];
-  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
-      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
-  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
-  size_t slot_idx = offset_from_slot_base / bracket_size;
-  DCHECK_LT(slot_idx, numOfSlots[idx]);
-  size_t vec_idx = slot_idx / 32;
-  if (kIsDebugBuild) {
-    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
-    DCHECK_LT(vec_idx, num_vec);
-  }
-  size_t vec_off = slot_idx % 32;
-  uint32_t* vec = &alloc_bit_map_[vec_idx];
-  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
-  const uint32_t mask = 1U << vec_off;
-  DCHECK_NE(*vec & mask, 0U);
-  *vec &= ~mask;
-  DCHECK_EQ(*vec & mask, 0U);
+  Slot* slot = ToSlot(ptr);
   // Zero out the memory.
   // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
-  memset(ptr, 0, bracket_size);
+  memset(slot, 0, bracket_size);
+  free_list_.Add(slot);
   if (kTraceRosAlloc) {
-    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
-              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
+    LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot
+              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
   }
 }
 
-size_t RosAlloc::Run::NumberOfFreeSlots() {
-  size_t num_alloc_slots = 0;
-  const size_t idx = size_bracket_idx_;
-  const size_t num_slots = numOfSlots[idx];
-  const size_t num_vec = RoundUp(num_slots, 32) / 32;
-  DCHECK_NE(num_vec, 0U);
-  for (size_t v = 0; v < num_vec - 1; v++) {
-    num_alloc_slots += POPCOUNT(alloc_bit_map_[v]);
-  }
-  // Don't count the invalid bits in the last vector.
-  uint32_t last_vec_masked = alloc_bit_map_[num_vec - 1] &
-      ~GetBitmapLastVectorMask(num_slots, num_vec);
-  num_alloc_slots += POPCOUNT(last_vec_masked);
-  size_t num_free_slots = num_slots - num_alloc_slots;
-  DCHECK_LE(num_alloc_slots, num_slots);
-  DCHECK_LE(num_free_slots, num_slots);
-  return num_free_slots;
-}
-
-inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
+inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) {
   DCHECK(IsThreadLocal());
-  // Free slots in the alloc bit map based on the thread local free bit map.
-  const size_t idx = size_bracket_idx_;
-  const size_t num_of_slots = numOfSlots[idx];
-  const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
-  bool changed = false;
-  uint32_t* vecp = &alloc_bit_map_[0];
-  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
-  bool is_all_free_after = true;
-  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
-    uint32_t tl_free_vec = *tl_free_vecp;
-    uint32_t vec_before = *vecp;
-    uint32_t vec_after;
-    if (tl_free_vec != 0) {
-      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
-      vec_after = vec_before & ~tl_free_vec;
-      *vecp = vec_after;
-      changed = true;
-      *tl_free_vecp = 0;  // clear the thread local free bit map.
-    } else {
-      vec_after = vec_before;
-    }
-    if (vec_after != 0) {
-      if (v == num_vec - 1) {
-        // Only not all free if a bit other than the mask bits are set.
-        is_all_free_after =
-            is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
-      } else {
-        is_all_free_after = false;
-      }
-    }
-    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
-  }
-  *is_all_free_after_out = is_all_free_after;
-  // Return true if there was at least a bit set in the thread-local
-  // free bit map and at least a bit in the alloc bit map changed.
-  return changed;
-}
-
-inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
-  DCHECK(!IsThreadLocal());
-  // Free slots in the alloc bit map based on the bulk free bit map.
-  const size_t num_vec = NumberOfBitmapVectors();
-  uint32_t* vecp = &alloc_bit_map_[0];
-  uint32_t* free_vecp = &BulkFreeBitMap()[0];
-  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
-    uint32_t free_vec = *free_vecp;
-    if (free_vec != 0) {
-      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
-      *vecp &= ~free_vec;
-      *free_vecp = 0;  // clear the bulk free bit map.
-    }
-    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
-  }
-}
-
-inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
-  DCHECK(IsThreadLocal());
-  // Union the thread local bit map with the bulk free bit map.
-  size_t num_vec = NumberOfBitmapVectors();
-  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
-  uint32_t* from_vecp = &BulkFreeBitMap()[0];
-  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
-    uint32_t from_vec = *from_vecp;
-    if (from_vec != 0) {
-      *to_vecp |= from_vec;
-      *from_vecp = 0;  // clear the bulk free bit map.
-    }
-    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
-  }
-}
-
-inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
-  DCHECK(IsThreadLocal());
-  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
-}
-
-inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
-  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkFreeBitMap");
-}
-
-inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
-                                                  const char* caller_name) {
+  // Merge the thread local free list into the free list and clear the thread local free list.
   const uint8_t idx = size_bracket_idx_;
-  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
-      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
+  const size_t thread_local_free_list_size = thread_local_free_list_.Size();
+  const size_t size_before = free_list_.Size();
+  free_list_.Merge(&thread_local_free_list_);
+  const size_t size_after = free_list_.Size();
+  DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0);
+  DCHECK_LE(size_before, size_after);
+  *is_all_free_after_out = free_list_.Size() == numOfSlots[idx];
+  // Return true if at least one slot was added to the free list.
+  return size_before < size_after;
+}
+
+inline void RosAlloc::Run::MergeBulkFreeListToFreeList() {
+  DCHECK(!IsThreadLocal());
+  // Merge the bulk free list into the free list and clear the bulk free list.
+  free_list_.Merge(&bulk_free_list_);
+}
+
+inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() {
+  DCHECK(IsThreadLocal());
+  // Merge the bulk free list into the thread local free list and clear the bulk free list.
+  thread_local_free_list_.Merge(&bulk_free_list_);
+}
+
+inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) {
+  DCHECK(IsThreadLocal());
+  AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__);
+}
+
+inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) {
+  return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__);
+}
+
+inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr,
+                                                 SlotFreeList<true>* free_list,
+                                                 const char* caller_name) {
+  const uint8_t idx = size_bracket_idx_;
   const size_t bracket_size = bracketSizes[idx];
-  memset(ptr, 0, bracket_size);
-  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
-  size_t slot_idx = offset_from_slot_base / bracket_size;
-  DCHECK_LT(slot_idx, numOfSlots[idx]);
-  size_t vec_idx = slot_idx / 32;
-  if (kIsDebugBuild) {
-    size_t num_vec = NumberOfBitmapVectors();
-    DCHECK_LT(vec_idx, num_vec);
-  }
-  size_t vec_off = slot_idx % 32;
-  uint32_t* vec = &free_bit_map_base[vec_idx];
-  const uint32_t mask = 1U << vec_off;
-  DCHECK_EQ(*vec & mask, 0U);
-  *vec |= mask;
-  DCHECK_NE(*vec & mask, 0U);
+  Slot* slot = ToSlot(ptr);
+  memset(slot, 0, bracket_size);
+  free_list->Add(slot);
   if (kTraceRosAlloc) {
-    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
-              << reinterpret_cast<intptr_t>(ptr)
-              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
+    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr
+              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
   }
   return bracket_size;
 }
 
-inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
-  const size_t kBitsPerVec = 32;
-  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
-  DCHECK_NE(num_vec, 0U);
-  size_t remain = num_vec * kBitsPerVec - num_slots;
-  DCHECK_LT(remain, kBitsPerVec);
-  return ((1U << remain) - 1) << ((kBitsPerVec - remain) & 0x1F);
-}
-
-inline bool RosAlloc::Run::IsAllFree() {
+inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
+  DCHECK(IsAllFree());
   const uint8_t idx = size_bracket_idx_;
-  const size_t num_slots = numOfSlots[idx];
-  const size_t num_vec = NumberOfBitmapVectors();
-  DCHECK_NE(num_vec, 0U);
-  // Check the last vector after the loop since it uses a special case for the masked bits.
-  for (size_t v = 0; v < num_vec - 1; v++) {
-    uint32_t vec = alloc_bit_map_[v];
-    if (vec != 0) {
-      return false;
-    }
+  // Zero the slot header (next pointers).
+  for (Slot* slot = free_list_.Head(); slot != nullptr; ) {
+    Slot* next_slot = slot->Next();
+    slot->Clear();
+    slot = next_slot;
   }
-  // Make sure the last word is equal to the mask, all other bits must be 0.
-  return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
-}
-
-inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
-  const size_t num_vec = NumberOfBitmapVectors();
-  for (size_t v = 0; v < num_vec; v++) {
-    uint32_t vec = BulkFreeBitMap()[v];
-    if (vec != 0) {
-      return false;
-    }
-  }
-  return true;
-}
-
-inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
-  const size_t num_vec = NumberOfBitmapVectors();
-  for (size_t v = 0; v < num_vec; v++) {
-    uint32_t vec = ThreadLocalFreeBitMap()[v];
-    if (vec != 0) {
-      return false;
-    }
-  }
-  return true;
-}
-
-inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
-  const size_t idx = size_bracket_idx_;
-  const size_t num_slots = numOfSlots[idx];
-  const size_t num_vec = RoundUp(num_slots, 32) / 32;
-  DCHECK_NE(num_vec, 0U);
-  // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
-  // don't represent valid slots.
-  alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
-}
-
-inline void RosAlloc::Run::ZeroHeader() {
-  const uint8_t idx = size_bracket_idx_;
+  // Zero the header.
   memset(this, 0, headerSizes[idx]);
+  // Check that the entire run is all zero.
+  if (kIsDebugBuild) {
+    const size_t size = numOfPages[idx] * kPageSize;
+    const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
+    for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
+      CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
+    }
+  }
 }
 
 inline void RosAlloc::Run::ZeroData() {
   const uint8_t idx = size_bracket_idx_;
-  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
+  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot());
   memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
 }
 
-inline void RosAlloc::Run::FillAllocBitMap() {
-  size_t num_vec = NumberOfBitmapVectors();
-  memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
-  first_search_vec_idx_ = num_vec - 1;  // No free bits in any of the bitmap words.
-}
-
 void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                     void* arg) {
   size_t idx = size_bracket_idx_;
@@ -1126,26 +993,27 @@
   size_t bracket_size = IndexToBracketSize(idx);
   DCHECK_EQ(slot_base + num_slots * bracket_size,
             reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
-  size_t num_vec = RoundUp(num_slots, 32) / 32;
-  size_t slots = 0;
-  const uint32_t* const tl_free_vecp = IsThreadLocal() ? ThreadLocalFreeBitMap() : nullptr;
-  for (size_t v = 0; v < num_vec; v++, slots += 32) {
-    DCHECK_GE(num_slots, slots);
-    uint32_t vec = alloc_bit_map_[v];
-    if (tl_free_vecp != nullptr) {
-      // Clear out the set bits in the thread local free bitmap since these aren't actually
-      // allocated.
-      vec &= ~tl_free_vecp[v];
+  // Free slots are on the free list and the allocated/used slots are not. We traverse the free list
+  // to find out and record which slots are free in the is_free array.
+  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
+  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
+    size_t slot_idx = SlotIndex(slot);
+    DCHECK_LT(slot_idx, num_slots);
+    is_free[slot_idx] = true;
+  }
+  if (IsThreadLocal()) {
+    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
+      size_t slot_idx = SlotIndex(slot);
+      DCHECK_LT(slot_idx, num_slots);
+      is_free[slot_idx] = true;
     }
-    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
-    for (size_t i = 0; i < end; ++i) {
-      bool is_allocated = ((vec >> i) & 0x1) != 0;
-      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
-      if (is_allocated) {
-        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
-      } else {
-        handler(slot_addr, slot_addr + bracket_size, 0, arg);
-      }
+  }
+  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
+    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
+    if (!is_free[slot_idx]) {
+      handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
+    } else {
+      handler(slot_addr, slot_addr + bracket_size, 0, arg);
     }
   }
 }
@@ -1236,7 +1104,7 @@
     DCHECK(run != nullptr);
     DCHECK_EQ(run->magic_num_, kMagicNum);
-    // Set the bit in the bulk free bit map.
+    // Add the slot to the bulk free list.
-    freed_bytes += run->MarkBulkFreeBitMap(ptr);
+    freed_bytes += run->AddToBulkFreeList(ptr);
 #ifdef __ANDROID__
     if (!run->to_be_bulk_freed_) {
       run->to_be_bulk_freed_ = true;
@@ -1262,7 +1130,7 @@
       DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
       DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
       DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
-      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
+      run->MergeBulkFreeListToThreadLocalFreeList();
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                   << std::hex << reinterpret_cast<intptr_t>(run);
@@ -1272,7 +1140,7 @@
       // it's become all free.
     } else {
       bool run_was_full = run->IsFull();
-      run->MergeBulkFreeBitMapIntoAllocBitMap();
+      run->MergeBulkFreeListToFreeList();
       if (kTraceRosAlloc) {
         LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                   << reinterpret_cast<intptr_t>(run);
@@ -1316,7 +1184,7 @@
           DCHECK(non_full_runs->find(run) == non_full_runs->end());
         }
         if (!run_was_current) {
-          run->ZeroHeader();
+          run->ZeroHeaderAndSlotHeaders();
           MutexLock lock_mu(self, lock_);
           FreePages(self, run, true);
         }
@@ -1677,9 +1545,9 @@
       size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
       free_bytes += num_free_slots * bracketSizes[idx];
       bool dont_care;
-      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
+      thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care);
       thread_local_run->SetIsThreadLocal(false);
-      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
+      thread_local_run->MergeBulkFreeListToFreeList();
       DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
       DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
       RevokeRun(self, idx, thread_local_run);
@@ -1702,7 +1570,7 @@
       }
     }
   } else if (run->IsAllFree()) {
-    run->ZeroHeader();
+    run->ZeroHeaderAndSlotHeaders();
     MutexLock mu(self, lock_);
     FreePages(self, run, true);
   } else {
@@ -1814,22 +1682,15 @@
     size_t max_num_of_slots = run_size / bracket_size;
     // Compute the actual number of slots by taking the header and
     // alignment into account.
-    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
-    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
+    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t));
+    DCHECK_EQ(fixed_header_size, 80U);
     size_t header_size = 0;
-    size_t bulk_free_bit_map_offset = 0;
-    size_t thread_local_free_bit_map_offset = 0;
     size_t num_of_slots = 0;
     // Search for the maximum number of slots that allows enough space
-    // for the header (including the bit maps.)
+    // for the header.
     for (int s = max_num_of_slots; s >= 0; s--) {
       size_t tmp_slots_size = bracket_size * s;
-      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
-      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
-      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
-      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
-      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
-      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
+      size_t tmp_unaligned_header_size = fixed_header_size;
       // Align up the unaligned header size. bracket_size may not be a power of two.
       size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
           tmp_unaligned_header_size :
@@ -1841,24 +1702,19 @@
-        // space for the header (including the bit maps.)
+        // space for the header.
         num_of_slots = s;
         header_size = tmp_header_size;
-        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
-        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
         break;
       }
     }
-    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
+    DCHECK_GT(num_of_slots, 0U);
+    DCHECK_GT(header_size, 0U);
     // Add the padding for the alignment remainder.
     header_size += run_size % bracket_size;
     DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
     numOfSlots[i] = num_of_slots;
     headerSizes[i] = header_size;
-    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
-    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
     if (kTraceRosAlloc) {
       LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
-                << ", headerSizes[" << i << "]=" << headerSizes[i]
-                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
-                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];;
+                << ", headerSizes[" << i << "]=" << headerSizes[i];
     }
   }
-  // Fill the alloc bitmap so nobody can successfully allocate from it.
+  // Set up the dedicated full run so that nobody can successfully allocate from it.
@@ -1868,8 +1724,11 @@
   // It doesn't matter which size bracket we use since the main goal is to have the allocation
   // fail 100% of the time you attempt to allocate into the dedicated full run.
   dedicated_full_run_->size_bracket_idx_ = 0;
-  dedicated_full_run_->FillAllocBitMap();
+  DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U);  // It looks full.
   dedicated_full_run_->SetIsThreadLocal(true);
+
+  // The smallest bracket size must be at least as large as the sizeof(Slot).
+  DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
 }
 
 void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
@@ -2025,19 +1884,12 @@
   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
   uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
   const size_t num_slots = numOfSlots[idx];
-  const size_t num_vec = RoundUp(num_slots, 32) / 32;
-  CHECK_GT(num_vec, 0U);
   size_t bracket_size = IndexToBracketSize(idx);
   CHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
       << "Mismatch in the end address of the run " << Dump();
-  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
-  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
-  uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
-  // Make sure all the bits at the end of the run are set so that we don't allocate there.
-  CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
-  // Ensure that the first bitmap index is valid.
-  CHECK_LT(first_search_vec_idx_, num_vec);
+  // Check that the bulk free list is empty. It's only used during BulkFree().
+  CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump();
   // Check the thread local runs, the current runs, and the run sets.
   if (IsThreadLocal()) {
     // If it's a thread local run, then it must be pointed to by an owner thread.
@@ -2059,11 +1911,11 @@
     }
     CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
   } else {
-    // If it's not thread local, check that the thread local free bitmap is clean.
-    CHECK(IsThreadLocalFreeBitmapClean())
-        << "A non-thread-local run's thread local free bitmap isn't clean "
+    // If it's not thread local, check that the thread local free list is empty.
+    CHECK(IsThreadLocalFreeListEmpty())
+        << "A non-thread-local run's thread local free list isn't empty "
         << Dump();
-    // Check if it's a current run for the size bucket.
+    // Check if it's a current run for the size bracket.
     bool is_current_run = false;
     for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
       MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
@@ -2101,34 +1953,39 @@
     }
   }
   // Check each slot.
-  size_t slots = 0;
   size_t memory_tool_modifier = running_on_memory_tool ?
       2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
       0U;
-  for (size_t v = 0; v < num_vec; v++, slots += 32) {
-    DCHECK_GE(num_slots, slots) << "Out of bounds";
-    uint32_t vec = alloc_bit_map_[v];
-    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
-    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
-    for (size_t i = 0; i < end; ++i) {
-      bool is_allocated = ((vec >> i) & 0x1) != 0;
-      // If a thread local run, slots may be marked freed in the
-      // thread local free bitmap.
-      bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
-      if (is_allocated && !is_thread_local_freed) {
-        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
-        if (running_on_memory_tool) {
-          slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
-        }
-        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
-        size_t obj_size = obj->SizeOf();
-        CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
-            << "A run slot contains a large object " << Dump();
-        CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
-            << PrettyTypeOf(obj) << " "
-            << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
-            << " A run slot contains an object with wrong size " << Dump();
-      }
+  // TODO: reuse InspectAllSlots().
+  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
+  // Mark the free slots and the remaining ones are allocated.
+  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
+    size_t slot_idx = SlotIndex(slot);
+    DCHECK_LT(slot_idx, num_slots);
+    is_free[slot_idx] = true;
+  }
+  if (IsThreadLocal()) {
+    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
+      size_t slot_idx = SlotIndex(slot);
+      DCHECK_LT(slot_idx, num_slots);
+      is_free[slot_idx] = true;
+    }
+  }
+  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
+    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
+    if (running_on_memory_tool) {
+      slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
+    }
+    if (!is_free[slot_idx]) {
+      // The slot is allocated.
+      mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
+      size_t obj_size = obj->SizeOf();
+      CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
+          << "A run slot contains a large object " << Dump();
+      CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
+          << PrettyTypeOf(obj) << " "
+          << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
+          << " A run slot contains an object with wrong size " << Dump();
     }
   }
 }
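
For illustration, with the per-run bitmaps gone, the header is a fixed 80 bytes and the slot-count search in Initialize() no longer depends on the candidate slot count, so the first (largest) count that fits wins. A standalone sketch of that computation under the same assumptions (header aligned up to the bracket size, which need not be a power of two):

    #include <cstddef>

    size_t ComputeNumSlots(size_t run_size, size_t bracket_size, size_t fixed_header_size) {
      // Align the header up to a multiple of bracket_size so slot 0 is bracket-aligned;
      // bracket_size may not be a power of two, so align by remainder, not masking.
      size_t header = fixed_header_size;
      if (header % bracket_size != 0) {
        header += bracket_size - header % bracket_size;
      }
      for (size_t s = run_size / bracket_size; s > 0; s--) {
        if (header + s * bracket_size <= run_size) {
          return s;  // the largest slot count that still leaves room for the header
        }
      }
      return 0;
    }
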
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index a7f29af..87f1392 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -112,6 +112,198 @@
     DISALLOW_COPY_AND_ASSIGN(FreePageRun);
   };
 
+  // The slot header.
+  class Slot {
+   public:
+    Slot* Next() const {
+      return next_;
+    }
+    void SetNext(Slot* next) {
+      next_ = next;
+    }
+    // The slot right before this slot in terms of the address.
+    Slot* Left(size_t bracket_size) {
+      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) - bracket_size);
+    }
+    void Clear() {
+      next_ = nullptr;
+    }
+
+   private:
+    Slot* next_;  // Next slot in the list.
+  };
+
+  // We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need to
+  // traverse the list from the head to the tail when merging free lists.
+  // We don't use the tail (kUseTail == false) for the free list so that the allocation fast
+  // path doesn't have to manage a tail pointer.
+  template<bool kUseTail = true>
+  class SlotFreeList {
+   public:
+    SlotFreeList() : head_(0U), tail_(0U), size_(0U) {}
+    Slot* Head() const {
+      return reinterpret_cast<Slot*>(head_);
+    }
+    Slot* Tail() const {
+      CHECK(kUseTail);
+      return reinterpret_cast<Slot*>(tail_);
+    }
+    size_t Size() const {
+      return size_;
+    }
+    // Removes from the head of the free list.
+    Slot* Remove() {
+      Slot* slot;
+      if (kIsDebugBuild) {
+        Verify();
+      }
+      Slot** headp = reinterpret_cast<Slot**>(&head_);
+      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
+      Slot* old_head = *headp;
+      if (old_head == nullptr) {
+        // List was empty.
+        if (kUseTail) {
+          DCHECK(*tailp == nullptr);
+        }
+        return nullptr;
+      } else {
+        // List wasn't empty.
+        if (kUseTail) {
+          DCHECK(*tailp != nullptr);
+        }
+        Slot* old_head_next = old_head->Next();
+        slot = old_head;
+        *headp = old_head_next;
+        if (kUseTail && old_head_next == nullptr) {
+          // List becomes empty.
+          *tailp = nullptr;
+        }
+      }
+      slot->Clear();
+      --size_;
+      if (kIsDebugBuild) {
+        Verify();
+      }
+      return slot;
+    }
+    void Add(Slot* slot) {
+      if (kIsDebugBuild) {
+        Verify();
+      }
+      DCHECK(slot != nullptr);
+      Slot** headp = reinterpret_cast<Slot**>(&head_);
+      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
+      Slot* old_head = *headp;
+      if (old_head == nullptr) {
+        // List was empty.
+        if (kUseTail) {
+          DCHECK(*tailp == nullptr);
+        }
+        *headp = slot;
+        if (kUseTail) {
+          *tailp = slot;
+        }
+      } else {
+        // List wasn't empty.
+        if (kUseTail) {
+          DCHECK(*tailp != nullptr);
+        }
+        *headp = slot;
+        slot->SetNext(old_head);
+      }
+      ++size_;
+      if (kIsDebugBuild) {
+        Verify();
+      }
+    }
+    // Merge the given list into this list. Empty the given list.
+    // Deliberately support only a kUseTail == true SlotFreeList parameter because 1) we don't
+    // currently have a situation where we need a kUseTail == false SlotFreeList parameter, and 2)
+    // supporting the kUseTail == false parameter would require an O(n) linked list traversal to do
+    // the merge if 'this' SlotFreeList has kUseTail == false, which we'd like to avoid.
+    void Merge(SlotFreeList<true>* list) {
+      if (kIsDebugBuild) {
+        Verify();
+        CHECK(list != nullptr);
+        list->Verify();
+      }
+      if (list->Size() == 0) {
+        return;
+      }
+      Slot** headp = reinterpret_cast<Slot**>(&head_);
+      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
+      Slot* old_head = *headp;
+      if (old_head == nullptr) {
+        // List was empty.
+        *headp = list->Head();
+        if (kUseTail) {
+          *tailp = list->Tail();
+        }
+        size_ = list->Size();
+      } else {
+        // List wasn't empty.
+        DCHECK(list->Head() != nullptr);
+        *headp = list->Head();
+        DCHECK(list->Tail() != nullptr);
+        list->Tail()->SetNext(old_head);
+        // if kUseTail, no change to tailp.
+        size_ += list->Size();
+      }
+      list->Reset();
+      if (kIsDebugBuild) {
+        Verify();
+      }
+    }
+
+    void Reset() {
+      head_ = 0;
+      if (kUseTail) {
+        tail_ = 0;
+      }
+      size_ = 0;
+    }
+
+    void Verify() {
+      Slot* head = reinterpret_cast<Slot*>(head_);
+      Slot* tail = kUseTail ? reinterpret_cast<Slot*>(tail_) : nullptr;
+      if (size_ == 0) {
+        CHECK(head == nullptr);
+        if (kUseTail) {
+          CHECK(tail == nullptr);
+        }
+      } else {
+        CHECK(head != nullptr);
+        if (kUseTail) {
+          CHECK(tail != nullptr);
+        }
+        size_t count = 0;
+        for (Slot* slot = head; slot != nullptr; slot = slot->Next()) {
+          ++count;
+          if (kUseTail && slot->Next() == nullptr) {
+            CHECK_EQ(slot, tail);
+          }
+        }
+        CHECK_EQ(size_, count);
+      }
+    }
+
+   private:
+    // A pointer (Slot*) to the head of the list. Always 8 bytes so that we will have the same
+    // layout between 32 bit and 64 bit. This is not strictly necessary, but we do it for 1)
+    // uniformity, 2) not having to change this code if we move to a non-low-4G heap in the
+    // future, and 3) because the space savings from using 32 bit fields on 32 bit would be
+    // lost in the noise (not enough to open up space for an extra slot).
+    uint64_t head_;
+    // A pointer (Slot*) to the tail of the list. Always 8 bytes so that we will have the same
+    // layout between 32 bit and 64 bit. The tail is stored to speed up merging of lists.
+    // Unused if kUseTail is false.
+    uint64_t tail_;
+    // The number of slots in the list. This is used to make it fast to check if a free list is all
+    // free without traversing the whole free list.
+    uint32_t size_;
+    uint32_t padding_ ATTRIBUTE_UNUSED;
+  };
+
   // Represents a run of memory slots of the same size.
   //
   // A run's memory layout:
@@ -125,19 +317,17 @@
   // +-------------------+
   // | to_be_bulk_freed  |
   // +-------------------+
-  // | top_bitmap_idx    |
-  // +-------------------+
   // |                   |
-  // | alloc bit map     |
+  // | free list         |
   // |                   |
   // +-------------------+
   // |                   |
-  // | bulk free bit map |
+  // | bulk free list    |
   // |                   |
   // +-------------------+
   // |                   |
   // | thread-local free |
-  // | bit map           |
+  // | list              |
   // |                   |
   // +-------------------+
   // | padding due to    |
@@ -160,94 +350,100 @@
     uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
     uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
     uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
-    uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
-    uint32_t alloc_bit_map_[0];      // The bit map that allocates if each slot is in use.
+    uint32_t padding_ ATTRIBUTE_UNUSED;
+    // Use a tailless free list for free_list_ so that the alloc fast path does not manage the tail.
+    SlotFreeList<false> free_list_;
+    SlotFreeList<true> bulk_free_list_;
+    SlotFreeList<true> thread_local_free_list_;
+    // Padding due to alignment
+    // Slot 0
+    // Slot 1
+    // ...
 
-    // bulk_free_bit_map_[] : The bit map that is used for GC to
-    // temporarily mark the slots to free without using a lock. After
-    // all the slots to be freed in a run are marked, all those slots
-    // get freed in bulk with one locking per run, as opposed to one
-    // locking per slot to minimize the lock contention. This is used
-    // within BulkFree().
-
-    // thread_local_free_bit_map_[] : The bit map that is used for GC
-    // to temporarily mark the slots to free in a thread-local run
-    // without using a lock (without synchronizing the thread that
-    // owns the thread-local run.) When the thread-local run becomes
-    // full, the thread will check this bit map and update the
-    // allocation bit map of the run (that is, the slots get freed.)
-
-    // Returns the byte size of the header except for the bit maps.
+    // Returns the byte size of the header.
     static size_t fixed_header_size() {
-      Run temp;
-      size_t size = reinterpret_cast<uint8_t*>(&temp.alloc_bit_map_) - reinterpret_cast<uint8_t*>(&temp);
-      DCHECK_EQ(size, static_cast<size_t>(8));
-      return size;
+      return sizeof(Run);
     }
-    // Returns the base address of the free bit map.
-    uint32_t* BulkFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
+    Slot* FirstSlot() {
+      const uint8_t idx = size_bracket_idx_;
+      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) + headerSizes[idx]);
     }
-    // Returns the base address of the thread local free bit map.
-    uint32_t* ThreadLocalFreeBitMap() {
-      return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
+    Slot* LastSlot() {
+      const uint8_t idx = size_bracket_idx_;
+      const size_t bracket_size = bracketSizes[idx];
+      uintptr_t end = reinterpret_cast<uintptr_t>(End());
+      Slot* last_slot = reinterpret_cast<Slot*>(end - bracket_size);
+      DCHECK_LE(FirstSlot(), last_slot);
+      return last_slot;
+    }
+    SlotFreeList<false>* FreeList() {
+      return &free_list_;
+    }
+    SlotFreeList<true>* BulkFreeList() {
+      return &bulk_free_list_;
+    }
+    SlotFreeList<true>* ThreadLocalFreeList() {
+      return &thread_local_free_list_;
     }
     void* End() {
       return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
     }
-    // Returns the number of bitmap words per run.
-    size_t NumberOfBitmapVectors() const {
-      return RoundUp(numOfSlots[size_bracket_idx_], 32) / 32;
-    }
     void SetIsThreadLocal(bool is_thread_local) {
       is_thread_local_  = is_thread_local ? 1 : 0;
     }
     bool IsThreadLocal() const {
       return is_thread_local_ != 0;
     }
-    // Frees slots in the allocation bit map with regard to the
-    // thread-local free bit map. Used when a thread-local run becomes
+    // Set up the free list for a new/empty run.
+    void InitFreeList() {
+      const uint8_t idx = size_bracket_idx_;
+      const size_t bracket_size = bracketSizes[idx];
+      Slot* first_slot = FirstSlot();
+      // Add backwards so the first slot is at the head of the list.
+      for (Slot* slot = LastSlot(); slot >= first_slot; slot = slot->Left(bracket_size)) {
+        free_list_.Add(slot);
+      }
+    }
+    // Merge the thread local free list to the free list. Used when a thread-local run becomes
     // full.
-    bool MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out);
-    // Frees slots in the allocation bit map with regard to the bulk
-    // free bit map. Used in a bulk free.
-    void MergeBulkFreeBitMapIntoAllocBitMap();
-    // Unions the slots to be freed in the free bit map into the
-    // thread-local free bit map. In a bulk free, as a two-step
-    // process, GC will first record all the slots to free in a run in
-    // the free bit map where it can write without a lock, and later
-    // acquire a lock once per run to union the bits of the free bit
-    // map to the thread-local free bit map.
-    void UnionBulkFreeBitMapToThreadLocalFreeBitMap();
+    bool MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out);
+    // Merge the bulk free list to the free list. Used in a bulk free.
+    void MergeBulkFreeListToFreeList();
+    // Merge the bulk free list to the thread local free list. In a bulk free, as a two-step
+    // process, GC will first record all the slots to free in a run in the bulk free list where it
+    // can write without a lock, and later acquire a lock once per run to merge the bulk free list
+    // to the thread-local free list.
+    void MergeBulkFreeListToThreadLocalFreeList();
     // Allocates a slot in a run.
-    void* AllocSlot();
+    ALWAYS_INLINE void* AllocSlot();
     // Frees a slot in a run. This is used in a non-bulk free.
     void FreeSlot(void* ptr);
-    // Marks the slots to free in the bulk free bit map. Returns the bracket size.
-    size_t MarkBulkFreeBitMap(void* ptr);
-    // Marks the slots to free in the thread-local free bit map.
-    void MarkThreadLocalFreeBitMap(void* ptr);
-    // Last word mask, all of the bits in the last word which aren't valid slots are set to
-    // optimize allocation path.
-    static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
+    // Add the given slot to the bulk free list. Returns the bracket size.
+    size_t AddToBulkFreeList(void* ptr);
+    // Add the given slot to the thread-local free list.
+    void AddToThreadLocalFreeList(void* ptr);
     // Returns true if all the slots in the run are not in use.
-    bool IsAllFree();
+    bool IsAllFree() const {
+      return free_list_.Size() == numOfSlots[size_bracket_idx_];
+    }
     // Returns the number of free slots.
-    size_t NumberOfFreeSlots();
+    size_t NumberOfFreeSlots() {
+      return free_list_.Size();
+    }
     // Returns true if all the slots in the run are in use.
     ALWAYS_INLINE bool IsFull();
-    // Returns true if the bulk free bit map is clean.
-    bool IsBulkFreeBitmapClean();
-    // Returns true if the thread local free bit map is clean.
-    bool IsThreadLocalFreeBitmapClean();
-    // Set the alloc_bit_map_ bits for slots that are past the end of the run.
-    void SetAllocBitMapBitsForInvalidSlots();
+    // Returns true if the bulk free list is empty.
+    bool IsBulkFreeListEmpty() const {
+      return bulk_free_list_.Size() == 0;
+    }
+    // Returns true if the thread local free list is empty.
+    bool IsThreadLocalFreeListEmpty() const {
+      return thread_local_free_list_.Size() == 0;
+    }
     // Zero the run's data.
     void ZeroData();
-    // Zero the run's header.
-    void ZeroHeader();
-    // Fill the alloc bitmap with 1s.
-    void FillAllocBitMap();
+    // Zero the run's header and the slot headers.
+    void ZeroHeaderAndSlotHeaders();
     // Iterate over all the slots and apply the given function.
     void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
     // Dump the run metadata for debugging.
@@ -258,11 +454,24 @@
         REQUIRES(Locks::thread_list_lock_);
 
    private:
-    // The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the bracket
+    // The common part of AddToBulkFreeList() and AddToThreadLocalFreeList(). Returns the bracket
     // size.
-    size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
-    // Turns the bit map into a string for debugging.
-    static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
+    size_t AddToFreeListShared(void* ptr, SlotFreeList<true>* free_list, const char* caller_name);
+    // Turns a FreeList into a string for debugging.
+    template<bool kUseTail>
+    std::string FreeListToStr(SlotFreeList<kUseTail>* free_list);
+    // Check that a given pointer is a valid slot address and return it as a Slot*.
+    Slot* ToSlot(void* ptr) {
+      const uint8_t idx = size_bracket_idx_;
+      const size_t bracket_size = bracketSizes[idx];
+      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
+          - reinterpret_cast<uint8_t*>(FirstSlot());
+      DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
+      size_t slot_idx = offset_from_slot_base / bracket_size;
+      DCHECK_LT(slot_idx, numOfSlots[idx]);
+      return reinterpret_cast<Slot*>(ptr);
+    }
+    size_t SlotIndex(Slot* slot);
 
     // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
   };
@@ -283,10 +492,6 @@
   static size_t numOfSlots[kNumOfSizeBrackets];
   // The header sizes in bytes of the runs for each size bracket.
   static size_t headerSizes[kNumOfSizeBrackets];
-  // The byte offsets of the bulk free bit maps of the runs for each size bracket.
-  static size_t bulkFreeBitMapOffsets[kNumOfSizeBrackets];
-  // The byte offsets of the thread-local free bit maps of the runs for each size bracket.
-  static size_t threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
 
   // Initialize the run specs (the above arrays).
   static void Initialize();
@@ -493,7 +698,7 @@
   // The reader-writer lock to allow one bulk free at a time while
   // allowing multiple individual frees at the same time. Also, this
   // is used to avoid race conditions between BulkFree() and
-  // RevokeThreadLocalRuns() on the bulk free bitmaps.
+  // RevokeThreadLocalRuns() on the bulk free list.
   ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   // The page release mode.
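Note: the rosalloc.h hunks above replace the per-run allocation bitmaps with intrusive slot free lists, turning NumberOfFreeSlots() and the emptiness checks into O(1) counter reads instead of bitmap scans. A minimal sketch of the idea follows; the names and layout are illustrative only, not ART's actual SlotFreeList<> API.

    #include <cstddef>

    // A freed slot is dead memory, so the list link can live inside the
    // slot itself; no side-table bitmap is needed.
    struct Slot {
      Slot* next;
    };

    class SlotFreeList {
     public:
      Slot* Remove() {
        Slot* slot = head_;
        if (slot != nullptr) {
          head_ = slot->next;
          --size_;
        }
        return slot;
      }
      void Add(Slot* slot) {
        slot->next = head_;
        head_ = slot;
        ++size_;
      }
      // A counter read, so IsBulkFreeListEmpty()-style checks are O(1).
      size_t Size() const { return size_; }
     private:
      Slot* head_ = nullptr;
      size_t size_ = 0;
    };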
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 468179c..0a7a69f 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -620,7 +620,10 @@
     gc_mark_stack_->PushBack(to_ref);
   } else {
     CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
-             static_cast<uint32_t>(kMarkStackModeGcExclusive));
+             static_cast<uint32_t>(kMarkStackModeGcExclusive))
+        << "ref=" << to_ref
+        << " self->gc_marking=" << self->GetIsGcMarking()
+        << " cc->is_marking=" << is_marking_;
     CHECK(self == thread_running_gc_)
         << "Only GC-running thread should access the mark stack "
         << "in the GC exclusive mark stack mode";
diff --git a/runtime/globals.h b/runtime/globals.h
index d70f3ab..987a94e 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -58,12 +58,6 @@
 static constexpr bool kIsTargetBuild = false;
 #endif
 
-#if defined(ART_USE_OPTIMIZING_COMPILER)
-static constexpr bool kUseOptimizingCompiler = true;
-#else
-static constexpr bool kUseOptimizingCompiler = false;
-#endif
-
 // Garbage collector constants.
 static constexpr bool kMovingCollector = true;
 static constexpr bool kMarkCompactSupport = false && kMovingCollector;
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 4f4a97f..e9c16c1 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -115,30 +115,8 @@
   }
 }
 
-class WaitForCompilationToFinishTask FINAL : public Task {
- public:
-  WaitForCompilationToFinishTask() : barrier_(0) {}
-
-  void Wait(Thread* self) {
-    barrier_.Increment(self, 1);
-  }
-
-  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE {}
-
-  void Finalize() OVERRIDE {
-    // Do this in Finalize since Finalize is called after Run by the thread pool.
-    barrier_.Pass(Thread::Current());
-  }
-
- private:
-  Barrier barrier_;
-  DISALLOW_COPY_AND_ASSIGN(WaitForCompilationToFinishTask);
-};
-
 void JitInstrumentationCache::WaitForCompilationToFinish(Thread* self) {
-  std::unique_ptr<WaitForCompilationToFinishTask> task(new WaitForCompilationToFinishTask);
-  thread_pool_->AddTask(self, task.get());
-  task->Wait(self);
+  thread_pool_->Wait(self, false, false);
 }
 
 }  // namespace jit
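Note: the hunk above drops the sentinel-task idiom in favor of the thread pool's own Wait(). As a hedged sketch of what the deleted code was doing, with std:: primitives standing in for ART's Barrier and Task types: the task ran as a no-op and opened a latch in Finalize(), so the caller returned only after the pool had drained everything queued ahead of it.

    #include <condition_variable>
    #include <mutex>

    // Illustrative latch; ART's Barrier played this role in the deleted task.
    class Latch {
     public:
      void Open() {
        std::lock_guard<std::mutex> lock(mu_);
        open_ = true;
        cv_.notify_all();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return open_; });
      }
     private:
      std::mutex mu_;
      std::condition_variable cv_;
      bool open_ = false;
    };

    // The deleted WaitForCompilationToFinishTask amounted to: enqueue a
    // no-op task, have its Finalize() call Open(), and block the caller on
    // Wait(). Calling the pool's Wait() directly avoids the allocation and
    // the extra queue round-trip.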
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index e8633de..e215994 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -71,18 +71,14 @@
 
 int32_t Throwable::GetStackDepth() {
   Object* stack_state = GetStackState();
-  if (stack_state == nullptr) {
+  if (stack_state == nullptr || !stack_state->IsObjectArray()) {
     return -1;
   }
-  if (!stack_state->IsIntArray() && !stack_state->IsLongArray()) {
-    return -1;
-  }
-  mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray());
-  int32_t array_len = method_trace->GetLength();
-  // The format is [method pointers][pcs] so the depth is half the length (see method
-  // BuildInternalStackTraceVisitor::Init).
-  CHECK_EQ(array_len % 2, 0);
-  return array_len / 2;
+  mirror::ObjectArray<mirror::Object>* const trace = stack_state->AsObjectArray<mirror::Object>();
+  const int32_t array_len = trace->GetLength();
+  DCHECK_GT(array_len, 0);
+  // See method BuildInternalStackTraceVisitor::Init for the format.
+  return array_len - 1;
 }
 
 std::string Throwable::Dump() {
@@ -95,18 +91,22 @@
   result += "\n";
   Object* stack_state = GetStackState();
   // check stack state isn't missing or corrupt
-  if (stack_state != nullptr &&
-      (stack_state->IsIntArray() || stack_state->IsLongArray())) {
+  if (stack_state != nullptr && stack_state->IsObjectArray()) {
+    mirror::ObjectArray<mirror::Object>* object_array =
+        stack_state->AsObjectArray<mirror::Object>();
     // Decode the internal stack trace into the depth and method trace
-    // Format is [method pointers][pcs]
-    auto* method_trace = down_cast<mirror::PointerArray*>(stack_state->AsArray());
-    auto array_len = method_trace->GetLength();
+    // See method BuildInternalStackTraceVisitor::Init for the format.
+    DCHECK_GT(object_array->GetLength(), 0);
+    mirror::Object* methods_and_dex_pcs = object_array->Get(0);
+    DCHECK(methods_and_dex_pcs->IsIntArray() || methods_and_dex_pcs->IsLongArray());
+    mirror::PointerArray* method_trace = down_cast<mirror::PointerArray*>(methods_and_dex_pcs);
+    const int32_t array_len = method_trace->GetLength();
     CHECK_EQ(array_len % 2, 0);
     const auto depth = array_len / 2;
     if (depth == 0) {
       result += "(Throwable with empty stack trace)";
     } else {
-      auto ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+      const size_t ptr_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
       for (int32_t i = 0; i < depth; ++i) {
         ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, ptr_size);
         uintptr_t dex_pc = method_trace->GetElementPtrSize<uintptr_t>(i + depth, ptr_size);
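Note: the new stack-state layout wraps the interleaved [method pointers][dex pcs] array in an object array whose remaining slots hold declaring classes, so the depth is the outer length minus one. A hedged sketch with plain containers standing in for the mirror:: types:

    #include <cstdint>
    #include <vector>

    // Outer array: element 0 is the methods-and-pcs array; elements 1..N
    // are declaring classes (opaque pointers here).
    struct InternalTrace {
      std::vector<uintptr_t> methods_and_pcs;  // [methods][dex pcs] halves.
      std::vector<void*> declaring_classes;    // One per frame; keeps classes live.
    };

    int32_t GetStackDepth(const InternalTrace& t) {
      // Outer length is 1 + number of frames, hence "length - 1" above.
      return static_cast<int32_t>(t.declaring_classes.size() + 1) - 1;
    }

    uintptr_t MethodAt(const InternalTrace& t, int32_t i) {
      return t.methods_and_pcs[i];  // First half: method pointers.
    }

    uintptr_t DexPcAt(const InternalTrace& t, int32_t i) {
      const int32_t depth = static_cast<int32_t>(t.methods_and_pcs.size()) / 2;
      return t.methods_and_pcs[depth + i];  // Second half: dex pcs.
    }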
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index daae401..85ac4aa 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -62,8 +62,10 @@
     if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
       ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
-      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
-          offset, old_ref, ref);
+      if (ref != old_ref) {
+        obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
+            offset, old_ref, ref);
+      }
     }
     AssertToSpaceInvariant(obj, offset, ref);
     return ref;
@@ -90,17 +92,17 @@
     // To be implemented.
     return ref;
   } else if (with_read_barrier && kUseTableLookupReadBarrier) {
-    if (kMaybeDuringStartup && IsDuringStartup()) {
-      // During startup, the heap may not be initialized yet. Just
-      // return the given ref.
-      return ref;
-    }
-    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+    Thread* self = Thread::Current();
+    if (self != nullptr &&
+        self->GetIsGcMarking() &&
+        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
       MirrorType* old_ref = ref;
       ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
-      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
-      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+      if (ref != old_ref) {
+        Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
+        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
+      }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
@@ -127,19 +129,19 @@
     // To be implemented.
     return ref;
   } else if (with_read_barrier && kUseTableLookupReadBarrier) {
-    if (kMaybeDuringStartup && IsDuringStartup()) {
-      // During startup, the heap may not be initialized yet. Just
-      // return the given ref.
-      return ref;
-    }
-    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
+    Thread* self = Thread::Current();
+    if (self != nullptr &&
+        self->GetIsGcMarking() &&
+        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
       auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
       ref = reinterpret_cast<MirrorType*>(Mark(ref));
       auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
       // Update the field atomically. This may fail if mutator updates before us, but it's ok.
-      auto* atomic_root =
-          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
-      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
+        auto* atomic_root =
+            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
+        atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
+      }
     }
     AssertToSpaceInvariant(gc_root_source, ref);
     return ref;
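Note: the read-barrier hunks above share a pattern: the CAS is attempted only when Mark() actually returned a different reference, and the table-lookup path now keys off the thread's GC-marking flag instead of the startup heuristic. A hedged sketch of the CAS-skipping part, with std::atomic in place of ART's field accessors:

    #include <atomic>

    struct Obj {};

    // Stand-in for the collector's Mark(); returns the to-space reference,
    // which may be the input itself.
    Obj* Mark(Obj* ref) { return ref; }

    Obj* ReadBarrierOnRoot(std::atomic<Obj*>* root) {
      Obj* old_ref = root->load(std::memory_order_relaxed);
      Obj* ref = Mark(old_ref);
      if (ref != old_ref) {
        // May fail if a mutator updated the root first; that is fine, since
        // the winner installed an equally valid to-space reference.
        root->compare_exchange_strong(old_ref, ref);
      }
      return ref;
    }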
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 8cba1a9..1f447d0 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1790,6 +1790,9 @@
 }
 
 LinearAlloc* Runtime::CreateLinearAlloc() {
+  // For 64 bit compilers, the allocation needs to be in the low 4GB when we are cross compiling
+  // for a 32 bit target. In that case, the dex cache arrays hold 32 bit pointers, which cannot
+  // hold 64 bit ArtMethod pointers.
   return (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA))
       ? new LinearAlloc(low_4gb_arena_pool_.get())
       : new LinearAlloc(arena_pool_.get());
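Note: as a hedged illustration of the comment above (hypothetical helper, not ART code), an address only fits in a 32 bit dex-cache slot if it lies below 4GB, which is why the AOT-compiler path picks the low-4GB arena pool.

    #include <cstdint>

    // Hypothetical check: true if the address is representable as a 32 bit
    // pointer, i.e. lies in the low 4GB.
    bool FitsInLow4GB(const void* p) {
      return reinterpret_cast<uintptr_t>(p) <= UINT32_MAX;
    }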
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 7f72f8a..1d21a64 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -110,7 +110,7 @@
 }
 
 InlineInfo StackVisitor::GetCurrentInlineInfo() const {
-  ArtMethod* outer_method = *GetCurrentQuickFrame();
+  ArtMethod* outer_method = GetOuterMethod();
   uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
   CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
   StackMapEncoding encoding = code_info.ExtractEncoding();
@@ -194,11 +194,12 @@
 }
 
 bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
+  DCHECK_EQ(m, GetMethod());
   // Process register map (which native and runtime methods don't have)
   if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
     return false;
   }
-  if (m->IsOptimized(sizeof(void*))) {
+  if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
     return true;  // TODO: Implement.
   }
   const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
@@ -251,7 +252,7 @@
     if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
       return true;
     }
-    if (m->IsOptimized(sizeof(void*))) {
+    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
       return GetVRegFromOptimizedCode(m, vreg, kind, val);
     } else {
       return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -288,15 +289,15 @@
 
 bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                             uint32_t* val) const {
+  ArtMethod* outer_method = GetOuterMethod();
+  const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
+  DCHECK(code_pointer != nullptr);
   DCHECK_EQ(m, GetMethod());
   const DexFile::CodeItem* code_item = m->GetCodeItem();
   DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                     // its instructions?
   uint16_t number_of_dex_registers = code_item->registers_size_;
   DCHECK_LT(vreg, code_item->registers_size_);
-  ArtMethod* outer_method = *GetCurrentQuickFrame();
-  const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
   CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
   StackMapEncoding encoding = code_info.ExtractEncoding();
 
@@ -405,7 +406,7 @@
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
     DCHECK(m == GetMethod());
-    if (m->IsOptimized(sizeof(void*))) {
+    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
       return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
     } else {
       return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -481,7 +482,7 @@
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
     DCHECK(m == GetMethod());
-    if (m->IsOptimized(sizeof(void*))) {
+    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
       return false;
     } else {
       return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -590,7 +591,7 @@
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
     DCHECK(m == GetMethod());
-    if (m->IsOptimized(sizeof(void*))) {
+    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
       return false;
     } else {
       return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -724,14 +725,14 @@
 uintptr_t StackVisitor::GetReturnPc() const {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   DCHECK(sp != nullptr);
-  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
+  uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
   return *reinterpret_cast<uintptr_t*>(pc_addr);
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   CHECK(sp != nullptr);
-  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
+  uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
   *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
 }
 
diff --git a/runtime/stack.h b/runtime/stack.h
index 292c745..31acf0e 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -473,6 +473,10 @@
 
   ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
 
+  ArtMethod* GetOuterMethod() const {
+    return *GetCurrentQuickFrame();
+  }
+
   bool IsShadowFrame() const {
     return cur_shadow_frame_ != nullptr;
   }
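Note: the stack.cc and stack.h hunks above consistently route frame-layout queries through GetOuterMethod(). With inlining, GetMethod() may report an inlined callee, but the quick frame's first slot always holds the compiled outer method whose code, stack maps, and return-pc offset describe the physical frame. A hedged sketch of the distinction with stub types:

    struct ArtMethodStub {
      bool is_optimized;
    };

    // A quick frame begins with a pointer to the compiled (outer) method;
    // this mirrors "*GetCurrentQuickFrame()" in the real code.
    struct QuickFrame {
      ArtMethodStub* method;
    };

    ArtMethodStub* GetOuterMethod(const QuickFrame* frame) {
      return frame->method;
    }

    bool FrameHasStackMaps(const QuickFrame* frame) {
      // Frame-layout properties come from the outer method, even when an
      // inlined callee is the "current" method of the visitor.
      return GetOuterMethod(frame)->is_optimized;
    }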
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 8bf241b..f5d20bd 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -118,11 +118,8 @@
   }
 }
 
-inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
-  AssertThreadSuspensionIsAllowable();
+inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
   DCHECK_NE(new_state, kRunnable);
-  DCHECK_EQ(this, Thread::Current());
-  // Change to non-runnable state, thereby appearing suspended to the system.
   DCHECK_EQ(GetState(), kRunnable);
   union StateAndFlags old_state_and_flags;
   union StateAndFlags new_state_and_flags;
@@ -145,12 +142,9 @@
       break;
     }
   }
+}
 
-  // Change to non-runnable state, thereby appearing suspended to the system.
-  // Mark the release of the share of the mutator_lock_.
-  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
-
-  // Once suspended - check the active suspend barrier flag
+inline void Thread::PassActiveSuspendBarriers() {
   while (true) {
     uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
     if (LIKELY((current_flags & (kCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
@@ -159,11 +153,22 @@
       PassActiveSuspendBarriers(this);
     } else {
       // Impossible
-      LOG(FATAL) << "Fatal, thread transited into suspended without running the checkpoint";
+      LOG(FATAL) << "Fatal, thread transitioned into suspended without running the checkpoint";
     }
   }
 }
 
+inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
+  AssertThreadSuspensionIsAllowable();
+  DCHECK_EQ(this, Thread::Current());
+  // Change to non-runnable state, thereby appearing suspended to the system.
+  TransitionToSuspendedAndRunCheckpoints(new_state);
+  // Mark the release of the share of the mutator_lock_.
+  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
+  // Once suspended - check the active suspend barrier flag
+  PassActiveSuspendBarriers();
+}
+
 inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
   union StateAndFlags old_state_and_flags;
   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
@@ -191,7 +196,9 @@
       PassActiveSuspendBarriers(this);
     } else if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
       // Impossible
-      LOG(FATAL) << "Fatal, wrong checkpoint flag";
+      LOG(FATAL) << "Transitioning to runnable with checkpoint flag, "
+                 << " flags=" << old_state_and_flags.as_struct.flags
+                 << " state=" << old_state_and_flags.as_struct.state;
     } else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
       // Wait while our suspend count is non-zero.
       MutexLock mu(this, *Locks::thread_suspend_count_lock_);
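Note: the refactoring above splits TransitionFromRunnableToSuspended() into TransitionToSuspendedAndRunCheckpoints() and PassActiveSuspendBarriers() so that SetStateUnsafe() can reuse them. The core of the first helper is a CAS loop over the packed state-and-flags word; a hedged sketch with an illustrative encoding, not ART's exact layout:

    #include <atomic>
    #include <cstdint>

    // Illustrative packing: low 16 bits state, higher bits flags.
    constexpr uint32_t kStateMask = 0xffffu;
    constexpr uint32_t kCheckpointRequest = 1u << 16;

    void RunCheckpointFunction() {}  // Stand-in for the real checkpoint runner.

    void TransitionToSuspended(std::atomic<uint32_t>& state_and_flags,
                               uint32_t new_state) {
      while (true) {
        uint32_t old_val = state_and_flags.load(std::memory_order_relaxed);
        if ((old_val & kCheckpointRequest) != 0) {
          // Run checkpoints while still runnable, then retry the transition.
          RunCheckpointFunction();
          continue;
        }
        const uint32_t new_val = (old_val & ~kStateMask) | new_state;
        // CAS so a flag set by another thread between the load and the
        // store is never silently dropped.
        if (state_and_flags.compare_exchange_weak(old_val, new_val)) {
          return;
        }
      }
    }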
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 82e6fb0..65f71ef 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1966,15 +1966,32 @@
         pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
 
   bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
-    // Allocate method trace with format [method pointers][pcs].
-    auto* cl = Runtime::Current()->GetClassLinker();
-    trace_ = cl->AllocPointerArray(self_, depth * 2);
-    const char* last_no_suspend_cause =
-        self_->StartAssertNoThreadSuspension("Building internal stack trace");
-    if (trace_ == nullptr) {
+    // Allocate the method trace as an object array where the first element is a pointer array
+    // that contains the ArtMethod pointers and dex PCs. The rest of the elements are the
+    // declaring classes of the ArtMethod pointers.
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    StackHandleScope<1> hs(self_);
+    mirror::Class* array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass);
+    // The first element is the methods and dex pc array; the other elements are the declaring
+    // classes of the methods, which keep classes in the stack trace from being unloaded.
+    Handle<mirror::ObjectArray<mirror::Object>> trace(
+        hs.NewHandle(
+            mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1)));
+    if (trace.Get() == nullptr) {
+      // Acquire uninterruptible_ in all paths.
+      self_->StartAssertNoThreadSuspension("Building internal stack trace");
       self_->AssertPendingOOMException();
       return false;
     }
+    mirror::PointerArray* methods_and_pcs = class_linker->AllocPointerArray(self_, depth * 2);
+    const char* last_no_suspend_cause =
+        self_->StartAssertNoThreadSuspension("Building internal stack trace");
+    if (methods_and_pcs == nullptr) {
+      self_->AssertPendingOOMException();
+      return false;
+    }
+    trace->Set(0, methods_and_pcs);
+    trace_ = trace.Get();
     // If we are called from native, use non-transactional mode.
     CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
     return true;
@@ -1996,16 +2013,24 @@
     if (m->IsRuntimeMethod()) {
       return true;  // Ignore runtime frames (in particular callee save).
     }
-    trace_->SetElementPtrSize<kTransactionActive>(
-        count_, m, pointer_size_);
-    trace_->SetElementPtrSize<kTransactionActive>(
-        trace_->GetLength() / 2 + count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
-            pointer_size_);
+    mirror::PointerArray* trace_methods_and_pcs = GetTraceMethodsAndPCs();
+    trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, m, pointer_size_);
+    trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(
+        trace_methods_and_pcs->GetLength() / 2 + count_,
+        m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
+        pointer_size_);
+    // Save the declaring class of the method to ensure that the declaring classes of the methods
+    // do not get unloaded while the stack trace is live.
+    trace_->Set(count_ + 1, m->GetDeclaringClass());
     ++count_;
     return true;
   }
 
-  mirror::PointerArray* GetInternalStackTrace() const {
+  mirror::PointerArray* GetTraceMethodsAndPCs() const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return down_cast<mirror::PointerArray*>(trace_->Get(0));
+  }
+
+  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
     return trace_;
   }
 
@@ -2015,8 +2040,11 @@
   int32_t skip_depth_;
   // Current position down stack trace.
   uint32_t count_;
-  // An array of the methods on the stack, the last entries are the dex PCs.
-  mirror::PointerArray* trace_;
+  // An object array where the first element is a pointer array that contains the ArtMethod
+  // pointers on the stack and the dex PCs. The rest of the elements are the declaring classes of
+  // those ArtMethods: trace_[i+1] contains the declaring class of the ArtMethod of the i'th
+  // frame.
+  mirror::ObjectArray<mirror::Object>* trace_;
   // For cross compilation.
   const size_t pointer_size_;
 
@@ -2039,11 +2067,12 @@
     return nullptr;  // Allocation failed.
   }
   build_trace_visitor.WalkStack();
-  mirror::PointerArray* trace = build_trace_visitor.GetInternalStackTrace();
+  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
   if (kIsDebugBuild) {
-    // Second half is dex PCs.
-    for (uint32_t i = 0; i < static_cast<uint32_t>(trace->GetLength() / 2); ++i) {
-      auto* method = trace->GetElementPtrSize<ArtMethod*>(
+    mirror::PointerArray* trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
+    // Second half of trace_methods is dex PCs.
+    for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
+      auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
           i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
       CHECK(method != nullptr);
     }
@@ -2062,12 +2091,16 @@
 }
 
 jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
-    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
+    const ScopedObjectAccessAlreadyRunnable& soa,
+    jobject internal,
+    jobjectArray output_array,
     int* stack_depth) {
-  // Decode the internal stack trace into the depth, method trace and PC trace
-  int32_t depth = soa.Decode<mirror::PointerArray*>(internal)->GetLength() / 2;
+  // Decode the internal stack trace into the depth, method trace and PC trace.
+  // Subtract one for the methods and PC trace.
+  int32_t depth = soa.Decode<mirror::Array*>(internal)->GetLength() - 1;
+  DCHECK_GE(depth, 0);
 
-  auto* cl = Runtime::Current()->GetClassLinker();
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
 
   jobjectArray result;
 
@@ -2081,7 +2114,7 @@
   } else {
     // Create java_trace array and place in local reference table
     mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
-        cl->AllocStackTraceElementArray(soa.Self(), depth);
+        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
     if (java_traces == nullptr) {
       return nullptr;
     }
@@ -2093,7 +2126,12 @@
   }
 
   for (int32_t i = 0; i < depth; ++i) {
-    auto* method_trace = soa.Decode<mirror::PointerArray*>(internal);
+    mirror::ObjectArray<mirror::Object>* decoded_traces =
+        soa.Decode<mirror::Object*>(internal)->AsObjectArray<mirror::Object>();
+    // Methods and dex PC trace is element 0.
+    DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
+    mirror::PointerArray* const method_trace =
+        down_cast<mirror::PointerArray*>(decoded_traces->Get(0));
     // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
     ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
     uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
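Note: the visitor above writes a frame's method into slot count_, its dex pc into slot length/2 + count_, and its declaring class into trace_[count_ + 1]. A hedged sketch of that fill pattern with a plain vector in place of the pointer array:

    #include <cstdint>
    #include <vector>

    // methods_and_pcs has 2 * depth slots: methods in the first half, dex
    // pcs in the second, so slot (depth + count) pairs with slot count.
    void PushFrame(std::vector<uintptr_t>& methods_and_pcs,
                   uint32_t count,
                   uintptr_t method,
                   uintptr_t dex_pc) {
      const size_t depth = methods_and_pcs.size() / 2;
      methods_and_pcs[count] = method;
      methods_and_pcs[depth + count] = dex_pc;
    }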
diff --git a/runtime/thread.h b/runtime/thread.h
index d21644d..d262c62 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -247,17 +247,15 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
-  ThreadState TransitionFromSuspendedToRunnable()
+  ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
       REQUIRES(!Locks::thread_suspend_count_lock_)
-      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
-      ALWAYS_INLINE;
+      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
 
   // Transition from runnable into a state where mutator privileges are denied. Releases share of
   // mutator lock.
-  void TransitionFromRunnableToSuspended(ThreadState new_state)
+  ALWAYS_INLINE void TransitionFromRunnableToSuspended(ThreadState new_state)
       REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
-      UNLOCK_FUNCTION(Locks::mutator_lock_)
-      ALWAYS_INLINE;
+      UNLOCK_FUNCTION(Locks::mutator_lock_);
 
   // Once called thread suspension will cause an assertion failure.
   const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) {
@@ -1017,11 +1015,15 @@
   // Dbg::Disconnected.
   ThreadState SetStateUnsafe(ThreadState new_state) {
     ThreadState old_state = GetState();
-    tls32_.state_and_flags.as_struct.state = new_state;
-    // if transit to a suspended state, check the pass barrier request.
-    if (UNLIKELY((new_state != kRunnable) &&
-                 (tls32_.state_and_flags.as_struct.flags & kActiveSuspendBarrier))) {
-      PassActiveSuspendBarriers(this);
+    if (old_state == kRunnable && new_state != kRunnable) {
+      // Need to run pending checkpoints and suspend barriers. Run checkpoints in runnable state in
+      // case they need to use a ScopedObjectAccess. If we are holding the mutator lock and a SOA
+      // attempts to TransitionFromSuspendedToRunnable, it results in a deadlock.
+      TransitionToSuspendedAndRunCheckpoints(new_state);
+      // Since we transitioned to a suspended state, check the pass barrier requests.
+      PassActiveSuspendBarriers();
+    } else {
+      tls32_.state_and_flags.as_struct.state = new_state;
     }
     return old_state;
   }
@@ -1064,6 +1066,12 @@
   void SetUpAlternateSignalStack();
   void TearDownAlternateSignalStack();
 
+  ALWAYS_INLINE void TransitionToSuspendedAndRunCheckpoints(ThreadState new_state)
+      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+
+  ALWAYS_INLINE void PassActiveSuspendBarriers()
+      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_);
+
   // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
   // change from being Suspended to Runnable without a suspend request occurring.
   union PACKED(4) StateAndFlags {
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 55a77ac..285df18 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -49,7 +49,9 @@
       CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1);  // v8: this, v1: x
       CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1);  // v8: this, v3: y, v1: x
       CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
+      if (!m->IsOptimized(sizeof(void*))) {
+        CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
+      }
       CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1);  // v8: this, v3: y, v1: x
       // v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
       //   0024: move-object v3, v2
@@ -63,12 +65,18 @@
       // Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
       CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
       CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1);  // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
-      CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);  // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
-      // v5 is removed from the root set because there is a "merge" operation.
-      // See 0015: if-nez v2, 001f.
-      CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
+      if (!m->IsOptimized(sizeof(void*))) {
+        // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
+        CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
+        // v5 is removed from the root set because there is a "merge" operation.
+        // See 0015: if-nez v2, 001f.
+        CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
+      }
       CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
-      CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+
+      if (!m->IsOptimized(sizeof(void*))) {
+        CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      }
       CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
       CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
       // Note that it is OK for a compiler to not have a dex map at these two dex PCs because
diff --git a/test/024-illegal-access/expected.txt b/test/024-illegal-access/expected.txt
index 5f951f4..0ae4a77 100644
--- a/test/024-illegal-access/expected.txt
+++ b/test/024-illegal-access/expected.txt
@@ -1,2 +1,5 @@
 Got expected failure 1
 Got expected failure 2
+Got expected failure 3
+Got expected failure 4
+Got expected failure 5
diff --git a/test/024-illegal-access/src/Main.java b/test/024-illegal-access/src/Main.java
index bde73e9..84c7114 100644
--- a/test/024-illegal-access/src/Main.java
+++ b/test/024-illegal-access/src/Main.java
@@ -17,7 +17,7 @@
 public class Main {
     static public void main(String[] args) {
         try {
-            PublicAccess.main();
+            PublicAccess.accessStaticField();
             System.err.println("ERROR: call 1 not expected to succeed");
         } catch (VerifyError ve) {
             // dalvik
@@ -28,14 +28,41 @@
         }
 
         try {
-            CheckInstanceof.main(new Object());
+            PublicAccess.accessStaticMethod();
             System.err.println("ERROR: call 2 not expected to succeed");
-        } catch (VerifyError ve) {
-            // dalvik
-            System.out.println("Got expected failure 2");
         } catch (IllegalAccessError iae) {
             // reference
             System.out.println("Got expected failure 2");
         }
+
+        try {
+            PublicAccess.accessInstanceField();
+            System.err.println("ERROR: call 3 not expected to succeed");
+        } catch (VerifyError ve) {
+            // dalvik
+            System.out.println("Got expected failure 3");
+        } catch (IllegalAccessError iae) {
+            // reference
+            System.out.println("Got expected failure 3");
+        }
+
+        try {
+            PublicAccess.accessInstanceMethod();
+            System.err.println("ERROR: call 4 not expected to succeed");
+        } catch (IllegalAccessError iae) {
+            // reference
+            System.out.println("Got expected failure 4");
+        }
+
+        try {
+            CheckInstanceof.main(new Object());
+            System.err.println("ERROR: call 5 not expected to succeed");
+        } catch (VerifyError ve) {
+            // dalvik
+            System.out.println("Got expected failure 5");
+        } catch (IllegalAccessError iae) {
+            // reference
+            System.out.println("Got expected failure 5");
+        }
     }
 }
diff --git a/test/024-illegal-access/src/PublicAccess.java b/test/024-illegal-access/src/PublicAccess.java
index 4e72cd4..e3fef85 100644
--- a/test/024-illegal-access/src/PublicAccess.java
+++ b/test/024-illegal-access/src/PublicAccess.java
@@ -18,8 +18,20 @@
  * Some stuff for access checks.
  */
 public class PublicAccess {
-    public static void main() {
-        String shouldFail = SemiPrivate.mPrivvy;
+    public static void accessStaticField() {
+        String shouldFail = SemiPrivate.mStaticPrivvy;
+        System.out.println("Got " + shouldFail);
+    }
+    public static void accessStaticMethod() {
+        String shouldFail = SemiPrivate.privvyStaticMethod();
+        System.out.println("Got " + shouldFail);
+    }
+    public static void accessInstanceField() {
+        String shouldFail = new SemiPrivate().mInstancePrivvy;
+        System.out.println("Got " + shouldFail);
+    }
+    public static void accessInstanceMethod() {
+        String shouldFail = new SemiPrivate().privvyInstanceMethod();
         System.out.println("Got " + shouldFail);
     }
 }
diff --git a/test/024-illegal-access/src/SemiPrivate.java b/test/024-illegal-access/src/SemiPrivate.java
index 06b16c4..62e0d05 100644
--- a/test/024-illegal-access/src/SemiPrivate.java
+++ b/test/024-illegal-access/src/SemiPrivate.java
@@ -18,5 +18,15 @@
  * Version with package scope access.
  */
 public class SemiPrivate {
-    /* not private */ static String mPrivvy = "stuff";
+    /* not private */ static String mStaticPrivvy = "stuff";
+
+    /* not private */ static String privvyStaticMethod() {
+      return "stuff";
+    }
+
+    /* not private */ String mInstancePrivvy = "stuff";
+
+    /* not private */ String privvyInstanceMethod() {
+      return "stuff";
+    }
 }
diff --git a/test/024-illegal-access/src2/SemiPrivate.java b/test/024-illegal-access/src2/SemiPrivate.java
index 064265a..4f36a07 100644
--- a/test/024-illegal-access/src2/SemiPrivate.java
+++ b/test/024-illegal-access/src2/SemiPrivate.java
@@ -18,5 +18,15 @@
  * Version with private access.
  */
 public class SemiPrivate {
-    private static String mPrivvy = "stuff";
+    private static String mStaticPrivvy = "stuff";
+
+    private static String privvyStaticMethod() {
+      return "stuff";
+    }
+
+    private String mInstancePrivvy = "stuff";
+
+    private String privvyInstanceMethod() {
+      return "stuff";
+    }
 }
diff --git a/test/088-monitor-verification/expected.txt b/test/088-monitor-verification/expected.txt
index f252f6f..2cb8f2c 100644
--- a/test/088-monitor-verification/expected.txt
+++ b/test/088-monitor-verification/expected.txt
@@ -5,9 +5,3 @@
 notNested ok
 twoPath ok
 triplet ok
-OK
-TooDeep
-NotStructuredOverUnlock
-NotStructuredUnderUnlock
-UnbalancedJoin
-UnbalancedStraight
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index 53b72e9..fc5755b 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -220,6 +220,11 @@
 
     // Smali testing code.
     private static void runSmaliTests() {
+        if (!hasOatFile() || runtimeIsSoftFail() || isCallerInterpreted()) {
+            // Skip test, this seems to be a non-compiled code test configuration.
+            return;
+        }
+
         runTest("OK", new Object[] { new Object(), new Object() }, null);
         runTest("TooDeep", new Object[] { new Object() }, null);
         runTest("NotStructuredOverUnlock", new Object[] { new Object() },
@@ -231,7 +236,6 @@
     }
 
     private static void runTest(String className, Object[] parameters, Class<?> excType) {
-        System.out.println(className);
         try {
             Class<?> c = Class.forName(className);
 
@@ -275,4 +279,7 @@
     // Helpers for the smali code.
     public static native void assertCallerIsInterpreted();
     public static native void assertCallerIsManaged();
+    public static native boolean hasOatFile();
+    public static native boolean runtimeIsSoftFail();
+    public static native boolean isCallerInterpreted();
 }
diff --git a/test/116-nodex2oat/nodex2oat.cc b/test/116-nodex2oat/nodex2oat.cc
deleted file mode 100644
index 131af31..0000000
--- a/test/116-nodex2oat/nodex2oat.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "class_linker.h"
-#include "dex_file-inl.h"
-#include "mirror/class-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-
-namespace art {
-
-class NoDex2OatTest {
- public:
-  static bool hasOat(jclass cls) {
-    ScopedObjectAccess soa(Thread::Current());
-    mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
-    const DexFile& dex_file = klass->GetDexFile();
-    const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
-    return oat_dex_file != nullptr;
-  }
-};
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasOat(JNIEnv*, jclass cls) {
-  return NoDex2OatTest::hasOat(cls);
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass) {
-  return Runtime::Current()->IsDex2OatEnabled();
-}
-
-}  // namespace art
diff --git a/test/116-nodex2oat/src/Main.java b/test/116-nodex2oat/src/Main.java
index 086ffb9..229735f 100644
--- a/test/116-nodex2oat/src/Main.java
+++ b/test/116-nodex2oat/src/Main.java
@@ -18,16 +18,16 @@
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
     System.out.println(
-        "Has oat is " + hasOat() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
+        "Has oat is " + hasOatFile() + ", is dex2oat enabled is " + isDex2OatEnabled() + ".");
 
-    if (hasOat() && !isDex2OatEnabled()) {
+    if (hasOatFile() && !isDex2OatEnabled()) {
       throw new Error("Application with dex2oat disabled runs with an oat file");
-    } else if (!hasOat() && isDex2OatEnabled()) {
+    } else if (!hasOatFile() && isDex2OatEnabled()) {
       throw new Error("Application with dex2oat enabled runs without an oat file");
     }
   }
 
-  private native static boolean hasOat();
+  private native static boolean hasOatFile();
 
   private native static boolean isDex2OatEnabled();
 }
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
index 5cca309..425cf48 100644
--- a/test/117-nopatchoat/src/Main.java
+++ b/test/117-nopatchoat/src/Main.java
@@ -28,10 +28,10 @@
 
     System.out.println(
         "dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
-        ", has oat is " + hasOat() + ", has executable oat is " + (
+        ", has oat is " + hasOatFile() + ", has executable oat is " + (
         executable_correct ? "expected" : "not expected") + ".");
 
-    if (!hasOat() && isDex2OatEnabled()) {
+    if (!hasOatFile() && isDex2OatEnabled()) {
       throw new Error("Application with dex2oat enabled runs without an oat file");
     }
 
@@ -51,7 +51,7 @@
 
   private native static boolean isPic();
 
-  private native static boolean hasOat();
+  private native static boolean hasOatFile();
 
   private native static boolean hasExecutableOat();
 
diff --git a/test/118-noimage-dex2oat/noimage-dex2oat.cc b/test/118-noimage-dex2oat/noimage-dex2oat.cc
deleted file mode 100644
index aacf00f..0000000
--- a/test/118-noimage-dex2oat/noimage-dex2oat.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "class_linker.h"
-#include "dex_file-inl.h"
-#include "mirror/class-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-
-namespace art {
-
-class NoDex2OatTest {
- public:
-  static bool hasOat(jclass cls) {
-    ScopedObjectAccess soa(Thread::Current());
-    mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
-    const DexFile& dex_file = klass->GetDexFile();
-    const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
-    return oat_dex_file != nullptr;
-  }
-};
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass) {
-  return Runtime::Current()->GetHeap()->HasImageSpace();
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass) {
-  return Runtime::Current()->IsImageDex2OatEnabled();
-}
-
-}  // namespace art
diff --git a/test/141-class-unload/expected.txt b/test/141-class-unload/expected.txt
index ff65a70..53d7abe 100644
--- a/test/141-class-unload/expected.txt
+++ b/test/141-class-unload/expected.txt
@@ -16,3 +16,8 @@
 JNI_OnLoad called
 JNI_OnUnload called
 null
+1
+2
+JNI_OnLoad called
+class null false test
+JNI_OnUnload called
diff --git a/test/141-class-unload/src-ex/IntHolder.java b/test/141-class-unload/src-ex/IntHolder.java
index e4aa6b8..feff0d2 100644
--- a/test/141-class-unload/src-ex/IntHolder.java
+++ b/test/141-class-unload/src-ex/IntHolder.java
@@ -36,4 +36,8 @@
     }
 
     public static native void waitForCompilation();
+
+    public static Throwable generateStackTrace() {
+      return new Exception("test");
+    }
 }
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 105a2b9..3cc43ac 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -39,6 +39,8 @@
             testNoUnloadInstance(constructor);
             // Test JNI_OnLoad and JNI_OnUnload.
             testLoadAndUnloadLibrary(constructor);
+            // Test that stack traces keep the classes live.
+            testStackTrace(constructor);
             // Stress test to make sure we dont leak memory.
             stressTest(constructor);
         } catch (Exception e) {
@@ -75,6 +77,16 @@
       System.out.println(loader.get());
     }
 
+    private static void testStackTrace(Constructor constructor) throws Exception {
+        WeakReference<Class> klass = setUpUnloadClass(constructor);
+        Method stackTraceMethod = klass.get().getDeclaredMethod("generateStackTrace");
+        Throwable throwable = (Throwable) stackTraceMethod.invoke(klass.get());
+        stackTraceMethod = null;
+        Runtime.getRuntime().gc();
+        boolean isNull = klass.get() == null;
+        System.out.println("class null " + isNull + " " + throwable.getMessage());
+    }
+
     private static void testLoadAndUnloadLibrary(Constructor constructor) throws Exception {
         WeakReference<ClassLoader> loader = setUpLoadLibrary(constructor);
         // No strong references to the class loader, should get unloaded.
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index e3e0091..7e9a583 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -42,7 +42,9 @@
     } else if (m_name.compare("testIntervalHole") == 0) {
       found_method_ = true;
       uint32_t value = 0;
-      if (GetCurrentQuickFrame() != nullptr && m->IsOptimized(sizeof(void*))) {
+      if (GetCurrentQuickFrame() != nullptr &&
+          m->IsOptimized(sizeof(void*)) &&
+          !Runtime::Current()->IsDebuggable()) {
         CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
       } else {
         CHECK(GetVReg(m, 0, kIntVReg, &value));
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 6f04797..adb5ada 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -44,6 +44,76 @@
     super.superMethod();
   }
 
+  /// CHECK-START: void Main.callUnresolvedStaticFieldAccess() register (before)
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimByte
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimChar
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimInt
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimLong
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimFloat
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimDouble
+  /// CHECK:        UnresolvedStaticFieldSet field_type:PrimNot
+
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimByte
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimChar
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimInt
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimLong
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimFloat
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimDouble
+  /// CHECK:        UnresolvedStaticFieldGet field_type:PrimNot
+  static public void callUnresolvedStaticFieldAccess() {
+    Object o = new Object();
+    UnresolvedClass.staticByte = (byte)1;
+    UnresolvedClass.staticChar = '1';
+    UnresolvedClass.staticInt = 123456789;
+    UnresolvedClass.staticLong = 123456789123456789l;
+    UnresolvedClass.staticFloat = 123456789123456789f;
+    UnresolvedClass.staticDouble = 123456789123456789d;
+    UnresolvedClass.staticObject = o;
+
+    expectEquals((byte)1, UnresolvedClass.staticByte);
+    expectEquals('1', UnresolvedClass.staticChar);
+    expectEquals(123456789, UnresolvedClass.staticInt);
+    expectEquals(123456789123456789l, UnresolvedClass.staticLong);
+    expectEquals(123456789123456789f, UnresolvedClass.staticFloat);
+    expectEquals(123456789123456789d, UnresolvedClass.staticDouble);
+    expectEquals(o, UnresolvedClass.staticObject);
+  }
+
+  /// CHECK-START: void Main.callUnresolvedInstanceFieldAccess(UnresolvedClass) register (before)
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimByte
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimChar
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimInt
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimLong
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimFloat
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimDouble
+  /// CHECK:        UnresolvedInstanceFieldSet field_type:PrimNot
+
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimByte
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimChar
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimInt
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimLong
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimFloat
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimDouble
+  /// CHECK:        UnresolvedInstanceFieldGet field_type:PrimNot
+  static public void callUnresolvedInstanceFieldAccess(UnresolvedClass c) {
+    Object o = new Object();
+    c.instanceByte = (byte)1;
+    c.instanceChar = '1';
+    c.instanceInt = 123456789;
+    c.instanceLong = 123456789123456789l;
+    c.instanceFloat = 123456789123456789f;
+    c.instanceDouble = 123456789123456789d;
+    c.instanceObject = o;
+
+    expectEquals((byte)1, c.instanceByte);
+    expectEquals('1', c.instanceChar);
+    expectEquals(123456789, c.instanceInt);
+    expectEquals(123456789123456789l, c.instanceLong);
+    expectEquals(123456789123456789f, c.instanceFloat);
+    expectEquals(123456789123456789d, c.instanceDouble);
+    expectEquals(o, c.instanceObject);
+  }
+
   /// CHECK-START: void Main.main(java.lang.String[]) register (before)
   /// CHECK:        InvokeUnresolved invoke_type:direct
   static public void main(String[] args) {
@@ -52,5 +122,49 @@
     callInvokeUnresolvedVirtual(c);
     callInvokeUnresolvedInterface(c);
     callInvokeUnresolvedSuper(new Main());
+    callUnresolvedStaticFieldAccess();
+    callUnresolvedInstanceFieldAccess(c);
+  }
+
+  public static void expectEquals(byte expected, byte result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void expectEquals(char expected, char result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void expectEquals(float expected, float result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void expectEquals(double expected, double result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void expectEquals(Object expected, Object result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
   }
 }
diff --git a/test/529-checker-unresolved/src/Unresolved.java b/test/529-checker-unresolved/src/Unresolved.java
index 5bf92dd..03ceb68 100644
--- a/test/529-checker-unresolved/src/Unresolved.java
+++ b/test/529-checker-unresolved/src/Unresolved.java
@@ -40,6 +40,22 @@
   public void interfaceMethod() {
     System.out.println("UnresolvedClass.interfaceMethod()");
   }
+
+  public static byte staticByte;
+  public static char staticChar;
+  public static int staticInt;
+  public static long staticLong;
+  public static float staticFloat;
+  public static double staticDouble;
+  public static Object staticObject;
+
+  public byte instanceByte;
+  public char instanceChar;
+  public int instanceInt;
+  public long instanceLong;
+  public float instanceFloat;
+  public double instanceDouble;
+  public Object instanceObject;
 }
 
 final class UnresolvedFinalClass {
diff --git a/test/530-checker-loops/src/Main.java b/test/530-checker-loops/src/Main.java
index 1c5b5d6..58c92f1 100644
--- a/test/530-checker-loops/src/Main.java
+++ b/test/530-checker-loops/src/Main.java
@@ -22,7 +22,7 @@
   static int sResult;
 
   //
-  // Various sequence variables where bound checks can be removed from loop.
+  // Various sequence variables used in bounds checks.
   //
 
   /// CHECK-START: int Main.linear(int[]) BCE (before)
@@ -262,11 +262,11 @@
     return result;
   }
 
-  /// CHECK-START: int Main.linearForNE() BCE (before)
+  /// CHECK-START: int Main.linearForNEUp() BCE (before)
   /// CHECK-DAG: BoundsCheck
-  /// CHECK-START: int Main.linearForNE() BCE (after)
+  /// CHECK-START: int Main.linearForNEUp() BCE (after)
   /// CHECK-NOT: BoundsCheck
-  private static int linearForNE() {
+  private static int linearForNEUp() {
     int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
     int result = 0;
     for (int i = 0; i != 10; i++) {
@@ -275,21 +275,47 @@
     return result;
   }
 
-  /// CHECK-START: int Main.linearDoWhile() BCE (before)
+  /// CHECK-START: int Main.linearForNEDown() BCE (before)
   /// CHECK-DAG: BoundsCheck
-  /// CHECK-START: int Main.linearDoWhile() BCE (after)
+  /// CHECK-START: int Main.linearForNEDown() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  private static int linearForNEDown() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    for (int i = 9; i != -1; i--) {
+      result += x[i];
+    }
+    return result;
+  }
+
+  /// CHECK-START: int Main.linearDoWhileUp() BCE (before)
   /// CHECK-DAG: BoundsCheck
-  private static int linearDoWhile() {
+  /// CHECK-START: int Main.linearDoWhileUp() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  private static int linearDoWhileUp() {
     int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
     int result = 0;
     int i = 0;
-    // TODO: make this work
     do {
       result += x[i++];
     } while (i < 10);
     return result;
   }
 
+  /// CHECK-START: int Main.linearDoWhileDown() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-START: int Main.linearDoWhileDown() BCE (after)
+  /// CHECK-NOT: BoundsCheck
+  private static int linearDoWhileDown() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int result = 0;
+    int i = 9;
+    do {
+      result += x[i--];
+    } while (0 <= i);
+    return result;
+  }
+
   /// CHECK-START: int Main.linearShort() BCE (before)
   /// CHECK-DAG: BoundsCheck
   /// CHECK-START: int Main.linearShort() BCE (after)
@@ -471,23 +497,50 @@
     return result;
   }
 
-  //
-  // Cases that actually go out of bounds. These test cases
-  // ensure the exceptions are thrown at the right places.
-  //
-
+  /// CHECK-START: void Main.lowerOOB(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-START: void Main.lowerOOB(int[]) BCE (after)
+  /// CHECK-DAG: BoundsCheck
   private static void lowerOOB(int[] x) {
     for (int i = -1; i < x.length; i++) {
       sResult += x[i];
     }
   }
 
+  /// CHECK-START: void Main.upperOOB(int[]) BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-START: void Main.upperOOB(int[]) BCE (after)
+  /// CHECK-DAG: BoundsCheck
   private static void upperOOB(int[] x) {
     for (int i = 0; i <= x.length; i++) {
       sResult += x[i];
     }
   }
 
+  /// CHECK-START: void Main.doWhileUpOOB() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-START: void Main.doWhileUpOOB() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  private static void doWhileUpOOB() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int i = 0;
+    do {
+      sResult += x[i++];
+    } while (i <= x.length);
+  }
+
+  /// CHECK-START: void Main.doWhileDownOOB() BCE (before)
+  /// CHECK-DAG: BoundsCheck
+  /// CHECK-START: void Main.doWhileDownOOB() BCE (after)
+  /// CHECK-DAG: BoundsCheck
+  private static void doWhileDownOOB() {
+    int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    int i = x.length - 1;
+    do {
+      sResult += x[i--];
+    } while (-1 <= i);
+  }
+
   //
   // Verifier.
   //
@@ -550,8 +603,10 @@
     expectEquals(66, linearWithVeryLargeNegativeStride());
 
     // Special forms.
-    expectEquals(55, linearForNE());
-    expectEquals(55, linearDoWhile());
+    expectEquals(55, linearForNEUp());
+    expectEquals(55, linearForNEDown());
+    expectEquals(55, linearDoWhileUp());
+    expectEquals(55, linearDoWhileDown());
     expectEquals(55, linearShort());
 
     // Periodic adds (1, 3), one at the time.
@@ -618,6 +673,23 @@
     }
     expectEquals(1055, sResult);
 
+    // Do while up goes OOB.
+    sResult = 0;
+    try {
+      doWhileUpOOB();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1055, sResult);
+
+    // Do while down goes OOB.
+    sResult = 0;
+    try {
+      doWhileDownOOB();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      sResult += 1000;
+    }
+    expectEquals(1055, sResult);
   }
 
   private static void expectEquals(int expected, int result) {
diff --git a/test/534-checker-bce-deoptimization/expected.txt b/test/534-checker-bce-deoptimization/expected.txt
new file mode 100644
index 0000000..3823a29
--- /dev/null
+++ b/test/534-checker-bce-deoptimization/expected.txt
@@ -0,0 +1 @@
+finish
diff --git a/test/534-checker-bce-deoptimization/info.txt b/test/534-checker-bce-deoptimization/info.txt
new file mode 100644
index 0000000..9f097d0
--- /dev/null
+++ b/test/534-checker-bce-deoptimization/info.txt
@@ -0,0 +1,8 @@
+Checker test for testing the behavior of deoptimization generated by
+bounds check elimination.
+
+The runtime used to trip on that test because it used to deopt the
+whole stack, and the compiler was not preserving dex registers at
+call sites.
+
+We fixed the bug by doing single frame deoptimization.
diff --git a/test/534-checker-bce-deoptimization/src/Main.java b/test/534-checker-bce-deoptimization/src/Main.java
new file mode 100644
index 0000000..8cd20f6
--- /dev/null
+++ b/test/534-checker-bce-deoptimization/src/Main.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+    public static void main(String[] args) {
+        new Main().run();
+        System.out.println("finish");
+    }
+
+    public void run() {
+        double a[][] = new double[200][201];
+        double b[] = new double[200];
+        int n = 100;
+
+        foo1(a, n, b);
+    }
+
+    void foo1(double a[][], int n, double b[]) {
+        double t;
+        int i,k;
+
+        for (i = 0; i < n; i++) {
+            k = n - (i + 1);
+            b[k] /= a[k][k];
+            t = -b[k];
+            foo2(k + 1000, t, b);
+        }
+    }
+
+    void foo2(int n, double c, double b[]) {
+        try {
+            foo3(n, c, b);
+        } catch (Exception e) {
+        }
+    }
+
+    void foo3(int n, double c, double b[]) {
+        int i = 0;
+        for (i = 0; i < n; i++) {
+            b[i + 1] += c * b[i + 1];
+        }
+    }
+}
+
diff --git a/test/535-deopt-and-inlining/expected.txt b/test/535-deopt-and-inlining/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/535-deopt-and-inlining/expected.txt
diff --git a/test/535-deopt-and-inlining/info.txt b/test/535-deopt-and-inlining/info.txt
new file mode 100644
index 0000000..717612a
--- /dev/null
+++ b/test/535-deopt-and-inlining/info.txt
@@ -0,0 +1,2 @@
+Stress test for deoptimization and the JIT, to ensure the
+stack visitor uses the right ArtMethod when deopting.
diff --git a/test/535-deopt-and-inlining/src/Main.java b/test/535-deopt-and-inlining/src/Main.java
new file mode 100644
index 0000000..c231bf0
--- /dev/null
+++ b/test/535-deopt-and-inlining/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void run() {
+    // Loop enough to get JIT compilation.
+    for (int i = 0; i < 10000; ++i) {
+      doCall(new int[0]);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    run();
+  }
+
+  public static void doCall(int[] array) {
+    try {
+      deopt(array);
+    } catch (IndexOutOfBoundsException ioobe) {
+      // Expected
+    }
+  }
+
+  public static void deopt(int[] array) {
+    // Invoke `deopt` much more than `$inline$deopt` so that only `deopt` gets
+    // initially JITted.
+    if (call == 100) {
+      call = 0;
+      $inline$deopt(array);
+    } else {
+      call++;
+    }
+  }
+
+  public static void $inline$deopt(int[] array) {
+    array[0] = 1;
+    array[1] = 1;
+  }
+
+  static int call = 0;
+}
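
The 100:1 call-count skew above is the point of the test: `deopt` becomes hot
and is JIT-compiled during the first iterations while `$inline$deopt` stays
cold, so once the inliner folds it into the already-compiled `deopt`, the
out-of-bounds store deoptimizes out of an inlined frame, which is exactly where
a stack visitor can pick up the wrong ArtMethod. (By run-test naming
convention, the `$inline$` prefix asks the optimizing inliner to inline the
method; treating that as a hard guarantee is an assumption about this era of
the harness.)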
diff --git a/test/535-regression-const-val/expected.txt b/test/535-regression-const-val/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/535-regression-const-val/expected.txt
diff --git a/test/535-regression-const-val/info.txt b/test/535-regression-const-val/info.txt
new file mode 100644
index 0000000..ea3e67b
--- /dev/null
+++ b/test/535-regression-const-val/info.txt
@@ -0,0 +1,2 @@
+Test a regression where SsaChecker would fail when comparing the raw values of an
+IntConstant and a FloatConstant, due to a static_cast sign extension.
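
The pitfall in miniature, as a hypothetical Java reduction (the checked-in
regression test uses the smali below, since Java source cannot reuse one value
as both int and float): sign extending a 32-bit bit pattern whose most
significant bit is set yields a different 64-bit raw value than zero extending
it, so a raw-value comparison between two representations of the same constant
spuriously fails:

    public class RawBits {
        public static void main(String[] args) {
            int bits = Float.floatToIntBits(-0.25f);   // 0xBE800000, MSB set
            long signExtended = (long) bits;           // 0xFFFFFFFFBE800000
            long zeroExtended = bits & 0xFFFFFFFFL;    // 0x00000000BE800000
            System.out.println(signExtended == zeroExtended);  // false
        }
    }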
diff --git a/test/535-regression-const-val/smali/TestCase.smali b/test/535-regression-const-val/smali/TestCase.smali
new file mode 100644
index 0000000..f42f173
--- /dev/null
+++ b/test/535-regression-const-val/smali/TestCase.smali
@@ -0,0 +1,36 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method public static testCase(ZZ)I
+  .registers 5
+
+  # Create Phi [ 0.0f, -0.25f ].
+  # Binary representation of -0.25f has the most significant bit set.
+  if-eqz p0, :else
+  :then
+    const v0, 0x0
+    goto :merge
+  :else
+    const/high16 v0, 0xbe800000
+  :merge
+
+  # Now use the value as either float or int.
+  if-eqz p1, :return
+  float-to-int v0, v0
+  :return
+  return v0
+.end method
diff --git a/test/535-regression-const-val/src/Main.java b/test/535-regression-const-val/src/Main.java
new file mode 100644
index 0000000..858770f
--- /dev/null
+++ b/test/535-regression-const-val/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  // Workaround for b/18051191.
+  class InnerClass {}
+
+  public static void main(String[] args) {}
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index e43ea90..bffd0e0 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -19,6 +19,8 @@
 include art/build/Android.common_build.mk
 
 LIBARTTEST_COMMON_SRC_FILES := \
+  common/runtime_state.cc \
+  common/stack_inspect.cc \
   004-JniTest/jni_test.cc \
   004-SignalTest/signaltest.cc \
   004-ReferenceMap/stack_walk_refmap_jni.cc \
@@ -26,10 +28,7 @@
   004-UnsafeTest/unsafe_test.cc \
   044-proxy/native_proxy.cc \
   051-thread/thread_test.cc \
-  088-monitor-verification/stack_inspect.cc \
-  116-nodex2oat/nodex2oat.cc \
   117-nopatchoat/nopatchoat.cc \
-  118-noimage-dex2oat/noimage-dex2oat.cc \
   1337-gc-coverage/gc_coverage.cc \
   137-cfi/cfi.cc \
   139-register-natives/regnative.cc \
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 4397ea4..a103eac 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -418,19 +418,71 @@
 
 TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
 
-# Known broken tests for the arm64 optimizing compiler backend.
-TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
+# Known broken tests for the mips32 optimizing compiler backend.
+TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
+    441-checker-inliner \
+    442-checker-constant-folding \
+    444-checker-nce \
+    445-checker-licm \
+    446-checker-inliner2 \
+    447-checker-inliner3 \
+    449-checker-bce \
+    450-checker-types \
+    455-checker-gvn \
+    458-checker-instruction-simplification \
+    462-checker-inlining-across-dex-files \
+    463-checker-boolean-simplifier \
+    464-checker-inline-sharpen-calls \
+    465-checker-clinit-gvn \
+    468-checker-bool-simplifier-regression \
+    473-checker-inliner-constants \
+    474-checker-boolean-input \
+    476-checker-ctor-memory-barrier \
+    477-checker-bound-type \
+    478-checker-clinit-check-pruning \
+    478-checker-inliner-nested-loop \
+    480-checker-dead-blocks \
+    482-checker-loop-back-edge-use \
+    484-checker-register-hints \
+    485-checker-dce-loop-update \
+    485-checker-dce-switch \
+    486-checker-must-do-null-check \
+    487-checker-inline-calls \
+    488-checker-inline-recursive-calls \
+    490-checker-inline \
+    492-checker-inline-invoke-interface \
+    493-checker-inline-invoke-interface \
+    494-checker-instanceof-tests \
+    495-checker-checkcast-tests \
+    496-checker-inlining-and-class-loader \
+    508-checker-disassembly \
+    510-checker-try-catch \
+    517-checker-builder-fallthrough \
+    521-checker-array-set-null \
+    522-checker-regression-monitor-exit \
+    523-checker-can-throw-regression \
+    525-checker-arrays-and-fields \
+    526-checker-caller-callee-regs \
+    529-checker-unresolved \
+    530-checker-loops \
+    530-checker-regression-reftype-final \
+    532-checker-nonnull-arrayset \
+    534-checker-bce-deoptimization \
 
-ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
-  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
-      optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
+ifeq (mips,$(TARGET_ARCH))
+  ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+    ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+        optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+        $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \
+        $(TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+  endif
 endif
 
-TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
+TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS :=
 
 # Known broken tests for the optimizing compiler.
-TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
+TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS := \
+  455-set-vreg \
 
 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -438,20 +490,11 @@
       $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
 endif
 
-# If ART_USE_OPTIMIZING_COMPILER is set to true, then the default core.art has been
-# compiled with the optimizing compiler.
-ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
-  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
-      default,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
-      $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS),$(ALL_ADDRESS_SIZES))
-endif
-
 TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS :=
 
 # Tests that should fail when the optimizing compiler compiles them non-debuggable.
 TEST_ART_BROKEN_OPTIMIZING_NONDEBUGGABLE_RUN_TESTS := \
   454-get-vreg \
-  455-set-vreg \
   457-regs \
 
 ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
new file mode 100644
index 0000000..042b03b
--- /dev/null
+++ b/test/common/runtime_state.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "base/logging.h"
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "nth_caller_visitor.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// public static native boolean hasOatFile();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasOatFile(JNIEnv* env, jclass cls) {
+  ScopedObjectAccess soa(env);
+
+  mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+  const DexFile& dex_file = klass->GetDexFile();
+  const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+  return (oat_dex_file != nullptr) ? JNI_TRUE : JNI_FALSE;
+}
+
+// public static native boolean runtimeIsSoftFail();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_runtimeIsSoftFail(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                                  jclass cls ATTRIBUTE_UNUSED) {
+  return Runtime::Current()->IsVerificationSoftFail() ? JNI_TRUE : JNI_FALSE;
+}
+
+// public static native boolean isDex2OatEnabled();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                                 jclass cls ATTRIBUTE_UNUSED) {
+  return Runtime::Current()->IsDex2OatEnabled();
+}
+
+// public static native boolean hasImage();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                         jclass cls ATTRIBUTE_UNUSED) {
+  return Runtime::Current()->GetHeap()->HasImageSpace();
+}
+
+// public static native boolean isImageDex2OatEnabled();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                                      jclass cls ATTRIBUTE_UNUSED) {
+  return Runtime::Current()->IsImageDex2OatEnabled();
+}
+
+}  // namespace art
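
The `// public static native ...` comments above each hook give the Java-side
declarations these natives bind to. A minimal hypothetical caller (assuming the
run-test convention of passing the test library name as args[0]):

    public class Main {
        public static native boolean hasOatFile();
        public static native boolean runtimeIsSoftFail();
        public static native boolean isDex2OatEnabled();
        public static native boolean hasImage();
        public static native boolean isImageDex2OatEnabled();

        public static void main(String[] args) {
            System.loadLibrary(args[0]);  // "arttest" under the harness
            System.out.println("oat file:  " + hasOatFile());
            System.out.println("soft fail: " + runtimeIsSoftFail());
            System.out.println("dex2oat:   " + isDex2OatEnabled());
            System.out.println("image:     " + hasImage());
            System.out.println("image d2o: " + isImageDex2OatEnabled());
        }
    }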
diff --git a/test/088-monitor-verification/stack_inspect.cc b/test/common/stack_inspect.cc
similarity index 64%
rename from test/088-monitor-verification/stack_inspect.cc
rename to test/common/stack_inspect.cc
index e2899c3..d22cf52 100644
--- a/test/088-monitor-verification/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -27,25 +27,26 @@
 
 namespace art {
 
-// public static native void assertCallerIsInterpreted();
+// public static native boolean isCallerInterpreted();
 
-extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsInterpreted(JNIEnv* env, jclass) {
-  LOG(INFO) << "assertCallerIsInterpreted";
-
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isCallerInterpreted(JNIEnv* env, jclass) {
   ScopedObjectAccess soa(env);
   NthCallerVisitor caller(soa.Self(), 1, false);
   caller.WalkStack();
   CHECK(caller.caller != nullptr);
-  LOG(INFO) << PrettyMethod(caller.caller);
-  CHECK(caller.GetCurrentShadowFrame() != nullptr);
+  return caller.GetCurrentShadowFrame() != nullptr ? JNI_TRUE : JNI_FALSE;
 }
 
-// public static native void assertCallerIsManaged();
+// public static native void assertCallerIsInterpreted();
 
-extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, jclass cls) {
-  // Note: needs some smarts to not fail if there is no managed code, at all.
-  LOG(INFO) << "assertCallerIsManaged";
+extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsInterpreted(JNIEnv* env, jclass klass) {
+  CHECK(Java_Main_isCallerInterpreted(env, klass));
+}
 
+
+// public static native boolean isCallerManaged();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isCallerManaged(JNIEnv* env, jclass cls) {
   ScopedObjectAccess soa(env);
 
   mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
@@ -54,28 +55,20 @@
   if (oat_dex_file == nullptr) {
     // No oat file, this must be a test configuration that doesn't compile at all. Ignore that the
     // result will be that we're running the interpreter.
-    return;
+    return JNI_FALSE;
   }
 
   NthCallerVisitor caller(soa.Self(), 1, false);
   caller.WalkStack();
   CHECK(caller.caller != nullptr);
-  LOG(INFO) << PrettyMethod(caller.caller);
 
-  if (caller.GetCurrentShadowFrame() == nullptr) {
-    // Not a shadow frame, this looks good.
-    return;
-  }
+  return caller.GetCurrentShadowFrame() != nullptr ? JNI_FALSE : JNI_TRUE;
+}
 
-  // This could be an interpret-only or a verify-at-runtime compilation, or a read-barrier variant,
-  // or... It's not really safe to just reject now. Let's look at the access flags. If the method
-  // was successfully verified, its access flags should be set to mark it preverified, except when
-  // we're running soft-fail tests.
-  if (Runtime::Current()->IsVerificationSoftFail()) {
-    // Soft-fail config. Everything should be running with interpreter access checks, potentially.
-    return;
-  }
-  CHECK(caller.caller->IsPreverified());
+// public static native void assertCallerIsManaged();
+
+extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, jclass cls) {
+  CHECK(Java_Main_isCallerManaged(env, cls));
 }
 
 }  // namespace art
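
The rewrite splits each native assertion into a boolean query plus a thin CHECK
wrapper, so tests that may legitimately run without compiled code can branch on
the state instead of aborting. A hypothetical Java-side use of the new
predicates:

    public class Main {
        public static native boolean isCallerInterpreted();
        public static native boolean isCallerManaged();
        public static native void assertCallerIsManaged();

        static void probe() {
            if (isCallerManaged()) {
                // Only assert in configurations where probe() was compiled;
                // interpret-only or no-oat configurations skip the check.
                assertCallerIsManaged();
            }
        }

        public static void main(String[] args) {
            System.loadLibrary(args[0]);  // assumed run-test convention
            probe();
        }
    }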
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 972e827..de9b35d 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -20,21 +20,11 @@
 fi
 
 common_targets="vogar vogar.jar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests"
-android_root="/data/local/tmp/system"
-linker="linker"
 mode="target"
 j_arg="-j$(nproc)"
 showcommands=
 make_command=
 
-case "$TARGET_PRODUCT" in
-  (armv8|mips64r6) linker="linker64";;
-esac
-
-if [[ "$ART_TEST_ANDROID_ROOT" != "" ]]; then
-  android_root="$ART_TEST_ANDROID_ROOT"
-fi
-
 while true; do
   if [[ "$1" == "--host" ]]; then
     mode="host"
@@ -42,16 +32,6 @@
   elif [[ "$1" == "--target" ]]; then
     mode="target"
     shift
-  elif [[ "$1" == "--32" ]]; then
-    linker="linker"
-    shift
-  elif [[ "$1" == "--64" ]]; then
-    linker="linker64"
-    shift
-  elif [[ "$1" == "--android-root" ]]; then
-    shift
-    android_root=$1
-    shift
   elif [[ "$1" == -j* ]]; then
     j_arg=$1
     shift
@@ -64,19 +44,10 @@
 done
 
 if [[ $mode == "host" ]]; then
-  make_command="make $j_arg build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
-  echo "Executing $make_command"
-  $make_command
+  make_command="make $j_arg $showcommands build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
 elif [[ $mode == "target" ]]; then
-  # Disable NINJA for building on target, it does not support setting environment variables
-  # within the make command.
-  env="$env USE_NINJA=false"
-  # Build extra tools that will be used by tests, so that
-  # they are compiled with our own linker.
-  # We need to provide our own linker in case the linker on the device
-  # is out of date.
-  make_command="make TARGET_LINKER=$android_root/bin/$linker $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh out/host/linux-x86/bin/adb"
-  echo "Executing env $env $make_command"
-  env $env $make_command
+  make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh out/host/linux-x86/bin/adb"
 fi
 
+echo "Executing $make_command"
+$make_command
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 26d9ad7..80f7a37 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -86,12 +86,6 @@
     # Remove the --debug from the arguments.
     vogar_args=${vogar_args/$1}
     vogar_args="$vogar_args --vm-arg -XXlib:libartd.so"
-    if [ "$emulator" = "no" ]; then
-      # Increase the timeout, as vogar cannot set individual test
-      # timeout when being asked to run packages, and some tests go above
-      # the default timeout.
-      vogar_args="$vogar_args --timeout 240"
-    fi
     shift
   elif [[ "$1" == "" ]]; then
     break
@@ -100,10 +94,10 @@
   fi
 done
 
-if [ "$emulator" = "yes" ]; then
-  # Be very patient with the emulator.
-  vogar_args="$vogar_args --timeout 480"
-fi
+# Increase the timeout, as vogar cannot set individual test
+# timeouts when asked to run packages, and some tests exceed
+# the default timeout.
+vogar_args="$vogar_args --timeout 480"
 
 # Run the tests using vogar.
 echo "Running tests for the following test packages:"