Merge "ART: Move DexCache arrays to native."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index ad2feeb..acce68b 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -333,7 +333,7 @@
     ifneq ($(NATIVE_COVERAGE),true)
       art_host_non_debug_cflags += -Wframe-larger-than=2700
       ifdef SANITIZE_TARGET
-        art_target_non_debug_cflags += -Wframe-larger-than=5450
+        art_target_non_debug_cflags += -Wframe-larger-than=6400
       else
         art_target_non_debug_cflags += -Wframe-larger-than=1728
       endif
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 01ca46e..9b4dbe0 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -51,7 +51,8 @@
                                   uint16_t class_def_idx,
                                   uint32_t method_idx,
                                   jobject class_loader,
-                                  const DexFile& dex_file) const = 0;
+                                  const DexFile& dex_file,
+                                  Handle<mirror::DexCache> dex_cache) const = 0;
 
   virtual CompiledMethod* JniCompile(uint32_t access_flags,
                                      uint32_t method_idx,
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 603130a..ff7ddc1 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -324,9 +324,13 @@
     DexToDexCompilationLevel dex_to_dex_compilation_level) {
   DCHECK(driver != nullptr);
   if (dex_to_dex_compilation_level != DexToDexCompilationLevel::kDontDexToDexCompile) {
-    art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<1> hs(soa.Self());
+    ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+    art::DexCompilationUnit unit(nullptr, class_loader, class_linker,
                                  dex_file, code_item, class_def_idx, method_idx, access_flags,
-                                 driver->GetVerifiedMethod(&dex_file, method_idx));
+                                 driver->GetVerifiedMethod(&dex_file, method_idx),
+                                 hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
     art::optimizer::DexCompiler dex_compiler(*driver, unit, dex_to_dex_compilation_level);
     dex_compiler.Compile();
     if (dex_compiler.GetQuickenedInfo().empty()) {
@@ -335,7 +339,7 @@
     }
 
     // Create a `CompiledMethod`, with the quickened information in the vmap table.
-    Leb128EncodingVector builder;
+    Leb128EncodingVector<> builder;
     for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) {
       builder.PushBackUnsigned(info.dex_pc);
       builder.PushBackUnsigned(info.dex_member_index);
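
The switch to `Leb128EncodingVector<>` reflects the encoder becoming a template over its allocator (with the old behavior as the default argument). Since this hunk leans on ULEB128, here is a minimal standalone sketch of the encoding and the allocator-parameterized shape — a toy loosely modeled on ART's `Leb128Encoder`, not its actual implementation:

```cpp
#include <cstdint>
#include <memory>
#include <vector>

// Toy ULEB128 encoder, loosely modeled on ART's Leb128Encoder<Allocator>:
// it appends straight into a caller-owned vector, and each emitted byte
// carries 7 payload bits with the high bit flagging a continuation.
template <typename Allocator = std::allocator<uint8_t>>
class Leb128Encoder {
 public:
  explicit Leb128Encoder(std::vector<uint8_t, Allocator>* data) : data_(data) {}

  void PushBackUnsigned(uint32_t value) {
    while (value >= 0x80u) {
      data_->push_back(static_cast<uint8_t>(value) | 0x80u);  // more bytes follow
      value >>= 7;
    }
    data_->push_back(static_cast<uint8_t>(value));  // final byte, high bit clear
  }

 private:
  std::vector<uint8_t, Allocator>* const data_;
};

int main() {
  std::vector<uint8_t> table;  // ART would use an arena-backed vector here
  Leb128Encoder<> builder(&table);
  builder.PushBackUnsigned(624485u);  // encodes as 0xE5 0x8E 0x26
  return table.size() == 3 ? 0 : 1;
}
```

Seven payload bits per byte keeps small values — the common case for dex pcs and member indices — at a single byte.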
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 1cff8dc..39f8ee8 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -30,6 +30,7 @@
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
 #include "driver/dex_compilation_unit.h"
+#include "scoped_thread_state_change.h"
 #include "utils.h"
 
 namespace art {
@@ -1283,8 +1284,12 @@
       ifield_lowering_infos_.push_back(
           MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
     }
-    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
-                                   ifield_lowering_infos_.data(), ifield_pos);
+    ScopedObjectAccess soa(Thread::Current());
+    MirIFieldLoweringInfo::Resolve(soa,
+                                   cu_->compiler_driver,
+                                   GetCurrentDexCompilationUnit(),
+                                   ifield_lowering_infos_.data(),
+                                   ifield_pos);
   }
 
   if (sfield_pos != max_refs) {
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 4dfec17..13bbc3e 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -29,7 +29,8 @@
 
 namespace art {
 
-void MirIFieldLoweringInfo::Resolve(CompilerDriver* compiler_driver,
+void MirIFieldLoweringInfo::Resolve(const ScopedObjectAccess& soa,
+                                    CompilerDriver* compiler_driver,
                                     const DexCompilationUnit* mUnit,
                                     MirIFieldLoweringInfo* field_infos, size_t count) {
   if (kIsDebugBuild) {
@@ -44,7 +45,6 @@
 
   // We're going to resolve fields and check access in a tight loop. It's better to hold
   // the lock and needed references once than re-acquiring them again and again.
-  ScopedObjectAccess soa(Thread::Current());
   StackHandleScope<3> hs(soa.Self());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
   Handle<mirror::ClassLoader> class_loader(
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 053029d..b6dc27d 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -26,6 +26,7 @@
 
 class CompilerDriver;
 class DexCompilationUnit;
+class ScopedObjectAccess;
 
 /*
  * Field info is calculated from the perspective of the compilation unit that accesses
@@ -133,9 +134,12 @@
   // For each requested instance field retrieve the field's declaring location (dex file, class
   // index and field index) and volatility and compute whether we can fast path the access
   // with IGET/IPUT. For fast path fields, retrieve the field offset.
-  static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit,
-                      MirIFieldLoweringInfo* field_infos, size_t count)
-      REQUIRES(!Locks::mutator_lock_);
+  static void Resolve(const ScopedObjectAccess& soa,
+                      CompilerDriver* compiler_driver,
+                      const DexCompilationUnit* mUnit,
+                      MirIFieldLoweringInfo* field_infos,
+                      size_t count)
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Construct an unresolved instance field lowering info.
   MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
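
The new signature replaces `REQUIRES(!Locks::mutator_lock_)` (callee acquires the lock itself) with `SHARED_REQUIRES(Locks::mutator_lock_)` plus a `const ScopedObjectAccess&` parameter: the caller acquires once, and the reference acts as compile-time evidence that the lock is already held. A minimal sketch of that "lock witness" pattern, using standard-library stand-ins rather than ART's `Thread` and lock types:

```cpp
// Sketch of the "pass the lock witness" pattern behind the new Resolve()
// signature. Types are illustrative stand-ins for ART's ScopedObjectAccess.
#include <shared_mutex>

std::shared_mutex mutator_lock;  // stands in for Locks::mutator_lock_

// RAII guard: a live instance means the shared lock is held for its lifetime.
class ScopedAccess {
 public:
  ScopedAccess() : guard_(mutator_lock) {}
 private:
  std::shared_lock<std::shared_mutex> guard_;
};

// Like MirIFieldLoweringInfo::Resolve after this change: the (otherwise
// unused) reference is evidence the caller holds the lock, so the callee
// never re-acquires it inside a tight resolution loop.
void Resolve(const ScopedAccess& soa) { (void)soa; }

int main() {
  ScopedAccess soa;  // caller acquires once...
  Resolve(soa);      // ...and every callee borrows the same scope.
  Resolve(soa);
}
```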
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 3834242..7976a9a 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -699,16 +699,17 @@
 /* Parse a Dex method and insert it into the MIRGraph at the current insert point. */
 void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
                            InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx,
-                           uint32_t method_idx, jobject class_loader, const DexFile& dex_file) {
+                           uint32_t method_idx, jobject class_loader, const DexFile& dex_file,
+                           Handle<mirror::DexCache> dex_cache) {
   current_code_item_ = code_item;
   method_stack_.push_back(std::make_pair(current_method_, current_offset_));
   current_method_ = m_units_.size();
   current_offset_ = 0;
   // TODO: will need to snapshot stack image and use that as the mir context identification.
   m_units_.push_back(new (arena_) DexCompilationUnit(
-      cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file,
-      current_code_item_, class_def_idx, method_idx, access_flags,
-      cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx)));
+      cu_, class_loader, Runtime::Current()->GetClassLinker(), dex_file, current_code_item_,
+      class_def_idx, method_idx, access_flags,
+      cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx), dex_cache));
   const uint16_t* code_ptr = current_code_item_->insns_;
   const uint16_t* code_end =
       current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 8bf709a..1df6a4f 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -368,7 +368,7 @@
 
 struct SuccessorBlockInfo;
 
-class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
+class BasicBlock : public DeletableArenaObject<kArenaAllocBasicBlock> {
  public:
   BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
       : id(block_id),
@@ -572,9 +572,14 @@
    * Parse dex method and add MIR at current insert point.  Returns id (which is
    * actually the index of the method in the m_units_ array).
    */
-  void InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
-                    InvokeType invoke_type, uint16_t class_def_idx,
-                    uint32_t method_idx, jobject class_loader, const DexFile& dex_file);
+  void InlineMethod(const DexFile::CodeItem* code_item,
+                    uint32_t access_flags,
+                    InvokeType invoke_type,
+                    uint16_t class_def_idx,
+                    uint32_t method_idx,
+                    jobject class_loader,
+                    const DexFile& dex_file,
+                    Handle<mirror::DexCache> dex_cache);
 
   /* Find existing block */
   BasicBlock* FindBlock(DexOffset code_offset,
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index 31c3808..658e7d6 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -112,7 +112,8 @@
           mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
           *it->target_dex_file_, nullptr /* code_item not used */, 0u /* class_def_idx not used */,
           it->target_method_idx_, 0u /* access_flags not used */,
-          nullptr /* verified_method not used */);
+          nullptr /* verified_method not used */,
+          current_dex_cache);
       resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
                                                        it->target_method_idx_, invoke_type, false);
       if (resolved_method == nullptr) {
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 80b7ac1..eb4915b 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -17,6 +17,7 @@
 #include "base/bit_vector-inl.h"
 #include "base/logging.h"
 #include "base/scoped_arena_containers.h"
+#include "class_linker-inl.h"
 #include "dataflow_iterator-inl.h"
 #include "dex/verified_method.h"
 #include "dex_flags.h"
@@ -30,6 +31,7 @@
 #include "quick/dex_file_method_inliner.h"
 #include "quick/dex_file_to_method_inliner_map.h"
 #include "stack.h"
+#include "thread-inl.h"
 #include "type_inference.h"
 #include "utils.h"
 
@@ -1469,13 +1471,23 @@
 
   const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
   MethodReference target = method_info.GetTargetMethod();
-  DexCompilationUnit inlined_unit(
-      cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
-      nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
-      0u /* access_flags not used */, nullptr /* verified_method not used */);
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::DexCache> dex_cache(
+      hs.NewHandle(cu_->class_linker->FindDexCache(hs.Self(), *target.dex_file)));
+  DexCompilationUnit inlined_unit(cu_,
+                                  cu_->class_loader,
+                                  cu_->class_linker,
+                                  *target.dex_file,
+                                  nullptr /* code_item not used */,
+                                  0u /* class_def_idx not used */,
+                                  target.dex_method_index,
+                                  0u /* access_flags not used */,
+                                  nullptr /* verified_method not used */,
+                                  dex_cache);
   DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
   MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
-  MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
+  MirIFieldLoweringInfo::Resolve(soa, cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
   DCHECK(inlined_field_info.IsResolved());
 
   uint32_t field_info_index = ifield_lowering_infos_.size();
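
`FindDexCache` returns a bare object pointer, so it is immediately wrapped via `hs.NewHandle`: with a moving collector, a raw `mirror::DexCache*` held across a suspend point could be left dangling, whereas a handle is a slot the GC can rewrite. A toy model of the handle-scope idea — illustrative types only, not ART's real `Handle`/`StackHandleScope`:

```cpp
#include <cassert>
#include <cstddef>

struct Object { int data; };

// A handle is an extra indirection through a GC-visited slot.
class Handle {
 public:
  explicit Handle(Object** slot) : slot_(slot) {}
  Object* Get() const { return *slot_; }
 private:
  Object** slot_;
};

class StackHandleScope {
 public:
  Handle NewHandle(Object* obj) {
    assert(pos_ < kSize);
    roots_[pos_] = obj;
    return Handle(&roots_[pos_++]);
  }
  // A moving GC walks the root table and rewrites slots in place.
  void RelocateForGc(Object* from, Object* to) {
    for (size_t i = 0; i < pos_; ++i) {
      if (roots_[i] == from) roots_[i] = to;
    }
  }
 private:
  static constexpr size_t kSize = 4;
  Object* roots_[kSize] = {};
  size_t pos_ = 0;
};

int main() {
  Object a{1}, b{1};
  StackHandleScope hs;
  Handle dex_cache = hs.NewHandle(&a);
  hs.RelocateForGc(&a, &b);       // simulate the GC moving the object
  assert(dex_cache.Get() == &b);  // the handle observes the new address
  return 0;
}
```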
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 72754ae..7082bed 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1115,7 +1115,7 @@
 
 CompiledMethod* Mir2Lir::GetCompiledMethod() {
   // Combine vmap tables - core regs, then fp regs - into vmap_table.
-  Leb128EncodingVector vmap_encoder;
+  Leb128EncodingVector<> vmap_encoder;
   if (frame_size_ > 0) {
     // Prefix the encoded data with its size.
     size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 3642b82..b5ecf9c 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -663,7 +663,8 @@
                                        uint16_t class_def_idx,
                                        uint32_t method_idx,
                                        jobject class_loader,
-                                       const DexFile& dex_file) const {
+                                       const DexFile& dex_file,
+                                       Handle<mirror::DexCache> dex_cache) const {
   if (kPoisonHeapReferences) {
     VLOG(compiler) << "Skipping method : " << PrettyMethod(method_idx, dex_file)
                    << "  Reason = Quick does not support heap poisoning.";
@@ -749,7 +750,7 @@
 
   /* Build the raw MIR graph */
   cu.mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
-                             class_loader, dex_file);
+                             class_loader, dex_file, dex_cache);
 
   if (!CanCompileMethod(method_idx, dex_file, &cu)) {
     VLOG(compiler)  << cu.instruction_set << ": Cannot compile method : "
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 4a39ab3..d512b25 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -21,6 +21,10 @@
 
 namespace art {
 
+namespace mirror {
+class DexCache;
+}
+
 class Compiler;
 class CompilerDriver;
 class Mir2Lir;
@@ -43,7 +47,8 @@
                           uint16_t class_def_idx,
                           uint32_t method_idx,
                           jobject class_loader,
-                          const DexFile& dex_file) const OVERRIDE;
+                          const DexFile& dex_file,
+                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;
 
   CompiledMethod* JniCompile(uint32_t access_flags,
                              uint32_t method_idx,
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
index eaa2bfa..872a8d6 100644
--- a/compiler/dex/type_inference_test.cc
+++ b/compiler/dex/type_inference_test.cc
@@ -252,7 +252,8 @@
     cu_.mir_graph->m_units_.push_back(new (cu_.mir_graph->arena_) DexCompilationUnit(
         &cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
         0u /* class_def_idx not used */, 0u /* method_index not used */,
-        cu_.access_flags, nullptr /* verified_method not used */));
+        cu_.access_flags, nullptr /* verified_method not used */,
+        NullHandle<mirror::DexCache>()));
     cu_.mir_graph->current_method_ = 0u;
     code_item_ = static_cast<DexFile::CodeItem*>(
         cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 8f1987a..e535afd 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -34,7 +34,7 @@
   return mUnit->GetClassLinker()->FindDexCache(Thread::Current(), *mUnit->GetDexFile(), false);
 }
 
-inline mirror::ClassLoader* CompilerDriver::GetClassLoader(ScopedObjectAccess& soa,
+inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa,
                                                            const DexCompilationUnit* mUnit) {
   return soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader());
 }
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 950f824..9f05e64 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -571,7 +571,8 @@
                           jobject class_loader,
                           const DexFile& dex_file,
                           optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
-                          bool compilation_enabled)
+                          bool compilation_enabled,
+                          Handle<mirror::DexCache> dex_cache)
     REQUIRES(!driver->compiled_methods_lock_) {
   DCHECK(driver != nullptr);
   CompiledMethod* compiled_method = nullptr;
@@ -608,7 +609,7 @@
       // NOTE: if compiler declines to compile this method, it will return null.
       compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
                                                        class_def_idx, method_idx, class_loader,
-                                                       dex_file);
+                                                       dex_file, dex_cache);
     }
     if (compiled_method == nullptr &&
         dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
@@ -673,6 +674,8 @@
   uint32_t method_idx = method->GetDexMethodIndex();
   uint32_t access_flags = method->GetAccessFlags();
   InvokeType invoke_type = method->GetInvokeType();
+  StackHandleScope<1> hs(self);
+  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
   {
     ScopedObjectAccessUnchecked soa(self);
     ScopedLocalRef<jobject> local_class_loader(
@@ -683,7 +686,9 @@
     class_def_idx = method->GetClassDefIndex();
   }
   const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
-  self->TransitionFromRunnableToSuspended(kNative);
+
+  // Go to native so that we don't block GC during compilation.
+  ScopedThreadSuspension sts(self, kNative);
 
   std::vector<const DexFile*> dex_files;
   dex_files.push_back(dex_file);
@@ -709,19 +714,20 @@
                 jclass_loader,
                 *dex_file,
                 dex_to_dex_compilation_level,
-                true);
+                true,
+                dex_cache);
 
   self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
-  self->TransitionFromSuspendedToRunnable();
 }
 
 CompiledMethod* CompilerDriver::CompileArtMethod(Thread* self, ArtMethod* method) {
   const uint32_t method_idx = method->GetDexMethodIndex();
   const uint32_t access_flags = method->GetAccessFlags();
   const InvokeType invoke_type = method->GetInvokeType();
-  StackHandleScope<1> hs(self);
+  StackHandleScope<2> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       method->GetDeclaringClass()->GetClassLoader()));
+  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
   jobject jclass_loader = class_loader.ToJObject();
   const DexFile* dex_file = method->GetDexFile();
   const uint16_t class_def_idx = method->GetClassDefIndex();
@@ -729,7 +735,8 @@
   optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
       GetDexToDexCompilationLevel(self, *this, class_loader, *dex_file, class_def);
   const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
-  self->TransitionFromRunnableToSuspended(kNative);
+  // Go to native so that we don't block GC during compilation.
+  ScopedThreadSuspension sts(self, kNative);
   CompileMethod(self,
                 this,
                 code_item,
@@ -740,9 +747,9 @@
                 jclass_loader,
                 *dex_file,
                 dex_to_dex_compilation_level,
-                true);
+                true,
+                dex_cache);
   auto* compiled_method = GetCompiledMethod(MethodReference(dex_file, method_idx));
-  self->TransitionFromSuspendedToRunnable();
   return compiled_method;
 }
 
@@ -1424,24 +1431,19 @@
   // Try to resolve the field and compiling method's class.
   ArtField* resolved_field;
   mirror::Class* referrer_class;
-  mirror::DexCache* dex_cache;
+  Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
   {
-    StackHandleScope<2> hs(soa.Self());
-    Handle<mirror::DexCache> dex_cache_handle(
-        hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(
-            soa.Self(), *mUnit->GetDexFile(), false)));
+    StackHandleScope<1> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader_handle(
         hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
-    resolved_field =
-        ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, false);
+    resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false);
     referrer_class = resolved_field != nullptr
-        ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
-    dex_cache = dex_cache_handle.Get();
+        ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr;
   }
   bool can_link = false;
   if (resolved_field != nullptr && referrer_class != nullptr) {
     std::pair<bool, bool> fast_path = IsFastInstanceField(
-        dex_cache, referrer_class, resolved_field, field_idx);
+        dex_cache.Get(), referrer_class, resolved_field, field_idx);
     can_link = is_put ? fast_path.second : fast_path.first;
   }
   ProcessedInstanceField(can_link);
@@ -1475,25 +1477,21 @@
   // Try to resolve the field and compiling method's class.
   ArtField* resolved_field;
   mirror::Class* referrer_class;
-  mirror::DexCache* dex_cache;
+  Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
   {
-    StackHandleScope<2> hs(soa.Self());
-    Handle<mirror::DexCache> dex_cache_handle(
-        hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(
-            soa.Self(), *mUnit->GetDexFile(), false)));
+    StackHandleScope<1> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader_handle(
         hs.NewHandle(soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
     resolved_field =
-        ResolveField(soa, dex_cache_handle, class_loader_handle, mUnit, field_idx, true);
+        ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, true);
     referrer_class = resolved_field != nullptr
-        ? ResolveCompilingMethodsClass(soa, dex_cache_handle, class_loader_handle, mUnit) : nullptr;
-    dex_cache = dex_cache_handle.Get();
+        ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr;
   }
   bool result = false;
   if (resolved_field != nullptr && referrer_class != nullptr) {
     *is_volatile = IsFieldVolatile(resolved_field);
     std::pair<bool, bool> fast_path = IsFastStaticField(
-        dex_cache, referrer_class, resolved_field, field_idx, storage_index);
+        dex_cache.Get(), referrer_class, resolved_field, field_idx, storage_index);
     result = is_put ? fast_path.second : fast_path.first;
   }
   if (result) {
@@ -1664,10 +1662,8 @@
   int stats_flags = 0;
   ScopedObjectAccess soa(Thread::Current());
   // Try to resolve the method and compiling method's class.
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache(
-      hs.NewHandle(mUnit->GetClassLinker()->FindDexCache(
-          soa.Self(), *mUnit->GetDexFile(), false)));
+  StackHandleScope<2> hs(soa.Self());
+  Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
   uint32_t method_idx = target_method->dex_method_index;
@@ -2355,39 +2351,44 @@
     const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
     ClassLinker* class_linker = manager_->GetClassLinker();
     jobject jclass_loader = manager_->GetClassLoader();
-    Thread* self = Thread::Current();
-    {
-      // Use a scoped object access to perform to the quick SkipClass check.
-      const char* descriptor = dex_file.GetClassDescriptor(class_def);
-      ScopedObjectAccess soa(self);
-      StackHandleScope<3> hs(soa.Self());
-      Handle<mirror::ClassLoader> class_loader(
-          hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-      Handle<mirror::Class> klass(
-          hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
-      if (klass.Get() == nullptr) {
-        CHECK(soa.Self()->IsExceptionPending());
-        soa.Self()->ClearException();
-      } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
-        return;
-      }
-    }
     ClassReference ref(&dex_file, class_def_index);
     // Skip compiling classes with generic verifier failures since they will still fail at runtime
     if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
       return;
     }
+    // Use a scoped object access to perform the quick SkipClass check.
+    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    Handle<mirror::DexCache> dex_cache;
+    if (klass.Get() == nullptr) {
+      soa.Self()->AssertPendingException();
+      soa.Self()->ClearException();
+      dex_cache = hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
+    } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+      return;
+    } else {
+      dex_cache = hs.NewHandle(klass->GetDexCache());
+    }
+
     const uint8_t* class_data = dex_file.GetClassData(class_def);
     if (class_data == nullptr) {
       // empty class, probably a marker interface
       return;
     }
 
+    // Go to native so that we don't block GC during compilation.
+    ScopedThreadSuspension sts(soa.Self(), kNative);
+
     CompilerDriver* const driver = manager_->GetCompiler();
 
     // Can we run DEX-to-DEX compiler on this class ?
     optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
-        GetDexToDexCompilationLevel(self, *driver, jclass_loader, dex_file, class_def);
+        GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
 
     ClassDataItemIterator it(dex_file, class_data);
     // Skip fields
@@ -2412,10 +2413,10 @@
         continue;
       }
       previous_direct_method_idx = method_idx;
-      CompileMethod(self, driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+      CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
                     it.GetMethodInvokeType(class_def), class_def_index,
                     method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
-                    compilation_enabled);
+                    compilation_enabled, dex_cache);
       it.Next();
     }
     // Compile virtual methods
@@ -2429,10 +2430,10 @@
         continue;
       }
       previous_virtual_method_idx = method_idx;
-      CompileMethod(self, driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+      CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
                     it.GetMethodInvokeType(class_def), class_def_index,
                     method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
-                    compilation_enabled);
+                    compilation_enabled, dex_cache);
       it.Next();
     }
     DCHECK(!it.HasNext());
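
`ScopedThreadSuspension` replaces the manual `TransitionFromRunnableToSuspended(kNative)` / `TransitionFromSuspendedToRunnable()` pair throughout this file, so the back-transition happens in a destructor on every exit path. A minimal toy of that RAII shape (illustrative `Thread`/state types, not ART's):

```cpp
#include <cassert>

enum ThreadState { kRunnable, kNative };

struct Thread {
  ThreadState state = kRunnable;
};

// Toy ScopedThreadSuspension: flips the state for the scope's lifetime and
// restores it in the destructor, so early returns can't leak a bad state.
class ScopedThreadSuspension {
 public:
  ScopedThreadSuspension(Thread* self, ThreadState target)
      : self_(self), old_state_(self->state) {
    self_->state = target;
  }
  ~ScopedThreadSuspension() { self_->state = old_state_; }
 private:
  Thread* const self_;
  const ThreadState old_state_;
};

bool CompileMethod(Thread* self, bool bail_early) {
  ScopedThreadSuspension sts(self, kNative);  // GC is not blocked from here on
  assert(self->state == kNative);
  if (bail_early) {
    return false;  // manually paired code would need a transition here too
  }
  return true;
}

int main() {
  Thread self;
  CompileMethod(&self, /*bail_early=*/true);
  assert(self.state == kRunnable);  // restored on every path
}
```

The `ScopedObjectAccess` blocks introduced in `image_writer.cc` below are the dual of this: RAII for the suspended-to-runnable direction.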
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 5718be9..b229184 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -226,7 +226,8 @@
   mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
     SHARED_REQUIRES(Locks::mutator_lock_);
 
-  mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
+  mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
+                                      const DexCompilationUnit* mUnit)
     SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Resolve compiling method's class. Returns null on failure.
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index e6c8c18..cfaa01b 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -18,6 +18,7 @@
 
 #include "base/stringprintf.h"
 #include "dex/compiler_ir.h"
+#include "mirror/dex_cache.h"
 #include "utils.h"
 
 namespace art {
@@ -30,7 +31,8 @@
                                        uint16_t class_def_idx,
                                        uint32_t method_idx,
                                        uint32_t access_flags,
-                                       const VerifiedMethod* verified_method)
+                                       const VerifiedMethod* verified_method,
+                                       Handle<mirror::DexCache> dex_cache)
     : cu_(cu),
       class_loader_(class_loader),
       class_linker_(class_linker),
@@ -39,7 +41,8 @@
       class_def_idx_(class_def_idx),
       dex_method_idx_(method_idx),
       access_flags_(access_flags),
-      verified_method_(verified_method) {
+      verified_method_(verified_method),
+      dex_cache_(dex_cache) {
 }
 
 const std::string& DexCompilationUnit::GetSymbol() {
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 3983006..16872f4 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -19,9 +19,10 @@
 
 #include <stdint.h>
 
-#include "dex_file.h"
-#include "jni.h"
 #include "base/arena_object.h"
+#include "dex_file.h"
+#include "handle.h"
+#include "jni.h"
 
 namespace art {
 namespace mirror {
@@ -36,10 +37,16 @@
  public:
   explicit DexCompilationUnit(CompilationUnit* cu);
 
-  DexCompilationUnit(CompilationUnit* cu, jobject class_loader, ClassLinker* class_linker,
-                     const DexFile& dex_file, const DexFile::CodeItem* code_item,
-                     uint16_t class_def_idx, uint32_t method_idx, uint32_t access_flags,
-                     const VerifiedMethod* verified_method);
+  DexCompilationUnit(CompilationUnit* cu,
+                     jobject class_loader,
+                     ClassLinker* class_linker,
+                     const DexFile& dex_file,
+                     const DexFile::CodeItem* code_item,
+                     uint16_t class_def_idx,
+                     uint32_t method_idx,
+                     uint32_t access_flags,
+                     const VerifiedMethod* verified_method,
+                     Handle<mirror::DexCache> dex_cache);
 
   CompilationUnit* GetCompilationUnit() const {
     return cu_;
@@ -109,6 +116,10 @@
 
   const std::string& GetSymbol();
 
+  Handle<mirror::DexCache> GetDexCache() const {
+    return dex_cache_;
+  }
+
  private:
   CompilationUnit* const cu_;
 
@@ -124,6 +135,8 @@
   const uint32_t access_flags_;
   const VerifiedMethod* verified_method_;
 
+  Handle<mirror::DexCache> dex_cache_;
+
   std::string symbol_;
 };
 
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 9172c83..955c575 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -85,11 +85,9 @@
 bool ImageWriter::PrepareImageAddressSpace() {
   target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
   {
-    Thread::Current()->TransitionFromSuspendedToRunnable();
+    ScopedObjectAccess soa(Thread::Current());
     PruneNonImageClasses();  // Remove junk
     ComputeLazyFieldsForImageClasses();  // Add useful information
-
-    Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   }
   gc::Heap* heap = Runtime::Current()->GetHeap();
   heap->CollectGarbage(false);  // Remove garbage.
@@ -109,9 +107,10 @@
     CheckNonImageClassesRemoved();
   }
 
-  Thread::Current()->TransitionFromSuspendedToRunnable();
-  CalculateNewObjectOffsets();
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    CalculateNewObjectOffsets();
+  }
 
   // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
   // bin size sums being calculated.
@@ -164,14 +163,14 @@
   size_t oat_data_offset = 0;
   ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
 
-  Thread::Current()->TransitionFromSuspendedToRunnable();
-
-  CreateHeader(oat_loaded_size, oat_data_offset);
-  CopyAndFixupNativeData();
-  // TODO: heap validation can't handle these fix up passes.
-  Runtime::Current()->GetHeap()->DisableObjectValidation();
-  CopyAndFixupObjects();
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+  {
+    ScopedObjectAccess soa(Thread::Current());
+    CreateHeader(oat_loaded_size, oat_data_offset);
+    CopyAndFixupNativeData();
+    // TODO: heap validation can't handle these fix up passes.
+    Runtime::Current()->GetHeap()->DisableObjectValidation();
+    CopyAndFixupObjects();
+  }
 
   SetOatChecksumFromElfFile(oat_file.get());
 
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 23ab94e..1650fd1 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1205,7 +1205,8 @@
         resolved_field->GetOffset(),
         resolved_field->IsVolatile(),
         field_index,
-        *dex_file_));
+        *dex_file_,
+        dex_compilation_unit_->GetDexCache()));
   } else {
     current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
         current_block_->GetLastInstruction(),
@@ -1213,7 +1214,8 @@
         resolved_field->GetOffset(),
         resolved_field->IsVolatile(),
         field_index,
-        *dex_file_));
+        *dex_file_,
+        dex_compilation_unit_->GetDexCache()));
 
     UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
   }
@@ -1334,14 +1336,16 @@
                                                                 resolved_field->GetOffset(),
                                                                 resolved_field->IsVolatile(),
                                                                 field_index,
-                                                                *dex_file_));
+                                                                *dex_file_,
+                                                                dex_cache_));
   } else {
     current_block_->AddInstruction(new (arena_) HStaticFieldGet(cls,
                                                                 field_type,
                                                                 resolved_field->GetOffset(),
                                                                 resolved_field->IsVolatile(),
                                                                 field_index,
-                                                                *dex_file_));
+                                                                *dex_file_,
+                                                                dex_cache_));
     UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
   }
   return true;
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index d6b25ee..560ed86 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -40,7 +40,8 @@
                 const DexFile* dex_file,
                 CompilerDriver* driver,
                 OptimizingCompilerStats* compiler_stats,
-                const uint8_t* interpreter_metadata)
+                const uint8_t* interpreter_metadata,
+                Handle<mirror::DexCache> dex_cache)
       : arena_(graph->GetArena()),
         branch_targets_(graph->GetArena(), 0),
         locals_(graph->GetArena(), 0),
@@ -57,7 +58,8 @@
         latest_result_(nullptr),
         can_use_baseline_for_string_init_(true),
         compilation_stats_(compiler_stats),
-        interpreter_metadata_(interpreter_metadata) {}
+        interpreter_metadata_(interpreter_metadata),
+        dex_cache_(dex_cache) {}
 
   // Only for unit testing.
   HGraphBuilder(HGraph* graph, Primitive::Type return_type = Primitive::kPrimInt)
@@ -77,7 +79,8 @@
         latest_result_(nullptr),
         can_use_baseline_for_string_init_(true),
         compilation_stats_(nullptr),
-        interpreter_metadata_(nullptr) {}
+        interpreter_metadata_(nullptr),
+        dex_cache_(NullHandle<mirror::DexCache>()) {}
 
   bool BuildGraph(const DexFile::CodeItem& code);
 
@@ -334,6 +337,9 @@
 
   const uint8_t* interpreter_metadata_;
 
+  // Dex cache for dex_file_.
+  Handle<mirror::DexCache> dex_cache_;
+
   DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
 };
 
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index f4cf9b5..1097adb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -239,7 +239,10 @@
         InitLocationsBaseline(current);
       }
       DCHECK(CheckTypeConsistency(current));
+      uintptr_t native_pc_begin = GetAssembler()->CodeSize();
       current->Accept(instruction_visitor);
+      uintptr_t native_pc_end = GetAssembler()->CodeSize();
+      RecordNativeDebugInfo(current->GetDexPc(), native_pc_begin, native_pc_end);
     }
   }
 
@@ -585,7 +588,7 @@
 }
 
 void CodeGenerator::BuildNativeGCMap(
-    std::vector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
+    ArenaVector<uint8_t>* data, const DexCompilationUnit& dex_compilation_unit) const {
   const std::vector<uint8_t>& gc_map_raw =
       dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
   verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
@@ -604,16 +607,7 @@
   }
 }
 
-void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
-  for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
-    const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
-    uint32_t pc2dex_offset = stack_map_entry.native_pc_offset;
-    int32_t pc2dex_dalvik_offset = stack_map_entry.dex_pc;
-    src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
-  }
-}
-
-void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+void CodeGenerator::BuildMappingTable(ArenaVector<uint8_t>* data) const {
   uint32_t pc2dex_data_size = 0u;
   uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
   uint32_t pc2dex_offset = 0u;
@@ -712,24 +706,31 @@
   }
 }
 
-void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
-  Leb128EncodingVector vmap_encoder;
+void CodeGenerator::BuildVMapTable(ArenaVector<uint8_t>* data) const {
+  Leb128Encoder<ArenaAllocatorAdapter<uint8_t>> vmap_encoder(data);
   // We currently don't use callee-saved registers.
   size_t size = 0 + 1 /* marker */ + 0;
   vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
   vmap_encoder.PushBackUnsigned(size);
   vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
-
-  *data = vmap_encoder.GetData();
 }
 
-void CodeGenerator::BuildStackMaps(std::vector<uint8_t>* data) {
+void CodeGenerator::BuildStackMaps(ArenaVector<uint8_t>* data) {
   uint32_t size = stack_map_stream_.PrepareForFillIn();
   data->resize(size);
   MemoryRegion region(data->data(), size);
   stack_map_stream_.FillIn(region);
 }
 
+void CodeGenerator::RecordNativeDebugInfo(uint32_t dex_pc,
+                                          uintptr_t native_pc_begin,
+                                          uintptr_t native_pc_end) {
+  if (src_map_ != nullptr && dex_pc != kNoDexPc && native_pc_begin != native_pc_end) {
+    src_map_->push_back(SrcMapElem({static_cast<uint32_t>(native_pc_begin),
+                                    static_cast<int32_t>(dex_pc)}));
+  }
+}
+
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                  uint32_t dex_pc,
                                  SlowPathCode* slow_path) {
@@ -1071,12 +1072,6 @@
       << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
 }
 
-void SlowPathCode::RecordPcInfo(CodeGenerator* codegen,
-                                HInstruction* instruction,
-                                uint32_t dex_pc) {
-  codegen->RecordPcInfo(instruction, dex_pc, this);
-}
-
 void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
   RegisterSet* register_set = locations->GetLiveRegisters();
   size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
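
`RecordNativeDebugInfo` appends a native-pc-to-dex-pc pair only when a source map was requested via `SetSrcMap`, the dex pc is known, and the instruction actually emitted code. A compact standalone sketch of that filtering, assuming `kNoDexPc` is the usual all-ones sentinel and using `std::vector` in place of `DefaultSrcMap`:

```cpp
#include <cstdint>
#include <vector>

constexpr uint32_t kNoDexPc = static_cast<uint32_t>(-1);  // assumed sentinel

struct SrcMapElem {
  uint32_t from;  // native pc at the start of the instruction's code
  int32_t to;     // dex pc it maps back to
};

using DefaultSrcMap = std::vector<SrcMapElem>;

// Mirrors the shape of CodeGenerator::RecordNativeDebugInfo: only emit an
// entry when a map is requested, the dex pc is known, and the instruction
// produced at least one byte of code.
void RecordNativeDebugInfo(DefaultSrcMap* src_map, uint32_t dex_pc,
                           uintptr_t native_pc_begin, uintptr_t native_pc_end) {
  if (src_map != nullptr && dex_pc != kNoDexPc && native_pc_begin != native_pc_end) {
    src_map->push_back({static_cast<uint32_t>(native_pc_begin),
                        static_cast<int32_t>(dex_pc)});
  }
}

int main() {
  DefaultSrcMap map;
  RecordNativeDebugInfo(&map, 4, 0x10, 0x18);         // real code: recorded
  RecordNativeDebugInfo(&map, 8, 0x18, 0x18);         // empty range: skipped
  RecordNativeDebugInfo(&map, kNoDexPc, 0x18, 0x20);  // no dex pc: skipped
  return map.size() == 1 ? 0 : 1;
}
```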
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 938369b..b3c4d72 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -19,6 +19,8 @@
 
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
 #include "base/bit_field.h"
 #include "driver/compiler_options.h"
 #include "globals.h"
@@ -81,7 +83,6 @@
 
   virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
   virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
-  void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
 
   bool IsCoreRegisterSaved(int reg) const {
     return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
@@ -228,7 +229,11 @@
     return (fpu_callee_save_mask_ & (1 << reg)) != 0;
   }
 
+  // Record native to dex mapping for a suspend point.  Required by runtime.
   void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
+  // Record additional native to dex mappings for native debugging/profiling tools.
+  void RecordNativeDebugInfo(uint32_t dex_pc, uintptr_t native_pc_begin, uintptr_t native_pc_end);
+
   bool CanMoveNullCheckToUser(HNullCheck* null_check);
   void MaybeRecordImplicitNullCheck(HInstruction* instruction);
 
@@ -236,12 +241,13 @@
     slow_paths_.Add(slow_path);
   }
 
-  void BuildSourceMap(DefaultSrcMap* src_map) const;
-  void BuildMappingTable(std::vector<uint8_t>* vector) const;
-  void BuildVMapTable(std::vector<uint8_t>* vector) const;
+  void SetSrcMap(DefaultSrcMap* src_map) { src_map_ = src_map; }
+
+  void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
+  void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
   void BuildNativeGCMap(
-      std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
-  void BuildStackMaps(std::vector<uint8_t>* vector);
+      ArenaVector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
+  void BuildStackMaps(ArenaVector<uint8_t>* vector);
 
   bool IsBaseline() const {
     return is_baseline_;
@@ -394,6 +400,7 @@
         disasm_info_(nullptr),
         graph_(graph),
         compiler_options_(compiler_options),
+        src_map_(nullptr),
         slow_paths_(graph->GetArena(), 8),
         current_block_index_(0),
         is_leaf_(true),
@@ -488,6 +495,8 @@
   HGraph* const graph_;
   const CompilerOptions& compiler_options_;
 
+  // Native to dex_pc map used for native debugging/profiling tools.
+  DefaultSrcMap* src_map_;
   GrowableArray<SlowPathCode*> slow_paths_;
 
   // The current block index in `block_order_` of the block
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 5b7eea6..679899a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -142,24 +142,22 @@
 
 class BoundsCheckSlowPathARM : public SlowPathCodeARM {
  public:
-  BoundsCheckSlowPathARM(HBoundsCheck* instruction,
-                         Location index_location,
-                         Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        index_location_,
+        locations->InAt(0),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimInt,
-        length_location_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimInt);
     arm_codegen->InvokeRuntime(
@@ -172,8 +170,6 @@
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
 };
@@ -263,17 +259,12 @@
 
 class TypeCheckSlowPathARM : public SlowPathCodeARM {
  public:
-  TypeCheckSlowPathARM(HInstruction* instruction,
-                       Location class_to_check,
-                       Location object_class,
-                       uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -285,20 +276,25 @@
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        class_to_check_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimNot,
-        object_class_,
+        object_class,
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
-      arm_codegen->InvokeRuntime(
-          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
+      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
       arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
     } else {
       DCHECK(instruction_->IsCheckCast());
-      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+      arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+                                 instruction_,
+                                 instruction_->GetDexPc(),
+                                 this);
     }
 
     RestoreLiveRegisters(codegen, locations);
@@ -309,9 +305,6 @@
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
 };
@@ -3899,8 +3892,8 @@
 
 void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(
-      instruction, locations->InAt(0), locations->InAt(1));
+  SlowPathCodeARM* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
   codegen_->AddSlowPath(slow_path);
 
   Register index = locations->InAt(0).AsRegister<Register>();
@@ -4343,6 +4336,7 @@
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   // The out register is used as a temporary, so it overlaps with the inputs.
+  // Note that TypeCheckSlowPathARM uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
@@ -4372,8 +4366,7 @@
   } else {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
-        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
     codegen_->AddSlowPath(slow_path);
     __ b(slow_path->GetEntryLabel(), NE);
     __ LoadImmediate(out, 1);
@@ -4396,6 +4389,7 @@
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathARM uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -4406,8 +4400,8 @@
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
 
-  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
-      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  SlowPathCodeARM* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // avoid null check if we know obj is not null.
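
The slow-path classes here shrink from caching `Location` values at construction time to re-reading them from the instruction's `LocationSummary` inside `EmitNativeCode()`, which is what enables the single-argument constructors. A toy illustration of that refactoring with stand-in types (not ART's real codegen classes):

```cpp
#include <iostream>

// Stand-in types; ART's real Location/LocationSummary are far richer.
struct Location { int reg; };

struct LocationSummary {
  Location in[2];
  Location InAt(int i) const { return in[i]; }
};

struct HBoundsCheck {
  LocationSummary locations;
  const LocationSummary* GetLocations() const { return &locations; }
};

class BoundsCheckSlowPath {
 public:
  // Only the instruction is stored; no per-operand Location copies.
  explicit BoundsCheckSlowPath(HBoundsCheck* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode() {
    // Operands are recovered on demand from the instruction's summary,
    // so the slow path always sees the final register assignments.
    const LocationSummary* locations = instruction_->GetLocations();
    std::cout << "index in r" << locations->InAt(0).reg
              << ", length in r" << locations->InAt(1).reg << "\n";
  }

 private:
  HBoundsCheck* const instruction_;
};

int main() {
  HBoundsCheck check;
  check.locations.in[0] = {2};  // index register
  check.locations.in[1] = {3};  // length register
  BoundsCheckSlowPath path(&check);
  path.EmitNativeCode();  // prints: index in r2, length in r3
}
```

The ARM64 and MIPS64 hunks that follow apply the same transformation to their `BoundsCheck` and `TypeCheck` slow paths.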
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b18fb6e..390ea6b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -191,23 +191,19 @@
 
 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
-                           Location index_location,
-                           Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
-
+  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        index_location_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
-        length_location_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
+        locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
+        locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
     arm64_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
@@ -219,8 +215,6 @@
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
 };
@@ -403,20 +397,17 @@
 
 class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  TypeCheckSlowPathARM64(HInstruction* instruction,
-                         Location class_to_check,
-                         Location object_class,
-                         uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location class_to_check = locations->InAt(1);
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    uint32_t dex_pc = instruction_->GetDexPc();
 
     __ Bind(GetEntryLabel());
     SaveLiveRegisters(codegen, locations);
@@ -425,12 +416,12 @@
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
-        object_class_, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
+        class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
+        object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
       arm64_codegen->InvokeRuntime(
-          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_, this);
+          QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
       arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
@@ -438,7 +429,7 @@
                            const mirror::Class*, const mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
-      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+      arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
@@ -450,9 +441,6 @@
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
 };
@@ -1602,9 +1590,8 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
-      instruction, locations->InAt(0), locations->InAt(1));
+  BoundsCheckSlowPathARM64* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
@@ -1616,17 +1603,17 @@
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathARM64 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
 void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
   Register obj = InputRegisterAt(instruction, 0);
   Register cls = InputRegisterAt(instruction, 1);
   Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
 
-  SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
-      instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
+  SlowPathCodeARM64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid null check if we know obj is not null.
@@ -2240,6 +2227,7 @@
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   // The output does overlap inputs.
+  // Note that TypeCheckSlowPathARM64 uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
@@ -2269,8 +2257,7 @@
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     SlowPathCodeARM64* slow_path =
-        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
-        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+        new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction);
     codegen_->AddSlowPath(slow_path);
     __ B(ne, slow_path->GetEntryLabel());
     __ Mov(out, 1);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 1528d09..10942ef 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -112,23 +112,19 @@
 
 class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
-                            Location index_location,
-                            Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
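+    // The index and length locations are read from the instruction at code emission time,
+    // so the slow path constructor no longer needs to capture them.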
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(index_location_,
+    codegen->EmitParallelMoves(locations->InAt(0),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimInt,
-                               length_location_,
+                               locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimInt);
     mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -144,8 +140,6 @@
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
 };
@@ -334,17 +328,13 @@
 
 class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  TypeCheckSlowPathMIPS64(HInstruction* instruction,
-                          Location class_to_check,
-                          Location object_class,
-                          uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
+    uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -355,17 +345,17 @@
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(class_to_check_,
+    codegen->EmitParallelMoves(locations->InAt(1),
                                Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                                Primitive::kPrimNot,
-                               object_class_,
+                               object_class,
                                Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                                Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
       mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                     instruction_,
-                                    dex_pc_,
+                                    dex_pc,
                                     this);
       Primitive::Type ret_type = instruction_->GetType();
       Location ret_loc = calling_convention.GetReturnLocation(ret_type);
@@ -376,7 +366,7 @@
                            const mirror::Class*>();
     } else {
       DCHECK(instruction_->IsCheckCast());
-      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
+      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
       CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
     }
 
@@ -388,9 +378,6 @@
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
 };
@@ -1590,10 +1577,8 @@
 
 void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations = instruction->GetLocations();
-  BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
-      instruction,
-      locations->InAt(0),
-      locations->InAt(1));
+  BoundsCheckSlowPathMIPS64* slow_path =
+      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
@@ -1616,6 +1601,7 @@
       LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
+  // Note that TypeCheckSlowPathMIPS64 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -1625,11 +1611,8 @@
   GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
   GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
 
-  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
-      instruction,
-      locations->InAt(1),
-      Location::RegisterLocation(obj_cls),
-      instruction->GetDexPc());
+  SlowPathCodeMIPS64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // TODO: avoid this check if we know obj is not null.
@@ -2270,6 +2253,7 @@
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RequiresRegister());
   // The output does overlap inputs.
+  // Note that TypeCheckSlowPathMIPS64 uses this register too.
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
@@ -2296,10 +2280,7 @@
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
     SlowPathCodeMIPS64* slow_path =
-        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
-                                                             locations->InAt(1),
-                                                             locations->Out(),
-                                                             instruction->GetDexPc());
+        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
     codegen_->AddSlowPath(slow_path);
     __ Bnec(out, cls, slow_path->GetEntryLabel());
     __ LoadConst32(out, 1);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4fa7b28..f48395b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -116,24 +116,20 @@
 
 class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  BoundsCheckSlowPathX86(HBoundsCheck* instruction,
-                         Location index_location,
-                         Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->EmitParallelMoves(
-        index_location_,
+        locations->InAt(0),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimInt,
-        length_location_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimInt);
     x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -148,8 +144,6 @@
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
 };
@@ -280,15 +274,12 @@
 
 class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
  public:
-  TypeCheckSlowPathX86(HInstruction* instruction,
-                       Location class_to_check,
-                       Location object_class)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class) {}
+  explicit TypeCheckSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -300,10 +291,10 @@
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     x86_codegen->EmitParallelMoves(
-        class_to_check_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimNot,
-        object_class_,
+        object_class,
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimNot);
 
@@ -332,8 +323,6 @@
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
 };
@@ -4358,7 +4347,7 @@
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCodeX86* slow_path =
-    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction, index_loc, length_loc);
+    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -4830,6 +4819,7 @@
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86 uses this register too.
   locations->SetOut(Location::RequiresRegister());
 }
 
@@ -4866,8 +4856,7 @@
   } else {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
-        instruction, locations->InAt(1), locations->Out());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
     codegen_->AddSlowPath(slow_path);
     __ j(kNotEqual, slow_path->GetEntryLabel());
     __ movl(out, Immediate(1));
@@ -4890,6 +4879,7 @@
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -4899,8 +4889,8 @@
   Location cls = locations->InAt(1);
   Register temp = locations->GetTemp(0).AsRegister<Register>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
-      instruction, locations->InAt(1), locations->GetTemp(0));
+  SlowPathCodeX86* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 29bad12..e1ec2ea 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -170,24 +170,21 @@
 
 class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
-  BoundsCheckSlowPathX86_64(HBoundsCheck* instruction,
-                            Location index_location,
-                            Location length_location)
-      : instruction_(instruction),
-        index_location_(index_location),
-        length_location_(length_location) {}
+  explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = instruction_->GetLocations();
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        index_location_,
+        locations->InAt(0),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimInt,
-        length_location_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimInt);
     x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
@@ -200,8 +197,6 @@
 
  private:
   HBoundsCheck* const instruction_;
-  const Location index_location_;
-  const Location length_location_;
 
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
 };
@@ -293,17 +288,14 @@
 
 class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
-  TypeCheckSlowPathX86_64(HInstruction* instruction,
-                          Location class_to_check,
-                          Location object_class,
-                          uint32_t dex_pc)
-      : instruction_(instruction),
-        class_to_check_(class_to_check),
-        object_class_(object_class),
-        dex_pc_(dex_pc) {}
+  explicit TypeCheckSlowPathX86_64(HInstruction* instruction)
+      : instruction_(instruction) {}
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
+    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
+                                                        : locations->Out();
+    uint32_t dex_pc = instruction_->GetDexPc();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -315,23 +307,23 @@
     // move resolver.
     InvokeRuntimeCallingConvention calling_convention;
     codegen->EmitParallelMoves(
-        class_to_check_,
+        locations->InAt(1),
         Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
         Primitive::kPrimNot,
-        object_class_,
+        object_class,
         Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
         Primitive::kPrimNot);
 
     if (instruction_->IsInstanceOf()) {
       x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                  instruction_,
-                                 dex_pc_,
+                                 dex_pc,
                                  this);
     } else {
       DCHECK(instruction_->IsCheckCast());
       x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
                                  instruction_,
-                                 dex_pc_,
+                                 dex_pc,
                                  this);
     }
 
@@ -347,9 +339,6 @@
 
  private:
   HInstruction* const instruction_;
-  const Location class_to_check_;
-  const Location object_class_;
-  const uint32_t dex_pc_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
 };
@@ -4196,7 +4185,7 @@
   Location index_loc = locations->InAt(0);
   Location length_loc = locations->InAt(1);
   SlowPathCodeX86_64* slow_path =
-    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction, index_loc, length_loc);
+    new (GetGraph()->GetArena()) BoundsCheckSlowPathX86_64(instruction);
 
   if (length_loc.IsConstant()) {
     int32_t length = CodeGenerator::GetInt32ValueOf(length_loc.GetConstant());
@@ -4653,6 +4642,7 @@
   LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86_64 uses this register too.
   locations->SetOut(Location::RequiresRegister());
 }
 
@@ -4688,8 +4678,7 @@
   } else {
     // If the classes are not equal, we go into a slow path.
     DCHECK(locations->OnlyCallsOnSlowPath());
-    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
-        instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
+    slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
     codegen_->AddSlowPath(slow_path);
     __ j(kNotEqual, slow_path->GetEntryLabel());
     __ movl(out, Immediate(1));
@@ -4712,6 +4701,7 @@
       instruction, LocationSummary::kCallOnSlowPath);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::Any());
+  // Note that TypeCheckSlowPathX86_64 uses this register too.
   locations->AddTemp(Location::RequiresRegister());
 }
 
@@ -4721,8 +4711,8 @@
   Location cls = locations->InAt(1);
   CpuRegister temp = locations->GetTemp(0).AsRegister<CpuRegister>();
   uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
-      instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+  SlowPathCodeX86_64* slow_path =
+      new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(instruction);
   codegen_->AddSlowPath(slow_path);
 
   // Avoid null check if we know obj is not null.
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 42ef3ff..32f45b5 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -28,6 +28,7 @@
 TEST(GVNTest, LocalFieldElimination) {
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
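+  // Field instructions now take a dex cache argument; the tests pass a null handle.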
+  NullHandle<mirror::DexCache> dex_cache;
 
   HGraph* graph = CreateGraph(&allocator);
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -45,20 +46,23 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
                                                            Primitive::kPrimNot,
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   HInstruction* to_remove = block->GetLastInstruction();
   block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
                                                            Primitive::kPrimNot,
                                                            MemberOffset(43),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   HInstruction* different_offset = block->GetLastInstruction();
   // Kill the value.
   block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
@@ -67,13 +71,15 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
                                                            Primitive::kPrimNot,
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   HInstruction* use_after_kill = block->GetLastInstruction();
   block->AddInstruction(new (&allocator) HExit());
 
@@ -94,6 +100,7 @@
 TEST(GVNTest, GlobalFieldElimination) {
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
+  NullHandle<mirror::DexCache> dex_cache;
 
   HGraph* graph = CreateGraph(&allocator);
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -110,7 +117,8 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
 
   block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
   HBasicBlock* then = new (&allocator) HBasicBlock(graph);
@@ -130,21 +138,24 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
-                                                          graph->GetDexFile()));
+                                                          graph->GetDexFile(),
+                                                          dex_cache));
   then->AddInstruction(new (&allocator) HGoto());
   else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
                                                            Primitive::kPrimBoolean,
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   else_->AddInstruction(new (&allocator) HGoto());
   join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
                                                           Primitive::kPrimBoolean,
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
-                                                          graph->GetDexFile()));
+                                                          graph->GetDexFile(),
+                                                          dex_cache));
   join->AddInstruction(new (&allocator) HExit());
 
   graph->TryBuildingSsa();
@@ -161,6 +172,7 @@
 TEST(GVNTest, LoopFieldElimination) {
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
+  NullHandle<mirror::DexCache> dex_cache;
 
   HGraph* graph = CreateGraph(&allocator);
   HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -178,7 +190,8 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
-                                                           graph->GetDexFile()));
+                                                           graph->GetDexFile(),
+                                                           dex_cache));
   block->AddInstruction(new (&allocator) HGoto());
 
   HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
@@ -198,7 +211,8 @@
                                                                  MemberOffset(42),
                                                                  false,
                                                                  kUnknownFieldIndex,
-                                                                 graph->GetDexFile()));
+                                                                 graph->GetDexFile(),
+                                                                 dex_cache));
   HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
   loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
 
@@ -210,14 +224,16 @@
                                                                MemberOffset(42),
                                                                false,
                                                                kUnknownFieldIndex,
-                                                               graph->GetDexFile()));
+                                                               graph->GetDexFile(),
+                                                               dex_cache));
   HInstruction* field_set = loop_body->GetLastInstruction();
   loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
                                                                Primitive::kPrimBoolean,
                                                                MemberOffset(42),
                                                                false,
                                                                kUnknownFieldIndex,
-                                                               graph->GetDexFile()));
+                                                               graph->GetDexFile(),
+                                                               dex_cache));
   HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
   loop_body->AddInstruction(new (&allocator) HGoto());
 
@@ -226,7 +242,8 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
-                                                          graph->GetDexFile()));
+                                                          graph->GetDexFile(),
+                                                          dex_cache));
   HInstruction* field_get_in_exit = exit->GetLastInstruction();
   exit->AddInstruction(new (&allocator) HExit());
 
@@ -265,6 +282,7 @@
 TEST(GVNTest, LoopSideEffects) {
   ArenaPool pool;
   ArenaAllocator allocator(&pool);
+  NullHandle<mirror::DexCache> dex_cache;
 
   static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
 
@@ -320,7 +338,8 @@
                                                              MemberOffset(42),
                                                              false,
                                                              kUnknownFieldIndex,
-                                                             graph->GetDexFile()));
+                                                             graph->GetDexFile(),
+                                                             dex_cache));
 
     SideEffectsAnalysis side_effects(graph);
     side_effects.Run();
@@ -342,7 +361,8 @@
                                            MemberOffset(42),
                                            false,
                                            kUnknownFieldIndex,
-                                           graph->GetDexFile()),
+                                           graph->GetDexFile(),
+                                           dex_cache),
         outer_loop_body->GetLastInstruction());
 
     SideEffectsAnalysis side_effects(graph);
@@ -365,7 +385,8 @@
                                            MemberOffset(42),
                                            false,
                                            kUnknownFieldIndex,
-                                           graph->GetDexFile()),
+                                           graph->GetDexFile(),
+                                           dex_cache),
         inner_loop_body->GetLastInstruction());
 
     SideEffectsAnalysis side_effects(graph);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3f90676..0547ce8 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -182,10 +182,13 @@
   ArtMethod* resolved_method;
   if (invoke_instruction->IsInvokeStaticOrDirect()) {
     MethodReference ref = invoke_instruction->AsInvokeStaticOrDirect()->GetTargetMethod();
-    resolved_method = class_linker->FindDexCache(soa.Self(), *ref.dex_file)->GetResolvedMethod(
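+    // Reuse the caller's dex cache when the target method is in the same dex file.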
+    mirror::DexCache* const dex_cache = (&caller_dex_file == ref.dex_file)
+        ? caller_compilation_unit_.GetDexCache().Get()
+        : class_linker->FindDexCache(soa.Self(), *ref.dex_file);
+    resolved_method = dex_cache->GetResolvedMethod(
         ref.dex_method_index, class_linker->GetImagePointerSize());
   } else {
-    resolved_method = class_linker->FindDexCache(soa.Self(), caller_dex_file)->GetResolvedMethod(
+    resolved_method = caller_compilation_unit_.GetDexCache().Get()->GetResolvedMethod(
         method_index, class_linker->GetImagePointerSize());
   }
 
@@ -273,6 +276,7 @@
   const DexFile& callee_dex_file = *resolved_method->GetDexFile();
   uint32_t method_index = resolved_method->GetDexMethodIndex();
   ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
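+  // Use the resolved method's dex cache for the callee's compilation unit and graph builder.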
+  Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
   DexCompilationUnit dex_compilation_unit(
     nullptr,
     caller_compilation_unit_.GetClassLoader(),
@@ -282,7 +286,8 @@
     resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
     method_index,
     resolved_method->GetAccessFlags(),
-    compiler_driver_->GetVerifiedMethod(&callee_dex_file, method_index));
+    compiler_driver_->GetVerifiedMethod(&callee_dex_file, method_index),
+    dex_cache);
 
   bool requires_ctor_barrier = false;
 
@@ -326,7 +331,8 @@
                         resolved_method->GetDexFile(),
                         compiler_driver_,
                         &inline_stats,
-                        resolved_method->GetQuickenedInfo());
+                        resolved_method->GetQuickenedInfo(),
+                        dex_cache);
 
   if (!builder.BuildGraph(*code_item)) {
     VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 806fd7a..69a3e62 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -103,7 +103,7 @@
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
                                           Location::RegisterLocation(kArtMethodRegister));
-      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
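+      // Recording through the codegen associates the stack map with this slow path.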
+      codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
@@ -989,10 +989,7 @@
   DCHECK_ALIGNED(value_offset, 4);
   static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
 
-  // temp cannot overflow because we cannot allocate a String object with size 4GiB or greater.
-  __ add(temp, temp, ShifterOperand(temp));
   __ LoadImmediate(temp1, value_offset);
-  __ add(temp, temp, ShifterOperand(value_offset));
 
   // Loop to compare strings 2 characters at a time starting at the front of the string.
   // Ok to do this because strings with an odd length are zero-padded.
@@ -1002,8 +999,8 @@
   __ cmp(out, ShifterOperand(temp2));
   __ b(&return_false, NE);
   __ add(temp1, temp1, ShifterOperand(sizeof(uint32_t)));
-  __ cmp(temp1, ShifterOperand(temp));
-  __ b(&loop, LO);
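+  // temp holds the remaining length in chars; each iteration compares two chars.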
+  __ subs(temp, temp, ShifterOperand(sizeof(uint32_t) / sizeof(uint16_t)));
+  __ b(&loop, GT);
 
   // Return true and exit the function.
   // If loop does not result in returning false, we return true.
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index a5332ea..0171d69 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -112,7 +112,7 @@
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
                                           LocationFrom(kArtMethodRegister));
-      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+      codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index b7126b2..be076cd 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -141,7 +141,7 @@
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
                                           Location::RegisterLocation(EAX));
-      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+      codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 15fbac1..1f35b59 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -132,7 +132,7 @@
     if (invoke_->IsInvokeStaticOrDirect()) {
       codegen->GenerateStaticOrDirectCall(
           invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(RDI));
-      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
+      codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
     } else {
       UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
       UNREACHABLE();
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 2fc66e6..bc4a663 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -120,13 +120,14 @@
   BuildLoop();
 
   // Populate the loop with instructions: set/get field with different types.
+  NullHandle<mirror::DexCache> dex_cache;
   HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
       parameter_, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile());
+      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
   loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
   HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
       parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
-      false, kUnknownFieldIndex, graph_->GetDexFile());
+      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
   loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
 
   EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -140,13 +141,14 @@
   BuildLoop();
 
   // Populate the loop with instructions: set/get field with same types.
+  NullHandle<mirror::DexCache> dex_cache;
   HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
       parameter_, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile());
+      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
   loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
   HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
       parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile());
+      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
   loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
 
   EXPECT_EQ(get_field->GetBlock(), loop_body_);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2ed2d9a..fef6f21 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_NODES_H_
 #define ART_COMPILER_OPTIMIZING_NODES_H_
 
+#include <array>
 #include <type_traits>
 
 #include "base/arena_containers.h"
@@ -58,6 +59,10 @@
 class SlowPathCode;
 class SsaBuilder;
 
+namespace mirror {
+class DexCache;
+}  // namespace mirror
+
 static const int kDefaultNumberOfBlocks = 8;
 static const int kDefaultNumberOfSuccessors = 2;
 static const int kDefaultNumberOfPredecessors = 2;
@@ -81,7 +86,7 @@
   kCondGE,
 };
 
-class HInstructionList {
+class HInstructionList : public ValueObject {
  public:
   HInstructionList() : first_instruction_(nullptr), last_instruction_(nullptr) {}
 
@@ -127,7 +132,7 @@
 };
 
 // Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject<kArenaAllocMisc> {
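+// Allocated with a dedicated arena tag (instead of kArenaAllocMisc) for memory accounting.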
+class HGraph : public ArenaObject<kArenaAllocGraph> {
  public:
   HGraph(ArenaAllocator* arena,
          const DexFile& dex_file,
@@ -464,7 +469,7 @@
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
 
-class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
+class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
  public:
   HLoopInformation(HBasicBlock* header, HGraph* graph)
       : header_(header),
@@ -562,7 +567,7 @@
 // Stores try/catch information for basic blocks.
 // Note that HGraph is constructed so that catch blocks cannot simultaneously
 // be try blocks.
-class TryCatchInformation : public ArenaObject<kArenaAllocMisc> {
+class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
  public:
   // Try block information constructor.
   explicit TryCatchInformation(const HTryBoundary& try_entry)
@@ -619,7 +624,7 @@
 // as a double linked list. Each block knows its predecessors and
 // successors.
 
-class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
+class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
  public:
   explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
       : graph_(graph),
@@ -1107,7 +1112,7 @@
 template <typename T> class HUseList;
 
 template <typename T>
-class HUseListNode : public ArenaObject<kArenaAllocMisc> {
+class HUseListNode : public ArenaObject<kArenaAllocUseListNode> {
  public:
   HUseListNode* GetPrevious() const { return prev_; }
   HUseListNode* GetNext() const { return next_; }
@@ -1492,7 +1497,7 @@
 };
 
 // A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject<kArenaAllocMisc> {
+class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
  public:
   HEnvironment(ArenaAllocator* arena,
                size_t number_of_vregs,
@@ -1682,7 +1687,7 @@
 
 std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
 
-class HInstruction : public ArenaObject<kArenaAllocMisc> {
+class HInstruction : public ArenaObject<kArenaAllocInstruction> {
  public:
   explicit HInstruction(SideEffects side_effects)
       : previous_(nullptr),
@@ -1731,9 +1736,7 @@
 
   virtual bool NeedsEnvironment() const { return false; }
   virtual uint32_t GetDexPc() const {
-    LOG(FATAL) << "GetDexPc() cannot be called on an instruction that"
-                  " does not need an environment";
-    UNREACHABLE();
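+    // Instructions without an environment report kNoDexPc instead of aborting.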
+    return kNoDexPc;
   }
   virtual bool IsControlFlow() const { return false; }
 
@@ -2038,54 +2041,7 @@
   DISALLOW_COPY_AND_ASSIGN(HBackwardInstructionIterator);
 };
 
-// An embedded container with N elements of type T.  Used (with partial
-// specialization for N=0) because embedded arrays cannot have size 0.
-template<typename T, intptr_t N>
-class EmbeddedArray {
- public:
-  EmbeddedArray() : elements_() {}
-
-  intptr_t GetLength() const { return N; }
-
-  const T& operator[](intptr_t i) const {
-    DCHECK_LT(i, GetLength());
-    return elements_[i];
-  }
-
-  T& operator[](intptr_t i) {
-    DCHECK_LT(i, GetLength());
-    return elements_[i];
-  }
-
-  const T& At(intptr_t i) const {
-    return (*this)[i];
-  }
-
-  void SetAt(intptr_t i, const T& val) {
-    (*this)[i] = val;
-  }
-
- private:
-  T elements_[N];
-};
-
-template<typename T>
-class EmbeddedArray<T, 0> {
- public:
-  intptr_t length() const { return 0; }
-  const T& operator[](intptr_t i) const {
-    UNUSED(i);
-    LOG(FATAL) << "Unreachable";
-    UNREACHABLE();
-  }
-  T& operator[](intptr_t i) {
-    UNUSED(i);
-    LOG(FATAL) << "Unreachable";
-    UNREACHABLE();
-  }
-};
-
-template<intptr_t N>
+template<size_t N>
 class HTemplateInstruction: public HInstruction {
  public:
   HTemplateInstruction<N>(SideEffects side_effects)
@@ -2095,18 +2051,47 @@
   size_t InputCount() const OVERRIDE { return N; }
 
  protected:
-  const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_[i]; }
+  const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE {
+    DCHECK_LT(i, N);
+    return inputs_[i];
+  }
 
   void SetRawInputRecordAt(size_t i, const HUserRecord<HInstruction*>& input) OVERRIDE {
+    DCHECK_LT(i, N);
     inputs_[i] = input;
   }
 
  private:
-  EmbeddedArray<HUserRecord<HInstruction*>, N> inputs_;
+  std::array<HUserRecord<HInstruction*>, N> inputs_;
 
   friend class SsaBuilder;
 };
 
+// HTemplateInstruction specialization for N=0.
+template<>
+class HTemplateInstruction<0> : public HInstruction {
+ public:
+  explicit HTemplateInstruction(SideEffects side_effects) : HInstruction(side_effects) {}
+  virtual ~HTemplateInstruction() {}
+
+  size_t InputCount() const OVERRIDE { return 0; }
+
+ protected:
+  const HUserRecord<HInstruction*> InputRecordAt(size_t i ATTRIBUTE_UNUSED) const OVERRIDE {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
+
+  void SetRawInputRecordAt(size_t i ATTRIBUTE_UNUSED,
+                           const HUserRecord<HInstruction*>& input ATTRIBUTE_UNUSED) OVERRIDE {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
+
+ private:
+  friend class SsaBuilder;
+};
+
 template<intptr_t N>
 class HExpression : public HTemplateInstruction<N> {
  public:
@@ -4020,25 +4005,29 @@
             Primitive::Type field_type,
             bool is_volatile,
             uint32_t index,
-            const DexFile& dex_file)
+            const DexFile& dex_file,
+            Handle<mirror::DexCache> dex_cache)
       : field_offset_(field_offset),
         field_type_(field_type),
         is_volatile_(is_volatile),
         index_(index),
-        dex_file_(dex_file) {}
+        dex_file_(dex_file),
+        dex_cache_(dex_cache) {}
 
   MemberOffset GetFieldOffset() const { return field_offset_; }
   Primitive::Type GetFieldType() const { return field_type_; }
   uint32_t GetFieldIndex() const { return index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
   bool IsVolatile() const { return is_volatile_; }
+  Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
 
  private:
   const MemberOffset field_offset_;
   const Primitive::Type field_type_;
   const bool is_volatile_;
-  uint32_t index_;
+  const uint32_t index_;
   const DexFile& dex_file_;
+  const Handle<mirror::DexCache> dex_cache_;
 };
 
 class HInstanceFieldGet : public HExpression<1> {
@@ -4048,11 +4037,12 @@
                     MemberOffset field_offset,
                     bool is_volatile,
                     uint32_t field_idx,
-                    const DexFile& dex_file)
+                    const DexFile& dex_file,
+                    Handle<mirror::DexCache> dex_cache)
       : HExpression(
             field_type,
             SideEffects::FieldReadOfType(field_type, is_volatile)),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
+        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
     SetRawInputAt(0, value);
   }
 
@@ -4092,10 +4082,11 @@
                     MemberOffset field_offset,
                     bool is_volatile,
                     uint32_t field_idx,
-                    const DexFile& dex_file)
+                    const DexFile& dex_file,
+                    Handle<mirror::DexCache> dex_cache)
       : HTemplateInstruction(
           SideEffects::FieldWriteOfType(field_type, is_volatile)),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file),
+        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
         value_can_be_null_(true) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, value);
@@ -4510,11 +4501,12 @@
                   MemberOffset field_offset,
                   bool is_volatile,
                   uint32_t field_idx,
-                  const DexFile& dex_file)
+                  const DexFile& dex_file,
+                  Handle<mirror::DexCache> dex_cache)
       : HExpression(
             field_type,
             SideEffects::FieldReadOfType(field_type, is_volatile)),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
+        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
     SetRawInputAt(0, cls);
   }
 
@@ -4551,10 +4543,11 @@
                   MemberOffset field_offset,
                   bool is_volatile,
                   uint32_t field_idx,
-                  const DexFile& dex_file)
+                  const DexFile& dex_file,
+                  Handle<mirror::DexCache> dex_cache)
       : HTemplateInstruction(
           SideEffects::FieldWriteOfType(field_type, is_volatile)),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file),
+        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
         value_can_be_null_(true) {
     SetRawInputAt(0, cls);
     SetRawInputAt(1, value);
@@ -4833,7 +4826,7 @@
   DISALLOW_COPY_AND_ASSIGN(HFakeString);
 };
 
-class MoveOperands : public ArenaObject<kArenaAllocMisc> {
+class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
  public:
   MoveOperands(Location source,
                Location destination,
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index f793a65..2f59d4c 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -26,7 +26,7 @@
 /**
  * Abstraction to implement an optimization pass.
  */
-class HOptimization : public ArenaObject<kArenaAllocMisc> {
+class HOptimization : public ArenaObject<kArenaAllocOptimization> {
  public:
   HOptimization(HGraph* graph,
                 const char* pass_name,
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6f251e8..91b03d4 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -25,6 +25,7 @@
 
 #include "art_method-inl.h"
 #include "base/arena_allocator.h"
+#include "base/arena_containers.h"
 #include "base/dumpable.h"
 #include "base/timing_logger.h"
 #include "boolean_simplifier.h"
@@ -68,7 +69,9 @@
  */
 class CodeVectorAllocator FINAL : public CodeAllocator {
  public:
-  CodeVectorAllocator() : size_(0) {}
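+  // The code buffer is arena-allocated so its memory is tracked with the compilation arena.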
+  explicit CodeVectorAllocator(ArenaAllocator* arena)
+      : memory_(arena->Adapter(kArenaAllocCodeBuffer)),
+        size_(0) {}
 
   virtual uint8_t* Allocate(size_t size) {
     size_ = size;
@@ -77,10 +80,10 @@
   }
 
   size_t GetSize() const { return size_; }
-  const std::vector<uint8_t>& GetMemory() const { return memory_; }
+  const ArenaVector<uint8_t>& GetMemory() const { return memory_; }
 
  private:
-  std::vector<uint8_t> memory_;
+  ArenaVector<uint8_t> memory_;
   size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeVectorAllocator);
@@ -243,7 +246,8 @@
                           uint16_t class_def_idx,
                           uint32_t method_idx,
                           jobject class_loader,
-                          const DexFile& dex_file) const OVERRIDE;
+                          const DexFile& dex_file,
+                          Handle<mirror::DexCache> dex_cache) const OVERRIDE;
 
   CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
                              uint32_t access_flags,
@@ -251,7 +255,8 @@
                              uint16_t class_def_idx,
                              uint32_t method_idx,
                              jobject class_loader,
-                             const DexFile& dex_file) const;
+                             const DexFile& dex_file,
+                             Handle<mirror::DexCache> dex_cache) const;
 
   CompiledMethod* JniCompile(uint32_t access_flags,
                              uint32_t method_idx,
@@ -498,7 +503,7 @@
 
 // The stack map we generate must be 4-byte aligned on ARM. Since existing
 // maps are generated alongside these stack maps, we must also align them.
-static ArrayRef<const uint8_t> AlignVectorSize(std::vector<uint8_t>& vector) {
+static ArrayRef<const uint8_t> AlignVectorSize(ArenaVector<uint8_t>& vector) {
   size_t size = vector.size();
   size_t aligned_size = RoundUp(size, 4);
   for (; size < aligned_size; ++size) {
@@ -553,17 +558,17 @@
 
   AllocateRegisters(graph, codegen, pass_observer);
 
-  CodeVectorAllocator allocator;
+  ArenaAllocator* arena = graph->GetArena();
+  CodeVectorAllocator allocator(arena);
+  DefaultSrcMap src_mapping_table;
+  codegen->SetSrcMap(compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()
+                         ? &src_mapping_table
+                         : nullptr);
   codegen->CompileOptimized(&allocator);
 
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
 
-  DefaultSrcMap src_mapping_table;
-  if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
-    codegen->BuildSourceMap(&src_mapping_table);
-  }
-
-  std::vector<uint8_t> stack_map;
+  ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
   codegen->BuildStackMaps(&stack_map);
 
   MaybeRecordStat(MethodCompilationStat::kCompiledOptimized);
@@ -595,20 +600,21 @@
     CompilerDriver* compiler_driver,
     const DexCompilationUnit& dex_compilation_unit,
     PassObserver* pass_observer) const {
-  CodeVectorAllocator allocator;
+  ArenaAllocator* arena = codegen->GetGraph()->GetArena();
+  CodeVectorAllocator allocator(arena);
+  DefaultSrcMap src_mapping_table;
+  codegen->SetSrcMap(compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()
+                         ? &src_mapping_table
+                         : nullptr);
   codegen->CompileBaseline(&allocator);
 
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
 
-  std::vector<uint8_t> mapping_table;
+  ArenaVector<uint8_t> mapping_table(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildMappingTable(&mapping_table);
-  DefaultSrcMap src_mapping_table;
-  if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
-    codegen->BuildSourceMap(&src_mapping_table);
-  }
-  std::vector<uint8_t> vmap_table;
+  ArenaVector<uint8_t> vmap_table(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildVMapTable(&vmap_table);
-  std::vector<uint8_t> gc_map;
+  ArenaVector<uint8_t> gc_map(arena->Adapter(kArenaAllocBaselineMaps));
   codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
 
   MaybeRecordStat(MethodCompilationStat::kCompiledBaseline);
@@ -638,7 +644,8 @@
                                                uint16_t class_def_idx,
                                                uint32_t method_idx,
                                                jobject class_loader,
-                                               const DexFile& dex_file) const {
+                                               const DexFile& dex_file,
+                                               Handle<mirror::DexCache> dex_cache) const {
   UNUSED(invoke_type);
   std::string method_name = PrettyMethod(method_idx, dex_file);
   MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
@@ -674,7 +681,7 @@
   DexCompilationUnit dex_compilation_unit(
     nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item,
     class_def_idx, method_idx, access_flags,
-    compiler_driver->GetVerifiedMethod(&dex_file, method_idx));
+    compiler_driver->GetVerifiedMethod(&dex_file, method_idx), dex_cache);
 
   bool requires_barrier = dex_compilation_unit.IsConstructor()
       && compiler_driver->RequiresConstructorBarrier(Thread::Current(),
@@ -712,10 +719,7 @@
   const uint8_t* interpreter_metadata = nullptr;
   {
     ScopedObjectAccess soa(Thread::Current());
-    StackHandleScope<4> hs(soa.Self());
-    ClassLinker* class_linker = dex_compilation_unit.GetClassLinker();
-    Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
-        soa.Self(), dex_file)));
+    StackHandleScope<1> hs(soa.Self());
     Handle<mirror::ClassLoader> loader(hs.NewHandle(
         soa.Decode<mirror::ClassLoader*>(class_loader)));
     ArtMethod* art_method = compiler_driver->ResolveMethod(
@@ -732,7 +736,8 @@
                         &dex_file,
                         compiler_driver,
                         compilation_stats_.get(),
-                        interpreter_metadata);
+                        interpreter_metadata,
+                        dex_cache);
 
   VLOG(compiler) << "Building " << method_name;
 
@@ -752,6 +757,7 @@
   // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back
   // to Quick.
   bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit();
+  CompiledMethod* compiled_method = nullptr;
   if (run_optimizations_ && can_allocate_registers) {
     VLOG(compiler) << "Optimizing " << method_name;
 
@@ -766,11 +772,11 @@
       }
     }
 
-    return CompileOptimized(graph,
-                            codegen.get(),
-                            compiler_driver,
-                            dex_compilation_unit,
-                            &pass_observer);
+    compiled_method = CompileOptimized(graph,
+                                       codegen.get(),
+                                       compiler_driver,
+                                       dex_compilation_unit,
+                                       &pass_observer);
   } else if (shouldOptimize && can_allocate_registers) {
     LOG(FATAL) << "Could not allocate registers in optimizing compiler";
     UNREACHABLE();
@@ -783,13 +789,20 @@
       MaybeRecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
     }
 
-    return CompileBaseline(codegen.get(),
-                           compiler_driver,
-                           dex_compilation_unit,
-                           &pass_observer);
-  } else {
-    return nullptr;
+    compiled_method = CompileBaseline(codegen.get(),
+                                      compiler_driver,
+                                      dex_compilation_unit,
+                                      &pass_observer);
   }
+
+  if (kArenaAllocatorCountAllocations) {
+    if (arena.BytesAllocated() > 4 * MB) {
+      MemStats mem_stats(arena.GetMemStats());
+      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+    }
+  }
+
+  return compiled_method;
 }
 
 CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
@@ -798,13 +811,14 @@
                                             uint16_t class_def_idx,
                                             uint32_t method_idx,
                                             jobject jclass_loader,
-                                            const DexFile& dex_file) const {
+                                            const DexFile& dex_file,
+                                            Handle<mirror::DexCache> dex_cache) const {
   CompilerDriver* compiler_driver = GetCompilerDriver();
   CompiledMethod* method = nullptr;
   DCHECK(!compiler_driver->GetVerifiedMethod(&dex_file, method_idx)->HasRuntimeThrow());
   if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)) {
      method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
-                         method_idx, jclass_loader, dex_file);
+                         method_idx, jclass_loader, dex_file, dex_cache);
   } else {
     if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
       MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
@@ -817,7 +831,7 @@
     return method;
   }
   method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
-                              jclass_loader, dex_file);
+                              jclass_loader, dex_file, dex_cache);
 
   if (method != nullptr) {
     MaybeRecordStat(MethodCompilationStat::kCompiledQuick);
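
The kArenaAllocatorCountAllocations block added in TryCompile above gates a per-method memory report on a 4 MB threshold: every compiler allocation is tagged with an ArenaAllocKind, and the arena aggregates bytes per kind for the dump. Below is a minimal sketch of that accounting idea, using invented names (CountingArena, ArenaKind) rather than ART's actual ArenaAllocator/MemStats API:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy arena that tags each allocation with a kind and aggregates bytes per
// kind, so an oversized compilation can be broken down by consumer.
enum ArenaKind { kKindMisc, kKindInstruction, kKindStackMaps, kNumKinds };

class CountingArena {
 public:
  void* Alloc(size_t bytes, ArenaKind kind) {
    bytes_per_kind_[kind] += bytes;
    total_ += bytes;
    storage_.emplace_back(bytes);  // Toy backing store, one buffer per call.
    return storage_.back().data();
  }
  size_t BytesAllocated() const { return total_; }
  // Mirrors the "dump MemStats if over 4 MB" pattern in the hunk above.
  void DumpIfLargerThan(size_t threshold, std::ostream& os) const {
    if (total_ <= threshold) {
      return;
    }
    static const char* const kNames[kNumKinds] = {"Misc", "Instruction", "StackMaps"};
    for (int k = 0; k < kNumKinds; ++k) {
      os << kNames[k] << ": " << bytes_per_kind_[k] << " bytes\n";
    }
  }
 private:
  size_t bytes_per_kind_[kNumKinds] = {};
  size_t total_ = 0;
  std::vector<std::vector<uint8_t>> storage_;
};

A caller can then mirror the block above: allocate through the arena during compilation, and call DumpIfLargerThan(4 * 1024 * 1024, ...) once the method is done.
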
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index ef753ed..0384e46 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -434,8 +434,7 @@
 
   ScopedObjectAccess soa(Thread::Current());
   ClassLinker* cl = Runtime::Current()->GetClassLinker();
-  mirror::DexCache* dex_cache = cl->FindDexCache(soa.Self(), info.GetDexFile(), false);
-  ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), dex_cache);
+  ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), info.GetDexCache().Get());
   // TODO: There are certain cases where we can't resolve the field.
   // b/21914925 is open to keep track of a repro case for this issue.
   mirror::Class* klass = (field == nullptr) ? nullptr : field->GetType<false>();
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index b7da362..965a8df 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -472,6 +472,7 @@
                                   HInstruction** input2) {
   HGraph* graph = CreateGraph(allocator);
   HBasicBlock* entry = new (allocator) HBasicBlock(graph);
+  NullHandle<mirror::DexCache> dex_cache;
   graph->AddBlock(entry);
   graph->SetEntryBlock(entry);
   HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
@@ -486,7 +487,8 @@
                                                          MemberOffset(22),
                                                          false,
                                                          kUnknownFieldIndex,
-                                                         graph->GetDexFile());
+                                                         graph->GetDexFile(),
+                                                         dex_cache);
   block->AddInstruction(test);
   block->AddInstruction(new (allocator) HIf(test));
   HBasicBlock* then = new (allocator) HBasicBlock(graph);
@@ -510,13 +512,15 @@
                                               MemberOffset(42),
                                               false,
                                               kUnknownFieldIndex,
-                                              graph->GetDexFile());
+                                              graph->GetDexFile(),
+                                              dex_cache);
   *input2 = new (allocator) HInstanceFieldGet(parameter,
                                             Primitive::kPrimInt,
                                             MemberOffset(42),
                                             false,
                                             kUnknownFieldIndex,
-                                            graph->GetDexFile());
+                                            graph->GetDexFile(),
+                                            dex_cache);
   then->AddInstruction(*input1);
   else_->AddInstruction(*input2);
   join->AddInstruction(new (allocator) HExit());
@@ -613,6 +617,7 @@
                                 HInstruction** field,
                                 HInstruction** ret) {
   HGraph* graph = CreateGraph(allocator);
+  NullHandle<mirror::DexCache> dex_cache;
   HBasicBlock* entry = new (allocator) HBasicBlock(graph);
   graph->AddBlock(entry);
   graph->SetEntryBlock(entry);
@@ -628,7 +633,8 @@
                                              MemberOffset(42),
                                              false,
                                              kUnknownFieldIndex,
-                                             graph->GetDexFile());
+                                             graph->GetDexFile(),
+                                             dex_cache);
   block->AddInstruction(*field);
   *ret = new (allocator) HReturn(*field);
   block->AddInstruction(*ret);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 07cf88c..f9520be 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -309,6 +309,8 @@
   UsageError("      stripped using standard command line tools such as strip or objcopy.");
   UsageError("      (enabled by default in debug builds, disabled by default otherwise)");
   UsageError("");
+  UsageError("  --debuggable: Produce debuggable code. Implies --generate-debug-info.");
+  UsageError("");
   UsageError("  --no-generate-debug-info: Do not generate debug information for native debugging.");
   UsageError("");
   UsageError("  --runtime-arg <argument>: used to specify various arguments for the runtime,");
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 2353dcf..aa4cf55 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1605,14 +1605,11 @@
       // Since FlushAllocStack() above resets the (active) allocation
       // stack. Need to revoke the thread-local allocation stacks that
       // point into it.
-      {
-        self->TransitionFromRunnableToSuspended(kNative);
-        ThreadList* thread_list = Runtime::Current()->GetThreadList();
-        thread_list->SuspendAll(__FUNCTION__);
-        heap->RevokeAllThreadLocalAllocationStacks(self);
-        thread_list->ResumeAll();
-        self->TransitionFromSuspendedToRunnable();
-      }
+      ScopedThreadSuspension sts(self, kNative);
+      ThreadList* thread_list = Runtime::Current()->GetThreadList();
+      thread_list->SuspendAll(__FUNCTION__);
+      heap->RevokeAllThreadLocalAllocationStacks(self);
+      thread_list->ResumeAll();
     }
     {
       // Mark dex caches.
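
The rewrite above is the template for many hunks in this change: the manual TransitionFromRunnableToSuspended / TransitionFromSuspendedToRunnable pair becomes a ScopedThreadSuspension whose destructor restores the runnable state on every exit path. A minimal model of such a guard, assuming a toy Thread with just the two transition methods (the real class also asserts lock state and returns the previous state):

// RAII guard: construction leaves the runnable state, destruction re-enters
// it. Scope braces, as in the hunk above, bound the suspended region.
enum ThreadState { kRunnable, kSuspended, kNative };

class Thread {
 public:
  void TransitionFromRunnableToSuspended(ThreadState new_state) { state_ = new_state; }
  void TransitionFromSuspendedToRunnable() { state_ = kRunnable; }
 private:
  ThreadState state_ = kRunnable;
};

class ScopedThreadSuspension {
 public:
  ScopedThreadSuspension(Thread* self, ThreadState suspended_state) : self_(self) {
    self_->TransitionFromRunnableToSuspended(suspended_state);
  }
  ~ScopedThreadSuspension() { self_->TransitionFromSuspendedToRunnable(); }
 private:
  Thread* const self_;
};

Unlike the old paired calls, an early return or LOG(FATAL) inside the scope can no longer leave the thread stuck in the suspended state.
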
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 40e2cd3..4a45f49 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -32,20 +32,17 @@
 
   static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
       NO_THREAD_SAFETY_ANALYSIS {
-    Runtime* r = Runtime::Current();
+    Runtime* const runtime = Runtime::Current();
+    Thread* const self = Thread::Current();
+    ScopedObjectAccess soa(self);  // So we can create callee-save methods.
 
-    Thread* t = Thread::Current();
-    t->TransitionFromSuspendedToRunnable();  // So we can create callee-save methods.
-
-    r->SetInstructionSet(isa);
-    ArtMethod* save_method = r->CreateCalleeSaveMethod();
-    r->SetCalleeSaveMethod(save_method, type);
+    runtime->SetInstructionSet(isa);
+    ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
+    runtime->SetCalleeSaveMethod(save_method, type);
     QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
     EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
         << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
         << frame_info.FpSpillMask() << std::dec;
-
-    t->TransitionFromRunnableToSuspended(ThreadState::kNative);  // So we can shut down.
   }
 };
 
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index e5832e1..3a4bccd 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -32,32 +32,43 @@
 
 template <bool kCount>
 const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
-  "Misc       ",
-  "BasicBlock ",
-  "BBList     ",
-  "BBPreds    ",
-  "DfsPreOrd  ",
-  "DfsPostOrd ",
-  "DomPostOrd ",
-  "TopoOrd    ",
-  "Lowering   ",
-  "LIR        ",
-  "LIR masks  ",
-  "SwitchTbl  ",
-  "FillArray  ",
-  "SlowPaths  ",
-  "MIR        ",
-  "DataFlow   ",
-  "GrowList   ",
-  "GrowBitMap ",
-  "SSA2Dalvik ",
-  "Dalvik2SSA ",
-  "DebugInfo  ",
-  "Successor  ",
-  "RegAlloc   ",
-  "Data       ",
-  "Preds      ",
-  "STL        ",
+  "Misc         ",
+  "BBList       ",
+  "BBPreds      ",
+  "DfsPreOrd    ",
+  "DfsPostOrd   ",
+  "DomPostOrd   ",
+  "TopoOrd      ",
+  "Lowering     ",
+  "LIR          ",
+  "LIR masks    ",
+  "SwitchTbl    ",
+  "FillArray    ",
+  "SlowPaths    ",
+  "MIR          ",
+  "DataFlow     ",
+  "GrowList     ",
+  "GrowBitMap   ",
+  "SSA2Dalvik   ",
+  "Dalvik2SSA   ",
+  "DebugInfo    ",
+  "Successor    ",
+  "RegAlloc     ",
+  "Data         ",
+  "Preds        ",
+  "STL          ",
+  "Graph        ",
+  "BasicBlock   ",
+  "Instruction  ",
+  "LoopInfo     ",
+  "TryCatchInf  ",
+  "UseListNode  ",
+  "Environment  ",
+  "MoveOperands ",
+  "CodeBuffer   ",
+  "StackMaps    ",
+  "BaselineMaps ",
+  "Optimization ",
 };
 
 template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 05c66f0..af2bfbc 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -43,7 +43,6 @@
 // Type of allocation for memory tuning.
 enum ArenaAllocKind {
   kArenaAllocMisc,
-  kArenaAllocBB,
   kArenaAllocBBList,
   kArenaAllocBBPredecessors,
   kArenaAllocDfsPreOrder,
@@ -68,6 +67,18 @@
   kArenaAllocData,
   kArenaAllocPredecessors,
   kArenaAllocSTL,
+  kArenaAllocGraph,
+  kArenaAllocBasicBlock,
+  kArenaAllocInstruction,
+  kArenaAllocLoopInfo,
+  kArenaAllocTryCatchInfo,
+  kArenaAllocUseListNode,
+  kArenaAllocEnvironment,
+  kArenaAllocMoveOperands,
+  kArenaAllocCodeBuffer,
+  kArenaAllocStackMaps,
+  kArenaAllocBaselineMaps,
+  kArenaAllocOptimization,
   kNumArenaAllocKinds
 };
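
This enum and the kAllocNames table in arena_allocator.cc are parallel arrays: the stats dump indexes the names by kind, so the two must stay in step. A compile-time guard for that invariant is cheap; the sketch below is self-contained and illustrative (ART keeps an equivalent check next to the real table):

#include <cstddef>

// Fails to compile if the name table and the enum drift apart.
enum AllocKind { kKindMisc, kKindGraph, kKindStackMaps, kNumAllocKinds };

static const char* const kKindNames[] = {"Misc", "Graph", "StackMaps"};

static_assert(sizeof(kKindNames) / sizeof(kKindNames[0]) ==
                  static_cast<std::size_t>(kNumAllocKinds),
              "kKindNames must have one entry per AllocKind");
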
 
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index a7aafdf..810c1c4 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -76,6 +76,7 @@
 class ArenaAllocatorAdapterKindImpl {
  public:
   explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) : kind_(kind) { }
+  ArenaAllocatorAdapterKindImpl(const ArenaAllocatorAdapterKindImpl&) = default;
   ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl&) = default;
   ArenaAllocKind Kind() { return kind_; }
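
Declaring the defaulted copy constructor matters once the copy-assignment operator above it is user-declared: generating the implicit copy constructor is then deprecated, and STL containers copy their allocator (and with it this kind tag), so the adapter must stay explicitly copyable. A self-contained illustration with an invented TaggedAllocator:

#include <cstdlib>
#include <vector>

// Toy stateful allocator carrying a "kind" tag. Copying the container copies
// the allocator, so the tag travels with it.
template <typename T>
class TaggedAllocator {
 public:
  using value_type = T;
  explicit TaggedAllocator(int kind) : kind_(kind) {}
  TaggedAllocator(const TaggedAllocator&) = default;
  template <typename U>
  TaggedAllocator(const TaggedAllocator<U>& other) : kind_(other.kind()) {}
  T* allocate(std::size_t n) { return static_cast<T*>(std::malloc(n * sizeof(T))); }
  void deallocate(T* p, std::size_t) { std::free(p); }
  int kind() const { return kind_; }
 private:
  int kind_;
};

template <typename T, typename U>
bool operator==(const TaggedAllocator<T>& a, const TaggedAllocator<U>& b) {
  return a.kind() == b.kind();
}
template <typename T, typename U>
bool operator!=(const TaggedAllocator<T>& a, const TaggedAllocator<U>& b) {
  return !(a == b);
}

int main() {
  std::vector<int, TaggedAllocator<int>> v(TaggedAllocator<int>(7));
  std::vector<int, TaggedAllocator<int>> copy = v;  // Allocator copied here.
  return copy.get_allocator().kind() == 7 ? 0 : 1;
}
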
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fc1127c..0bc4249 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2583,15 +2583,12 @@
                                                   bool allow_failure) {
   // Search assuming uniqueness of the dex file.
   JavaVMExt* const vm = self->GetJniEnv()->vm;
-  {
-    MutexLock mu(self, vm->WeakGlobalsLock());
-    for (jobject weak_root : dex_caches_) {
-      DCHECK_EQ(GetIndirectRefKind(weak_root), kWeakGlobal);
-      mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
-          vm->DecodeWeakGlobalLocked(self, weak_root));
-      if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
-        return dex_cache;
-      }
+  for (jobject weak_root : dex_caches_) {
+    DCHECK_EQ(GetIndirectRefKind(weak_root), kWeakGlobal);
+    mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+        vm->DecodeWeakGlobal(self, weak_root));
+    if (dex_cache != nullptr && dex_cache->GetDexFile() == &dex_file) {
+      return dex_cache;
     }
   }
   if (allow_failure) {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 8afb968..e1aca2f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -736,14 +736,14 @@
   // Ensure all threads are suspended while we read objects' lock words.
   Thread* self = Thread::Current();
   CHECK_EQ(self->GetState(), kRunnable);
-  self->TransitionFromRunnableToSuspended(kSuspended);
-  Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
 
-  MonitorInfo monitor_info(o);
-
-  Runtime::Current()->GetThreadList()->ResumeAll();
-  self->TransitionFromSuspendedToRunnable();
-
+  MonitorInfo monitor_info;
+  {
+    ScopedThreadSuspension sts(self, kSuspended);
+    Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
+    monitor_info = MonitorInfo(o);
+    Runtime::Current()->GetThreadList()->ResumeAll();
+  }
   if (monitor_info.owner_ != nullptr) {
     expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
   } else {
@@ -3148,7 +3148,7 @@
     }
   }
   CHECK_EQ(self->GetState(), kRunnable);
-  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
+  ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
   // We need to suspend mutator threads first.
   Runtime* const runtime = Runtime::Current();
   runtime->GetThreadList()->SuspendAll(__FUNCTION__);
@@ -3164,7 +3164,6 @@
   }
   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
   runtime->GetThreadList()->ResumeAll();
-  self->TransitionFromSuspendedToRunnable();
 }
 
 static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
@@ -3493,9 +3492,9 @@
 
 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
 // cause suspension if the thread is the current thread.
-class ScopedThreadSuspension {
+class ScopedDebuggerThreadSuspension {
  public:
-  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
+  ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
       REQUIRES(!Locks::thread_list_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_) :
       thread_(nullptr),
@@ -3508,13 +3507,14 @@
       if (thread_ == soa.Self()) {
         self_suspend_ = true;
       } else {
-        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
-        jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
-        bool timed_out;
-        ThreadList* thread_list = Runtime::Current()->GetThreadList();
-        Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
-                                                                    &timed_out);
-        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
+        Thread* suspended_thread;
+        {
+          ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
+          jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
+          bool timed_out;
+          ThreadList* const thread_list = Runtime::Current()->GetThreadList();
+          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
+        }
         if (suspended_thread == nullptr) {
           // Thread terminated from under us while suspending.
           error_ = JDWP::ERR_INVALID_THREAD;
@@ -3534,7 +3534,7 @@
     return error_;
   }
 
-  ~ScopedThreadSuspension() {
+  ~ScopedDebuggerThreadSuspension() {
     if (other_suspend_) {
       Runtime::Current()->GetThreadList()->Resume(thread_, true);
     }
@@ -3550,7 +3550,7 @@
 JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
                                    JDWP::JdwpStepDepth step_depth) {
   Thread* self = Thread::Current();
-  ScopedThreadSuspension sts(self, thread_id);
+  ScopedDebuggerThreadSuspension sts(self, thread_id);
   if (sts.GetError() != JDWP::ERR_NONE) {
     return sts.GetError();
   }
@@ -3988,10 +3988,9 @@
 
   // Suspend other threads if the invoke is not single-threaded.
   if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
-    soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+    ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension);
     VLOG(jdwp) << "      Suspending all threads";
     Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
-    soa.Self()->TransitionFromSuspendedToRunnable();
   }
 
   VLOG(jdwp) << "  --> returned " << result_tag
@@ -4657,7 +4656,7 @@
         context.SetChunkOverhead(0);
         // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
         // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
-        self->TransitionFromRunnableToSuspended(kSuspended);
+        ScopedThreadSuspension sts(self, kSuspended);
         ThreadList* tl = Runtime::Current()->GetThreadList();
         tl->SuspendAll(__FUNCTION__);
         {
@@ -4665,7 +4664,6 @@
           space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
         }
         tl->ResumeAll();
-        self->TransitionFromSuspendedToRunnable();
       } else if (space->IsBumpPointerSpace()) {
         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         context.SetChunkOverhead(0);
@@ -4673,15 +4671,16 @@
         HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
       } else if (space->IsRegionSpace()) {
         heap->IncrementDisableMovingGC(self);
-        self->TransitionFromRunnableToSuspended(kSuspended);
-        ThreadList* tl = Runtime::Current()->GetThreadList();
-        tl->SuspendAll(__FUNCTION__);
-        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-        context.SetChunkOverhead(0);
-        space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
-        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
-        tl->ResumeAll();
-        self->TransitionFromSuspendedToRunnable();
+        {
+          ScopedThreadSuspension sts(self, kSuspended);
+          ThreadList* tl = Runtime::Current()->GetThreadList();
+          tl->SuspendAll(__FUNCTION__);
+          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+          context.SetChunkOverhead(0);
+          space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
+          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
+          tl->ResumeAll();
+        }
         heap->DecrementDisableMovingGC(self);
       } else {
         UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 85274cd..4cb9a3b 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -1132,7 +1132,7 @@
   return ProcessAnnotationSet(field_class, annotation_set, kDexVisibilityRuntime);
 }
 
-mirror::ObjectArray<mirror::Object>* DexFile::GetSignatureAnnotationForField(ArtField* field)
+mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForField(ArtField* field)
     const {
   const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   if (annotation_set == nullptr) {
@@ -1255,7 +1255,7 @@
   return ProcessAnnotationSet(method_class, annotation_set, kDexVisibilityRuntime);
 }
 
-mirror::ObjectArray<mirror::Object>* DexFile::GetExceptionTypesForMethod(ArtMethod* method) const {
+mirror::ObjectArray<mirror::Class>* DexFile::GetExceptionTypesForMethod(ArtMethod* method) const {
   const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1291,7 +1291,7 @@
   Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
   const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
       method_class, annotation_set, kDexVisibilityRuntime, annotation_class);
-  return (annotation_item != nullptr);
+  return annotation_item != nullptr;
 }
 
 const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForClass(Handle<mirror::Class> klass)
@@ -1319,6 +1319,153 @@
   return ProcessAnnotationSet(klass, annotation_set, kDexVisibilityRuntime);
 }
 
+mirror::ObjectArray<mirror::Class>* DexFile::GetDeclaredClasses(Handle<mirror::Class> klass) const {
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+  if (annotation_set == nullptr) {
+    return nullptr;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
+      annotation_set, "Ldalvik/annotation/MemberClasses;", kDexVisibilitySystem);
+  if (annotation_item == nullptr) {
+    return nullptr;
+  }
+  StackHandleScope<1> hs(Thread::Current());
+  mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+  Handle<mirror::Class> class_array_class(hs.NewHandle(
+      Runtime::Current()->GetClassLinker()->FindArrayClass(hs.Self(), &class_class)));
+  if (class_array_class.Get() == nullptr) {
+    return nullptr;
+  }
+  mirror::Object* obj = GetAnnotationValue(
+      klass, annotation_item, "value", class_array_class, kDexAnnotationArray);
+  if (obj == nullptr) {
+    return nullptr;
+  }
+  return obj->AsObjectArray<mirror::Class>();
+}
+
+mirror::Class* DexFile::GetDeclaringClass(Handle<mirror::Class> klass) const {
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+  if (annotation_set == nullptr) {
+    return nullptr;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
+      annotation_set, "Ldalvik/annotation/EnclosingClass;", kDexVisibilitySystem);
+  if (annotation_item == nullptr) {
+    return nullptr;
+  }
+  mirror::Object* obj = GetAnnotationValue(
+      klass, annotation_item, "value", NullHandle<mirror::Class>(), kDexAnnotationType);
+  if (obj == nullptr) {
+    return nullptr;
+  }
+  return obj->AsClass();
+}
+
+mirror::Class* DexFile::GetEnclosingClass(Handle<mirror::Class> klass) const {
+  mirror::Class* declaring_class = GetDeclaringClass(klass);
+  if (declaring_class != nullptr) {
+    return declaring_class;
+  }
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+  if (annotation_set == nullptr) {
+    return nullptr;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
+      annotation_set, "Ldalvik/annotation/EnclosingMethod;", kDexVisibilitySystem);
+  if (annotation_item == nullptr) {
+    return nullptr;
+  }
+  const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "value");
+  if (annotation == nullptr) {
+    return nullptr;
+  }
+  AnnotationValue annotation_value;
+  if (!ProcessAnnotationValue(
+      klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllRaw)) {
+    return nullptr;
+  }
+  if (annotation_value.type_ != kDexAnnotationMethod) {
+    return nullptr;
+  }
+  StackHandleScope<2> hs(Thread::Current());
+  Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
+  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
+  ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType(
+      klass->GetDexFile(), annotation_value.value_.GetI(), dex_cache, class_loader);
+  if (method == nullptr) {
+    return nullptr;
+  }
+  return method->GetDeclaringClass();
+}
+
+mirror::Object* DexFile::GetEnclosingMethod(Handle<mirror::Class> klass) const {
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+  if (annotation_set == nullptr) {
+    return nullptr;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
+      annotation_set, "Ldalvik/annotation/EnclosingMethod;", kDexVisibilitySystem);
+  if (annotation_item == nullptr) {
+    return nullptr;
+  }
+  return GetAnnotationValue(
+      klass, annotation_item, "value", NullHandle<mirror::Class>(), kDexAnnotationMethod);
+}
+
+bool DexFile::GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const {
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+  if (annotation_set == nullptr) {
+    return false;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
+      annotation_set, "Ldalvik/annotation/InnerClass;", kDexVisibilitySystem);
+  if (annotation_item == nullptr) {
+    return false;
+  }
+  const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "name");
+  if (annotation == nullptr) {
+    return false;
+  }
+  AnnotationValue annotation_value;
+  if (!ProcessAnnotationValue(
+      klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllObjects)) {
+    return false;
+  }
+  if (annotation_value.type_ != kDexAnnotationNull &&
+      annotation_value.type_ != kDexAnnotationString) {
+    return false;
+  }
+  *name = down_cast<mirror::String*>(annotation_value.value_.GetL());
+  return true;
+}
+
+bool DexFile::GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const {
+  const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+  if (annotation_set == nullptr) {
+    return false;
+  }
+  const AnnotationItem* annotation_item = SearchAnnotationSet(
+      annotation_set, "Ldalvik/annotation/InnerClass;", kDexVisibilitySystem);
+  if (annotation_item == nullptr) {
+    return false;
+  }
+  const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "accessFlags");
+  if (annotation == nullptr) {
+    return false;
+  }
+  AnnotationValue annotation_value;
+  if (!ProcessAnnotationValue(
+      klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllRaw)) {
+    return false;
+  }
+  if (annotation_value.type_ != kDexAnnotationInt) {
+    return false;
+  }
+  *flags = annotation_value.value_.GetI();
+  return true;
+}
+
 bool DexFile::IsClassAnnotationPresent(Handle<mirror::Class> klass,
                                        Handle<mirror::Class> annotation_class) const {
   const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
@@ -1327,7 +1474,7 @@
   }
   const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
       klass, annotation_set, kDexVisibilityRuntime, annotation_class);
-  return (annotation_item != nullptr);
+  return annotation_item != nullptr;
 }
 
 mirror::Object* DexFile::CreateAnnotationMember(Handle<mirror::Class> klass,
@@ -1444,7 +1591,7 @@
   return annotation_value.value_.GetL();
 }
 
-mirror::ObjectArray<mirror::Object>* DexFile::GetSignatureValue(Handle<mirror::Class> klass,
+mirror::ObjectArray<mirror::String>* DexFile::GetSignatureValue(Handle<mirror::Class> klass,
     const AnnotationSetItem* annotation_set) const {
   StackHandleScope<1> hs(Thread::Current());
   const AnnotationItem* annotation_item =
@@ -1455,15 +1602,18 @@
   mirror::Class* string_class = mirror::String::GetJavaLangString();
   Handle<mirror::Class> string_array_class(hs.NewHandle(
       Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class)));
+  if (string_array_class.Get() == nullptr) {
+    return nullptr;
+  }
   mirror::Object* obj =
       GetAnnotationValue(klass, annotation_item, "value", string_array_class, kDexAnnotationArray);
   if (obj == nullptr) {
     return nullptr;
   }
-  return obj->AsObjectArray<mirror::Object>();
+  return obj->AsObjectArray<mirror::String>();
 }
 
-mirror::ObjectArray<mirror::Object>* DexFile::GetThrowsValue(Handle<mirror::Class> klass,
+mirror::ObjectArray<mirror::Class>* DexFile::GetThrowsValue(Handle<mirror::Class> klass,
     const AnnotationSetItem* annotation_set) const {
   StackHandleScope<1> hs(Thread::Current());
   const AnnotationItem* annotation_item =
@@ -1474,12 +1624,15 @@
   mirror::Class* class_class = mirror::Class::GetJavaLangClass();
   Handle<mirror::Class> class_array_class(hs.NewHandle(
       Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class)));
+  if (class_array_class.Get() == nullptr) {
+    return nullptr;
+  }
   mirror::Object* obj =
       GetAnnotationValue(klass, annotation_item, "value", class_array_class, kDexAnnotationArray);
   if (obj == nullptr) {
     return nullptr;
   }
-  return obj->AsObjectArray<mirror::Object>();
+  return obj->AsObjectArray<mirror::Class>();
 }
 
 mirror::ObjectArray<mirror::Object>* DexFile::ProcessAnnotationSet(Handle<mirror::Class> klass,
@@ -1511,6 +1664,8 @@
     if (annotation_obj != nullptr) {
       result->SetWithoutChecks<false>(dest_index, annotation_obj);
       ++dest_index;
+    } else if (self->IsExceptionPending()) {
+      return nullptr;
     }
   }
 
@@ -1520,6 +1675,10 @@
 
   mirror::ObjectArray<mirror::Object>* trimmed_result =
       mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
+  if (trimmed_result == nullptr) {
+    return nullptr;
+  }
+
   for (uint32_t i = 0; i < dest_index; ++i) {
     mirror::Object* obj = result->GetWithoutChecks(i);
     trimmed_result->SetWithoutChecks<false>(i, obj);
@@ -1537,6 +1696,9 @@
       soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
   mirror::Class* annotation_array_array_class =
       Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
+  if (annotation_array_array_class == nullptr) {
+    return nullptr;
+  }
   Handle<mirror::ObjectArray<mirror::Object>> annotation_array_array(hs.NewHandle(
       mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_array_class, size)));
   if (annotation_array_array.Get() == nullptr) {
@@ -1629,9 +1791,15 @@
             klass->GetDexFile(), index, klass.Get());
         set_object = true;
         if (element_object == nullptr) {
-          self->ClearException();
-          const char* msg = StringByTypeIdx(index);
-          self->ThrowNewException("Ljava/lang/TypeNotPresentException;", msg);
+          CHECK(self->IsExceptionPending());
+          if (result_style == kAllObjects) {
+            const char* msg = StringByTypeIdx(index);
+            self->ThrowNewWrappedException("Ljava/lang/TypeNotPresentException;", msg);
+            element_object = self->GetException();
+            self->ClearException();
+          } else {
+            return false;
+          }
         }
       }
       break;
@@ -1835,8 +2003,10 @@
       soa.Decode<mirror::Class*>(WellKnownClasses::libcore_reflect_AnnotationMember);
   mirror::Class* annotation_member_array_class =
       class_linker->FindArrayClass(self, &annotation_member_class);
+  if (annotation_member_array_class == nullptr) {
+    return nullptr;
+  }
   mirror::ObjectArray<mirror::Object>* element_array = nullptr;
-
   if (size > 0) {
     element_array =
         mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 8928321..98d4e59 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -931,7 +931,7 @@
       const SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) const
       SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::ObjectArray<mirror::Object>* GetSignatureAnnotationForField(ArtField* field) const
+  mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) const
       SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) const
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -946,7 +946,7 @@
       const SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) const
       SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::ObjectArray<mirror::Object>* GetExceptionTypesForMethod(ArtMethod* method) const
+  mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) const
       SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -960,6 +960,18 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) const
       SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class)
       const SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -983,11 +995,11 @@
                                      Handle<mirror::Class> array_class,
                                      uint32_t expected_type) const
       SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::ObjectArray<mirror::Object>* GetSignatureValue(Handle<mirror::Class> klass,
+  mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass,
                                                          const AnnotationSetItem* annotation_set)
       const SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::ObjectArray<mirror::Object>* GetThrowsValue(Handle<mirror::Class> klass,
-                                                      const AnnotationSetItem* annotation_set) const
+  mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass,
+                                                     const AnnotationSetItem* annotation_set) const
       SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(Handle<mirror::Class> klass,
                                                             const AnnotationSetItem* annotation_set,
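
All of the new accessors declared above share a contract: call them under the mutator lock with a Handle to the class, and receive either a nullable mirror pointer or a bool plus out-parameter. A hedged caller-side sketch (the surrounding setup is assumed, since these only run inside the runtime):

// Assumes we hold the mutator lock and h_klass is a Handle<mirror::Class>
// for a class defined in *dex_file.
uint32_t access_flags = 0;
if (dex_file->GetInnerClassFlags(h_klass, &access_flags)) {
  // h_klass is an inner class; access_flags holds its InnerClass flags.
}
mirror::String* simple_name = nullptr;
if (dex_file->GetInnerClass(h_klass, &simple_name)) {
  // simple_name is the inner-class name, or null for an anonymous class
  // (the InnerClass annotation stores a null name in that case).
}
mirror::Class* outer = dex_file->GetEnclosingClass(h_klass);
// outer is null when h_klass is a top-level class.
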
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 5cdf967..0b36694 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -36,14 +36,13 @@
     Runtime* r = Runtime::Current();
 
     Thread* t = Thread::Current();
-    t->TransitionFromSuspendedToRunnable();  // So we can create callee-save methods.
+
+    ScopedObjectAccess soa(t);
 
     r->SetInstructionSet(isa);
     ArtMethod* save_method = r->CreateCalleeSaveMethod();
     r->SetCalleeSaveMethod(save_method, type);
 
-    t->TransitionFromRunnableToSuspended(ThreadState::kNative);  // So we can shut down.
-
     return save_method;
   }
 
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index de4b3f4..33d756e 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -108,8 +108,8 @@
   const DexFile* dex_;
 
   std::vector<uint8_t> fake_code_;
-  Leb128EncodingVector fake_mapping_data_;
-  Leb128EncodingVector fake_vmap_table_data_;
+  Leb128EncodingVector<> fake_mapping_data_;
+  Leb128EncodingVector<> fake_vmap_table_data_;
   std::vector<uint8_t> fake_gc_map_;
   std::vector<uint8_t> fake_header_code_and_maps_;
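
Leb128EncodingVector is now a class template (hence the new <> at the use sites), presumably so the backing vector type can be parameterized, e.g. with an arena-backed container, in line with the arena changes elsewhere in this merge. For reference, a minimal ULEB128 encoder over a plain byte vector; this is the format these fixtures emit:

#include <cstdint>
#include <vector>

// ULEB128: 7 payload bits per byte, least-significant group first; the high
// bit of each byte marks that another byte follows.
void PushBackUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7fu;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80u;  // Continuation bit.
    }
    out->push_back(byte);
  } while (value != 0);
}

int main() {
  std::vector<uint8_t> buf;
  PushBackUnsignedLeb128(&buf, 624485);  // Encodes as 0xe5 0x8e 0x26.
  return buf.size() == 3 && buf[0] == 0xe5 && buf[1] == 0x8e && buf[2] == 0x26 ? 0 : 1;
}
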
 
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 16c9354..369e408 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -167,14 +167,21 @@
 }
 
 void AllocRecordObjectMap::AllowNewAllocationRecords() {
+  CHECK(!kUseReadBarrier);
   allow_new_record_ = true;
   new_record_condition_.Broadcast(Thread::Current());
 }
 
 void AllocRecordObjectMap::DisallowNewAllocationRecords() {
+  CHECK(!kUseReadBarrier);
   allow_new_record_ = false;
 }
 
+void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
+  CHECK(kUseReadBarrier);
+  new_record_condition_.Broadcast(Thread::Current());
+}
+
 struct AllocRecordStackVisitor : public StackVisitor {
   AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
       SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 0a4f532..ffdfd31 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -277,6 +277,9 @@
   void AllowNewAllocationRecords()
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(Locks::alloc_tracker_lock_);
+  void BroadcastForNewAllocationRecords()
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(Locks::alloc_tracker_lock_);
 
   // TODO: Is there a better way to hide the entries_'s type?
   EntryList::iterator Begin()
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index a5bc60a..57af959 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -499,7 +499,8 @@
     DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
         << thread->GetState() << " thread " << thread << " self " << self;
     // Disable the thread-local is_gc_marking flag.
-    DCHECK(thread->GetIsGcMarking());
+    // Note: a thread that has just started right before this checkpoint may already have this
+    // flag set to false, which is OK.
     thread->SetIsGcMarking(false);
     // If thread is a running mutator, then act on behalf of the garbage collector.
     // See the code in ThreadList::RunCheckpoint.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index aec8d63..9292c7a 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -887,13 +887,14 @@
     // easily broken. Visit objects while GC isn't running by using
     // IncrementDisableMovingGC() and threads are suspended.
     IncrementDisableMovingGC(self);
-    self->TransitionFromRunnableToSuspended(kWaitingForVisitObjects);
-    ThreadList* tl = Runtime::Current()->GetThreadList();
-    tl->SuspendAll(__FUNCTION__);
-    VisitObjectsInternalRegionSpace(callback, arg);
-    VisitObjectsInternal(callback, arg);
-    tl->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
+    {
+      ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
+      ThreadList* tl = Runtime::Current()->GetThreadList();
+      tl->SuspendAll(__FUNCTION__);
+      VisitObjectsInternalRegionSpace(callback, arg);
+      VisitObjectsInternal(callback, arg);
+      tl->ResumeAll();
+    }
     DecrementDisableMovingGC(self);
   } else {
     // GCs can move objects, so don't allow this.
@@ -1900,6 +1901,11 @@
   CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
 }
 
+bool Heap::SupportHomogeneousSpaceCompactAndCollectorTransitions() const {
+  return main_space_backup_.get() != nullptr && main_space_ != nullptr &&
+      foreground_collector_type_ == kCollectorTypeCMS;
+}
+
 HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
   Thread* self = Thread::Current();
   // Inc requested homogeneous space compaction.
@@ -1919,7 +1925,10 @@
     // exit.
     if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
         !main_space_->CanMoveObjects()) {
-      return HomogeneousSpaceCompactResult::kErrorReject;
+      return kErrorReject;
+    }
+    if (!SupportHomogeneousSpaceCompactAndCollectorTransitions()) {
+      return kErrorUnsupported;
     }
     collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
   }
@@ -3836,6 +3845,7 @@
 }
 
 void Heap::AllowNewAllocationRecords() const {
+  CHECK(!kUseReadBarrier);
   if (IsAllocTrackingEnabled()) {
     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
     if (IsAllocTrackingEnabled()) {
@@ -3845,6 +3855,7 @@
 }
 
 void Heap::DisallowNewAllocationRecords() const {
+  CHECK(!kUseReadBarrier);
   if (IsAllocTrackingEnabled()) {
     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
     if (IsAllocTrackingEnabled()) {
@@ -3853,6 +3864,16 @@
   }
 }
 
+void Heap::BroadcastForNewAllocationRecords() const {
+  CHECK(kUseReadBarrier);
+  if (IsAllocTrackingEnabled()) {
+    MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
+    if (IsAllocTrackingEnabled()) {
+      GetAllocationRecords()->BroadcastForNewAllocationRecords();
+    }
+  }
+}
+
 // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
 class StackCrawlState {
  public:
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 85688ae..d0d0be3 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -105,6 +105,8 @@
   kSuccess,
   // Reject due to disabled moving GC.
   kErrorReject,
+  // Unsupported due to the current configuration.
+  kErrorUnsupported,
   // System is shutting down.
   kErrorVMShuttingDown,
 };
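
With kErrorUnsupported, callers can distinguish a transient rejection (moving GC momentarily disabled) from a configuration that can never compact, per the new SupportHomogeneousSpaceCompactAndCollectorTransitions() predicate. A sketch of exhaustive handling; the retry policy is illustrative, not ART's:

// Hypothetical caller reacting to each result.
switch (heap->PerformHomogeneousSpaceCompact()) {
  case HomogeneousSpaceCompactResult::kSuccess:
    break;  // Compaction ran.
  case HomogeneousSpaceCompactResult::kErrorReject:
    // Moving GC is temporarily disabled; retrying later may succeed.
    break;
  case HomogeneousSpaceCompactResult::kErrorUnsupported:
    // No backup space or a non-CMS foreground collector: never retry.
    break;
  case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
    break;  // Runtime is going away; give up.
}
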
@@ -751,8 +753,16 @@
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Locks::alloc_tracker_lock_);
 
+  void BroadcastForNewAllocationRecords() const
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!Locks::alloc_tracker_lock_);
+
   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
 
+  // Create a new alloc space and compact default alloc space to it.
+  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
+  bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
+
  private:
   class ConcurrentGCTask;
   class CollectorTransitionTask;
@@ -905,9 +915,6 @@
   // Find a collector based on GC type.
   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
 
-  // Create a new alloc space and compact default alloc space to it.
-  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
-
   // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
   void CreateMainMallocSpace(MemMap* mem_map,
                              size_t initial_size,
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index e1c5b64..77f606d 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -298,18 +298,16 @@
   }
 }
 
-void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
-  UNUSED(failed_alloc_bytes);
-  Thread* self = Thread::Current();
+void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
+                                                 size_t failed_alloc_bytes ATTRIBUTE_UNUSED) {
+  Thread* const self = Thread::Current();
   size_t max_contiguous_allocation = 0;
   // To allow the Walk/InspectAll() to exclusively-lock the mutator
   // lock, temporarily release the shared access to the mutator
   // lock here by transitioning to the suspended state.
   Locks::mutator_lock_->AssertSharedHeld(self);
-  self->TransitionFromRunnableToSuspended(kSuspended);
+  ScopedThreadSuspension sts(self, kSuspended);
   Walk(MSpaceChunkCallback, &max_contiguous_allocation);
-  self->TransitionFromSuspendedToRunnable();
-  Locks::mutator_lock_->AssertSharedHeld(self);
   os << "; failed due to fragmentation (largest possible contiguous allocation "
      <<  max_contiguous_allocation << " bytes)";
 }
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 1a193c3..d8072ea 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -331,10 +331,8 @@
     // The mutators are not suspended yet and we have a shared access
     // to the mutator lock. Temporarily release the shared access by
     // transitioning to the suspend state, and suspend the mutators.
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
-    self->TransitionFromSuspendedToRunnable();
-    Locks::mutator_lock_->AssertSharedHeld(self);
   } else {
     // The mutators are not suspended yet. Suspend the mutators.
     InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index c398555..d13526b 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -290,9 +290,7 @@
 
   // Synchronized get which reads a reference, acquiring a lock if necessary.
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
-                                  IndirectRef iref) const
-      SHARED_REQUIRES(Locks::mutator_lock_) {
+  mirror::Object* SynchronizedGet(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_) {
     return Get<kReadBarrierOption>(iref);
   }
 
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index b49f7e1..56fe9ef 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -139,12 +139,11 @@
     ScopedObjectAccess soa(Thread::Current());
     instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
     {
-      soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+      ScopedThreadSuspension sts(soa.Self(), kSuspended);
       Runtime* runtime = Runtime::Current();
       runtime->GetThreadList()->SuspendAll("Instrumentation::ConfigureStubs");
       instr->ConfigureStubs(key, level);
       runtime->GetThreadList()->ResumeAll();
-      soa.Self()->TransitionFromSuspendedToRunnable();
     }
   }
 
@@ -162,12 +161,11 @@
     instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
     TestInstrumentationListener listener;
     {
-      soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+      ScopedThreadSuspension sts(soa.Self(), kSuspended);
       Runtime* runtime = Runtime::Current();
       runtime->GetThreadList()->SuspendAll("Add instrumentation listener");
       instr->AddListener(&listener, instrumentation_event);
       runtime->GetThreadList()->ResumeAll();
-      soa.Self()->TransitionFromSuspendedToRunnable();
     }
 
     ArtMethod* const event_method = nullptr;
@@ -182,12 +180,11 @@
 
     listener.Reset();
     {
-      soa.Self()->TransitionFromRunnableToSuspended(kSuspended);
+      ScopedThreadSuspension sts(soa.Self(), kSuspended);
       Runtime* runtime = Runtime::Current();
       runtime->GetThreadList()->SuspendAll("Remove instrumentation listener");
       instr->RemoveListener(&listener, instrumentation_event);
       runtime->GetThreadList()->ResumeAll();
-      soa.Self()->TransitionFromSuspendedToRunnable();
     }
 
     // Check the listener is not registered and is not notified of the event.
@@ -201,14 +198,13 @@
       SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     runtime->GetThreadList()->SuspendAll("Single method deoptimization");
     if (enable_deoptimization) {
       instrumentation->EnableDeoptimization();
     }
     instrumentation->Deoptimize(method);
     runtime->GetThreadList()->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
   }
 
   void UndeoptimizeMethod(Thread* self, ArtMethod* method,
@@ -216,64 +212,59 @@
       SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     runtime->GetThreadList()->SuspendAll("Single method undeoptimization");
     instrumentation->Undeoptimize(method);
     if (disable_deoptimization) {
       instrumentation->DisableDeoptimization(key);
     }
     runtime->GetThreadList()->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
   }
 
   void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
         SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     runtime->GetThreadList()->SuspendAll("Full deoptimization");
     if (enable_deoptimization) {
       instrumentation->EnableDeoptimization();
     }
     instrumentation->DeoptimizeEverything(key);
     runtime->GetThreadList()->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
   }
 
   void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
         SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     runtime->GetThreadList()->SuspendAll("Full undeoptimization");
     instrumentation->UndeoptimizeEverything(key);
     if (disable_deoptimization) {
       instrumentation->DisableDeoptimization(key);
     }
     runtime->GetThreadList()->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
   }
 
   void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
         SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
     instrumentation->EnableMethodTracing(key, needs_interpreter);
     runtime->GetThreadList()->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
   }
 
   void DisableMethodTracing(Thread* self, const char* key)
         SHARED_REQUIRES(Locks::mutator_lock_) {
     Runtime* runtime = Runtime::Current();
     instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
-    self->TransitionFromRunnableToSuspended(kSuspended);
+    ScopedThreadSuspension sts(self, kSuspended);
     runtime->GetThreadList()->SuspendAll("EnableMethodTracing");
     instrumentation->DisableMethodTracing(key);
     runtime->GetThreadList()->ResumeAll();
-    self->TransitionFromSuspendedToRunnable();
   }
 
  private:
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 6d22fe0..179353e 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -211,13 +211,6 @@
   return nullptr;
 }
 
-void InternTable::EnsureNewWeakInternsDisallowed() {
-  // Lock and unlock once to ensure that no threads are still in the
-  // middle of adding new interns.
-  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
-  CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
-}
-
 void InternTable::BroadcastForNewInterns() {
   CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
@@ -227,13 +220,13 @@
 
 void InternTable::WaitUntilAccessible(Thread* self) {
   Locks::intern_table_lock_->ExclusiveUnlock(self);
-  self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead);
-  Locks::intern_table_lock_->ExclusiveLock(self);
-  while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
-    weak_intern_condition_.Wait(self);
+  {
+    ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
+    MutexLock mu(self, *Locks::intern_table_lock_);
+    while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
+      weak_intern_condition_.Wait(self);
+    }
   }
-  Locks::intern_table_lock_->ExclusiveUnlock(self);
-  self->TransitionFromSuspendedToRunnable();
   Locks::intern_table_lock_->ExclusiveLock(self);
 }
 
@@ -459,6 +452,7 @@
 }
 
 void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
+  CHECK(!kUseReadBarrier);
   weak_root_state_ = new_state;
   if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
     weak_intern_condition_.Broadcast(Thread::Current());
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index ae9f7a7..24c5af9 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -96,11 +96,7 @@
 
   void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
 
-  void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
-  void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
-  void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
   void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
-  void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Adds all of the resolved image strings from the image space into the intern table. The
   // advantage of doing this is preventing expensive DexFile::FindStringId calls.
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index ef7a924..d6c798a 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -375,7 +375,7 @@
       unchecked_functions_(&gJniInvokeInterface),
       weak_globals_lock_("JNI weak global reference table lock", kJniWeakGlobalsLock),
       weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
-      allow_new_weak_globals_(true),
+      allow_accessing_weak_globals_(true),
       weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
   functions = unchecked_functions_;
   SetCheckJniEnabled(runtime_options.Exists(RuntimeArgumentMap::CheckJni));
@@ -406,9 +406,9 @@
     check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
   } else {
     // Ensure that we get a native stack trace for this thread.
-    self->TransitionFromRunnableToSuspended(kNative);
+    ScopedThreadSuspension sts(self, kNative);
     LOG(FATAL) << os.str();
-    self->TransitionFromSuspendedToRunnable();  // Unreachable, keep annotalysis happy.
+    UNREACHABLE();
   }
 }
 
@@ -473,8 +473,7 @@
     return nullptr;
   }
   MutexLock mu(self, weak_globals_lock_);
-  while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) ||
-                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+  while (UNLIKELY(!MayAccessWeakGlobals(self))) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
@@ -542,24 +541,24 @@
 }
 
 void JavaVMExt::DisallowNewWeakGlobals() {
-  MutexLock mu(Thread::Current(), weak_globals_lock_);
-  allow_new_weak_globals_ = false;
+  CHECK(!kUseReadBarrier);
+  Thread* const self = Thread::Current();
+  MutexLock mu(self, weak_globals_lock_);
+  // DisallowNewWeakGlobals is only called by CMS during the pause. The mutator lock must be
+  // exclusively held so that no thread can be in the middle of DecodeWeakGlobal.
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  allow_accessing_weak_globals_.StoreSequentiallyConsistent(false);
 }
 
 void JavaVMExt::AllowNewWeakGlobals() {
+  CHECK(!kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, weak_globals_lock_);
-  allow_new_weak_globals_ = true;
+  allow_accessing_weak_globals_.StoreSequentiallyConsistent(true);
   weak_globals_add_condition_.Broadcast(self);
 }
 
-void JavaVMExt::EnsureNewWeakGlobalsDisallowed() {
-  // Lock and unlock once to ensure that no threads are still in the
-  // middle of adding new weak globals.
-  MutexLock mu(Thread::Current(), weak_globals_lock_);
-  CHECK(!allow_new_weak_globals_);
-}
-
 void JavaVMExt::BroadcastForNewWeakGlobals() {
   CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
@@ -567,8 +566,8 @@
   weak_globals_add_condition_.Broadcast(self);
 }
 
-mirror::Object* JavaVMExt::DecodeGlobal(Thread* self, IndirectRef ref) {
-  return globals_.SynchronizedGet(self, &globals_lock_, ref);
+mirror::Object* JavaVMExt::DecodeGlobal(IndirectRef ref) {
+  return globals_.SynchronizedGet(ref);
 }
 
 void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result) {
@@ -576,7 +575,26 @@
   globals_.Update(ref, result);
 }
 
+inline bool JavaVMExt::MayAccessWeakGlobals(Thread* self) const {
+  return MayAccessWeakGlobalsUnlocked(self);
+}
+
+inline bool JavaVMExt::MayAccessWeakGlobalsUnlocked(Thread* self) const {
+  return kUseReadBarrier
+      ? self->GetWeakRefAccessEnabled()
+      : allow_accessing_weak_globals_.LoadSequentiallyConsistent();
+}
+
 mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
+  // It is safe to access GetWeakRefAccessEnabled without the lock since CC uses checkpoints to call
+  // SetWeakRefAccessEnabled, and the other collectors only modify allow_accessing_weak_globals_
+  // when the mutators are paused.
+  // This only applies in the case where MayAccessWeakGlobals goes from false to true. In the
+  // other case the read may be racy, but that is benign since DecodeWeakGlobalLocked behaves
+  // correctly when MayAccessWeakGlobals is false.
+  if (LIKELY(MayAccessWeakGlobalsUnlocked(self))) {
+    return weak_globals_.SynchronizedGet(ref);
+  }
   MutexLock mu(self, weak_globals_lock_);
   return DecodeWeakGlobalLocked(self, ref);
 }
@@ -585,8 +603,7 @@
   if (kDebugLocking) {
     weak_globals_lock_.AssertHeld(self);
   }
-  while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) ||
-                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+  while (UNLIKELY(!MayAccessWeakGlobals(self))) {
     weak_globals_add_condition_.WaitHoldingLocks(self);
   }
   return weak_globals_.Get(ref);
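Note: the new DecodeWeakGlobal splits into a lock-free fast path and a locked slow path. The sequentially consistent load of the access flag may race with a state change, but a stale false merely diverts the caller to the slow path, which re-checks under weak_globals_lock_. A standalone sketch of the same shape, assuming (as the comments in java_vm_ext.h below state) that the table itself is safe to read without the lock; the array, int, and void* stand in for IndirectReferenceTable, IndirectRef, and mirror::Object*:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    struct WeakGlobals {
      void* table[1024] = {};
      std::atomic<bool> allow_access{true};  // allow_accessing_weak_globals_.
      std::mutex lock;                       // weak_globals_lock_.
      std::condition_variable cond;          // weak_globals_add_condition_.

      void* Decode(int ref) {
        if (allow_access.load()) {
          return table[ref];  // Fast path: no lock taken.
        }
        std::unique_lock<std::mutex> mu(lock);
        cond.wait(mu, [this] { return allow_access.load(); });
        return table[ref];  // Slow path: woken by AllowAccess().
      }

      void DisallowAccess() {  // Caller guarantees no concurrent Decode.
        std::lock_guard<std::mutex> mu(lock);
        allow_access.store(false);
      }

      void AllowAccess() {
        std::lock_guard<std::mutex> mu(lock);
        allow_access.store(true);
        cond.notify_all();
      }
    };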
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index e80266f..87430c8 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -108,8 +108,6 @@
 
   void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
   void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
-  void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!weak_globals_lock_);
   void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!weak_globals_lock_);
 
@@ -126,7 +124,7 @@
   void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
 
-  mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
+  mirror::Object* DecodeGlobal(IndirectRef ref)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
@@ -155,6 +153,12 @@
       REQUIRES(!globals_lock_);
 
  private:
+  // Return true if self can currently access weak globals.
+  bool MayAccessWeakGlobalsUnlocked(Thread* self) const SHARED_REQUIRES(Locks::mutator_lock_);
+  bool MayAccessWeakGlobals(Thread* self) const
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(weak_globals_lock_);
+
   Runtime* const runtime_;
 
   // Used for testing. By default, we'll LOG(FATAL) the reason.
@@ -184,8 +188,10 @@
   // Since weak_globals_ contain weak roots, be careful not to
   // directly access the object references in it. Use Get() with the
   // read barrier enabled.
-  IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
-  bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
+  // Not guarded by weak_globals_lock_ since we may use SynchronizedGet in DecodeWeakGlobal.
+  IndirectReferenceTable weak_globals_;
+  // Not guarded by weak_globals_lock_ for the same reason.
+  Atomic<bool> allow_accessing_weak_globals_;
   ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 5d21f17..06b67b3 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -623,7 +623,7 @@
   CHECK(pReq != nullptr);
   /* send request and possibly suspend ourselves */
   JDWP::ObjectId thread_self_id = Dbg::GetThreadSelfId();
-  self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
+  ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
   if (suspend_policy != SP_NONE) {
     AcquireJdwpTokenForEvent(threadId);
   }
@@ -633,7 +633,6 @@
     ScopedThreadStateChange stsc(self, kSuspended);
     SuspendByPolicy(suspend_policy, thread_self_id);
   }
-  self->TransitionFromSuspendedToRunnable();
 }
 
 /*
@@ -1323,9 +1322,8 @@
   }
   if (safe_to_release_mutator_lock_over_send) {
     // Change state to waiting to allow GC, ... while we're sending.
-    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
+    ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
     SendBufferedRequest(type, wrapiov);
-    self->TransitionFromSuspendedToRunnable();
   } else {
     // Send and possibly block GC...
     SendBufferedRequest(type, wrapiov);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 7776f8f..0a4d6e3 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -31,6 +31,7 @@
 #include "jdwp/jdwp_expand_buf.h"
 #include "jdwp/jdwp_priv.h"
 #include "runtime.h"
+#include "scoped_thread_state_change.h"
 #include "thread-inl.h"
 #include "utils.h"
 
@@ -238,9 +239,8 @@
 static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
-  self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
+  ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
   Dbg::SuspendVM();
-  self->TransitionFromSuspendedToRunnable();
   return ERR_NONE;
 }
 
@@ -922,9 +922,8 @@
   }
 
   Thread* self = Thread::Current();
-  self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
+  ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
   JdwpError result = Dbg::SuspendThread(thread_id);
-  self->TransitionFromSuspendedToRunnable();
   return result;
 }
 
@@ -1609,7 +1608,7 @@
    * Do this after anything that can stall indefinitely.
    */
   Thread* self = Thread::Current();
-  ThreadState old_state = self->TransitionFromSuspendedToRunnable();
+  ScopedObjectAccess soa(self);
 
   expandBufAddSpace(pReply, kJDWPHeaderLen);
 
@@ -1670,9 +1669,6 @@
     last_activity_time_ms_.StoreSequentiallyConsistent(MilliTime());
   }
 
-  /* tell the VM that GC is okay again */
-  self->TransitionFromRunnableToSuspended(old_state);
-
   return replyLength;
 }
 
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 5a9a0f5..668d5dc 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -126,9 +126,12 @@
  * Write a packet of "length" bytes. Grabs a mutex to assure atomicity.
  */
 ssize_t JdwpNetStateBase::WritePacket(ExpandBuf* pReply, size_t length) {
-  MutexLock mu(Thread::Current(), socket_lock_);
-  DCHECK(IsConnected()) << "Connection with debugger is closed";
   DCHECK_LE(length, expandBufGetLength(pReply));
+  if (!IsConnected()) {
+    LOG(WARNING) << "Connection with debugger is closed";
+    return -1;
+  }
+  MutexLock mu(Thread::Current(), socket_lock_);
   return TEMP_FAILURE_RETRY(write(clientSock, expandBufGetBuffer(pReply), length));
 }
 
@@ -533,9 +536,8 @@
       ddm_is_active_ = false;
 
       /* broadcast the disconnect; must be in RUNNING state */
-      thread_->TransitionFromSuspendedToRunnable();
+      ScopedObjectAccess soa(thread_);
       Dbg::DdmDisconnected();
-      thread_->TransitionFromRunnableToSuspended(kWaitingInMainDebuggerLoop);
     }
 
     {
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 64a6076..26575fd 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -139,7 +139,8 @@
 
 void BoxTable::BlockUntilWeaksAllowed() {
   Thread* self = Thread::Current();
-  while (UNLIKELY(allow_new_weaks_ == false)) {
+  while (UNLIKELY((!kUseReadBarrier && !allow_new_weaks_) ||
+                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
     new_weaks_condition_.WaitHoldingLocks(self);  // wait while holding mutator lock
   }
 }
@@ -184,6 +185,7 @@
 }
 
 void BoxTable::DisallowNewWeakBoxedLambdas() {
+  CHECK(!kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::lambda_table_lock_);
 
@@ -191,6 +193,7 @@
 }
 
 void BoxTable::AllowNewWeakBoxedLambdas() {
+  CHECK(!kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::lambda_table_lock_);
 
@@ -198,10 +201,11 @@
   new_weaks_condition_.Broadcast(self);
 }
 
-void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
+void BoxTable::BroadcastForNewWeakBoxedLambdas() {
+  CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::lambda_table_lock_);
-  CHECK_NE(allow_new_weaks_, false);
+  new_weaks_condition_.Broadcast(self);
 }
 
 bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 312d811..9ffda66 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -67,8 +67,8 @@
   void AllowNewWeakBoxedLambdas()
       REQUIRES(!Locks::lambda_table_lock_);
 
-  // GC callback: Verify that the state is now blocking anyone from touching the map.
-  void EnsureNewWeakBoxedLambdasDisallowed()
+  // GC callback: Unblock any readers who have been queued waiting to touch the map.
+  void BroadcastForNewWeakBoxedLambdas()
       REQUIRES(!Locks::lambda_table_lock_);
 
   BoxTable();
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 976936d..baf9da2 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -178,10 +178,11 @@
   dest->push_back(out);
 }
 
-// An encoder that pushed uint32_t data onto the given std::vector.
+// An encoder that pushes int32_t/uint32_t data onto the given std::vector.
+template <typename Allocator = std::allocator<uint8_t>>
 class Leb128Encoder {
  public:
-  explicit Leb128Encoder(std::vector<uint8_t>* data) : data_(data) {
+  explicit Leb128Encoder(std::vector<uint8_t, Allocator>* data) : data_(data) {
     DCHECK(data != nullptr);
   }
 
@@ -211,22 +212,27 @@
     }
   }
 
-  const std::vector<uint8_t>& GetData() const {
+  const std::vector<uint8_t, Allocator>& GetData() const {
     return *data_;
   }
 
  protected:
-  std::vector<uint8_t>* const data_;
+  std::vector<uint8_t, Allocator>* const data_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Leb128Encoder);
 };
 
 // An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
-class Leb128EncodingVector FINAL : private std::vector<uint8_t>, public Leb128Encoder {
+template <typename Allocator = std::allocator<uint8_t>>
+class Leb128EncodingVector FINAL : private std::vector<uint8_t, Allocator>,
+                                   public Leb128Encoder<Allocator> {
  public:
-  Leb128EncodingVector() : Leb128Encoder(this) {
-  }
+  Leb128EncodingVector() : Leb128Encoder<Allocator>(this) { }
+
+  explicit Leb128EncodingVector(const Allocator& alloc)
+    : std::vector<uint8_t, Allocator>(alloc),
+      Leb128Encoder<Allocator>(this) { }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
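Note: templating the encoder on its allocator keeps the old default behavior (hence the bare Leb128EncodingVector<> spelling at the call sites below) while letting a caller route the byte buffer through a custom allocator, presumably an arena adapter on the compiler side. A usage sketch, assuming leb128.h is in scope; TrackingAllocator is a placeholder for whatever allocator type a caller actually supplies:

    #include <cstdint>
    #include <memory>

    template <typename T>
    using TrackingAllocator = std::allocator<T>;  // Stand-in for a real adapter.

    void EncodeExamples() {
      Leb128EncodingVector<> plain;  // Default std::allocator, old behavior.
      plain.PushBackUnsigned(1024u);

      TrackingAllocator<uint8_t> alloc;
      Leb128EncodingVector<TrackingAllocator<uint8_t>> tracked(alloc);
      tracked.PushBackSigned(-2);
    }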
diff --git a/runtime/leb128_test.cc b/runtime/leb128_test.cc
index 1bb493d..09f7ecc 100644
--- a/runtime/leb128_test.cc
+++ b/runtime/leb128_test.cc
@@ -94,7 +94,7 @@
 TEST(Leb128Test, UnsignedSinglesVector) {
   // Test individual encodings.
   for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
-    Leb128EncodingVector builder;
+    Leb128EncodingVector<> builder;
     builder.PushBackUnsigned(uleb128_tests[i].decoded);
     EXPECT_EQ(UnsignedLeb128Size(uleb128_tests[i].decoded), builder.GetData().size());
     const uint8_t* data_ptr = &uleb128_tests[i].leb128_data[0];
@@ -131,7 +131,7 @@
 
 TEST(Leb128Test, UnsignedStreamVector) {
   // Encode a number of entries.
-  Leb128EncodingVector builder;
+  Leb128EncodingVector<> builder;
   for (size_t i = 0; i < arraysize(uleb128_tests); ++i) {
     builder.PushBackUnsigned(uleb128_tests[i].decoded);
   }
@@ -175,7 +175,7 @@
 TEST(Leb128Test, SignedSinglesVector) {
   // Test individual encodings.
   for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
-    Leb128EncodingVector builder;
+    Leb128EncodingVector<> builder;
     builder.PushBackSigned(sleb128_tests[i].decoded);
     EXPECT_EQ(SignedLeb128Size(sleb128_tests[i].decoded), builder.GetData().size());
     const uint8_t* data_ptr = &sleb128_tests[i].leb128_data[0];
@@ -212,7 +212,7 @@
 
 TEST(Leb128Test, SignedStreamVector) {
   // Encode a number of entries.
-  Leb128EncodingVector builder;
+  Leb128EncodingVector<> builder;
   for (size_t i = 0; i < arraysize(sleb128_tests); ++i) {
     builder.PushBackSigned(sleb128_tests[i].decoded);
   }
@@ -275,7 +275,7 @@
 TEST(Leb128Test, Speed) {
   std::unique_ptr<Histogram<uint64_t>> enc_hist(new Histogram<uint64_t>("Leb128EncodeSpeedTest", 5));
   std::unique_ptr<Histogram<uint64_t>> dec_hist(new Histogram<uint64_t>("Leb128DecodeSpeedTest", 5));
-  Leb128EncodingVector builder;
+  Leb128EncodingVector<> builder;
   // Push back 1024 chunks of 1024 values measuring encoding speed.
   uint64_t last_time = NanoTime();
   for (size_t i = 0; i < 1024; i++) {
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index d9bc656..0ff6d7a 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -19,7 +19,6 @@
 
 #include <stddef.h>  // for offsetof()
 
-#include "linear_alloc.h"
 #include "stride_iterator.h"
 #include "base/bit_utils.h"
 #include "base/casts.h"
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index da6ee25..fa58418 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -454,15 +454,13 @@
   uintptr_t saved_dex_pc = locking_dex_pc_;
   locking_dex_pc_ = 0;
 
-  /*
-   * Update thread state. If the GC wakes up, it'll ignore us, knowing
-   * that we won't touch any references in this state, and we'll check
-   * our suspend mode before we transition out.
-   */
-  self->TransitionFromRunnableToSuspended(why);
-
   bool was_interrupted = false;
   {
+    // Update thread state. If the GC wakes up, it'll ignore us, knowing
+    // that we won't touch any references in this state, and we'll check
+    // our suspend mode before we transition out.
+    ScopedThreadSuspension sts(self, why);
+
     // Pseudo-atomically wait on self's wait_cond_ and release the monitor lock.
     MutexLock mu(self, *self->GetWaitMutex());
 
@@ -494,9 +492,6 @@
     }
   }
 
-  // Set self->status back to kRunnable, and self-suspend if needed.
-  self->TransitionFromSuspendedToRunnable();
-
   {
     // We reset the thread's wait_monitor_ field after transitioning back to runnable so
     // that a thread in a waiting/sleeping state has a non-null wait_monitor_ for debugging
@@ -667,9 +662,11 @@
     // Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
     self->SetMonitorEnterObject(obj.Get());
     bool timed_out;
-    self->TransitionFromRunnableToSuspended(kBlocked);
-    Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
-    self->TransitionFromSuspendedToRunnable();
+    Thread* owner;
+    {
+      ScopedThreadSuspension sts(self, kBlocked);
+      owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
+    }
     if (owner != nullptr) {
       // We succeeded in suspending the thread, check the lock's status didn't change.
       lock_word = obj->GetLockWord(true);
@@ -1126,24 +1123,19 @@
 }
 
 void MonitorList::DisallowNewMonitors() {
+  CHECK(!kUseReadBarrier);
   MutexLock mu(Thread::Current(), monitor_list_lock_);
   allow_new_monitors_ = false;
 }
 
 void MonitorList::AllowNewMonitors() {
+  CHECK(!kUseReadBarrier);
   Thread* self = Thread::Current();
   MutexLock mu(self, monitor_list_lock_);
   allow_new_monitors_ = true;
   monitor_add_condition_.Broadcast(self);
 }
 
-void MonitorList::EnsureNewMonitorsDisallowed() {
-  // Lock and unlock once to ensure that no threads are still in the
-  // middle of adding new monitors.
-  MutexLock mu(Thread::Current(), monitor_list_lock_);
-  CHECK(!allow_new_monitors_);
-}
-
 void MonitorList::BroadcastForNewMonitors() {
   CHECK(kUseReadBarrier);
   Thread* self = Thread::Current();
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 346e866..8cd93c6 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -295,7 +295,6 @@
       REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
   void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
   void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
-  void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_);
   void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
   // Returns how many monitors were deflated.
   size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_);
@@ -321,14 +320,14 @@
 // For use only by the JDWP implementation.
 class MonitorInfo {
  public:
+  MonitorInfo() = default;
+  MonitorInfo(const MonitorInfo&) = default;
+  MonitorInfo& operator=(const MonitorInfo&) = default;
   explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);
 
   Thread* owner_;
   size_t entry_count_;
   std::vector<Thread*> waiters_;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MonitorInfo);
 };
 
 }  // namespace art
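Note: replacing DISALLOW_COPY_AND_ASSIGN with defaulted copy operations turns MonitorInfo into a plain value type, so the JDWP code it serves can snapshot monitor state into containers by value. A hedged sketch under that assumption (CollectMonitorInfo is hypothetical, not part of this change):

    #include <vector>

    // Snapshot the monitors of a set of objects; copies are now permitted.
    std::vector<MonitorInfo> CollectMonitorInfo(
        const std::vector<mirror::Object*>& objects)
        REQUIRES(Locks::mutator_lock_) {
      std::vector<MonitorInfo> infos;
      for (mirror::Object* o : objects) {
        infos.push_back(MonitorInfo(o));
      }
      return infos;
    }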
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 69112b1..83e0c0d 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -316,7 +316,7 @@
   }
 
   // Need to drop the mutator lock to allow barriers.
-  soa.Self()->TransitionFromRunnableToSuspended(kNative);
+  ScopedThreadSuspension sts(soa.Self(), kNative);
   ThreadPool thread_pool(pool_name, 3);
   thread_pool.AddTask(self, new CreateTask(test, create_sleep, c_millis, c_expected));
   if (interrupt) {
@@ -340,7 +340,6 @@
   }
 
   thread_pool.StopWorkers(self);
-  soa.Self()->TransitionFromSuspendedToRunnable();
 }
 
 
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 541eeb1..7910f94 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -35,15 +35,16 @@
     trace = soa.Self()->CreateInternalStackTrace<false>(soa);
   } else {
     // Suspend thread to build stack trace.
-    soa.Self()->TransitionFromRunnableToSuspended(kNative);
+    ScopedThreadSuspension sts(soa.Self(), kSuspended);
     ThreadList* thread_list = Runtime::Current()->GetThreadList();
     bool timed_out;
     Thread* thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
     if (thread != nullptr) {
       // Must be runnable to create returned array.
-      CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
-      trace = thread->CreateInternalStackTrace<false>(soa);
-      soa.Self()->TransitionFromRunnableToSuspended(kNative);
+      {
+        ScopedObjectAccess soa2(soa.Self());
+        trace = thread->CreateInternalStackTrace<false>(soa);
+      }
       // Restart suspended thread.
       thread_list->Resume(thread, false);
     } else {
@@ -52,7 +53,6 @@
             "generous timeout.";
       }
     }
-    CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
   }
   return trace;
 }
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 7e464e9..8fd6849 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -430,7 +430,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsProxyClass()) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
@@ -442,7 +442,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsProxyClass()) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     // Return an empty array instead of a null pointer.
     mirror::Class* annotation_array_class =
         soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
@@ -453,18 +453,141 @@
   return soa.AddLocalReference<jobjectArray>(klass->GetDexFile().GetAnnotationsForClass(klass));
 }
 
+static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  mirror::ObjectArray<mirror::Class>* classes = nullptr;
+  if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
+    classes = klass->GetDexFile().GetDeclaredClasses(klass);
+  }
+  if (classes == nullptr) {
+    // Return an empty array instead of a null pointer.
+    mirror::Class* class_class = mirror::Class::GetJavaLangClass();
+    mirror::Class* class_array_class =
+        Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+    if (class_array_class == nullptr) {
+      return nullptr;
+    }
+    mirror::ObjectArray<mirror::Class>* empty_array =
+        mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
+    return soa.AddLocalReference<jobjectArray>(empty_array);
+  }
+  return soa.AddLocalReference<jobjectArray>(classes);
+}
+
+static jclass Class_getEnclosingClass(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return nullptr;
+  }
+  return soa.AddLocalReference<jclass>(klass->GetDexFile().GetEnclosingClass(klass));
+}
+
+static jobject Class_getEnclosingConstructorNative(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return nullptr;
+  }
+  mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass);
+  if (method != nullptr) {
+    if (method->GetClass() ==
+        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Constructor)) {
+      return soa.AddLocalReference<jobject>(method);
+    }
+  }
+  return nullptr;
+}
+
+static jobject Class_getEnclosingMethodNative(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return nullptr;
+  }
+  mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass);
+  if (method != nullptr) {
+    if (method->GetClass() ==
+        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Method)) {
+      return soa.AddLocalReference<jobject>(method);
+    }
+  }
+  return nullptr;
+}
+
+static jint Class_getInnerClassFlags(JNIEnv* env, jobject javaThis, jint defaultValue) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return defaultValue;
+  }
+  uint32_t flags;
+  if (!klass->GetDexFile().GetInnerClassFlags(klass, &flags)) {
+    return defaultValue;
+  }
+  return flags;
+}
+
+static jstring Class_getInnerClassName(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return nullptr;
+  }
+  mirror::String* class_name = nullptr;
+  if (!klass->GetDexFile().GetInnerClass(klass, &class_name)) {
+    return nullptr;
+  }
+  return soa.AddLocalReference<jstring>(class_name);
+}
+
+static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return false;
+  }
+  mirror::String* class_name = nullptr;
+  if (!klass->GetDexFile().GetInnerClass(klass, &class_name)) {
+    return false;
+  }
+  return class_name == nullptr;
+}
+
 static jboolean Class_isDeclaredAnnotationPresent(JNIEnv* env, jobject javaThis,
                                                   jclass annotationType) {
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsProxyClass()) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
   Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
   return klass->GetDexFile().IsClassAnnotationPresent(klass, annotation_class);
 }
 
+static jclass Class_getDeclaringClass(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    return nullptr;
+  }
+  // Return null for anonymous classes.
+  if (Class_isAnonymousClass(env, javaThis)) {
+    return nullptr;
+  }
+  return soa.AddLocalReference<jclass>(klass->GetDexFile().GetDeclaringClass(klass));
+}
+
 static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<4> hs(soa.Self());
@@ -550,6 +673,7 @@
   NATIVE_METHOD(Class, getDeclaredAnnotation,
                 "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
   NATIVE_METHOD(Class, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
+  NATIVE_METHOD(Class, getDeclaredClasses, "!()[Ljava/lang/Class;"),
   NATIVE_METHOD(Class, getDeclaredConstructorInternal,
                 "!([Ljava/lang/Class;)Ljava/lang/reflect/Constructor;"),
   NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"),
@@ -561,9 +685,16 @@
                 "!(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;"),
   NATIVE_METHOD(Class, getDeclaredMethodsUnchecked,
                 "!(Z)[Ljava/lang/reflect/Method;"),
+  NATIVE_METHOD(Class, getDeclaringClass, "!()Ljava/lang/Class;"),
+  NATIVE_METHOD(Class, getEnclosingClass, "!()Ljava/lang/Class;"),
+  NATIVE_METHOD(Class, getEnclosingConstructorNative, "!()Ljava/lang/reflect/Constructor;"),
+  NATIVE_METHOD(Class, getEnclosingMethodNative, "!()Ljava/lang/reflect/Method;"),
+  NATIVE_METHOD(Class, getInnerClassFlags, "!(I)I"),
+  NATIVE_METHOD(Class, getInnerClassName, "!()Ljava/lang/String;"),
   NATIVE_METHOD(Class, getNameNative, "!()Ljava/lang/String;"),
   NATIVE_METHOD(Class, getProxyInterfaces, "!()[Ljava/lang/Class;"),
   NATIVE_METHOD(Class, getPublicDeclaredFields, "!()[Ljava/lang/reflect/Field;"),
+  NATIVE_METHOD(Class, isAnonymousClass, "!()Z"),
   NATIVE_METHOD(Class, isDeclaredAnnotationPresent, "!(Ljava/lang/Class;)Z"),
   NATIVE_METHOD(Class, newInstance, "!()Ljava/lang/Object;"),
 };
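Note: every new Class_* native above opens with the same guard, bailing out when the class is a proxy or has a null dex cache, since neither case has a backing DexFile to query for annotations or inner-class metadata. If the pattern keeps spreading it could be hoisted into a helper; a sketch (IsClassWithoutDexData is hypothetical, not part of this change):

    // Proxy classes and classes without a dex cache have no backing DexFile,
    // so annotation and inner-class queries cannot proceed on them.
    static bool IsClassWithoutDexData(Handle<mirror::Class> klass)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      return klass->IsProxyClass() || klass->GetDexCache() == nullptr;
    }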
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index b4b77e7..e1e9ceb 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -48,15 +48,18 @@
 static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
-  mirror::ObjectArray<mirror::Object>* result_array =
+  mirror::ObjectArray<mirror::Class>* result_array =
       method->GetDexFile()->GetExceptionTypesForMethod(method);
   if (result_array == nullptr) {
     // Return an empty array instead of a null pointer.
     mirror::Class* class_class = mirror::Class::GetJavaLangClass();
     mirror::Class* class_array_class =
         Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
-    mirror::ObjectArray<mirror::Object>* empty_array =
-        mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), class_array_class, 0);
+    if (class_array_class == nullptr) {
+      return nullptr;
+    }
+    mirror::ObjectArray<mirror::Class>* empty_array =
+        mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
     return soa.AddLocalReference<jobjectArray>(empty_array);
   } else {
     return soa.AddLocalReference<jobjectArray>(result_array);
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 1219f85..caacba6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -82,15 +82,18 @@
     mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index);
     return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
   } else {
-    mirror::ObjectArray<mirror::Object>* result_array =
+    mirror::ObjectArray<mirror::Class>* result_array =
         method->GetDexFile()->GetExceptionTypesForMethod(method);
     if (result_array == nullptr) {
       // Return an empty array instead of a null pointer
       mirror::Class* class_class = mirror::Class::GetJavaLangClass();
       mirror::Class* class_array_class =
           Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
-      mirror::ObjectArray<mirror::Object>* empty_array =
-          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), class_array_class, 0);
+      if (class_array_class == nullptr) {
+        return nullptr;
+      }
+      mirror::ObjectArray<mirror::Class>* empty_array =
+          mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class, 0);
       return soa.AddLocalReference<jobjectArray>(empty_array);
     } else {
       return soa.AddLocalReference<jobjectArray>(result_array);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a9dc16d..4797564 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -20,6 +20,7 @@
 #include <sys/mount.h>
 #ifdef __linux__
 #include <linux/fs.h>
+#include <sys/prctl.h>
 #endif
 
 #define ATRACE_TAG ATRACE_TAG_DALVIK
@@ -493,6 +494,14 @@
 
   CHECK(!no_sig_chain_) << "A started runtime should have sig chain enabled";
 
+  // On a debug host build, relax the ptrace restriction so that debuggers and the test-timeout
+  // thread dump can attach. 64-bit only, as prctl() may fail in 32-bit userspace on a 64-bit
+  // kernel.
+#if defined(__linux__) && !defined(__ANDROID__) && defined(__x86_64__)
+  if (kIsDebugBuild) {
+    CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
+  }
+#endif
+
   // Restore main thread state to kNative as expected by native code.
   Thread* self = Thread::Current();
 
@@ -1506,6 +1515,7 @@
 }
 
 void Runtime::DisallowNewSystemWeaks() {
+  CHECK(!kUseReadBarrier);
   monitor_list_->DisallowNewMonitors();
   intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
   java_vm_->DisallowNewWeakGlobals();
@@ -1514,6 +1524,7 @@
 }
 
 void Runtime::AllowNewSystemWeaks() {
+  CHECK(!kUseReadBarrier);
   monitor_list_->AllowNewMonitors();
   intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
   java_vm_->AllowNewWeakGlobals();
@@ -1521,20 +1532,15 @@
   lambda_box_table_->AllowNewWeakBoxedLambdas();
 }
 
-void Runtime::EnsureNewSystemWeaksDisallowed() {
-  // Lock and unlock the system weak locks once to ensure that no
-  // threads are still in the middle of adding new system weaks.
-  monitor_list_->EnsureNewMonitorsDisallowed();
-  intern_table_->EnsureNewWeakInternsDisallowed();
-  java_vm_->EnsureNewWeakGlobalsDisallowed();
-  lambda_box_table_->EnsureNewWeakBoxedLambdasDisallowed();
-}
-
 void Runtime::BroadcastForNewSystemWeaks() {
+  // This is used for the read barrier case that uses the thread-local
+  // Thread::GetWeakRefAccessEnabled() flag.
   CHECK(kUseReadBarrier);
   monitor_list_->BroadcastForNewMonitors();
   intern_table_->BroadcastForNewInterns();
   java_vm_->BroadcastForNewWeakGlobals();
+  heap_->BroadcastForNewAllocationRecords();
+  lambda_box_table_->BroadcastForNewWeakBoxedLambdas();
 }
 
 void Runtime::SetInstructionSet(InstructionSet instruction_set) {
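Note: the CHECK(kUseReadBarrier) / CHECK(!kUseReadBarrier) pairs added above pin down two disjoint protocols. Non-read-barrier collectors such as CMS toggle the global disallow flags around their pause, while the concurrent copying collector re-enables per-thread weak-ref access through checkpoints and then only needs a broadcast to wake blocked readers. A sketch of the implied call sequences; CmsPause and CcEnableWeakAccess are hypothetical framings, while the Runtime methods are the ones in this change:

    void CmsPause(Runtime* runtime) {
      // Mutators are suspended here, so no reader races the flag flips.
      runtime->DisallowNewSystemWeaks();
      // ... sweep monitors, interns, weak globals, boxed lambdas ...
      runtime->AllowNewSystemWeaks();  // Also broadcasts to blocked readers.
    }

    void CcEnableWeakAccess(Runtime* runtime) {
      // Per-thread GetWeakRefAccessEnabled() flags were re-enabled via
      // checkpoints; wake anyone who blocked while access was disabled.
      runtime->BroadcastForNewSystemWeaks();
    }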
diff --git a/runtime/runtime.h b/runtime/runtime.h
index bd21db1..a35eac1 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -303,7 +303,6 @@
 
   void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
   void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
-  void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_);
   void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b90aa0e..d1cc09a 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -31,7 +31,7 @@
 // more complicated suspension checking. The subclasses ScopedObjectAccessUnchecked and
 // ScopedObjectAccess are used to handle the change into Runnable to Get direct access to objects,
 // the unchecked variant doesn't aid annotalysis.
-class ScopedThreadStateChange {
+class ScopedThreadStateChange : public ValueObject {
  public:
   ScopedThreadStateChange(Thread* self, ThreadState new_thread_state)
       REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE
@@ -102,7 +102,7 @@
 };
 
 // Assumes we are already runnable.
-class ScopedObjectAccessAlreadyRunnable {
+class ScopedObjectAccessAlreadyRunnable : public ValueObject {
  public:
   Thread* Self() const {
     return self_;
@@ -277,6 +277,30 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedObjectAccess);
 };
 
+// Annotalysis helper for going to a suspended state from runnable.
+class ScopedThreadSuspension : public ValueObject {
+ public:
+  explicit ScopedThreadSuspension(Thread* self, ThreadState suspended_state)
+      REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_)
+      UNLOCK_FUNCTION(Locks::mutator_lock_)
+      ALWAYS_INLINE
+      : self_(self), suspended_state_(suspended_state) {
+    DCHECK(self_ != nullptr);
+    self_->TransitionFromRunnableToSuspended(suspended_state);
+  }
+
+  ~ScopedThreadSuspension() SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {
+    DCHECK_EQ(self_->GetState(), suspended_state_);
+    self_->TransitionFromSuspendedToRunnable();
+  }
+
+ private:
+  Thread* const self_;
+  const ThreadState suspended_state_;
+  DISALLOW_COPY_AND_ASSIGN(ScopedThreadSuspension);
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
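Note: ScopedThreadSuspension is what lets the call sites earlier in this change drop their paired TransitionFromRunnableToSuspended / TransitionFromSuspendedToRunnable calls: the constructor and destructor perform the two transitions, so they stay balanced on every exit path, including early returns. A usage sketch (WaitForDebugger and DoBlockingWork are hypothetical):

    void WaitForDebugger(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
      ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
      // Runnable -> suspended: the share of mutator_lock_ is released, so it
      // is safe to block here and the GC can proceed.
      DoBlockingWork();
    }  // Suspended -> runnable: the mutator_lock_ share is re-acquired.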
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 63534b1..af5830a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1035,9 +1035,8 @@
   ATRACE_BEGIN("Full suspend check");
   // Make thread appear suspended to other threads, release mutator_lock_.
   tls32_.suspended_at_suspend_check = true;
-  TransitionFromRunnableToSuspended(kSuspended);
-  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
-  TransitionFromSuspendedToRunnable();
+  // Transition to suspended and back to runnable: the unnamed temporary's constructor and
+  // destructor perform the two transitions, re-acquiring a share of mutator_lock_ on the way out.
+  ScopedThreadSuspension(this, kSuspended);
   tls32_.suspended_at_suspend_check = false;
   ATRACE_END();
   VLOG(threads) << this << " self-reviving";
@@ -1728,7 +1727,7 @@
       result = nullptr;
     }
   } else if (kind == kGlobal) {
-    result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
+    result = tlsPtr_.jni_env->vm->DecodeGlobal(ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
     result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 8aa1189..27dacea 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1441,14 +1441,6 @@
   return true;
 }
 
-void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
-  Leb128Encoder(dst).PushBackUnsigned(data);
-}
-
-void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
-  Leb128Encoder(dst).PushBackSigned(data);
-}
-
 std::string PrettyDescriptor(Primitive::Type type) {
   return PrettyDescriptor(Primitive::Descriptor(type));
 }
diff --git a/runtime/utils.h b/runtime/utils.h
index d1be51a..16835c2 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -294,9 +294,6 @@
   buf->push_back((data >> 24) & 0xff);
 }
 
-void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* buf);
-void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* buf);
-
 // Deleter using free() for use with std::unique_ptr<>. See also UniqueCPtr<> below.
 struct FreeDelete {
   // NOTE: Deleting a const object is valid but free() takes a non-const pointer.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 35cc4e3..d768afd 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3409,7 +3409,6 @@
 ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
     uint32_t dex_method_idx, MethodType method_type) {
   const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
-  // LOG(INFO) << dex_file_->NumTypeIds() << " " << dex_file_->NumClassDefs();
   const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
   if (klass_type.IsConflict()) {
     std::string append(" in attempt to access method ");
diff --git a/test/005-annotations/expected.txt b/test/005-annotations/expected.txt
index 36b3868..e1c3dad 100644
--- a/test/005-annotations/expected.txt
+++ b/test/005-annotations/expected.txt
@@ -101,3 +101,10 @@
 Package declared annotations:
       @android.test.anno.AnnoSimplePackage()
         interface android.test.anno.AnnoSimplePackage
+
+Inner Classes:
+Canonical:android.test.anno.ClassWithInnerClasses.InnerClass Simple:InnerClass
+Canonical:null Simple:
+
+Get annotation with missing class should not throw
+Got expected TypeNotPresentException
diff --git a/test/005-annotations/src/android/test/anno/AnnoMissingClass.java b/test/005-annotations/src/android/test/anno/AnnoMissingClass.java
new file mode 100644
index 0000000..c32e9a2
--- /dev/null
+++ b/test/005-annotations/src/android/test/anno/AnnoMissingClass.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.test.anno;
+
+import java.lang.annotation.*;
+
+@Retention(RetentionPolicy.RUNTIME)
+public @interface AnnoMissingClass {
+    Class value();
+}
diff --git a/test/005-annotations/src/android/test/anno/ClassWithInnerClasses.java b/test/005-annotations/src/android/test/anno/ClassWithInnerClasses.java
new file mode 100644
index 0000000..e151f1a
--- /dev/null
+++ b/test/005-annotations/src/android/test/anno/ClassWithInnerClasses.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.test.anno;
+
+public class ClassWithInnerClasses {
+  public class InnerClass {
+    public String toString() {
+      return "Canonical:" + getClass().getCanonicalName() + " Simple:" + getClass().getSimpleName();
+    }
+  }
+  Object anonymousClass = new Object() {
+    public String toString() {
+      return "Canonical:" + getClass().getCanonicalName() + " Simple:" + getClass().getSimpleName();
+    }
+  };
+
+  public void print() {
+    System.out.println(new InnerClass());
+    System.out.println(anonymousClass);
+  }
+}
diff --git a/test/005-annotations/src/android/test/anno/ClassWithMissingAnnotation.java b/test/005-annotations/src/android/test/anno/ClassWithMissingAnnotation.java
new file mode 100644
index 0000000..8cfdd8c
--- /dev/null
+++ b/test/005-annotations/src/android/test/anno/ClassWithMissingAnnotation.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.test.anno;
+
+// Add annotation for missing type to cause TypeNotPresentException.
+@AnnoMissingClass(MissingAnnotation.class)
+public class ClassWithMissingAnnotation {
+}
diff --git a/test/005-annotations/src/android/test/anno/TestAnnotations.java b/test/005-annotations/src/android/test/anno/TestAnnotations.java
index 1deff33..7b74a73 100644
--- a/test/005-annotations/src/android/test/anno/TestAnnotations.java
+++ b/test/005-annotations/src/android/test/anno/TestAnnotations.java
@@ -180,5 +180,24 @@
         printAnnotationArray("    ", TestAnnotations.class.getPackage().getAnnotations());
         System.out.println("Package declared annotations:");
         printAnnotationArray("    ", TestAnnotations.class.getPackage().getDeclaredAnnotations());
+
+        System.out.println();
+
+        // Test inner classes.
+        System.out.println("Inner Classes:");
+        new ClassWithInnerClasses().print();
+
+        System.out.println();
+
+        // Test TypeNotPresentException.
+        try {
+            AnnoMissingClass missingAnno =
+                ClassWithMissingAnnotation.class.getAnnotation(AnnoMissingClass.class);
+            System.out.println("Get annotation with missing class should not throw");
+            System.out.println(missingAnno.value());
+            System.out.println("Getting value of missing annotaton should have thrown");
+        } catch (TypeNotPresentException expected) {
+            System.out.println("Got expected TypeNotPresentException");
+        }
     }
 }
diff --git a/test/1337-gc-coverage/check b/test/1337-gc-coverage/check
new file mode 100755
index 0000000..842bdc6
--- /dev/null
+++ b/test/1337-gc-coverage/check
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Check that the string "error" isn't present
+if grep error "$2"; then
+    exit 1
+else
+    exit 0
+fi
diff --git a/test/1337-gc-coverage/expected.txt b/test/1337-gc-coverage/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/1337-gc-coverage/expected.txt
diff --git a/test/1337-gc-coverage/gc_coverage.cc b/test/1337-gc-coverage/gc_coverage.cc
new file mode 100644
index 0000000..7cf30bd
--- /dev/null
+++ b/test/1337-gc-coverage/gc_coverage.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "jni.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace {
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_performHomogeneousSpaceCompact(JNIEnv*, jclass) {
+  return Runtime::Current()->GetHeap()->PerformHomogeneousSpaceCompact() == gc::kSuccess ?
+      JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportHomogeneousSpaceCompact(JNIEnv*, jclass) {
+  return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ?
+      JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_incrementDisableMovingGC(JNIEnv*, jclass) {
+  Runtime::Current()->GetHeap()->IncrementDisableMovingGC(Thread::Current());
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_decrementDisableMovingGC(JNIEnv*, jclass) {
+  Runtime::Current()->GetHeap()->DecrementDisableMovingGC(Thread::Current());
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_objectAddress(JNIEnv* env, jclass, jobject object) {
+  ScopedObjectAccess soa(env);
+  return reinterpret_cast<jlong>(soa.Decode<mirror::Object*>(object));
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportCollectorTransition(JNIEnv*, jclass) {
+  // Same as supportHomogeneousSpaceCompact for now.
+  return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ?
+      JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionToSS(JNIEnv*, jclass) {
+  Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeSS);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_transitionToCMS(JNIEnv*, jclass) {
+  Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeCMS);
+}
+
+}  // namespace
+}  // namespace art
diff --git a/test/1337-gc-coverage/info.txt b/test/1337-gc-coverage/info.txt
new file mode 100644
index 0000000..7e3acd3
--- /dev/null
+++ b/test/1337-gc-coverage/info.txt
@@ -0,0 +1 @@
+Tests internal GC functions which are not exposed through normal APIs.
\ No newline at end of file
diff --git a/test/1337-gc-coverage/src/Main.java b/test/1337-gc-coverage/src/Main.java
new file mode 100644
index 0000000..7875eb1
--- /dev/null
+++ b/test/1337-gc-coverage/src/Main.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.TreeMap;
+
+public class Main {
+  private static TreeMap<Integer, Object> treeMap = new TreeMap<Integer, Object>();
+
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    testHomogeneousCompaction();
+    testCollectorTransitions();
+    System.out.println("Done.");
+  }
+
+  private static void allocateStuff() {
+    for (int i = 0; i < 1000; ++i) {
+      Object o = new Object();
+      treeMap.put(o.hashCode(), o);
+    }
+  }
+
+  public static void testHomogeneousCompaction() {
+    System.out.println("Attempting homogeneous compaction");
+    final boolean supportHSC = supportHomogeneousSpaceCompact();
+    Object o = new Object();
+    long addressBefore = objectAddress(o);
+    long addressAfter;
+    allocateStuff();
+    final boolean success = performHomogeneousSpaceCompact();
+    allocateStuff();
+    System.out.println("Homogeneous compaction support=" + supportHSC + " success=" + success);
+    if (supportHSC != success) {
+      System.out.println("error: Expected " + supportHSC + " but got " + success);
+    }
+    if (success) {
+      allocateStuff();
+      addressAfter = objectAddress(o);
+      // This relies on the compaction copying from one space to another space and there being no
+      // overlap.
+      if (addressBefore == addressAfter) {
+        System.out.println("error: Expected different adddress " + addressBefore + " vs " +
+            addressAfter);
+      }
+    }
+    if (supportHSC) {
+      incrementDisableMovingGC();
+      if (performHomogeneousSpaceCompact()) {
+        System.out.println("error: Compaction succeeded when moving GC is disabled");
+      }
+      decrementDisableMovingGC();
+      if (!performHomogeneousSpaceCompact()) {
+        System.out.println("error: Compaction failed when moving GC is enabled");
+      }
+    }
+  }
+
+  private static void testCollectorTransitions() {
+    if (supportCollectorTransition()) {
+      Object o = new Object();
+      // Transition to semi-space collector.
+      allocateStuff();
+      transitionToSS();
+      allocateStuff();
+      long addressBefore = objectAddress(o);
+      Runtime.getRuntime().gc();
+      long addressAfter = objectAddress(o);
+      if (addressBefore == addressAfter) {
+        System.out.println("error: Expected different adddress " + addressBefore + " vs " +
+            addressAfter);
+      }
+      // Transition back to CMS.
+      transitionToCMS();
+      allocateStuff();
+      addressBefore = objectAddress(o);
+      Runtime.getRuntime().gc();
+      addressAfter = objectAddress(o);
+      if (addressBefore != addressAfter) {
+        System.out.println("error: Expected same adddress " + addressBefore + " vs " +
+            addressAfter);
+      }
+    }
+  }
+
+  // Methods to get access to ART internals.
+  private static native boolean supportHomogeneousSpaceCompact();
+  private static native boolean performHomogeneousSpaceCompact();
+  private static native void incrementDisableMovingGC();
+  private static native void decrementDisableMovingGC();
+  private static native long objectAddress(Object object);
+  private static native boolean supportCollectorTransition();
+  private static native void transitionToSS();
+  private static native void transitionToCMS();
+}
diff --git a/test/999-jni-perf/check b/test/999-jni-perf/check
new file mode 100755
index 0000000..ffbb8cf
--- /dev/null
+++ b/test/999-jni-perf/check
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Only compare the last line.
+tail -n 1 "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null
\ No newline at end of file
diff --git a/test/999-jni-perf/expected.txt b/test/999-jni-perf/expected.txt
new file mode 100644
index 0000000..a965a70
--- /dev/null
+++ b/test/999-jni-perf/expected.txt
@@ -0,0 +1 @@
+Done
diff --git a/test/999-jni-perf/info.txt b/test/999-jni-perf/info.txt
new file mode 100644
index 0000000..010b57b
--- /dev/null
+++ b/test/999-jni-perf/info.txt
@@ -0,0 +1 @@
+Tests for measuring performance of JNI state changes.
diff --git a/test/999-jni-perf/perf-jni.cc b/test/999-jni-perf/perf-jni.cc
new file mode 100644
index 0000000..51eeb83
--- /dev/null
+++ b/test/999-jni-perf/perf-jni.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+
+#include "jni.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_perfJniEmptyCall(JNIEnv*, jobject) {
+  return 0;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_perfSOACall(JNIEnv*, jobject) {
+  ScopedObjectAccess soa(Thread::Current());
+  return 0;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_perfSOAUncheckedCall(JNIEnv*, jobject) {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  return 0;
+}
+
+}  // namespace
+
+}  // namespace art
diff --git a/test/999-jni-perf/src/Main.java b/test/999-jni-perf/src/Main.java
new file mode 100644
index 0000000..032e700
--- /dev/null
+++ b/test/999-jni-perf/src/Main.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public Main() {
+  }
+
+  private static final String MSG = "ABCDE";
+
+  native int perfJniEmptyCall();
+  native int perfSOACall();
+  native int perfSOAUncheckedCall();
+
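+  // Runs each measurement loop N times and prints the average time per call in ns.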
+  int runPerfTest(long N) {
+    long start = System.nanoTime();
+    for (long i = 0; i < N; i++) {
+      char c = MSG.charAt(2);
+    }
+    long elapse = System.nanoTime() - start;
+    System.out.println("Fast JNI (charAt): " + (double)elapse / N);
+
+    start = System.nanoTime();
+    for (long i = 0; i < N; i++) {
+      perfJniEmptyCall();
+    }
+    elapse = System.nanoTime() - start;
+    System.out.println("Empty call: " + (double)elapse / N);
+
+    start = System.nanoTime();
+    for (long i = 0; i < N; i++) {
+      perfSOACall();
+    }
+    elapse = System.nanoTime() - start;
+    System.out.println("SOA call: " + (double)elapse / N);
+
+    start = System.nanoTime();
+    for (long i = 0; i < N; i++) {
+      perfSOAUncheckedCall();
+    }
+    elapse = System.nanoTime() - start;
+    System.out.println("SOA unchecked call: " + (double)elapse / N);
+
+    return 0;
+  }
+
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    long iterations = 1000000;
+    if (args.length > 1) {
+      iterations = Long.parseLong(args[1], 10);
+    }
+    Main m = new Main();
+    m.runPerfTest(iterations);
+    System.out.println("Done");
+  }
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 82f8c79..c3d1576 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -29,6 +29,7 @@
   116-nodex2oat/nodex2oat.cc \
   117-nopatchoat/nopatchoat.cc \
   118-noimage-dex2oat/noimage-dex2oat.cc \
+  1337-gc-coverage/gc_coverage.cc \
   137-cfi/cfi.cc \
   139-register-natives/regnative.cc \
   454-get-vreg/get_vreg_jni.cc \
@@ -36,7 +37,8 @@
   457-regs/regs_jni.cc \
   461-get-reference-vreg/get_reference_vreg_jni.cc \
   466-get-live-vreg/get_live_vreg_jni.cc \
-  497-inlining-and-class-loader/clear_dex_cache.cc
+  497-inlining-and-class-loader/clear_dex_cache.cc \
+  999-jni-perf/perf-jni.cc
 
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
@@ -52,14 +54,16 @@
       $$(error expected target or host for argument 1, received $(1))
     endif
   endif
-  ifneq ($(2),d)
+  ifneq ($(2),debug)
     ifneq ($(2),)
-      $$(error d or empty for argument 2, received $(2))
+      $$(error debug or empty for argument 2, received $(2))
     endif
+    suffix :=
+  else
+    suffix := d
   endif
 
   art_target_or_host := $(1)
-  suffix := $(2)
 
   include $(CLEAR_VARS)
   LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
@@ -98,11 +102,11 @@
 
 ifeq ($(ART_BUILD_TARGET),true)
   $(eval $(call build-libarttest,target,))
-  $(eval $(call build-libarttest,target,d))
+  $(eval $(call build-libarttest,target,debug))
 endif
 ifeq ($(ART_BUILD_HOST),true)
   $(eval $(call build-libarttest,host,))
-  $(eval $(call build-libarttest,host,d))
+  $(eval $(call build-libarttest,host,debug))
 endif
 
 # Clear locally used variables.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 39dc030..ad3fb41 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -37,9 +37,9 @@
 QUIET="n"
 RELOCATE="y"
 SECONDARY_DEX=""
-TIME_OUT="y"
-# Value in minutes.
-TIME_OUT_VALUE=10
+TIME_OUT="gdb"  # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
+# Value in seconds
+TIME_OUT_VALUE=600  # 10 minutes.
 USE_GDB="n"
 USE_JVM="n"
 VERIFY="y" # y=yes,n=no,s=softfail
@@ -67,7 +67,7 @@
             echo "$0 missing argument to --testlib" 1>&2
             exit 1
         fi
-       	ARGS="${ARGS} $1"
+        ARGS="${ARGS} $1"
         shift
     elif [ "x$1" = "x-Xcompiler-option" ]; then
         shift
@@ -459,15 +459,32 @@
 
     cmdline="$dalvikvm_cmdline"
 
-    if [ "$TIME_OUT" = "y" ]; then
+    if [ "$TIME_OUT" = "gdb" ]; then
+      if [ `uname` = "Darwin" ]; then
+        # Fall back to timeout on Mac.
+        TIME_OUT="timeout"
+      elif [ "$ISA" = "x86" ]; then
+        # The prctl call may fail for 32-bit processes on an older (3.2) 64-bit Linux kernel. Fall back to timeout.
+        TIME_OUT="timeout"
+      else
+        # Check if gdb is available.
+        gdb --eval-command="quit" > /dev/null 2>&1
+        if [ $? != 0 ]; then
+          # gdb isn't available. Fall back to timeout.
+          TIME_OUT="timeout"
+        fi
+      fi
+    fi
+
+    if [ "$TIME_OUT" = "timeout" ]; then
       # Add timeout command if time out is desired.
       #
       # Note: We use nested timeouts. The inner timeout sends SIGRTMIN+2 (usually 36) to ART, which
       #       will induce a full thread dump before abort. However, dumping threads might deadlock,
       #       so the outer timeout sends the regular SIGTERM after an additional minute to ensure
       #       termination (without dumping all threads).
-      TIME_PLUS_ONE=$(($TIME_OUT_VALUE + 1))
-      cmdline="timeout ${TIME_PLUS_ONE}m timeout -s SIGRTMIN+2 ${TIME_OUT_VALUE}m $cmdline"
+      TIME_PLUS_ONE=$(($TIME_OUT_VALUE + 60))
+      cmdline="timeout ${TIME_PLUS_ONE}s timeout -s SIGRTMIN+2 ${TIME_OUT_VALUE}s $cmdline"
     fi
 
     if [ "$DEV_MODE" = "y" ]; then
@@ -502,12 +519,39 @@
       # When running under gdb, we cannot do piping and grepping...
       $cmdline "$@"
     else
-      trap 'kill -INT -$pid' INT
-      $cmdline "$@" 2>&1 & pid=$!
-      wait $pid
-      # Add extra detail if time out is enabled.
-      if [ ${PIPESTATUS[0]} = 124 ] && [ "$TIME_OUT" = "y" ]; then
-        echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+      if [ "$TIME_OUT" != "gdb" ]; then
+        trap 'kill -INT -$pid' INT
+        $cmdline "$@" 2>&1 & pid=$!
+        wait $pid
+        # Add extra detail if time out is enabled.
+        if [ ${PIPESTATUS[0]} = 124 ] && [ "$TIME_OUT" = "timeout" ]; then
+          echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+        fi
+      else
+        # Run the test, producing a thread dump via gdb if it times out.
+        trap 'kill -INT -$pid' INT
+        $cmdline "$@" 2>&1 & pid=$!
+        # Spawn a watcher process.
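+        # If the test hangs, gdb dumps all threads and forces exit(124) to
+        # match the exit status the timeout command uses on the other path.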
+        ( sleep $TIME_OUT_VALUE && \
+          echo "##### Thread dump using gdb on test timeout" && \
+          ( gdb -q -p $pid --eval-command="info thread" --eval-command="thread apply all bt" \
+                           --eval-command="call exit(124)" --eval-command=quit || \
+            kill $pid )) 2> /dev/null & watcher=$!
+        wait $pid
+        test_exit_status=$?
+        pkill -P $watcher 2> /dev/null # kill the sleep which will in turn end the watcher as well
+        if [ $test_exit_status = 0 ]; then
+          # The test finished normally.
+          exit 0
+        else
+          # The test failed or timed out.
+          if [ $test_exit_status = 124 ]; then
+            # The test timed out.
+            echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2
+          fi
+        fi
       fi
     fi
 fi
diff --git a/tools/ahat/src/DocString.java b/tools/ahat/src/DocString.java
index 1d997dc..19666de 100644
--- a/tools/ahat/src/DocString.java
+++ b/tools/ahat/src/DocString.java
@@ -33,11 +33,18 @@
 
   /**
    * Construct a new DocString, initialized with the given text.
-   * Format arguments are supported.
    */
-  public static DocString text(String format, Object... args) {
+  public static DocString text(String str) {
     DocString doc = new DocString();
-    return doc.append(format, args);
+    return doc.append(str);
+  }
+
+  /**
+   * Construct a new DocString, initialized with the given formatted text.
+   */
+  public static DocString format(String format, Object... args) {
+    DocString doc = new DocString();
+    return doc.appendFormat(format, args);
   }
 
   /**
@@ -58,15 +65,22 @@
 
   /**
    * Append literal text to the given doc string.
-   * Format arguments are supported.
    * Returns this object.
    */
-  public DocString append(String format, Object... args) {
-    String text = String.format(format, args);
+  public DocString append(String text) {
     mStringBuilder.append(HtmlEscapers.htmlEscaper().escape(text));
     return this;
   }
 
+  /**
+   * Append formatted text to the given doc string.
+   * Returns this object.
+   */
+  public DocString appendFormat(String format, Object... args) {
+    append(String.format(format, args));
+    return this;
+  }
+
   public DocString append(DocString str) {
     mStringBuilder.append(str.html());
     return this;
@@ -101,10 +115,9 @@
 
   /**
    * Convenience function for constructing a URI from a string with a uri
-   * known to be valid. Format arguments are supported.
+   * known to be valid.
    */
-  public static URI uri(String format, Object... args) {
-    String uriString = String.format(format, args);
+  public static URI uri(String uriString) {
     try {
       return new URI(uriString);
     } catch (URISyntaxException e) {
@@ -113,6 +126,14 @@
   }
 
   /**
+   * Convenience function for constructing a URI from a formatted string with
+   * a uri known to be valid.
+   */
+  public static URI formattedUri(String format, Object... args) {
+    return uri(String.format(format, args));
+  }
+
+  /**
    * Render the DocString as html.
    */
   public String html() {
diff --git a/tools/ahat/src/DominatedList.java b/tools/ahat/src/DominatedList.java
index 53d1073..123d8be 100644
--- a/tools/ahat/src/DominatedList.java
+++ b/tools/ahat/src/DominatedList.java
@@ -140,7 +140,7 @@
   //  (showing X of Y objects - show none - show less - show more - show all)
   private static void printMenu(Doc doc, Query query, int shown, int all) {
     DocString menu = new DocString();
-    menu.append("(%d of %d objects shown - ", shown, all);
+    menu.appendFormat("(%d of %d objects shown - ", shown, all);
     if (shown > 0) {
       int less = Math.max(0, shown - kIncrAmount);
       menu.appendLink(query.with("dominated", 0), DocString.text("show none"));
diff --git a/tools/ahat/src/HeapTable.java b/tools/ahat/src/HeapTable.java
index 60bb387..37d5816 100644
--- a/tools/ahat/src/HeapTable.java
+++ b/tools/ahat/src/HeapTable.java
@@ -76,10 +76,10 @@
       for (Heap heap : heaps) {
         long size = config.getSize(elem, heap);
         total += size;
-        vals.add(DocString.text("%,14d", size));
+        vals.add(DocString.format("%,14d", size));
       }
       if (showTotal) {
-        vals.add(DocString.text("%,14d", total));
+        vals.add(DocString.format("%,14d", total));
       }
 
       for (ValueConfig<T> value : values) {
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/ObjectHandler.java
index eecd7d1..5e321e2 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/ObjectHandler.java
@@ -39,7 +39,7 @@
     long id = query.getLong("id", 0);
     Instance inst = mSnapshot.findInstance(id);
     if (inst == null) {
-      doc.println(DocString.text("No object with id %08xl", id));
+      doc.println(DocString.format("No object with id %08x", id));
       return;
     }
 
@@ -53,10 +53,10 @@
     ClassObj cls = inst.getClassObj();
     doc.descriptions();
     doc.description(DocString.text("Class"), Value.render(cls));
-    doc.description(DocString.text("Size"), DocString.text("%d", inst.getSize()));
+    doc.description(DocString.text("Size"), DocString.format("%d", inst.getSize()));
     doc.description(
         DocString.text("Retained Size"),
-        DocString.text("%d", inst.getTotalRetainedSize()));
+        DocString.format("%d", inst.getTotalRetainedSize()));
     doc.description(DocString.text("Heap"), DocString.text(inst.getHeap().getName()));
     doc.end();
 
@@ -89,7 +89,7 @@
     doc.table(new Column("Index", Column.Align.RIGHT), new Column("Value"));
     Object[] elements = array.getValues();
     for (int i = 0; i < elements.length; i++) {
-      doc.row(DocString.text("%d", i), Value.render(elements[i]));
+      doc.row(DocString.format("%d", i), Value.render(elements[i]));
     }
     doc.end();
   }
@@ -146,7 +146,7 @@
     if (bitmap != null) {
       doc.section("Bitmap Image");
       doc.println(DocString.image(
-            DocString.uri("bitmap?id=%d", bitmap.getId()), "bitmap image"));
+            DocString.formattedUri("bitmap?id=%d", bitmap.getId()), "bitmap image"));
     }
   }
 
diff --git a/tools/ahat/src/ObjectsHandler.java b/tools/ahat/src/ObjectsHandler.java
index 066c9d5..4e9c42e 100644
--- a/tools/ahat/src/ObjectsHandler.java
+++ b/tools/ahat/src/ObjectsHandler.java
@@ -53,7 +53,7 @@
         new Column("Object"));
     for (Instance inst : insts) {
       doc.row(
-          DocString.text("%,d", inst.getSize()),
+          DocString.format("%,d", inst.getSize()),
           DocString.text(inst.getHeap().getName()),
           Value.render(inst));
     }
diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/OverviewHandler.java
index 6e6c323..f49c009 100644
--- a/tools/ahat/src/OverviewHandler.java
+++ b/tools/ahat/src/OverviewHandler.java
@@ -38,7 +38,7 @@
     doc.descriptions();
     doc.description(
         DocString.text("ahat version"),
-        DocString.text("ahat-%s", OverviewHandler.class.getPackage().getImplementationVersion()));
+        DocString.format("ahat-%s", OverviewHandler.class.getPackage().getImplementationVersion()));
     doc.description(DocString.text("hprof file"), DocString.text(mHprof.toString()));
     doc.end();
 
diff --git a/tools/ahat/src/SiteHandler.java b/tools/ahat/src/SiteHandler.java
index 8fbc176..0a9381e 100644
--- a/tools/ahat/src/SiteHandler.java
+++ b/tools/ahat/src/SiteHandler.java
@@ -61,7 +61,7 @@
 
             public DocString render(Site element) {
               return DocString.link(
-                  DocString.uri("site?stack=%d&depth=%d",
+                  DocString.formattedUri("site?stack=%d&depth=%d",
                     element.getStackId(), element.getStackDepth()),
                   DocString.text(element.getName()));
             }
@@ -87,11 +87,11 @@
     for (Site.ObjectsInfo info : infos) {
       String className = AhatSnapshot.getClassName(info.classObj);
       doc.row(
-          DocString.text("%,14d", info.numBytes),
+          DocString.format("%,14d", info.numBytes),
           DocString.link(
-            DocString.uri("objects?stack=%d&depth=%d&heap=%s&class=%s",
+            DocString.formattedUri("objects?stack=%d&depth=%d&heap=%s&class=%s",
                 site.getStackId(), site.getStackDepth(), info.heap.getName(), className),
-            DocString.text("%,14d", info.numInstances)),
+            DocString.format("%,14d", info.numInstances)),
           DocString.text(info.heap.getName()),
           Value.render(info.classObj));
     }
diff --git a/tools/ahat/src/SitePrinter.java b/tools/ahat/src/SitePrinter.java
index 9c0c2e0..be87032 100644
--- a/tools/ahat/src/SitePrinter.java
+++ b/tools/ahat/src/SitePrinter.java
@@ -51,7 +51,7 @@
               str.append("→ ");
             }
             str.appendLink(
-                DocString.uri("site?stack=%d&depth=%d",
+                DocString.formattedUri("site?stack=%d&depth=%d",
                     element.getStackId(), element.getStackDepth()),
                 DocString.text(element.getName()));
             return str;
diff --git a/tools/ahat/src/Value.java b/tools/ahat/src/Value.java
index 22c3b8f..9b483fa 100644
--- a/tools/ahat/src/Value.java
+++ b/tools/ahat/src/Value.java
@@ -45,7 +45,7 @@
     // Annotate Strings with their values.
     String stringValue = InstanceUtils.asString(inst);
     if (stringValue != null) {
-      link.append("\"%s\"", stringValue);
+      link.appendFormat("\"%s\"", stringValue);
     }
 
     // Annotate DexCache with its location.
@@ -54,14 +54,14 @@
       link.append(" for " + dexCacheLocation);
     }
 
-    URI objTarget = DocString.uri("object?id=%d", inst.getId());
+    URI objTarget = DocString.formattedUri("object?id=%d", inst.getId());
     DocString formatted = DocString.link(objTarget, link);
 
     // Annotate bitmaps with a thumbnail.
     Instance bitmap = InstanceUtils.getAssociatedBitmapInstance(inst);
     String thumbnail = "";
     if (bitmap != null) {
-      URI uri = DocString.uri("bitmap?id=%d", bitmap.getId());
+      URI uri = DocString.formattedUri("bitmap?id=%d", bitmap.getId());
       formatted.appendThumbnail(uri, "bitmap image");
     }
     return formatted;
@@ -74,7 +74,7 @@
     if (val instanceof Instance) {
       return renderInstance((Instance)val);
     } else {
-      return DocString.text("%s", val);
+      return DocString.format("%s", val);
     }
   }
 }
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 1dced32..2ee87e5 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -27,9 +27,10 @@
 showcommands=
 make_command=
 
-if [[ "$TARGET_PRODUCT" == "armv8" ]]; then
-  linker="linker64"
-fi
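+# armv8 and mips64r6 products are 64-bit-only and need the 64-bit linker.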
+case "$TARGET_PRODUCT" in
+  (armv8|mips64r6) linker="linker64";;
+esac
 
 if [[ "$ART_TEST_ANDROID_ROOT" != "" ]]; then
   android_root="$ART_TEST_ANDROID_ROOT"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 728991d..7ada189 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -130,28 +130,7 @@
   description: "Crypto failures",
   result: EXEC_FAILED,
   names: ["libcore.javax.crypto.CipherTest#testCipher_ShortBlock_Failure",
-          "libcore.javax.crypto.CipherTest#testCipher_Success",
-          "libcore.javax.crypto.spec.AlgorithmParametersTestDESede#testAlgorithmParameters",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#testDoFinalbyteArrayintintbyteArrayint",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#testUpdatebyteArrayintintbyteArrayint",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinal$BI",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinal$BII$B",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinalLjava_nio_ByteBufferLjava_nio_ByteBuffer",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getAlgorithm",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getBlockSize",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getInstanceLjava_lang_String",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_getOutputSizeI",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithAlgorithmParameterSpec",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKey",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKeyAlgorithmParameterSpecSecureRandom",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithSecureRandom",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_unwrap$BLjava_lang_StringI",
-          "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_updateLjava_nio_ByteBufferLjava_nio_ByteBuffer",
-          "org.apache.harmony.crypto.tests.javax.crypto.func.CipherAesWrapTest#test_AesWrap",
-          "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeISO",
-          "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeNoISO",
-          "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeWrapTest#test_DESedeWrap",
-          "org.apache.harmony.crypto.tests.javax.crypto.func.CipherPBETest#test_PBEWithMD5AndDES"]
+          "libcore.javax.crypto.CipherTest#testCipher_Success"]
 },
 {
   description: "Flake when running with libartd.so or interpreter",