Merge "Throw VerifyError when trying to extend a final class."
diff --git a/Android.mk b/Android.mk
index 2df1b13..8735d7c 100644
--- a/Android.mk
+++ b/Android.mk
@@ -386,11 +386,15 @@
 endif
 ifeq (true,$(art_target_include_debug_build))
 LOCAL_REQUIRED_MODULES += \
+    dex2oatd \
+    dexoptanalyzerd \
     libartd \
     libartd-compiler \
     libopenjdkd \
     libopenjdkjvmd \
     libopenjdkjvmtid \
+    patchoatd \
+    profmand \
 
 endif
 endif
diff --git a/benchmark/scoped-primitive-array/scoped_primitive_array.cc b/benchmark/scoped-primitive-array/scoped_primitive_array.cc
index 1664157..005cae4 100644
--- a/benchmark/scoped-primitive-array/scoped_primitive_array.cc
+++ b/benchmark/scoped-primitive-array/scoped_primitive_array.cc
@@ -15,7 +15,7 @@
  */
 
 #include "jni.h"
-#include "ScopedPrimitiveArray.h"
+#include "nativehelper/ScopedPrimitiveArray.h"
 
 extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureByteArray(
     JNIEnv* env, jclass, int reps, jbyteArray arr) {
diff --git a/build/Android.bp b/build/Android.bp
index c5ff486..d617116 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -99,6 +99,15 @@
                 // Bug: 15446488. We don't omit the frame pointer to work around
                 // clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
                 "-fno-omit-frame-pointer",
+                // The build assumes that all our x86/x86_64 hosts (such as buildbots and developer
+                // desktops) support at least sse4.2/popcount. This firstly implies that the ART
+                // runtime binary itself may exploit these features. Secondly, this implies that
+                // the ART runtime passes these feature flags to dex2oat and JIT by calling the
+                // method InstructionSetFeatures::FromCppDefines(). Since invoking dex2oat directly
+                // does not pick up these flags, cross-compiling from a x86/x86_64 host to a
+                // x86/x86_64 target should not be affected.
+                "-msse4.2",
+                "-mpopcnt",
             ],
             host_ldlibs: [
                 "-ldl",
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index cf6d1ec..571c91a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -104,7 +104,7 @@
 
 ART_GTEST_atomic_dex_ref_map_test_DEX_DEPS := Interfaces
 ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB ErroneousInit ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode
-ART_GTEST_class_loader_context_test_DEX_DEPS := Main MultiDex MyClass
+ART_GTEST_class_loader_context_test_DEX_DEPS := Main MultiDex MyClass ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD
 ART_GTEST_class_table_test_DEX_DEPS := XandY
 ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex
 ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
diff --git a/build/art.go b/build/art.go
index 6c9aa89..19b39cd 100644
--- a/build/art.go
+++ b/build/art.go
@@ -153,6 +153,11 @@
 	cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta)
 	cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta)
 
+	if len(ctx.AConfig().SanitizeHost()) > 0 && !envFalse(ctx, "ART_ENABLE_ADDRESS_SANITIZER") {
+		// We enable full sanitization on the host by default.
+		cflags = append(cflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
+	}
+
 	return cflags
 }
 
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 3683695..07bfe31 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -207,8 +207,10 @@
 
   compiler_options_.reset(new CompilerOptions);
   verification_results_.reset(new VerificationResults(compiler_options_.get()));
-  callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
-                                              CompilerCallbacks::CallbackMode::kCompileApp));
+  QuickCompilerCallbacks* callbacks =
+      new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp);
+  callbacks->SetVerificationResults(verification_results_.get());
+  callbacks_.reset(callbacks);
 }
 
 Compiler::Kind CommonCompilerTest::GetCompilerKind() const {
@@ -265,8 +267,8 @@
   mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
   CHECK(klass != nullptr) << "Class not found " << class_name;
   auto pointer_size = class_linker_->GetImagePointerSize();
-  ArtMethod* method = klass->FindDirectMethod(method_name, signature, pointer_size);
-  CHECK(method != nullptr) << "Direct method not found: "
+  ArtMethod* method = klass->FindClassMethod(method_name, signature, pointer_size);
+  CHECK(method != nullptr && method->IsDirect()) << "Direct method not found: "
       << class_name << "." << method_name << signature;
   CompileMethod(method);
 }
@@ -279,8 +281,8 @@
   mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
   CHECK(klass != nullptr) << "Class not found " << class_name;
   auto pointer_size = class_linker_->GetImagePointerSize();
-  ArtMethod* method = klass->FindVirtualMethod(method_name, signature, pointer_size);
-  CHECK(method != nullptr) << "Virtual method not found: "
+  ArtMethod* method = klass->FindClassMethod(method_name, signature, pointer_size);
+  CHECK(method != nullptr && !method->IsDirect()) << "Virtual method not found: "
       << class_name << "." << method_name << signature;
   CompileMethod(method);
 }
diff --git a/compiler/compiler.h b/compiler/compiler.h
index cd4c591..ba89cb1 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -25,6 +25,7 @@
 
 namespace jit {
   class JitCodeCache;
+  class JitLogger;
 }  // namespace jit
 namespace mirror {
   class ClassLoader;
@@ -76,7 +77,8 @@
   virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
                           jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
                           ArtMethod* method ATTRIBUTE_UNUSED,
-                          bool osr ATTRIBUTE_UNUSED)
+                          bool osr ATTRIBUTE_UNUSED,
+                          jit::JitLogger* jit_logger ATTRIBUTE_UNUSED)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return false;
   }
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index fba1136..9d57b96 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -291,13 +291,14 @@
   ScopedObjectAccess soa(Thread::Current());
 
   ClassLinker* class_linker = unit_.GetClassLinker();
-  ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
-      GetDexFile(),
-      method_idx,
-      unit_.GetDexCache(),
-      unit_.GetClassLoader(),
-      /* referrer */ nullptr,
-      kVirtual);
+  ArtMethod* resolved_method =
+      class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+          GetDexFile(),
+          method_idx,
+          unit_.GetDexCache(),
+          unit_.GetClassLoader(),
+          /* referrer */ nullptr,
+          kVirtual);
 
   if (UNLIKELY(resolved_method == nullptr)) {
     // Clean up any exception left by type resolution.
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index 2572291..e5ff7fc 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -145,9 +145,8 @@
   DCHECK_EQ(invoke_direct->VRegC_35c(),
             method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
   uint32_t method_index = invoke_direct->VRegB_35c();
-  PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-  ArtMethod* target_method =
-      method->GetDexCache()->GetResolvedMethod(method_index, pointer_size);
+  ArtMethod* target_method = Runtime::Current()->GetClassLinker()->LookupResolvedMethod(
+      method_index, method->GetDexCache(), method->GetClassLoader());
   if (kIsDebugBuild && target_method != nullptr) {
     CHECK(!target_method->IsStatic());
     CHECK(target_method->IsConstructor());
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 2100522..a3a6c09 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -26,11 +26,8 @@
 
 class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
   public:
-    QuickCompilerCallbacks(VerificationResults* verification_results,
-                           CompilerCallbacks::CallbackMode mode)
-        : CompilerCallbacks(mode),
-          verification_results_(verification_results),
-          verifier_deps_(nullptr) {}
+    explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode)
+        : CompilerCallbacks(mode) {}
 
     ~QuickCompilerCallbacks() { }
 
@@ -52,8 +49,12 @@
       verifier_deps_.reset(deps);
     }
 
+    void SetVerificationResults(VerificationResults* verification_results) {
+      verification_results_ = verification_results;
+    }
+
   private:
-    VerificationResults* const verification_results_;
+    VerificationResults* verification_results_ = nullptr;
     std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
 };
 
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index db95bd6..b043929 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -111,7 +111,7 @@
     InvokeType invoke_type) {
   DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
   ArtMethod* resolved_method =
-      mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>(
+      mUnit->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
           *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
   if (UNLIKELY(resolved_method == nullptr)) {
     DCHECK(soa.Self()->IsExceptionPending());
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 83d7a3d..cf04e41 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -41,31 +41,31 @@
 #include "compiler.h"
 #include "compiler_callbacks.h"
 #include "compiler_driver-inl.h"
-#include "dex_compilation_unit.h"
-#include "dex_file-inl.h"
-#include "dex_instruction-inl.h"
 #include "dex/dex_to_dex_compiler.h"
 #include "dex/verification_results.h"
 #include "dex/verified_method.h"
+#include "dex_compilation_unit.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
 #include "driver/compiler_options.h"
-#include "intrinsics_enum.h"
-#include "jni_internal.h"
-#include "object_lock.h"
-#include "runtime.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space.h"
-#include "mirror/class_loader.h"
+#include "handle_scope-inl.h"
+#include "intrinsics_enum.h"
+#include "jni_internal.h"
 #include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/throwable.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "object_lock.h"
+#include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "handle_scope-inl.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "thread_pool.h"
@@ -75,8 +75,8 @@
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "utils/swap_space.h"
 #include "vdex_file.h"
-#include "verifier/method_verifier.h"
 #include "verifier/method_verifier-inl.h"
+#include "verifier/method_verifier.h"
 #include "verifier/verifier_deps.h"
 #include "verifier/verifier_enums.h"
 
@@ -291,7 +291,6 @@
       instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set),
       instruction_set_features_(instruction_set_features),
       requires_constructor_barrier_lock_("constructor barrier lock"),
-      compiled_classes_lock_("compiled classes lock"),
       non_relative_linker_patch_count_(0u),
       image_classes_(image_classes),
       classes_to_compile_(compiled_classes),
@@ -374,14 +373,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   PointerSize image_size = class_linker->GetImagePointerSize();
-  mirror::Class* cls = class_linker->FindSystemClass(self, class_name);
+  ObjPtr<mirror::Class> cls = class_linker->FindSystemClass(self, class_name);
   if (cls == nullptr) {
     LOG(FATAL) << "Could not find class of intrinsic " << class_name;
   }
-  ArtMethod* method = (invoke_type == kStatic || invoke_type == kDirect)
-      ? cls->FindDeclaredDirectMethod(method_name, signature, image_size)
-      : cls->FindDeclaredVirtualMethod(method_name, signature, image_size);
-  if (method == nullptr) {
+  ArtMethod* method = cls->FindClassMethod(method_name, signature, image_size);
+  if (method == nullptr || method->GetDeclaringClass() != cls) {
     LOG(FATAL) << "Could not find method of intrinsic "
                << class_name << " " << method_name << " " << signature;
   }
@@ -544,7 +541,7 @@
 
       // TODO: Lookup annotation from DexFile directly without resolving method.
       ArtMethod* method =
-          Runtime::Current()->GetClassLinker()->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+          Runtime::Current()->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
               dex_file,
               method_idx,
               dex_cache,
@@ -1756,7 +1753,7 @@
       }
       if (resolve_fields_and_methods) {
         while (it.HasNextDirectMethod()) {
-          ArtMethod* method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+          ArtMethod* method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
               dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
               it.GetMethodInvokeType(class_def));
           if (method == nullptr) {
@@ -1765,7 +1762,7 @@
           it.Next();
         }
         while (it.HasNextVirtualMethod()) {
-          ArtMethod* method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+          ArtMethod* method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
               dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr,
               it.GetMethodInvokeType(class_def));
           if (method == nullptr) {
@@ -1947,7 +1944,12 @@
         if (compiler_only_verifies) {
           // Just update the compiled_classes_ map. The compiler doesn't need to resolve
           // the type.
-          compiled_classes_.Overwrite(ClassReference(dex_file, i), mirror::Class::kStatusVerified);
+          DexFileReference ref(dex_file, i);
+          mirror::Class::Status existing = mirror::Class::kStatusNotReady;
+          DCHECK(compiled_classes_.Get(ref, &existing)) << ref.dex_file->GetLocation();
+          ClassStateTable::InsertResult result =
+             compiled_classes_.Insert(ref, existing, mirror::Class::kStatusVerified);
+          CHECK_EQ(result, ClassStateTable::kInsertResultSuccess);
         } else {
           // Update the class status, so later compilation stages know they don't need to verify
           // the class.
@@ -1978,6 +1980,13 @@
 void CompilerDriver::Verify(jobject jclass_loader,
                             const std::vector<const DexFile*>& dex_files,
                             TimingLogger* timings) {
+  // Always add the dex files to compiled_classes_. This happens for all compiler filters.
+  for (const DexFile* dex_file : dex_files) {
+    if (!compiled_classes_.HaveDexFile(dex_file)) {
+      compiled_classes_.AddDexFile(dex_file, dex_file->NumClassDefs());
+    }
+  }
+
   if (FastVerify(jclass_loader, dex_files, timings)) {
     return;
   }
@@ -2202,6 +2211,9 @@
                                         size_t thread_count,
                                         TimingLogger* timings) {
   TimingLogger::ScopedTiming t("Verify Dex File", timings);
+  if (!compiled_classes_.HaveDexFile(&dex_file)) {
+    compiled_classes_.AddDexFile(&dex_file, dex_file.NumClassDefs());
+  }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
                                      thread_pool);
@@ -2248,12 +2260,13 @@
     const bool is_app_image = manager_->GetCompiler()->GetCompilerOptions().IsAppImage();
 
     mirror::Class::Status old_status = klass->GetStatus();
+    // Don't initialize classes in boot space when compiling app image
+    if (is_app_image && klass->IsBootStrapClassLoaded()) {
+      // Also return early and don't store the class status in the recorded class status.
+      return;
+    }
     // Only try to initialize classes that were successfully verified.
     if (klass->IsVerified()) {
-      // Don't initialize classes in boot space when compiling app image
-      if (is_app_image && klass->IsBootStrapClassLoaded()) {
-        return;
-      }
       // Attempt to initialize the class but bail if we either need to initialize the super-class
       // or static fields.
       manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
@@ -2860,12 +2873,12 @@
 
 bool CompilerDriver::GetCompiledClass(ClassReference ref, mirror::Class::Status* status) const {
   DCHECK(status != nullptr);
-  MutexLock mu(Thread::Current(), compiled_classes_lock_);
-  ClassStateTable::const_iterator it = compiled_classes_.find(ref);
-  if (it == compiled_classes_.end()) {
+  // The table doesn't know if something wasn't inserted. For this case it will return
+  // kStatusNotReady. To handle this, just assume anything not verified is not compiled.
+  if (!compiled_classes_.Get(DexFileReference(ref.first, ref.second), status) ||
+      *status < mirror::Class::kStatusVerified) {
     return false;
   }
-  *status = it->second;
   return true;
 }
 
@@ -2886,15 +2899,20 @@
           << " of " << status;
   }
 
-  MutexLock mu(Thread::Current(), compiled_classes_lock_);
-  auto it = compiled_classes_.find(ref);
-  if (it == compiled_classes_.end()) {
-    compiled_classes_.Overwrite(ref, status);
-  } else if (status > it->second) {
+  ClassStateTable::InsertResult result;
+  do {
+    DexFileReference dex_ref(ref.first, ref.second);
+    mirror::Class::Status existing = mirror::Class::kStatusNotReady;
+    CHECK(compiled_classes_.Get(dex_ref, &existing)) << dex_ref.dex_file->GetLocation();
+    if (existing >= status) {
+      // Existing status is already better than we expect, break.
+      break;
+    }
     // Update the status if we now have a greater one. This happens with vdex,
     // which records a class is verified, but does not resolve it.
-    it->second = status;
-  }
+    result = compiled_classes_.Insert(dex_ref, existing, status);
+    CHECK(result != ClassStateTable::kInsertResultInvalidDexFile);
+  } while (result != ClassStateTable::kInsertResultSuccess);
 }
 
 CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index a3272d3..93234cb 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -117,12 +117,12 @@
   void CompileAll(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
-      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
+      REQUIRES(!Locks::mutator_lock_, !dex_to_dex_references_lock_);
 
   // Compile a single Method.
   void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!compiled_classes_lock_, !dex_to_dex_references_lock_);
+      REQUIRES(!dex_to_dex_references_lock_);
 
   VerificationResults* GetVerificationResults() const;
 
@@ -153,8 +153,7 @@
   std::unique_ptr<const std::vector<uint8_t>> CreateQuickResolutionTrampoline() const;
   std::unique_ptr<const std::vector<uint8_t>> CreateQuickToInterpreterBridge() const;
 
-  bool GetCompiledClass(ClassReference ref, mirror::Class::Status* status) const
-      REQUIRES(!compiled_classes_lock_);
+  bool GetCompiledClass(ClassReference ref, mirror::Class::Status* status) const;
 
   CompiledMethod* GetCompiledMethod(MethodReference ref) const;
   size_t GetNonRelativeLinkerPatchCount() const;
@@ -337,8 +336,7 @@
   // according to the profile file.
   bool ShouldVerifyClassBasedOnProfile(const DexFile& dex_file, uint16_t class_idx) const;
 
-  void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
-      REQUIRES(!compiled_classes_lock_);
+  void RecordClassStatus(ClassReference ref, mirror::Class::Status status);
 
   // Checks if the specified method has been verified without failures. Returns
   // false if the method is not in the verification results (GetVerificationResults).
@@ -387,7 +385,7 @@
   void PreCompile(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
                   TimingLogger* timings)
-      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
 
@@ -408,12 +406,9 @@
 
   // Do fast verification through VerifierDeps if possible. Return whether
   // verification was successful.
-  // NO_THREAD_SAFETY_ANALYSIS as the method accesses a guarded value in a
-  // single-threaded way.
   bool FastVerify(jobject class_loader,
                   const std::vector<const DexFile*>& dex_files,
-                  TimingLogger* timings)
-      NO_THREAD_SAFETY_ANALYSIS;
+                  TimingLogger* timings);
 
   void Verify(jobject class_loader,
               const std::vector<const DexFile*>& dex_files,
@@ -441,12 +436,12 @@
   void InitializeClasses(jobject class_loader,
                          const std::vector<const DexFile*>& dex_files,
                          TimingLogger* timings)
-      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
+      REQUIRES(!Locks::mutator_lock_);
   void InitializeClasses(jobject class_loader,
                          const DexFile& dex_file,
                          const std::vector<const DexFile*>& dex_files,
                          TimingLogger* timings)
-      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
+      REQUIRES(!Locks::mutator_lock_);
 
   void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
 
@@ -484,10 +479,9 @@
   std::map<ClassReference, bool> requires_constructor_barrier_
       GUARDED_BY(requires_constructor_barrier_lock_);
 
-  using ClassStateTable = SafeMap<const ClassReference, mirror::Class::Status>;
-  // All class references that this compiler has compiled.
-  mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  ClassStateTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
+  // All class references that this compiler has compiled. Indexed by class defs.
+  using ClassStateTable = AtomicDexRefMap<mirror::Class::Status>;
+  ClassStateTable compiled_classes_;
 
   typedef AtomicDexRefMap<CompiledMethod*> MethodTable;
 
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index b4ad325..5d1d972 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -118,10 +118,12 @@
     EXPECT_TRUE(type != nullptr) << "type_idx=" << i
                               << " " << dex.GetTypeDescriptor(dex.GetTypeId(dex::TypeIndex(i)));
   }
-  EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
+  EXPECT_TRUE(dex_cache->StaticMethodSize() == dex_cache->NumResolvedMethods()
+      || dex.NumMethodIds() ==  dex_cache->NumResolvedMethods());
   auto* cl = Runtime::Current()->GetClassLinker();
   auto pointer_size = cl->GetImagePointerSize();
   for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
+    // FIXME: This is outdated for hash-based method array.
     ArtMethod* method = dex_cache->GetResolvedMethod(i, pointer_size);
     EXPECT_TRUE(method != nullptr) << "method_idx=" << i
                                 << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
@@ -133,6 +135,7 @@
   EXPECT_TRUE(dex_cache->StaticArtFieldSize() == dex_cache->NumResolvedFields()
       || dex.NumFieldIds() ==  dex_cache->NumResolvedFields());
   for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
+    // FIXME: This is outdated for hash-based field array.
     ArtField* field = dex_cache->GetResolvedField(i, cl->GetImagePointerSize());
     EXPECT_TRUE(field != nullptr) << "field_idx=" << i
                                << " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index b4777df..0b3ca69 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -102,12 +102,14 @@
       CHECK_ALIGNED(stack_maps_offset, 2);
     }
 
-    method_f_ = my_klass_->FindVirtualMethod("f", "()I", kRuntimePointerSize);
+    method_f_ = my_klass_->FindClassMethod("f", "()I", kRuntimePointerSize);
     ASSERT_TRUE(method_f_ != nullptr);
+    ASSERT_FALSE(method_f_->IsDirect());
     method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
 
-    method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", kRuntimePointerSize);
+    method_g_ = my_klass_->FindClassMethod("g", "(I)V", kRuntimePointerSize);
     ASSERT_TRUE(method_g_ != nullptr);
+    ASSERT_FALSE(method_g_->IsDirect());
     method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
   }
 
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 9d7aff7..252fdd6 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -113,9 +113,9 @@
   mirror::Class* iface_klass = class_linker_->LookupClass(
       self, "LIface;", ObjPtr<mirror::ClassLoader>());
   ASSERT_NE(nullptr, iface_klass);
-  ArtMethod* origin = iface_klass->FindDeclaredVirtualMethod(
-      "defaultMethod", "()V", pointer_size);
+  ArtMethod* origin = iface_klass->FindInterfaceMethod("defaultMethod", "()V", pointer_size);
   ASSERT_NE(nullptr, origin);
+  ASSERT_TRUE(origin->GetDeclaringClass() == iface_klass);
   const void* code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
   // The origin method should have a pointer to quick code
   ASSERT_NE(nullptr, code);
@@ -134,9 +134,11 @@
   mirror::Class* iterable_klass = class_linker_->LookupClass(
       self, "Ljava/lang/Iterable;", ObjPtr<mirror::ClassLoader>());
   ASSERT_NE(nullptr, iterable_klass);
-  origin = iterable_klass->FindDeclaredVirtualMethod(
+  origin = iterable_klass->FindClassMethod(
       "forEach", "(Ljava/util/function/Consumer;)V", pointer_size);
   ASSERT_NE(nullptr, origin);
+  ASSERT_FALSE(origin->IsDirect());
+  ASSERT_TRUE(origin->GetDeclaringClass() == iterable_klass);
   code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
   // the origin method should have a pointer to quick code
   ASSERT_NE(nullptr, code);
diff --git a/compiler/image_test.h b/compiler/image_test.h
index fa714ad..6c3a89b 100644
--- a/compiler/image_test.h
+++ b/compiler/image_test.h
@@ -84,9 +84,10 @@
 
   void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
     CommonCompilerTest::SetUpRuntimeOptions(options);
-    callbacks_.reset(new QuickCompilerCallbacks(
-        verification_results_.get(),
-        CompilerCallbacks::CallbackMode::kCompileBootImage));
+    QuickCompilerCallbacks* new_callbacks =
+        new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileBootImage);
+    new_callbacks->SetVerificationResults(verification_results_.get());
+    callbacks_.reset(new_callbacks);
     options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
   }
 
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4f1fef9..51730cf 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -44,6 +44,7 @@
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/collector/concurrent_copying.h"
 #include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
 #include "gc/verification.h"
@@ -117,19 +118,17 @@
   return false;
 }
 
-static void ClearDexFileCookieCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(obj != nullptr);
-  Class* klass = obj->GetClass();
-  if (klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile)) {
-    ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
-    // Null out the cookie to enable determinism. b/34090128
-    field->SetObject</*kTransactionActive*/false>(obj, nullptr);
-  }
-}
-
 static void ClearDexFileCookies() REQUIRES_SHARED(Locks::mutator_lock_) {
-  Runtime::Current()->GetHeap()->VisitObjects(ClearDexFileCookieCallback, nullptr);
+  auto visitor = [](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(obj != nullptr);
+    Class* klass = obj->GetClass();
+    if (klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile)) {
+      ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
+      // Null out the cookie to enable determinism. b/34090128
+      field->SetObject</*kTransactionActive*/false>(obj, nullptr);
+    }
+  };
+  Runtime::Current()->GetHeap()->VisitObjects(visitor);
 }
 
 bool ImageWriter::PrepareImageAddressSpace() {
@@ -1024,41 +1023,58 @@
 
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
-  ArtMethod* resolution_method = runtime->GetResolutionMethod();
   const DexFile& dex_file = *dex_cache->GetDexFile();
   // Prune methods.
-  ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
-  for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
-    ArtMethod* method =
-        mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
-    DCHECK(method != nullptr) << "Expected resolution method instead of null method";
+  mirror::MethodDexCacheType* resolved_methods = dex_cache->GetResolvedMethods();
+  dex::TypeIndex last_class_idx;  // Initialized to invalid index.
+  ObjPtr<mirror::Class> last_class = nullptr;
+  for (size_t i = 0, num = dex_cache->GetDexFile()->NumMethodIds(); i != num; ++i) {
+    uint32_t slot_idx = dex_cache->MethodSlotIndex(i);
+    auto pair =
+        mirror::DexCache::GetNativePairPtrSize(resolved_methods, slot_idx, target_ptr_size_);
+    uint32_t stored_index = pair.index;
+    ArtMethod* method = pair.object;
+    if (method != nullptr && i > stored_index) {
+      continue;  // Already checked.
+    }
     // Check if the referenced class is in the image. Note that we want to check the referenced
     // class rather than the declaring class to preserve the semantics, i.e. using a MethodId
     // results in resolving the referenced class and that can for example throw OOME.
-    ObjPtr<mirror::Class> referencing_class = class_linker->LookupResolvedType(
-        dex_file,
-        dex_file.GetMethodId(i).class_idx_,
-        dex_cache,
-        class_loader);
-    // Copied methods may be held live by a class which was not an image class but have a
-    // declaring class which is an image class. Set it to the resolution method to be safe and
-    // prevent dangling pointers.
-    if (method->IsCopied() || !KeepClass(referencing_class)) {
-      mirror::DexCache::SetElementPtrSize(resolved_methods,
-                                          i,
-                                          resolution_method,
-                                          target_ptr_size_);
-    } else if (kIsDebugBuild) {
-      // Check that the class is still in the classes table.
-      ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-      CHECK(class_linker->ClassInClassTable(referencing_class)) << "Class "
-          << Class::PrettyClass(referencing_class) << " not in class linker table";
+    const DexFile::MethodId& method_id = dex_file.GetMethodId(i);
+    if (method_id.class_idx_ != last_class_idx) {
+      last_class_idx = method_id.class_idx_;
+      last_class = class_linker->LookupResolvedType(
+          dex_file, last_class_idx, dex_cache, class_loader);
+      if (last_class != nullptr && !KeepClass(last_class)) {
+        last_class = nullptr;
+      }
+    }
+    if (method == nullptr || i < stored_index) {
+      if (last_class != nullptr) {
+        const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
+        Signature signature = dex_file.GetMethodSignature(method_id);
+        if (last_class->IsInterface()) {
+          method = last_class->FindInterfaceMethod(name, signature, target_ptr_size_);
+        } else {
+          method = last_class->FindClassMethod(name, signature, target_ptr_size_);
+        }
+        if (method != nullptr) {
+          // If the referenced class is in the image, the defining class must also be there.
+          DCHECK(KeepClass(method->GetDeclaringClass()));
+          dex_cache->SetResolvedMethod(i, method, target_ptr_size_);
+        }
+      }
+    } else {
+      DCHECK_EQ(i, stored_index);
+      if (last_class == nullptr) {
+        dex_cache->ClearResolvedMethod(stored_index, target_ptr_size_);
+      }
     }
   }
   // Prune fields and make the contents of the field array deterministic.
   mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
-  dex::TypeIndex last_class_idx;  // Initialized to invalid index.
-  ObjPtr<mirror::Class> last_class = nullptr;
+  last_class_idx = dex::TypeIndex();  // Initialized to invalid index.
+  last_class = nullptr;
   for (size_t i = 0, end = dex_file.NumFieldIds(); i < end; ++i) {
     uint32_t slot_idx = dex_cache->FieldSlotIndex(i);
     auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, slot_idx, target_ptr_size_);
@@ -1176,21 +1192,19 @@
 
 void ImageWriter::CheckNonImageClassesRemoved() {
   if (compiler_driver_.GetImageClasses() != nullptr) {
+    auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (obj->IsClass() && !IsInBootImage(obj)) {
+        Class* klass = obj->AsClass();
+        if (!KeepClass(klass)) {
+          DumpImageClasses();
+          std::string temp;
+          CHECK(KeepClass(klass))
+              << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
+        }
+      }
+    };
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
-  }
-}
-
-void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
-  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
-  if (obj->IsClass() && !image_writer->IsInBootImage(obj)) {
-    Class* klass = obj->AsClass();
-    if (!image_writer->KeepClass(klass)) {
-      image_writer->DumpImageClasses();
-      std::string temp;
-      CHECK(image_writer->KeepClass(klass))
-          << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
-    }
+    heap->VisitObjects(visitor);
   }
 }
 
@@ -1532,26 +1546,6 @@
   offset += ArtMethod::Size(target_ptr_size_);
 }
 
-void ImageWriter::EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg) {
-  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
-  DCHECK(writer != nullptr);
-  if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
-    CHECK(writer->IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
-  }
-}
-
-void ImageWriter::DeflateMonitorCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) {
-  Monitor::Deflate(Thread::Current(), obj);
-}
-
-void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
-  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
-  DCHECK(writer != nullptr);
-  if (!writer->IsInBootImage(obj)) {
-    writer->UnbinObjectsIntoOffset(obj);
-  }
-}
-
 void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
   DCHECK(!IsInBootImage(obj));
   CHECK(obj != nullptr);
@@ -1686,7 +1680,12 @@
 
   // Deflate monitors before we visit roots since deflating acquires the monitor lock. Acquiring
   // this lock while holding other locks may cause lock order violations.
-  heap->VisitObjects(DeflateMonitorCallback, this);
+  {
+    auto deflate_monitor = [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      Monitor::Deflate(Thread::Current(), obj);
+    };
+    heap->VisitObjects(deflate_monitor);
+  }
 
   // Work list of <object, oat_index> for objects. Everything on the stack must already be
   // assigned a bin slot.
@@ -1748,7 +1747,15 @@
   }
 
   // Verify that all objects have assigned image bin slots.
-  heap->VisitObjects(EnsureBinSlotAssignedCallback, this);
+  {
+    auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+        CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
+      }
+    };
+    heap->VisitObjects(ensure_bin_slots_assigned);
+  }
 
   // Calculate size of the dex cache arrays slot and prepare offsets.
   PrepareDexCacheArraySlots();
@@ -1812,7 +1819,15 @@
   }
 
   // Transform each object's bin slot into an offset which will be used to do the final copy.
-  heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
+  {
+    auto unbin_objects_into_offset = [&](mirror::Object* obj)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (!IsInBootImage(obj)) {
+        UnbinObjectsIntoOffset(obj);
+      }
+    };
+    heap->VisitObjects(unbin_objects_into_offset);
+  }
 
   size_t i = 0;
   for (ImageInfo& image_info : image_infos_) {
@@ -2119,8 +2134,11 @@
 }
 
 void ImageWriter::CopyAndFixupObjects() {
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->VisitObjects(CopyAndFixupObjectsCallback, this);
+  auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(obj != nullptr);
+    CopyAndFixupObject(obj);
+  };
+  Runtime::Current()->GetHeap()->VisitObjects(visitor);
   // Fix up the object previously had hash codes.
   for (const auto& hash_pair : saved_hashcode_map_) {
     Object* obj = hash_pair.first;
@@ -2130,12 +2148,6 @@
   saved_hashcode_map_.clear();
 }
 
-void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
-  DCHECK(obj != nullptr);
-  DCHECK(arg != nullptr);
-  reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj);
-}
-
 void ImageWriter::FixupPointerArray(mirror::Object* dst,
                                     mirror::PointerArray* arr,
                                     mirror::Class* klass,
@@ -2406,17 +2418,19 @@
     orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
                                        fixup_visitor);
   }
-  ArtMethod** orig_methods = orig_dex_cache->GetResolvedMethods();
+  mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods();
   if (orig_methods != nullptr) {
     copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
                                                NativeLocationInImage(orig_methods),
                                                PointerSize::k64);
-    ArtMethod** copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
+    mirror::MethodDexCacheType* copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
     for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
-      ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
+      mirror::MethodDexCachePair orig_pair =
+          mirror::DexCache::GetNativePairPtrSize(orig_methods, i, target_ptr_size_);
       // NativeLocationInImage also handles runtime methods since these have relocation info.
-      ArtMethod* copy = NativeLocationInImage(orig);
-      mirror::DexCache::SetElementPtrSize(copy_methods, i, copy, target_ptr_size_);
+      mirror::MethodDexCachePair copy_pair(NativeLocationInImage(orig_pair.object),
+                                           orig_pair.index);
+      mirror::DexCache::SetNativePairPtrSize(copy_methods, i, copy_pair, target_ptr_size_);
     }
   }
   mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
@@ -2557,7 +2571,8 @@
 
   CopyReference(copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());
 
-  ArtMethod** orig_resolved_methods = orig->GetDexCacheResolvedMethods(target_ptr_size_);
+  mirror::MethodDexCacheType* orig_resolved_methods =
+      orig->GetDexCacheResolvedMethods(target_ptr_size_);
   copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods), target_ptr_size_);
 
   // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index c42523b..ee6fc1d 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -397,8 +397,6 @@
 
   // Verify unwanted classes removed.
   void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
-  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Lays out where the image objects will be at runtime.
   void CalculateNewObjectOffsets()
@@ -414,18 +412,9 @@
   void UnbinObjectsIntoOffset(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Creates the contiguous image in memory and adjusts pointers.
   void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
-  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
   void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 28a3f1e..5fdf9ff 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -184,10 +184,8 @@
   {
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
-    success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr);
-    if (success && (jit_logger_ != nullptr)) {
-      jit_logger_->WriteLog(code_cache, method, osr);
-    }
+    success = compiler_driver_->GetCompiler()->JitCompile(
+        self, code_cache, method, osr, jit_logger_.get());
   }
 
   // Trim maps to reduce memory usage.
diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc
index aa4f667..2199b64 100644
--- a/compiler/jit/jit_logger.cc
+++ b/compiler/jit/jit_logger.cc
@@ -50,11 +50,8 @@
   }
 }
 
-void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) {
+void JitLogger::WritePerfMapLog(const void* ptr, size_t code_size, ArtMethod* method) {
   if (perf_file_ != nullptr) {
-    const void* ptr = osr ? code_cache->LookupOsrMethodHeader(method)->GetCode()
-                          : method->GetEntryPointFromQuickCompiledCode();
-    size_t code_size = code_cache->GetMemorySizeOfCodePointer(ptr);
     std::string method_name = method->PrettyMethod();
 
     std::ostringstream stream;
@@ -270,11 +267,8 @@
   WriteJitDumpHeader();
 }
 
-void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method, bool osr) {
+void JitLogger::WriteJitDumpLog(const void* ptr, size_t code_size, ArtMethod* method) {
   if (jit_dump_file_ != nullptr) {
-    const void* code = osr ? code_cache->LookupOsrMethodHeader(method)->GetCode()
-                           : method->GetEntryPointFromQuickCompiledCode();
-    size_t code_size = code_cache->GetMemorySizeOfCodePointer(code);
     std::string method_name = method->PrettyMethod();
 
     PerfJitCodeLoad jit_code;
@@ -285,7 +279,7 @@
     jit_code.process_id_ = static_cast<uint32_t>(getpid());
     jit_code.thread_id_ = static_cast<uint32_t>(art::GetTid());
     jit_code.vma_ = 0x0;
-    jit_code.code_address_ = reinterpret_cast<uint64_t>(code);
+    jit_code.code_address_ = reinterpret_cast<uint64_t>(ptr);
     jit_code.code_size_ = code_size;
     jit_code.code_id_ = code_index_++;
 
@@ -297,7 +291,7 @@
     // Use UNUSED() here to avoid compiler warnings.
     UNUSED(jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&jit_code), sizeof(jit_code)));
     UNUSED(jit_dump_file_->WriteFully(method_name.c_str(), method_name.size() + 1));
-    UNUSED(jit_dump_file_->WriteFully(code, code_size));
+    UNUSED(jit_dump_file_->WriteFully(ptr, code_size));
 
     WriteJitDumpDebugInfo();
   }
diff --git a/compiler/jit/jit_logger.h b/compiler/jit/jit_logger.h
index 460864e..19be9aa 100644
--- a/compiler/jit/jit_logger.h
+++ b/compiler/jit/jit_logger.h
@@ -94,10 +94,10 @@
       OpenJitDumpLog();
     }
 
-    void WriteLog(JitCodeCache* code_cache, ArtMethod* method, bool osr)
+    void WriteLog(const void* ptr, size_t code_size, ArtMethod* method)
         REQUIRES_SHARED(Locks::mutator_lock_) {
-      WritePerfMapLog(code_cache, method, osr);
-      WriteJitDumpLog(code_cache, method, osr);
+      WritePerfMapLog(ptr, code_size, method);
+      WriteJitDumpLog(ptr, code_size, method);
     }
 
     void CloseLog() {
@@ -108,13 +108,13 @@
   private:
     // For perf-map profiling
     void OpenPerfMapLog();
-    void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method, bool osr)
+    void WritePerfMapLog(const void* ptr, size_t code_size, ArtMethod* method)
         REQUIRES_SHARED(Locks::mutator_lock_);
     void ClosePerfMapLog();
 
     // For perf-inject profiling
     void OpenJitDumpLog();
-    void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method, bool osr)
+    void WriteJitDumpLog(const void* ptr, size_t code_size, ArtMethod* method)
         REQUIRES_SHARED(Locks::mutator_lock_);
     void CloseJitDumpLog();
 
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 6ce7d75..3460efe 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -32,12 +32,12 @@
 #include "mem_map.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
-#include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
 #include "mirror/stack_trace_element.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "nativeloader/native_loader.h"
 #include "runtime.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 
@@ -247,9 +247,9 @@
     // Compile the native method before starting the runtime
     mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
     const auto pointer_size = class_linker_->GetImagePointerSize();
-    ArtMethod* method = direct ? c->FindDirectMethod(method_name, method_sig, pointer_size) :
-        c->FindVirtualMethod(method_name, method_sig, pointer_size);
+    ArtMethod* method = c->FindClassMethod(method_name, method_sig, pointer_size);
     ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
+    ASSERT_EQ(direct, method->IsDirect()) << method_name << " " << method_sig;
     if (check_generic_jni_) {
       method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
     } else {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 910d7a7..6f89049 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -104,8 +104,8 @@
       compiler_options_->ParseCompilerOption(option, Usage);
     }
     verification_results_.reset(new VerificationResults(compiler_options_.get()));
-    callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
-                                                CompilerCallbacks::CallbackMode::kCompileApp));
+    callbacks_.reset(new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp));
+    callbacks_->SetVerificationResults(verification_results_.get());
     Runtime::Current()->SetCompilerCallbacks(callbacks_.get());
     timer_.reset(new CumulativeLogger("Compilation times"));
     compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 6120ed0..4d258af 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1116,6 +1116,7 @@
                          const std::vector<const DexFile*>* dex_files)
       : OatDexMethodVisitor(writer, offset),
         pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
+        class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
         dex_files_(dex_files),
         class_linker_(Runtime::Current()->GetClassLinker()) {}
 
@@ -1131,10 +1132,7 @@
     if (!IsImageClass()) {
       return true;
     }
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::DexCache> dex_cache = hs.NewHandle(
-        class_linker_->FindDexCache(Thread::Current(), *dex_file));
+    ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(Thread::Current(), *dex_file);
     const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
     mirror::Class* klass = dex_cache->GetResolvedType(class_def.class_idx_);
     if (klass != nullptr) {
@@ -1143,11 +1141,13 @@
         // in the copied method should be the same as in the origin
         // method.
         mirror::Class* declaring_class = method.GetDeclaringClass();
-        ArtMethod* origin = declaring_class->FindDeclaredVirtualMethod(
+        ArtMethod* origin = declaring_class->FindClassMethod(
             declaring_class->GetDexCache(),
             method.GetDexMethodIndex(),
             pointer_size_);
         CHECK(origin != nullptr);
+        CHECK(!origin->IsDirect());
+        CHECK(origin->GetDeclaringClass() == declaring_class);
         if (IsInOatFile(&declaring_class->GetDexFile())) {
           const void* code_ptr =
               origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
@@ -1180,36 +1180,36 @@
       ++method_offsets_index_;
     }
 
-    // Unchecked as we hold mutator_lock_ on entry.
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker_->FindDexCache(
-        Thread::Current(), *dex_file_)));
+    Thread* self = Thread::Current();
+    ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(self, *dex_file_);
     ArtMethod* method;
     if (writer_->HasBootImage()) {
       const InvokeType invoke_type = it.GetMethodInvokeType(
           dex_file_->GetClassDef(class_def_index_));
-      method = class_linker_->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+      // Unchecked as we hold mutator_lock_ on entry.
+      ScopedObjectAccessUnchecked soa(self);
+      StackHandleScope<1> hs(self);
+      method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
           *dex_file_,
           it.GetMemberIndex(),
-          dex_cache,
+          hs.NewHandle(dex_cache),
           ScopedNullHandle<mirror::ClassLoader>(),
           nullptr,
           invoke_type);
       if (method == nullptr) {
         LOG(FATAL_WITHOUT_ABORT) << "Unexpected failure to resolve a method: "
             << dex_file_->PrettyMethod(it.GetMemberIndex(), true);
-        soa.Self()->AssertPendingException();
-        mirror::Throwable* exc = soa.Self()->GetException();
+        self->AssertPendingException();
+        mirror::Throwable* exc = self->GetException();
         std::string dump = exc->Dump();
         LOG(FATAL) << dump;
         UNREACHABLE();
       }
     } else {
-      // Should already have been resolved by the compiler, just peek into the dex cache.
+      // Should already have been resolved by the compiler.
       // It may not be resolved if the class failed to verify, in this case, don't set the
-      // entrypoint. This is not fatal since the dex cache will contain a resolution method.
-      method = dex_cache->GetResolvedMethod(it.GetMemberIndex(), pointer_size_);
+      // entrypoint. This is not fatal since we shall use a resolution method.
+      method = class_linker_->LookupResolvedMethod(it.GetMemberIndex(), dex_cache, class_loader_);
     }
     if (method != nullptr &&
         compiled_method != nullptr &&
@@ -1250,6 +1250,7 @@
 
  private:
   const PointerSize pointer_size_;
+  ObjPtr<mirror::ClassLoader> class_loader_;
   const std::vector<const DexFile*>* dex_files_;
   ClassLinker* const class_linker_;
   std::vector<std::pair<ArtMethod*, ArtMethod*>> methods_to_process_;
@@ -1469,7 +1470,8 @@
     ObjPtr<mirror::DexCache> dex_cache =
         (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
             Thread::Current(), *ref.dex_file);
-    ArtMethod* method = dex_cache->GetResolvedMethod(ref.dex_method_index, pointer_size_);
+    ArtMethod* method =
+        class_linker_->LookupResolvedMethod(ref.dex_method_index, dex_cache, class_loader_);
     CHECK(method != nullptr);
     return method;
   }
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b9d4700..430cdde 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -8269,19 +8269,41 @@
   const HDataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind();
 
   if (instruction->GetType() == Primitive::kPrimInt) {
-    DCHECK(!HDataProcWithShifterOp::IsExtensionOp(op_kind));
-
+    const vixl32::Register first = InputRegisterAt(instruction, 0);
+    const vixl32::Register output = OutputRegister(instruction);
     const vixl32::Register second = instruction->InputAt(1)->GetType() == Primitive::kPrimLong
         ? LowRegisterFrom(locations->InAt(1))
         : InputRegisterAt(instruction, 1);
 
-    GenerateDataProcInstruction(kind,
-                                OutputRegister(instruction),
-                                InputRegisterAt(instruction, 0),
-                                Operand(second,
-                                        ShiftFromOpKind(op_kind),
-                                        instruction->GetShiftAmount()),
-                                codegen_);
+    if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) {
+      DCHECK_EQ(kind, HInstruction::kAdd);
+
+      switch (op_kind) {
+        case HDataProcWithShifterOp::kUXTB:
+          __ Uxtab(output, first, second);
+          break;
+        case HDataProcWithShifterOp::kUXTH:
+          __ Uxtah(output, first, second);
+          break;
+        case HDataProcWithShifterOp::kSXTB:
+          __ Sxtab(output, first, second);
+          break;
+        case HDataProcWithShifterOp::kSXTH:
+          __ Sxtah(output, first, second);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected operation kind: " << op_kind;
+          UNREACHABLE();
+      }
+    } else {
+      GenerateDataProcInstruction(kind,
+                                  output,
+                                  first,
+                                  Operand(second,
+                                          ShiftFromOpKind(op_kind),
+                                          instruction->GetShiftAmount()),
+                                  codegen_);
+    }
   } else {
     DCHECK_EQ(instruction->GetType(), Primitive::kPrimLong);
 
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 23d188d..b6eb5c1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -16,6 +16,7 @@
 
 #include "code_generator_mips.h"
 
+#include "arch/mips/asm_support_mips.h"
 #include "arch/mips/entrypoints_direct_mips.h"
 #include "arch/mips/instruction_set_features_mips.h"
 #include "art_method.h"
@@ -40,6 +41,11 @@
 static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr Register kMethodRegisterArgument = A0;
 
+// Flags controlling the use of thunks for Baker read barriers.
+constexpr bool kBakerReadBarrierThunksEnableForFields = true;
+constexpr bool kBakerReadBarrierThunksEnableForArrays = true;
+constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true;
+
 Location MipsReturnLocation(Primitive::Type return_type) {
   switch (return_type) {
     case Primitive::kPrimBoolean:
@@ -1486,7 +1492,8 @@
         __ Mfc1(dst_low, src);
         __ MoveFromFpuHigh(dst_high, src);
       } else {
-        DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+        DCHECK(source.IsDoubleStackSlot())
+            << "Cannot move from " << source << " to " << destination;
         int32_t off = source.GetStackIndex();
         Register r = destination.AsRegisterPairLow<Register>();
         __ LoadFromOffset(kLoadDoubleword, r, SP, off);
@@ -1539,7 +1546,8 @@
       } else if (source.IsFpuRegister()) {
         __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
       } else {
-        DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+        DCHECK(source.IsDoubleStackSlot())
+            << "Cannot move from " << source << " to " << destination;
         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
         __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
         __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
@@ -1763,8 +1771,10 @@
   }
   // A following instruction will add the sign-extended low half of the 32-bit
   // offset to `out` (e.g. lw, jialc, addiu).
-  DCHECK_EQ(info_low->patch_info_high, info_high);
-  __ Bind(&info_low->label);
+  if (info_low != nullptr) {
+    DCHECK_EQ(info_low->patch_info_high, info_high);
+    __ Bind(&info_low->label);
+  }
 }
 
 CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
@@ -1791,25 +1801,26 @@
                                         const uint8_t* roots_data,
                                         const CodeGeneratorMIPS::JitPatchInfo& info,
                                         uint64_t index_in_table) const {
-  uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
+  uint32_t high_literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
+  uint32_t low_literal_offset = GetAssembler().GetLabelLocation(&info.low_label);
   uintptr_t address =
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
   // lui reg, addr32_high
-  DCHECK_EQ(code[literal_offset + 0], 0x34);
-  DCHECK_EQ(code[literal_offset + 1], 0x12);
-  DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00);
-  DCHECK_EQ(code[literal_offset + 3], 0x3C);
+  DCHECK_EQ(code[high_literal_offset + 0], 0x34);
+  DCHECK_EQ(code[high_literal_offset + 1], 0x12);
+  DCHECK_EQ((code[high_literal_offset + 2] & 0xE0), 0x00);
+  DCHECK_EQ(code[high_literal_offset + 3], 0x3C);
   // instr reg, reg, addr32_low
-  DCHECK_EQ(code[literal_offset + 4], 0x78);
-  DCHECK_EQ(code[literal_offset + 5], 0x56);
+  DCHECK_EQ(code[low_literal_offset + 0], 0x78);
+  DCHECK_EQ(code[low_literal_offset + 1], 0x56);
   addr32 += (addr32 & 0x8000) << 1;  // Account for sign extension in "instr reg, reg, addr32_low".
   // lui reg, addr32_high
-  code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
-  code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
+  code[high_literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
+  code[high_literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
   // instr reg, reg, addr32_low
-  code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0);
-  code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8);
+  code[low_literal_offset + 0] = static_cast<uint8_t>(addr32 >> 0);
+  code[low_literal_offset + 1] = static_cast<uint8_t>(addr32 >> 8);
 }
 
 void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
@@ -2545,7 +2556,12 @@
   // We need a temporary register for the read barrier marking slow
   // path in CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier.
   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    locations->AddTemp(Location::RequiresRegister());
+    bool temp_needed = instruction->GetIndex()->IsConstant()
+        ? !kBakerReadBarrierThunksEnableForFields
+        : !kBakerReadBarrierThunksEnableForArrays;
+    if (temp_needed) {
+      locations->AddTemp(Location::RequiresRegister());
+    }
   }
 }
 
@@ -2681,16 +2697,32 @@
       // /* HeapReference<Object> */ out =
       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        Location temp = locations->GetTemp(0);
+        bool temp_needed = index.IsConstant()
+            ? !kBakerReadBarrierThunksEnableForFields
+            : !kBakerReadBarrierThunksEnableForArrays;
+        Location temp = temp_needed ? locations->GetTemp(0) : Location::NoLocation();
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier call.
-        codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
-                                                        out_loc,
-                                                        obj,
-                                                        data_offset,
-                                                        index,
-                                                        temp,
-                                                        /* needs_null_check */ true);
+        DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
+        if (index.IsConstant()) {
+          // Array load with a constant index can be treated as a field load.
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+                                                          out_loc,
+                                                          obj,
+                                                          offset,
+                                                          temp,
+                                                          /* needs_null_check */ false);
+        } else {
+          codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
+                                                          out_loc,
+                                                          obj,
+                                                          data_offset,
+                                                          index,
+                                                          temp,
+                                                          /* needs_null_check */ false);
+        }
       } else {
         Register out = out_loc.AsRegister<Register>();
         if (index.IsConstant()) {
@@ -3093,6 +3125,7 @@
 // Temp is used for read barrier.
 static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
   if (kEmitCompilerReadBarrier &&
+      !(kUseBakerReadBarrier && kBakerReadBarrierThunksEnableForFields) &&
       (kUseBakerReadBarrier ||
        type_check_kind == TypeCheckKind::kAbstractClassCheck ||
        type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -6096,7 +6129,9 @@
     if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
       // We need a temporary register for the read barrier marking slow
       // path in CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier.
-      locations->AddTemp(Location::RequiresRegister());
+      if (!kBakerReadBarrierThunksEnableForFields) {
+        locations->AddTemp(Location::RequiresRegister());
+      }
     }
   }
 }
@@ -6171,7 +6206,8 @@
     if (type == Primitive::kPrimNot) {
       // /* HeapReference<Object> */ dst = *(obj + offset)
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        Location temp_loc = locations->GetTemp(0);
+        Location temp_loc =
+            kBakerReadBarrierThunksEnableForFields ? Location::NoLocation() : locations->GetTemp(0);
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
@@ -6395,7 +6431,9 @@
   Register out_reg = out.AsRegister<Register>();
   if (read_barrier_option == kWithReadBarrier) {
     CHECK(kEmitCompilerReadBarrier);
-    DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+    if (!kUseBakerReadBarrier || !kBakerReadBarrierThunksEnableForFields) {
+      DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+    }
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
@@ -6435,7 +6473,9 @@
   if (read_barrier_option == kWithReadBarrier) {
     CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
-      DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+      if (!kBakerReadBarrierThunksEnableForFields) {
+        DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+      }
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
@@ -6458,67 +6498,172 @@
   }
 }
 
+static inline int GetBakerMarkThunkNumber(Register reg) {
+  static_assert(BAKER_MARK_INTROSPECTION_REGISTER_COUNT == 21, "Expecting equal");
+  if (reg >= V0 && reg <= T7) {  // 14 consecutive regs.
+    return reg - V0;
+  } else if (reg >= S2 && reg <= S7) {  // 6 consecutive regs.
+    return 14 + (reg - S2);
+  } else if (reg == FP) {  // One more.
+    return 20;
+  }
+  LOG(FATAL) << "Unexpected register " << reg;
+  UNREACHABLE();
+}
+
+static inline int GetBakerMarkFieldArrayThunkDisplacement(Register reg, bool short_offset) {
+  int num = GetBakerMarkThunkNumber(reg) +
+      (short_offset ? BAKER_MARK_INTROSPECTION_REGISTER_COUNT : 0);
+  return num * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE;
+}
+
+static inline int GetBakerMarkGcRootThunkDisplacement(Register reg) {
+  return GetBakerMarkThunkNumber(reg) * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE +
+      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET;
+}
+
 void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruction,
                                                            Location root,
                                                            Register obj,
                                                            uint32_t offset,
-                                                           ReadBarrierOption read_barrier_option) {
+                                                           ReadBarrierOption read_barrier_option,
+                                                           MipsLabel* label_low) {
+  bool reordering;
+  if (label_low != nullptr) {
+    DCHECK_EQ(offset, 0x5678u);
+  }
   Register root_reg = root.AsRegister<Register>();
   if (read_barrier_option == kWithReadBarrier) {
     DCHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
       // Baker's read barrier are used:
-      //
-      //   root = obj.field;
-      //   temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-      //   if (temp != null) {
-      //     root = temp(root)
-      //   }
+      if (kBakerReadBarrierThunksEnableForGcRoots) {
+        // Note that we do not actually check the value of `GetIsGcMarking()`
+        // to decide whether to mark the loaded GC root or not.  Instead, we
+        // load into `temp` (T9) the read barrier mark introspection entrypoint.
+        // If `temp` is null, it means that `GetIsGcMarking()` is false, and
+        // vice versa.
+        //
+        // We use thunks for the slow path. That thunk checks the reference
+        // and jumps to the entrypoint if needed.
+        //
+        //     temp = Thread::Current()->pReadBarrierMarkReg00
+        //     // AKA &art_quick_read_barrier_mark_introspection.
+        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
+        //     if (temp != nullptr) {
+        //        temp = &gc_root_thunk<root_reg>
+        //        root = temp(root)
+        //     }
 
-      // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-      __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
-      static_assert(
-          sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
-          "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
-          "have different sizes.");
-      static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
-                    "art::mirror::CompressedReference<mirror::Object> and int32_t "
-                    "have different sizes.");
+        bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+        const int32_t entry_point_offset =
+            Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
+        const int thunk_disp = GetBakerMarkGcRootThunkDisplacement(root_reg);
+        int16_t offset_low = Low16Bits(offset);
+        int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign
+                                                                // extension in lw.
+        bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
+        Register base = short_offset ? obj : TMP;
+        // Loading the entrypoint does not require a load acquire since it is only changed when
+        // threads are suspended or running a checkpoint.
+        __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+        reordering = __ SetReorder(false);
+        if (!short_offset) {
+          DCHECK(!label_low);
+          __ AddUpper(base, obj, offset_high);
+        }
+        __ Beqz(T9, (isR6 ? 2 : 4));  // Skip jialc / addiu+jalr+nop.
+        if (label_low != nullptr) {
+          DCHECK(short_offset);
+          __ Bind(label_low);
+        }
+        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+        __ LoadFromOffset(kLoadWord, root_reg, base, offset_low);  // Single instruction
+                                                                   // in delay slot.
+        if (isR6) {
+          __ Jialc(T9, thunk_disp);
+        } else {
+          __ Addiu(T9, T9, thunk_disp);
+          __ Jalr(T9);
+          __ Nop();
+        }
+        __ SetReorder(reordering);
+      } else {
+        // Note that we do not actually check the value of `GetIsGcMarking()`
+        // to decide whether to mark the loaded GC root or not.  Instead, we
+        // load into `temp` (T9) the read barrier mark entry point corresponding
+        // to register `root`. If `temp` is null, it means that `GetIsGcMarking()`
+        // is false, and vice versa.
+        //
+        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
+        //     temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+        //     if (temp != null) {
+        //       root = temp(root)
+        //     }
 
-      // Slow path marking the GC root `root`.
-      Location temp = Location::RegisterLocation(T9);
-      SlowPathCodeMIPS* slow_path =
-          new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
-              instruction,
-              root,
-              /*entrypoint*/ temp);
-      codegen_->AddSlowPath(slow_path);
+        if (label_low != nullptr) {
+          reordering = __ SetReorder(false);
+          __ Bind(label_low);
+        }
+        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+        __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
+        if (label_low != nullptr) {
+          __ SetReorder(reordering);
+        }
+        static_assert(
+            sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+            "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+            "have different sizes.");
+        static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+                      "art::mirror::CompressedReference<mirror::Object> and int32_t "
+                      "have different sizes.");
 
-      // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-      const int32_t entry_point_offset =
-          Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
-      // Loading the entrypoint does not require a load acquire since it is only changed when
-      // threads are suspended or running a checkpoint.
-      __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
-      // The entrypoint is null when the GC is not marking, this prevents one load compared to
-      // checking GetIsGcMarking.
-      __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
-      __ Bind(slow_path->GetExitLabel());
+        // Slow path marking the GC root `root`.
+        Location temp = Location::RegisterLocation(T9);
+        SlowPathCodeMIPS* slow_path =
+            new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS(
+                instruction,
+                root,
+                /*entrypoint*/ temp);
+        codegen_->AddSlowPath(slow_path);
+
+        const int32_t entry_point_offset =
+            Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
+        // Loading the entrypoint does not require a load acquire since it is only changed when
+        // threads are suspended or running a checkpoint.
+        __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
+        __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
+        __ Bind(slow_path->GetExitLabel());
+      }
     } else {
+      if (label_low != nullptr) {
+        reordering = __ SetReorder(false);
+        __ Bind(label_low);
+      }
       // GC root loaded through a slow path for read barriers other
       // than Baker's.
       // /* GcRoot<mirror::Object>* */ root = obj + offset
       __ Addiu32(root_reg, obj, offset);
+      if (label_low != nullptr) {
+        __ SetReorder(reordering);
+      }
       // /* mirror::Object* */ root = root->Read()
       codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
     }
   } else {
+    if (label_low != nullptr) {
+      reordering = __ SetReorder(false);
+      __ Bind(label_low);
+    }
     // Plain GC root load with no read barrier.
     // /* GcRoot<mirror::Object> */ root = *(obj + offset)
     __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
     // Note that GC roots are not affected by heap poisoning, thus we
     // do not have to unpoison `root_reg` here.
+    if (label_low != nullptr) {
+      __ SetReorder(reordering);
+    }
   }
 }
 
@@ -6531,6 +6676,88 @@
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
+  if (kBakerReadBarrierThunksEnableForFields) {
+    // Note that we do not actually check the value of `GetIsGcMarking()`
+    // to decide whether to mark the loaded reference or not.  Instead, we
+    // load into `temp` (T9) the read barrier mark introspection entrypoint.
+    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
+    // vice versa.
+    //
+    // We use thunks for the slow path. That thunk checks the reference
+    // and jumps to the entrypoint if needed. If the holder is not gray,
+    // it issues a load-load memory barrier and returns to the original
+    // reference load.
+    //
+    //     temp = Thread::Current()->pReadBarrierMarkReg00
+    //     // AKA &art_quick_read_barrier_mark_introspection.
+    //     if (temp != nullptr) {
+    //        temp = &field_array_thunk<holder_reg>
+    //        temp()
+    //     }
+    //   not_gray_return_address:
+    //     // If the offset is too large to fit into the lw instruction, we
+    //     // use an adjusted base register (TMP) here. This register
+    //     // receives bits 16 ... 31 of the offset before the thunk invocation
+    //     // and the thunk benefits from it.
+    //     HeapReference<mirror::Object> reference = *(obj+offset);  // Original reference load.
+    //   gray_return_address:
+
+    DCHECK(temp.IsInvalid());
+    bool isR6 = GetInstructionSetFeatures().IsR6();
+    int16_t offset_low = Low16Bits(offset);
+    int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lw.
+    bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
+    bool reordering = __ SetReorder(false);
+    const int32_t entry_point_offset =
+        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
+    // There may or may not have been a null check if the field offset is smaller than
+    // the page size.
+    // There must've been a null check in case it's actually a load from an array.
+    // We will, however, perform an explicit null check in the thunk as it's easier to
+    // do it than not.
+    if (instruction->IsArrayGet()) {
+      DCHECK(!needs_null_check);
+    }
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, short_offset);
+    // Loading the entrypoint does not require a load acquire since it is only changed when
+    // threads are suspended or running a checkpoint.
+    __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+    Register ref_reg = ref.AsRegister<Register>();
+    Register base = short_offset ? obj : TMP;
+    if (short_offset) {
+      if (isR6) {
+        __ Beqzc(T9, 2);  // Skip jialc.
+        __ Nop();  // In forbidden slot.
+        __ Jialc(T9, thunk_disp);
+      } else {
+        __ Beqz(T9, 3);  // Skip jalr+nop.
+        __ Addiu(T9, T9, thunk_disp);  // In delay slot.
+        __ Jalr(T9);
+        __ Nop();  // In delay slot.
+      }
+    } else {
+      if (isR6) {
+        __ Beqz(T9, 2);  // Skip jialc.
+        __ Aui(base, obj, offset_high);  // In delay slot.
+        __ Jialc(T9, thunk_disp);
+      } else {
+        __ Lui(base, offset_high);
+        __ Beqz(T9, 2);  // Skip jalr.
+        __ Addiu(T9, T9, thunk_disp);  // In delay slot.
+        __ Jalr(T9);
+        __ Addu(base, base, obj);  // In delay slot.
+      }
+    }
+    // /* HeapReference<Object> */ ref = *(obj + offset)
+    __ LoadFromOffset(kLoadWord, ref_reg, base, offset_low);  // Single instruction.
+    if (needs_null_check) {
+      MaybeRecordImplicitNullCheck(instruction);
+    }
+    __ MaybeUnpoisonHeapReference(ref_reg);
+    __ SetReorder(reordering);
+    return;
+  }
+
   // /* HeapReference<Object> */ ref = *(obj + offset)
   Location no_index = Location::NoLocation();
   ScaleFactor no_scale_factor = TIMES_1;
@@ -6557,9 +6784,69 @@
   static_assert(
       sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+  ScaleFactor scale_factor = TIMES_4;
+
+  if (kBakerReadBarrierThunksEnableForArrays) {
+    // Note that we do not actually check the value of `GetIsGcMarking()`
+    // to decide whether to mark the loaded reference or not.  Instead, we
+    // load into `temp` (T9) the read barrier mark introspection entrypoint.
+    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
+    // vice versa.
+    //
+    // We use thunks for the slow path. That thunk checks the reference
+    // and jumps to the entrypoint if needed. If the holder is not gray,
+    // it issues a load-load memory barrier and returns to the original
+    // reference load.
+    //
+    //     temp = Thread::Current()->pReadBarrierMarkReg00
+    //     // AKA &art_quick_read_barrier_mark_introspection.
+    //     if (temp != nullptr) {
+    //        temp = &field_array_thunk<holder_reg>
+    //        temp()
+    //     }
+    //   not_gray_return_address:
+    //     // The element address is pre-calculated in the TMP register before the
+    //     // thunk invocation and the thunk benefits from it.
+    //     HeapReference<mirror::Object> reference = data[index];  // Original reference load.
+    //   gray_return_address:
+
+    DCHECK(temp.IsInvalid());
+    DCHECK(index.IsValid());
+    bool reordering = __ SetReorder(false);
+    const int32_t entry_point_offset =
+        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
+    // We will not do the explicit null check in the thunk as some form of a null check
+    // must've been done earlier.
+    DCHECK(!needs_null_check);
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+    // Loading the entrypoint does not require a load acquire since it is only changed when
+    // threads are suspended or running a checkpoint.
+    __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+    Register ref_reg = ref.AsRegister<Register>();
+    Register index_reg = index.IsRegisterPair()
+        ? index.AsRegisterPairLow<Register>()
+        : index.AsRegister<Register>();
+    if (GetInstructionSetFeatures().IsR6()) {
+      __ Beqz(T9, 2);  // Skip jialc.
+      __ Lsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
+      __ Jialc(T9, thunk_disp);
+    } else {
+      __ Sll(TMP, index_reg, scale_factor);
+      __ Beqz(T9, 2);  // Skip jalr.
+      __ Addiu(T9, T9, thunk_disp);  // In delay slot.
+      __ Jalr(T9);
+      __ Addu(TMP, TMP, obj);  // In delay slot.
+    }
+    // /* HeapReference<Object> */ ref = *(obj + data_offset + (index << scale_factor))
+    DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))) << data_offset;
+    __ LoadFromOffset(kLoadWord, ref_reg, TMP, data_offset);  // Single instruction.
+    __ MaybeUnpoisonHeapReference(ref_reg);
+    __ SetReorder(reordering);
+    return;
+  }
+
   // /* HeapReference<Object> */ ref =
   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  ScaleFactor scale_factor = TIMES_4;
   GenerateReferenceLoadWithBakerReadBarrier(instruction,
                                             ref,
                                             obj,
@@ -7461,10 +7748,14 @@
       bool reordering = __ SetReorder(false);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
                                                      temp,
-                                                     base_or_current_method_reg,
-                                                     info_low);
-      GenerateGcRootFieldLoad(cls, out_loc, temp, /* placeholder */ 0x5678, read_barrier_option);
+                                                     base_or_current_method_reg);
       __ SetReorder(reordering);
+      GenerateGcRootFieldLoad(cls,
+                              out_loc,
+                              temp,
+                              /* placeholder */ 0x5678,
+                              read_barrier_option,
+                              &info_low->label);
       generate_null_check = true;
       break;
     }
@@ -7475,8 +7766,13 @@
       bool reordering = __ SetReorder(false);
       __ Bind(&info->high_label);
       __ Lui(out, /* placeholder */ 0x1234);
-      GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678, read_barrier_option);
       __ SetReorder(reordering);
+      GenerateGcRootFieldLoad(cls,
+                              out_loc,
+                              out,
+                              /* placeholder */ 0x5678,
+                              read_barrier_option,
+                              &info->low_label);
       break;
     }
     case HLoadClass::LoadKind::kRuntimeCall:
@@ -7623,14 +7919,14 @@
       bool reordering = __ SetReorder(false);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      temp,
-                                                     base_or_current_method_reg,
-                                                     info_low);
+                                                     base_or_current_method_reg);
+      __ SetReorder(reordering);
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               temp,
                               /* placeholder */ 0x5678,
-                              kCompilerReadBarrierOption);
-      __ SetReorder(reordering);
+                              kCompilerReadBarrierOption,
+                              &info_low->label);
       SlowPathCodeMIPS* slow_path =
           new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load, info_high);
       codegen_->AddSlowPath(slow_path);
@@ -7646,12 +7942,13 @@
       bool reordering = __ SetReorder(false);
       __ Bind(&info->high_label);
       __ Lui(out, /* placeholder */ 0x1234);
+      __ SetReorder(reordering);
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               out,
                               /* placeholder */ 0x5678,
-                              kCompilerReadBarrierOption);
-      __ SetReorder(reordering);
+                              kCompilerReadBarrierOption,
+                              &info->low_label);
       return;
     }
     default:
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 52ee852..7195b9d 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -285,7 +285,8 @@
                                Location root,
                                Register obj,
                                uint32_t offset,
-                               ReadBarrierOption read_barrier_option);
+                               ReadBarrierOption read_barrier_option,
+                               MipsLabel* label_low = nullptr);
 
   void GenerateIntCompare(IfCondition cond, LocationSummary* locations);
   // When the function returns `false` it means that the condition holds if `dst` is non-zero
@@ -637,7 +638,7 @@
   void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
                                             Register out,
                                             Register base,
-                                            PcRelativePatchInfo* info_low);
+                                            PcRelativePatchInfo* info_low = nullptr);
 
   // The JitPatchInfo is used for JIT string and class loads.
   struct JitPatchInfo {
@@ -649,8 +650,9 @@
     // String/type index.
     uint64_t index;
     // Label for the instruction loading the most significant half of the address.
-    // The least significant half is loaded with the instruction that follows immediately.
     MipsLabel high_label;
+    // Label for the instruction supplying the least significant half of the address.
+    MipsLabel low_label;
   };
 
   void PatchJitRootUse(uint8_t* code,
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 454a2dd..3e79f47 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -16,6 +16,7 @@
 
 #include "code_generator_mips64.h"
 
+#include "arch/mips64/asm_support_mips64.h"
 #include "art_method.h"
 #include "code_generator_utils.h"
 #include "compiled_method.h"
@@ -38,6 +39,11 @@
 static constexpr int kCurrentMethodStackOffset = 0;
 static constexpr GpuRegister kMethodRegisterArgument = A0;
 
+// Flags controlling the use of thunks for Baker read barriers.
+constexpr bool kBakerReadBarrierThunksEnableForFields = true;
+constexpr bool kBakerReadBarrierThunksEnableForArrays = true;
+constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true;
+
 Location Mips64ReturnLocation(Primitive::Type return_type) {
   switch (return_type) {
     case Primitive::kPrimBoolean:
@@ -1649,8 +1655,10 @@
   __ Auipc(out, /* placeholder */ 0x1234);
   // A following instruction will add the sign-extended low half of the 32-bit
   // offset to `out` (e.g. ld, jialc, daddiu).
-  DCHECK_EQ(info_low->patch_info_high, info_high);
-  __ Bind(&info_low->label);
+  if (info_low != nullptr) {
+    DCHECK_EQ(info_low->patch_info_high, info_high);
+    __ Bind(&info_low->label);
+  }
 }
 
 Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -2117,7 +2125,12 @@
   // We need a temporary register for the read barrier marking slow
   // path in CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier.
   if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    locations->AddTemp(Location::RequiresRegister());
+    bool temp_needed = instruction->GetIndex()->IsConstant()
+        ? !kBakerReadBarrierThunksEnableForFields
+        : !kBakerReadBarrierThunksEnableForArrays;
+    if (temp_needed) {
+      locations->AddTemp(Location::RequiresRegister());
+    }
   }
 }
 
@@ -2254,16 +2267,32 @@
       // /* HeapReference<Object> */ out =
       //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        Location temp = locations->GetTemp(0);
+        bool temp_needed = index.IsConstant()
+            ? !kBakerReadBarrierThunksEnableForFields
+            : !kBakerReadBarrierThunksEnableForArrays;
+        Location temp = temp_needed ? locations->GetTemp(0) : Location::NoLocation();
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier call.
-        codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
-                                                        out_loc,
-                                                        obj,
-                                                        data_offset,
-                                                        index,
-                                                        temp,
-                                                        /* needs_null_check */ true);
+        DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
+        if (index.IsConstant()) {
+          // Array load with a constant index can be treated as a field load.
+          size_t offset =
+              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+          codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
+                                                          out_loc,
+                                                          obj,
+                                                          offset,
+                                                          temp,
+                                                          /* needs_null_check */ false);
+        } else {
+          codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
+                                                          out_loc,
+                                                          obj,
+                                                          data_offset,
+                                                          index,
+                                                          temp,
+                                                          /* needs_null_check */ false);
+        }
       } else {
         GpuRegister out = out_loc.AsRegister<GpuRegister>();
         if (index.IsConstant()) {
@@ -2666,6 +2695,7 @@
 // Temp is used for read barrier.
 static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
   if (kEmitCompilerReadBarrier &&
+      !(kUseBakerReadBarrier && kBakerReadBarrierThunksEnableForFields) &&
       (kUseBakerReadBarrier ||
        type_check_kind == TypeCheckKind::kAbstractClassCheck ||
        type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -4118,7 +4148,9 @@
   if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
     // We need a temporary register for the read barrier marking slow
     // path in CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier.
-    locations->AddTemp(Location::RequiresRegister());
+    if (!kBakerReadBarrierThunksEnableForFields) {
+      locations->AddTemp(Location::RequiresRegister());
+    }
   }
 }
 
@@ -4168,7 +4200,8 @@
     if (type == Primitive::kPrimNot) {
       // /* HeapReference<Object> */ dst = *(obj + offset)
       if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        Location temp_loc = locations->GetTemp(0);
+        Location temp_loc =
+            kBakerReadBarrierThunksEnableForFields ? Location::NoLocation() : locations->GetTemp(0);
         // Note that a potential implicit null check is handled in this
         // CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier call.
         codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
@@ -4318,7 +4351,9 @@
   GpuRegister out_reg = out.AsRegister<GpuRegister>();
   if (read_barrier_option == kWithReadBarrier) {
     CHECK(kEmitCompilerReadBarrier);
-    DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+    if (!kUseBakerReadBarrier || !kBakerReadBarrierThunksEnableForFields) {
+      DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+    }
     if (kUseBakerReadBarrier) {
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(out + offset)
@@ -4358,7 +4393,9 @@
   if (read_barrier_option == kWithReadBarrier) {
     CHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
-      DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+      if (!kBakerReadBarrierThunksEnableForFields) {
+        DCHECK(maybe_temp.IsRegister()) << maybe_temp;
+      }
       // Load with fast path based Baker's read barrier.
       // /* HeapReference<Object> */ out = *(obj + offset)
       codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
@@ -4381,55 +4418,134 @@
   }
 }
 
-void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(
-    HInstruction* instruction,
-    Location root,
-    GpuRegister obj,
-    uint32_t offset,
-    ReadBarrierOption read_barrier_option) {
+static inline int GetBakerMarkThunkNumber(GpuRegister reg) {
+  static_assert(BAKER_MARK_INTROSPECTION_REGISTER_COUNT == 20, "Expecting equal");
+  if (reg >= V0 && reg <= T2) {  // 13 consecutive regs.
+    return reg - V0;
+  } else if (reg >= S2 && reg <= S7) {  // 6 consecutive regs.
+    return 13 + (reg - S2);
+  } else if (reg == S8) {  // One more.
+    return 19;
+  }
+  LOG(FATAL) << "Unexpected register " << reg;
+  UNREACHABLE();
+}
+
+static inline int GetBakerMarkFieldArrayThunkDisplacement(GpuRegister reg, bool short_offset) {
+  int num = GetBakerMarkThunkNumber(reg) +
+      (short_offset ? BAKER_MARK_INTROSPECTION_REGISTER_COUNT : 0);
+  return num * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE;
+}
+
+static inline int GetBakerMarkGcRootThunkDisplacement(GpuRegister reg) {
+  return GetBakerMarkThunkNumber(reg) * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE +
+      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET;
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instruction,
+                                                             Location root,
+                                                             GpuRegister obj,
+                                                             uint32_t offset,
+                                                             ReadBarrierOption read_barrier_option,
+                                                             Mips64Label* label_low) {
+  if (label_low != nullptr) {
+    DCHECK_EQ(offset, 0x5678u);
+  }
   GpuRegister root_reg = root.AsRegister<GpuRegister>();
   if (read_barrier_option == kWithReadBarrier) {
     DCHECK(kEmitCompilerReadBarrier);
     if (kUseBakerReadBarrier) {
       // Fast path implementation of art::ReadBarrier::BarrierForRoot when
       // Baker's read barrier are used:
-      //
-      //   root = obj.field;
-      //   temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-      //   if (temp != null) {
-      //     root = temp(root)
-      //   }
+      if (kBakerReadBarrierThunksEnableForGcRoots) {
+        // Note that we do not actually check the value of `GetIsGcMarking()`
+        // to decide whether to mark the loaded GC root or not.  Instead, we
+        // load into `temp` (T9) the read barrier mark introspection entrypoint.
+        // If `temp` is null, it means that `GetIsGcMarking()` is false, and
+        // vice versa.
+        //
+        // We use thunks for the slow path. That thunk checks the reference
+        // and jumps to the entrypoint if needed.
+        //
+        //     temp = Thread::Current()->pReadBarrierMarkReg00
+        //     // AKA &art_quick_read_barrier_mark_introspection.
+        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
+        //     if (temp != nullptr) {
+        //        temp = &gc_root_thunk<root_reg>
+        //        root = temp(root)
+        //     }
 
-      // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-      __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
-      static_assert(
-          sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
-          "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
-          "have different sizes.");
-      static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
-                    "art::mirror::CompressedReference<mirror::Object> and int32_t "
-                    "have different sizes.");
+        const int32_t entry_point_offset =
+            Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(0);
+        const int thunk_disp = GetBakerMarkGcRootThunkDisplacement(root_reg);
+        int16_t offset_low = Low16Bits(offset);
+        int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign
+                                                                // extension in lwu.
+        bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
+        GpuRegister base = short_offset ? obj : TMP;
+        // Loading the entrypoint does not require a load acquire since it is only changed when
+        // threads are suspended or running a checkpoint.
+        __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
+        if (!short_offset) {
+          DCHECK(!label_low);
+          __ Daui(base, obj, offset_high);
+        }
+        __ Beqz(T9, 2);  // Skip jialc.
+        if (label_low != nullptr) {
+          DCHECK(short_offset);
+          __ Bind(label_low);
+        }
+        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+        __ LoadFromOffset(kLoadUnsignedWord, root_reg, base, offset_low);  // Single instruction
+                                                                           // in delay slot.
+        __ Jialc(T9, thunk_disp);
+      } else {
+        // Note that we do not actually check the value of `GetIsGcMarking()`
+        // to decide whether to mark the loaded GC root or not.  Instead, we
+        // load into `temp` (T9) the read barrier mark entry point corresponding
+        // to register `root`. If `temp` is null, it means that `GetIsGcMarking()`
+        // is false, and vice versa.
+        //
+        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
+        //     temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
+        //     if (temp != null) {
+        //       root = temp(root)
+        //     }
 
-      // Slow path marking the GC root `root`.
-      Location temp = Location::RegisterLocation(T9);
-      SlowPathCodeMIPS64* slow_path =
-          new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
-              instruction,
-              root,
-              /*entrypoint*/ temp);
-      codegen_->AddSlowPath(slow_path);
+        if (label_low != nullptr) {
+          __ Bind(label_low);
+        }
+        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
+        __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
+        static_assert(
+            sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
+            "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
+            "have different sizes.");
+        static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
+                      "art::mirror::CompressedReference<mirror::Object> and int32_t "
+                      "have different sizes.");
 
-      // temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-      const int32_t entry_point_offset =
-          Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(root.reg() - 1);
-      // Loading the entrypoint does not require a load acquire since it is only changed when
-      // threads are suspended or running a checkpoint.
-      __ LoadFromOffset(kLoadDoubleword, temp.AsRegister<GpuRegister>(), TR, entry_point_offset);
-      // The entrypoint is null when the GC is not marking, this prevents one load compared to
-      // checking GetIsGcMarking.
-      __ Bnezc(temp.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-      __ Bind(slow_path->GetExitLabel());
+        // Slow path marking the GC root `root`.
+        Location temp = Location::RegisterLocation(T9);
+        SlowPathCodeMIPS64* slow_path =
+            new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathMIPS64(
+                instruction,
+                root,
+                /*entrypoint*/ temp);
+        codegen_->AddSlowPath(slow_path);
+
+        const int32_t entry_point_offset =
+            Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(root.reg() - 1);
+        // Loading the entrypoint does not require a load acquire since it is only changed when
+        // threads are suspended or running a checkpoint.
+        __ LoadFromOffset(kLoadDoubleword, temp.AsRegister<GpuRegister>(), TR, entry_point_offset);
+        __ Bnezc(temp.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
+        __ Bind(slow_path->GetExitLabel());
+      }
     } else {
+      if (label_low != nullptr) {
+        __ Bind(label_low);
+      }
       // GC root loaded through a slow path for read barriers other
       // than Baker's.
       // /* GcRoot<mirror::Object>* */ root = obj + offset
@@ -4438,6 +4554,9 @@
       codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
     }
   } else {
+    if (label_low != nullptr) {
+      __ Bind(label_low);
+    }
     // Plain GC root load with no read barrier.
     // /* GcRoot<mirror::Object> */ root = *(obj + offset)
     __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
@@ -4455,6 +4574,71 @@
   DCHECK(kEmitCompilerReadBarrier);
   DCHECK(kUseBakerReadBarrier);
 
+  if (kBakerReadBarrierThunksEnableForFields) {
+    // Note that we do not actually check the value of `GetIsGcMarking()`
+    // to decide whether to mark the loaded reference or not.  Instead, we
+    // load into `temp` (T9) the read barrier mark introspection entrypoint.
+    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
+    // vice versa.
+    //
+    // We use thunks for the slow path. That thunk checks the reference
+    // and jumps to the entrypoint if needed. If the holder is not gray,
+    // it issues a load-load memory barrier and returns to the original
+    // reference load.
+    //
+    //     temp = Thread::Current()->pReadBarrierMarkReg00
+    //     // AKA &art_quick_read_barrier_mark_introspection.
+    //     if (temp != nullptr) {
+    //        temp = &field_array_thunk<holder_reg>
+    //        temp()
+    //     }
+    //   not_gray_return_address:
+    //     // If the offset is too large to fit into the lw instruction, we
+    //     // use an adjusted base register (TMP) here. This register
+    //     // receives bits 16 ... 31 of the offset before the thunk invocation
+    //     // and the thunk benefits from it.
+    //     HeapReference<mirror::Object> reference = *(obj+offset);  // Original reference load.
+    //   gray_return_address:
+
+    DCHECK(temp.IsInvalid());
+    bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
+    const int32_t entry_point_offset =
+        Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(0);
+    // There may or may not have been a null check if the field offset is smaller than
+    // the page size.
+    // There must've been a null check in case it's actually a load from an array.
+    // We will, however, perform an explicit null check in the thunk as it's easier to
+    // do it than not.
+    if (instruction->IsArrayGet()) {
+      DCHECK(!needs_null_check);
+    }
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, short_offset);
+    // Loading the entrypoint does not require a load acquire since it is only changed when
+    // threads are suspended or running a checkpoint.
+    __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
+    GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
+    if (short_offset) {
+      __ Beqzc(T9, 2);  // Skip jialc.
+      __ Nop();  // In forbidden slot.
+      __ Jialc(T9, thunk_disp);
+      // /* HeapReference<Object> */ ref = *(obj + offset)
+      __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, offset);  // Single instruction.
+    } else {
+      int16_t offset_low = Low16Bits(offset);
+      int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lwu.
+      __ Beqz(T9, 2);  // Skip jialc.
+      __ Daui(TMP, obj, offset_high);  // In delay slot.
+      __ Jialc(T9, thunk_disp);
+      // /* HeapReference<Object> */ ref = *(obj + offset)
+      __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, offset_low);  // Single instruction.
+    }
+    if (needs_null_check) {
+      MaybeRecordImplicitNullCheck(instruction);
+    }
+    __ MaybeUnpoisonHeapReference(ref_reg);
+    return;
+  }
+
   // /* HeapReference<Object> */ ref = *(obj + offset)
   Location no_index = Location::NoLocation();
   ScaleFactor no_scale_factor = TIMES_1;
@@ -4481,9 +4665,57 @@
   static_assert(
       sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
+  ScaleFactor scale_factor = TIMES_4;
+
+  if (kBakerReadBarrierThunksEnableForArrays) {
+    // Note that we do not actually check the value of `GetIsGcMarking()`
+    // to decide whether to mark the loaded reference or not.  Instead, we
+    // load into `temp` (T9) the read barrier mark introspection entrypoint.
+    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
+    // vice versa.
+    //
+    // We use thunks for the slow path. That thunk checks the reference
+    // and jumps to the entrypoint if needed. If the holder is not gray,
+    // it issues a load-load memory barrier and returns to the original
+    // reference load.
+    //
+    //     temp = Thread::Current()->pReadBarrierMarkReg00
+    //     // AKA &art_quick_read_barrier_mark_introspection.
+    //     if (temp != nullptr) {
+    //        temp = &field_array_thunk<holder_reg>
+    //        temp()
+    //     }
+    //   not_gray_return_address:
+    //     // The element address is pre-calculated in the TMP register before the
+    //     // thunk invocation and the thunk benefits from it.
+    //     HeapReference<mirror::Object> reference = data[index];  // Original reference load.
+    //   gray_return_address:
+
+    DCHECK(temp.IsInvalid());
+    DCHECK(index.IsValid());
+    const int32_t entry_point_offset =
+        Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(0);
+    // We will not do the explicit null check in the thunk as some form of a null check
+    // must've been done earlier.
+    DCHECK(!needs_null_check);
+    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+    // Loading the entrypoint does not require a load acquire since it is only changed when
+    // threads are suspended or running a checkpoint.
+    __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
+    __ Beqz(T9, 2);  // Skip jialc.
+    GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
+    GpuRegister index_reg = index.AsRegister<GpuRegister>();
+    __ Dlsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
+    __ Jialc(T9, thunk_disp);
+    // /* HeapReference<Object> */ ref = *(obj + data_offset + (index << scale_factor))
+    DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))) << data_offset;
+    __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, data_offset);  // Single instruction.
+    __ MaybeUnpoisonHeapReference(ref_reg);
+    return;
+  }
+
   // /* HeapReference<Object> */ ref =
   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  ScaleFactor scale_factor = TIMES_4;
   GenerateReferenceLoadWithBakerReadBarrier(instruction,
                                             ref,
                                             obj,
@@ -5278,8 +5510,13 @@
       GpuRegister temp = non_baker_read_barrier
           ? out
           : locations->GetTemp(0).AsRegister<GpuRegister>();
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, temp, info_low);
-      GenerateGcRootFieldLoad(cls, out_loc, temp, /* placeholder */ 0x5678, read_barrier_option);
+      codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, temp);
+      GenerateGcRootFieldLoad(cls,
+                              out_loc,
+                              temp,
+                              /* placeholder */ 0x5678,
+                              read_barrier_option,
+                              &info_low->label);
       generate_null_check = true;
       break;
     }
@@ -5399,12 +5636,13 @@
       GpuRegister temp = non_baker_read_barrier
           ? out
           : locations->GetTemp(0).AsRegister<GpuRegister>();
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, temp, info_low);
+      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, temp);
       GenerateGcRootFieldLoad(load,
                               out_loc,
                               temp,
                               /* placeholder */ 0x5678,
-                              kCompilerReadBarrierOption);
+                              kCompilerReadBarrierOption,
+                              &info_low->label);
       SlowPathCodeMIPS64* slow_path =
           new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load, info_high);
       codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index c94cc93..d03a9ea 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -281,7 +281,8 @@
                                Location root,
                                GpuRegister obj,
                                uint32_t offset,
-                               ReadBarrierOption read_barrier_option);
+                               ReadBarrierOption read_barrier_option,
+                               Mips64Label* label_low = nullptr);
 
   void GenerateTestAndBranch(HInstruction* instruction,
                              size_t condition_input_index,
@@ -592,7 +593,7 @@
 
   void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
                                             GpuRegister out,
-                                            PcRelativePatchInfo* info_low);
+                                            PcRelativePatchInfo* info_low = nullptr);
 
   void PatchJitRootUse(uint8_t* code,
                        const uint8_t* roots_data,
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 01304ac..8fcceed 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -227,14 +227,6 @@
   return Location::FpuRegisterPairLocation(low.GetCode(), high.GetCode());
 }
 
-inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
-  DCHECK(HasShifterOperand(instruction, kArm));
-  // TODO: HAdd applied to the other integral types could make use of
-  // the SXTAB, SXTAH, UXTAB and UXTAH instructions.
-  return instruction->GetType() == Primitive::kPrimLong &&
-         (instruction->IsAdd() || instruction->IsSub());
-}
-
 }  // namespace helpers
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 142c957..18390cc 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -146,7 +146,10 @@
   //   that this method is actually inlined;
   // - if a method's name contains the substring "$noinline$", do not
   //   inline that method.
-  const bool honor_inlining_directives = IsCompilingWithCoreImage();
+  // We limit this to AOT compilation, as the JIT may or may not inline
+  // depending on the state of classes at runtime.
+  const bool honor_inlining_directives =
+      IsCompilingWithCoreImage() && Runtime::Current()->IsAotCompiler();
 
   // Keep a copy of all blocks when starting the visit.
   ArenaVector<HBasicBlock*> blocks = graph_->GetReversePostOrder();
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 839f328..8054140 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -664,10 +664,7 @@
     // TODO: remove redundant constructor fences (b/36656456).
     if (RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)) {
       // Compiling instance constructor.
-      if (kIsDebugBuild) {
-        std::string method_name = graph_->GetMethodName();
-        CHECK_EQ(std::string("<init>"), method_name);
-      }
+      DCHECK_STREQ("<init>", graph_->GetMethodName());
 
       HInstruction* fence_target = current_this_parameter_;
       DCHECK(fence_target != nullptr);
@@ -710,29 +707,18 @@
 
 ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType invoke_type) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<2> hs(soa.Self());
 
   ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
   Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
-  Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
-  // We fetch the referenced class eagerly (that is, the class pointed by in the MethodId
-  // at method_idx), as `CanAccessResolvedMethod` expects it be be in the dex cache.
-  Handle<mirror::Class> methods_class(hs.NewHandle(class_linker->ResolveReferencedClassOfMethod(
-      method_idx, dex_compilation_unit_->GetDexCache(), class_loader)));
 
-  if (UNLIKELY(methods_class == nullptr)) {
-    // Clean up any exception left by type resolution.
-    soa.Self()->ClearException();
-    return nullptr;
-  }
-
-  ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
-      *dex_compilation_unit_->GetDexFile(),
-      method_idx,
-      dex_compilation_unit_->GetDexCache(),
-      class_loader,
-      /* referrer */ nullptr,
-      invoke_type);
+  ArtMethod* resolved_method =
+      class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+          *dex_compilation_unit_->GetDexFile(),
+          method_idx,
+          dex_compilation_unit_->GetDexCache(),
+          class_loader,
+          graph_->GetArtMethod(),
+          invoke_type);
 
   if (UNLIKELY(resolved_method == nullptr)) {
     // Clean up any exception left by type resolution.
@@ -740,17 +726,14 @@
     return nullptr;
   }
 
-  // Check access. The class linker has a fast path for looking into the dex cache
-  // and does not check the access if it hits it.
-  if (compiling_class == nullptr) {
+  // The referrer may be unresolved for AOT if we're compiling a class that cannot be
+  // resolved because, for example, we don't find a superclass in the classpath.
+  if (graph_->GetArtMethod() == nullptr) {
+    // The class linker cannot check access without a referrer, so we have to do it.
+    // Fall back to HInvokeUnresolved if the method isn't public.
     if (!resolved_method->IsPublic()) {
       return nullptr;
     }
-  } else if (!compiling_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
-                                                       resolved_method,
-                                                       dex_compilation_unit_->GetDexCache().Get(),
-                                                       method_idx)) {
-    return nullptr;
   }
 
   // We have to special case the invoke-super case, as ClassLinker::ResolveMethod does not.
@@ -758,19 +741,26 @@
   // make this an invoke-unresolved to handle cross-dex invokes or abstract super methods, both of
   // which require runtime handling.
   if (invoke_type == kSuper) {
+    ObjPtr<mirror::Class> compiling_class = GetCompilingClass();
     if (compiling_class == nullptr) {
       // We could not determine the method's class we need to wait until runtime.
       DCHECK(Runtime::Current()->IsAotCompiler());
       return nullptr;
     }
-    if (!methods_class->IsAssignableFrom(compiling_class.Get())) {
+    ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType(
+        *dex_compilation_unit_->GetDexFile(),
+        dex_compilation_unit_->GetDexFile()->GetMethodId(method_idx).class_idx_,
+        dex_compilation_unit_->GetDexCache().Get(),
+        class_loader.Get());
+    DCHECK(referenced_class != nullptr);  // We have already resolved a method from this class.
+    if (!referenced_class->IsAssignableFrom(compiling_class)) {
       // We cannot statically determine the target method. The runtime will throw a
       // NoSuchMethodError on this one.
       return nullptr;
     }
     ArtMethod* actual_method;
-    if (methods_class->IsInterface()) {
-      actual_method = methods_class->FindVirtualMethodForInterfaceSuper(
+    if (referenced_class->IsInterface()) {
+      actual_method = referenced_class->FindVirtualMethodForInterfaceSuper(
           resolved_method, class_linker->GetImagePointerSize());
     } else {
       uint16_t vtable_index = resolved_method->GetMethodIndex();
@@ -797,12 +787,6 @@
     resolved_method = actual_method;
   }
 
-  // Check for incompatible class changes. The class linker has a fast path for
-  // looking into the dex cache and does not check incompatible class changes if it hits it.
-  if (resolved_method->CheckIncompatibleClassChange(invoke_type)) {
-    return nullptr;
-  }
-
   return resolved_method;
 }
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index d147166..f2a8cc0 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1867,33 +1867,35 @@
       ArtMethod* method = nullptr;
       switch (source_component_type) {
         case Primitive::kPrimBoolean:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([ZI[ZII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([ZI[ZII)V", image_size);
           break;
         case Primitive::kPrimByte:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([BI[BII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([BI[BII)V", image_size);
           break;
         case Primitive::kPrimChar:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([CI[CII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([CI[CII)V", image_size);
           break;
         case Primitive::kPrimShort:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([SI[SII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([SI[SII)V", image_size);
           break;
         case Primitive::kPrimInt:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([II[III)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([II[III)V", image_size);
           break;
         case Primitive::kPrimFloat:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([FI[FII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([FI[FII)V", image_size);
           break;
         case Primitive::kPrimLong:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([JI[JII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([JI[JII)V", image_size);
           break;
         case Primitive::kPrimDouble:
-          method = system->FindDeclaredDirectMethod("arraycopy", "([DI[DII)V", image_size);
+          method = system->FindClassMethod("arraycopy", "([DI[DII)V", image_size);
           break;
         default:
           LOG(FATAL) << "Unreachable";
       }
       DCHECK(method != nullptr);
+      DCHECK(method->IsStatic());
+      DCHECK(method->GetDeclaringClass() == system);
       invoke->SetResolvedMethod(method);
       // Sharpen the new invoke. Note that we do not update the dex method index of
       // the invoke, as we would need to look it up in the current dex file, and it
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index fe22595..a025fb1 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -29,8 +29,6 @@
 
 namespace arm {
 
-using helpers::ShifterOperandSupportsExtension;
-
 bool InstructionSimplifierArmVisitor::TryMergeIntoShifterOperand(HInstruction* use,
                                                                  HInstruction* bitfield_op,
                                                                  bool do_merge) {
@@ -76,7 +74,7 @@
       : kMaxLongShiftDistance;
 
   if (HDataProcWithShifterOp::IsExtensionOp(op_kind)) {
-    if (!ShifterOperandSupportsExtension(use)) {
+    if (!use->IsAdd() && (!use->IsSub() || use->GetType() != Primitive::kPrimLong)) {
       return false;
     }
   // Shift by 1 is a special case that results in the same number and type of instructions
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 890ba67..b76a0df 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -76,6 +76,7 @@
 #include "jit/debugger_interface.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
+#include "jit/jit_logger.h"
 #include "jni/quick/jni_compiler.h"
 #include "licm.h"
 #include "load_store_analysis.h"
@@ -334,7 +335,11 @@
     }
   }
 
-  bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
+  bool JitCompile(Thread* self,
+                  jit::JitCodeCache* code_cache,
+                  ArtMethod* method,
+                  bool osr,
+                  jit::JitLogger* jit_logger)
       OVERRIDE
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1136,7 +1141,8 @@
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
                                     ArtMethod* method,
-                                    bool osr) {
+                                    bool osr,
+                                    jit::JitLogger* jit_logger) {
   StackHandleScope<3> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       method->GetDeclaringClass()->GetClassLoader()));
@@ -1272,6 +1278,9 @@
   }
 
   Runtime::Current()->GetJit()->AddMemoryUsage(method, arena.BytesUsed());
+  if (jit_logger != nullptr) {
+    jit_logger->WriteLog(code, code_allocator.GetSize(), method);
+  }
 
   return true;
 }
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 98332d3..f172e16 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -525,8 +525,8 @@
       // Use a null loader. We should probably use the compiling method's class loader,
       // but then we would need to pass it to RTPVisitor just for this debug check. Since
       // the method is from the String class, the null loader is good enough.
-      Handle<mirror::ClassLoader> loader;
-      ArtMethod* method = cl->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+      Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
+      ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
           dex_file, invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect);
       DCHECK(method != nullptr);
       mirror::Class* declaring_class = method->GetDeclaringClass();
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index e78cd78..f025c0a 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -167,22 +167,346 @@
   HandleShiftLatencies(instr);
 }
 
-void SchedulingLatencyVisitorARM::VisitCondition(HCondition* instr) {
-  switch (instr->GetLeft()->GetType()) {
-    case Primitive::kPrimLong:
-      last_visited_internal_latency_ = 4 * kArmIntegerOpLatency;
+void SchedulingLatencyVisitorARM::HandleGenerateConditionWithZero(IfCondition condition) {
+  switch (condition) {
+    case kCondEQ:
+    case kCondBE:
+    case kCondNE:
+    case kCondA:
+      last_visited_internal_latency_ += kArmIntegerOpLatency;
+      last_visited_latency_ = kArmIntegerOpLatency;
       break;
-    case Primitive::kPrimFloat:
-    case Primitive::kPrimDouble:
-      last_visited_internal_latency_ = 2 * kArmFloatingPointOpLatency;
+    case kCondGE:
+      // Mvn
+      last_visited_internal_latency_ += kArmIntegerOpLatency;
+      FALLTHROUGH_INTENDED;
+    case kCondLT:
+      // Lsr
+      last_visited_latency_ = kArmIntegerOpLatency;
+      break;
+    case kCondAE:
+      // Trivially true.
+      // Mov
+      last_visited_latency_ = kArmIntegerOpLatency;
+      break;
+    case kCondB:
+      // Trivially false.
+      // Mov
+      last_visited_latency_ = kArmIntegerOpLatency;
       break;
     default:
-      last_visited_internal_latency_ = 2 * kArmIntegerOpLatency;
-      break;
+      LOG(FATAL) << "Unexpected condition " << condition;
+      UNREACHABLE();
   }
+}
+
+void SchedulingLatencyVisitorARM::HandleGenerateLongTestConstant(HCondition* condition) {
+  DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
+
+  IfCondition cond = condition->GetCondition();
+
+  HInstruction* right = condition->InputAt(1);
+
+  int64_t value = Uint64ConstantFrom(right);
+
+  // Comparisons against 0 are common enough, so codegen has special handling for them.
+  if (value == 0) {
+    switch (cond) {
+      case kCondNE:
+      case kCondA:
+      case kCondEQ:
+      case kCondBE:
+        // Orrs
+        last_visited_internal_latency_ += kArmIntegerOpLatency;
+        return;
+      case kCondLT:
+      case kCondGE:
+        // Cmp
+        last_visited_internal_latency_ += kArmIntegerOpLatency;
+        return;
+      case kCondB:
+      case kCondAE:
+        // Cmp
+        last_visited_internal_latency_ += kArmIntegerOpLatency;
+        return;
+      default:
+        break;
+    }
+  }
+
+  switch (cond) {
+    case kCondEQ:
+    case kCondNE:
+    case kCondB:
+    case kCondBE:
+    case kCondA:
+    case kCondAE: {
+      // Cmp, IT, Cmp
+      last_visited_internal_latency_ += 3 * kArmIntegerOpLatency;
+      break;
+    }
+    case kCondLE:
+    case kCondGT:
+      // Trivially true or false.
+      if (value == std::numeric_limits<int64_t>::max()) {
+        // Cmp
+        last_visited_internal_latency_ += kArmIntegerOpLatency;
+        break;
+      }
+      FALLTHROUGH_INTENDED;
+    case kCondGE:
+    case kCondLT: {
+      // Cmp, Sbcs
+      last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unreachable";
+      UNREACHABLE();
+  }
+}
+
+void SchedulingLatencyVisitorARM::HandleGenerateLongTest(HCondition* condition) {
+  DCHECK_EQ(condition->GetLeft()->GetType(), Primitive::kPrimLong);
+
+  IfCondition cond = condition->GetCondition();
+
+  switch (cond) {
+    case kCondEQ:
+    case kCondNE:
+    case kCondB:
+    case kCondBE:
+    case kCondA:
+    case kCondAE: {
+      // Cmp, IT, Cmp
+      last_visited_internal_latency_ += 3 * kArmIntegerOpLatency;
+      break;
+    }
+    case kCondLE:
+    case kCondGT:
+    case kCondGE:
+    case kCondLT: {
+      // Cmp, Sbcs
+      last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unreachable";
+      UNREACHABLE();
+  }
+}
+
+// The GenerateTest series of functions are all counted as internal latency.
+void SchedulingLatencyVisitorARM::HandleGenerateTest(HCondition* condition) {
+  const Primitive::Type type = condition->GetLeft()->GetType();
+
+  if (type == Primitive::kPrimLong) {
+    condition->InputAt(1)->IsConstant()
+        ? HandleGenerateLongTestConstant(condition)
+        : HandleGenerateLongTest(condition);
+  } else if (Primitive::IsFloatingPointType(type)) {
+    // GenerateVcmp + Vmrs
+    last_visited_internal_latency_ += 2 * kArmFloatingPointOpLatency;
+  } else {
+    // Cmp
+    last_visited_internal_latency_ += kArmIntegerOpLatency;
+  }
+}
+
+bool SchedulingLatencyVisitorARM::CanGenerateTest(HCondition* condition) {
+  if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
+    HInstruction* right = condition->InputAt(1);
+
+    if (right->IsConstant()) {
+      IfCondition c = condition->GetCondition();
+      const uint64_t value = Uint64ConstantFrom(right);
+
+      if (c < kCondLT || c > kCondGE) {
+        if (value != 0) {
+          return false;
+        }
+      } else if (c == kCondLE || c == kCondGT) {
+        if (value < std::numeric_limits<int64_t>::max() &&
+            !codegen_->GetAssembler()->ShifterOperandCanHold(SBC, High32Bits(value + 1), kCcSet)) {
+          return false;
+        }
+      } else if (!codegen_->GetAssembler()->ShifterOperandCanHold(SBC, High32Bits(value), kCcSet)) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+void SchedulingLatencyVisitorARM::HandleGenerateConditionGeneric(HCondition* cond) {
+  HandleGenerateTest(cond);
+
+  // Unlike codegen pass, we cannot check 'out' register IsLow() here,
+  // because scheduling is before liveness (location builder) and register allocator,
+  // so we can only choose to follow one path of codegen by assuming out.IsLow() is true.
+  last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
   last_visited_latency_ = kArmIntegerOpLatency;
 }
 
+void SchedulingLatencyVisitorARM::HandleGenerateEqualLong(HCondition* cond) {
+  DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong);
+
+  IfCondition condition = cond->GetCondition();
+
+  last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
+
+  if (condition == kCondNE) {
+    // Orrs, IT, Mov
+    last_visited_internal_latency_ += 3 * kArmIntegerOpLatency;
+  } else {
+    last_visited_internal_latency_ += kArmIntegerOpLatency;
+    HandleGenerateConditionWithZero(condition);
+  }
+}
+
+void SchedulingLatencyVisitorARM::HandleGenerateLongComparesAndJumps() {
+  last_visited_internal_latency_ += 4 * kArmIntegerOpLatency;
+  last_visited_internal_latency_ += kArmBranchLatency;
+}
+
+void SchedulingLatencyVisitorARM::HandleGenerateConditionLong(HCondition* cond) {
+  DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong);
+
+  IfCondition condition = cond->GetCondition();
+  HInstruction* right = cond->InputAt(1);
+
+  if (right->IsConstant()) {
+    // Comparisons against 0 are common enough, so codegen has special handling for them.
+    if (Uint64ConstantFrom(right) == 0) {
+      switch (condition) {
+        case kCondNE:
+        case kCondA:
+        case kCondEQ:
+        case kCondBE:
+          // Orr
+          last_visited_internal_latency_ += kArmIntegerOpLatency;
+          HandleGenerateConditionWithZero(condition);
+          return;
+        case kCondLT:
+        case kCondGE:
+          FALLTHROUGH_INTENDED;
+        case kCondAE:
+        case kCondB:
+          HandleGenerateConditionWithZero(condition);
+          return;
+        case kCondLE:
+        case kCondGT:
+        default:
+          break;
+      }
+    }
+  }
+
+  if ((condition == kCondEQ || condition == kCondNE) &&
+      !CanGenerateTest(cond)) {
+    HandleGenerateEqualLong(cond);
+    return;
+  }
+
+  if (CanGenerateTest(cond)) {
+    HandleGenerateConditionGeneric(cond);
+    return;
+  }
+
+  HandleGenerateLongComparesAndJumps();
+
+  last_visited_internal_latency_ += kArmIntegerOpLatency;
+  last_visited_latency_ = kArmBranchLatency;;
+}
+
+void SchedulingLatencyVisitorARM::HandleGenerateConditionIntegralOrNonPrimitive(HCondition* cond) {
+  const Primitive::Type type = cond->GetLeft()->GetType();
+
+  DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
+
+  if (type == Primitive::kPrimLong) {
+    HandleGenerateConditionLong(cond);
+    return;
+  }
+
+  IfCondition condition = cond->GetCondition();
+  HInstruction* right = cond->InputAt(1);
+  int64_t value;
+
+  if (right->IsConstant()) {
+    value = Uint64ConstantFrom(right);
+
+    // Comparisons against 0 are common enough, so codegen has special handling for them.
+    if (value == 0) {
+      switch (condition) {
+        case kCondNE:
+        case kCondA:
+        case kCondEQ:
+        case kCondBE:
+        case kCondLT:
+        case kCondGE:
+        case kCondAE:
+        case kCondB:
+          HandleGenerateConditionWithZero(condition);
+          return;
+        case kCondLE:
+        case kCondGT:
+        default:
+          break;
+      }
+    }
+  }
+
+  if (condition == kCondEQ || condition == kCondNE) {
+    if (condition == kCondNE) {
+      // CMP, IT, MOV.ne
+      last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
+      last_visited_latency_ = kArmIntegerOpLatency;
+    } else {
+      last_visited_internal_latency_ += kArmIntegerOpLatency;
+      HandleGenerateConditionWithZero(condition);
+    }
+    return;
+  }
+
+  HandleGenerateConditionGeneric(cond);
+}
+
+void SchedulingLatencyVisitorARM::HandleCondition(HCondition* cond) {
+  if (cond->IsEmittedAtUseSite()) {
+    last_visited_latency_ = 0;
+    return;
+  }
+
+  const Primitive::Type type = cond->GetLeft()->GetType();
+
+  if (Primitive::IsFloatingPointType(type)) {
+    HandleGenerateConditionGeneric(cond);
+    return;
+  }
+
+  DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
+
+  const IfCondition condition = cond->GetCondition();
+
+  if (type == Primitive::kPrimBoolean &&
+      cond->GetRight()->GetType() == Primitive::kPrimBoolean &&
+      (condition == kCondEQ || condition == kCondNE)) {
+    if (condition == kCondEQ) {
+      last_visited_internal_latency_ = kArmIntegerOpLatency;
+    }
+    last_visited_latency_ = kArmIntegerOpLatency;
+    return;
+  }
+
+  HandleGenerateConditionIntegralOrNonPrimitive(cond);
+}
+
+void SchedulingLatencyVisitorARM::VisitCondition(HCondition* instr) {
+  HandleCondition(instr);
+}
+
 void SchedulingLatencyVisitorARM::VisitCompare(HCompare* instr) {
   Primitive::Type type = instr->InputAt(0)->GetType();
   switch (type) {
@@ -269,7 +593,6 @@
   const HDataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind();
 
   if (instruction->GetType() == Primitive::kPrimInt) {
-    DCHECK(!HDataProcWithShifterOp::IsExtensionOp(op_kind));
     HandleGenerateDataProcInstruction();
   } else {
     DCHECK_EQ(instruction->GetType(), Primitive::kPrimLong);
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index a9f2295..fe274d2 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -109,6 +109,17 @@
 #undef DECLARE_VISIT_INSTRUCTION
 
  private:
+  bool CanGenerateTest(HCondition* cond);
+  void HandleGenerateConditionWithZero(IfCondition cond);
+  void HandleGenerateLongTestConstant(HCondition* cond);
+  void HandleGenerateLongTest(HCondition* cond);
+  void HandleGenerateLongComparesAndJumps();
+  void HandleGenerateTest(HCondition* cond);
+  void HandleGenerateConditionGeneric(HCondition* cond);
+  void HandleGenerateEqualLong(HCondition* cond);
+  void HandleGenerateConditionLong(HCondition* cond);
+  void HandleGenerateConditionIntegralOrNonPrimitive(HCondition* cond);
+  void HandleCondition(HCondition* instr);
   void HandleBinaryOperationLantencies(HBinaryOperation* instr);
   void HandleBitwiseOperationLantencies(HBinaryOperation* instr);
   void HandleShiftLatencies(HBinaryOperation* instr);
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c581f1c..24e3450 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -828,6 +828,22 @@
   DsFsmInstrRrr(EmitI(0xf, rs, rt, imm16), rt, rt, rs);
 }
 
+void MipsAssembler::AddUpper(Register rt, Register rs, uint16_t imm16, Register tmp) {
+  bool increment = (rs == rt);
+  if (increment) {
+    CHECK_NE(rs, tmp);
+  }
+  if (IsR6()) {
+    Aui(rt, rs, imm16);
+  } else if (increment) {
+    Lui(tmp, imm16);
+    Addu(rt, rs, tmp);
+  } else {
+    Lui(rt, imm16);
+    Addu(rt, rs, rt);
+  }
+}
+
 void MipsAssembler::Sync(uint32_t stype) {
   DsFsmInstrNop(EmitR(0, ZERO, ZERO, ZERO, stype & 0x1f, 0xf));
 }
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 33803bb..e42bb3f 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -280,6 +280,7 @@
   void Lwpc(Register rs, uint32_t imm19);  // R6
   void Lui(Register rt, uint16_t imm16);
   void Aui(Register rt, Register rs, uint16_t imm16);  // R6
+  void AddUpper(Register rt, Register rs, uint16_t imm16, Register tmp = AT);
   void Sync(uint32_t stype);
   void Mfhi(Register rd);  // R2
   void Mflo(Register rd);  // R2
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 24900a7..9039854 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -795,6 +795,10 @@
   EmitFI(0x11, 0xD, ft, imm16);
 }
 
+void Mips64Assembler::Beqz(GpuRegister rt, uint16_t imm16) {
+  EmitI(0x4, ZERO, rt, imm16);
+}
+
 void Mips64Assembler::EmitBcondc(BranchCondition cond,
                                  GpuRegister rs,
                                  GpuRegister rt,
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 773db9b..5e88033 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -563,6 +563,7 @@
   void Bnezc(GpuRegister rs, uint32_t imm21);
   void Bc1eqz(FpuRegister ft, uint16_t imm16);
   void Bc1nez(FpuRegister ft, uint16_t imm16);
+  void Beqz(GpuRegister rt, uint16_t imm16);
 
   void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
   void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 686da21..72e2a6c 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -155,13 +155,14 @@
 
     ArtMethod* method = nullptr;
     while (it.HasNextDirectMethod()) {
-      ArtMethod* resolved_method = class_linker_->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
-          *primary_dex_file_,
-          it.GetMemberIndex(),
-          dex_cache_handle,
-          class_loader_handle,
-          nullptr,
-          it.GetMethodInvokeType(*class_def));
+      ArtMethod* resolved_method =
+          class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
+              *primary_dex_file_,
+              it.GetMemberIndex(),
+              dex_cache_handle,
+              class_loader_handle,
+              nullptr,
+              it.GetMethodInvokeType(*class_def));
       CHECK(resolved_method != nullptr);
       if (method_name == resolved_method->GetName()) {
         method = resolved_method;
@@ -369,18 +370,14 @@
 
   // Iterates over all method resolution records, finds an entry which matches
   // the given field kind+class+name+signature and tests its properties.
-  bool HasMethod(const std::string& expected_kind,
-                 const std::string& expected_klass,
+  bool HasMethod(const std::string& expected_klass,
                  const std::string& expected_name,
                  const std::string& expected_signature,
                  bool expected_resolved,
                  const std::string& expected_access_flags = "",
                  const std::string& expected_decl_klass = "") {
     for (auto& dex_dep : verifier_deps_->dex_deps_) {
-      auto& storage = (expected_kind == "direct") ? dex_dep.second->direct_methods_
-                          : (expected_kind == "virtual") ? dex_dep.second->virtual_methods_
-                              : dex_dep.second->interface_methods_;
-      for (auto& entry : storage) {
+      for (const VerifierDeps::MethodResolution& entry : dex_dep.second->methods_) {
         if (expected_resolved != entry.IsResolved()) {
           continue;
         }
@@ -441,9 +438,7 @@
       has_assignability |= !entry.second->unassignable_types_.empty();
       has_classes |= !entry.second->classes_.empty();
       has_fields |= !entry.second->fields_.empty();
-      has_methods |= !entry.second->direct_methods_.empty();
-      has_methods |= !entry.second->virtual_methods_.empty();
-      has_methods |= !entry.second->interface_methods_.empty();
+      has_methods |= !entry.second->methods_.empty();
       has_unverified_classes |= !entry.second->unverified_classes_.empty();
     }
 
@@ -455,18 +450,6 @@
            has_unverified_classes;
   }
 
-  static std::set<VerifierDeps::MethodResolution>* GetMethods(
-      VerifierDeps::DexFileDeps* deps, MethodResolutionKind resolution_kind) {
-    if (resolution_kind == kDirectMethodResolution) {
-      return &deps->direct_methods_;
-    } else if (resolution_kind == kVirtualMethodResolution) {
-      return &deps->virtual_methods_;
-    } else {
-      DCHECK_EQ(resolution_kind, kInterfaceMethodResolution);
-      return &deps->interface_methods_;
-    }
-  }
-
   std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
   std::vector<const DexFile*> dex_files_;
   const DexFile* primary_dex_file_;
@@ -604,11 +587,10 @@
   ASSERT_TRUE(VerifyMethod("InvokeArgumentType"));
   ASSERT_TRUE(HasClass("Ljava/text/SimpleDateFormat;", true, "public"));
   ASSERT_TRUE(HasClass("Ljava/util/SimpleTimeZone;", true, "public"));
-  ASSERT_TRUE(HasMethod("virtual",
-                        "Ljava/text/SimpleDateFormat;",
+  ASSERT_TRUE(HasMethod("Ljava/text/SimpleDateFormat;",
                         "setTimeZone",
                         "(Ljava/util/TimeZone;)V",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/text/DateFormat;"));
   ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
@@ -840,11 +822,10 @@
 TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInReferenced) {
   ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInReferenced"));
   ASSERT_TRUE(HasClass("Ljava/net/Socket;", true, "public"));
-  ASSERT_TRUE(HasMethod("direct",
-                        "Ljava/net/Socket;",
+  ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        true,
+                        /* expect_resolved */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -852,22 +833,20 @@
 TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass1) {
   ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInSuperclass1"));
   ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public"));
-  ASSERT_TRUE(HasMethod("direct",
-                        "Ljavax/net/ssl/SSLSocket;",
+  ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        true,
+                        /* expect_resolved */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass2) {
   ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInSuperclass2"));
-  ASSERT_TRUE(HasMethod("direct",
-                        "LMySSLSocket;",
+  ASSERT_TRUE(HasMethod("LMySSLSocket;",
                         "setSocketImplFactory",
                         "(Ljava/net/SocketImplFactory;)V",
-                        true,
+                        /* expect_resolved */ true,
                         "public static",
                         "Ljava/net/Socket;"));
 }
@@ -875,11 +854,10 @@
 TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface1) {
   ASSERT_TRUE(VerifyMethod("InvokeStatic_DeclaredInInterface1"));
   ASSERT_TRUE(HasClass("Ljava/util/Map$Entry;", true, "public interface"));
-  ASSERT_TRUE(HasMethod("direct",
-                        "Ljava/util/Map$Entry;",
+  ASSERT_TRUE(HasMethod("Ljava/util/Map$Entry;",
                         "comparingByKey",
                         "()Ljava/util/Comparator;",
-                        true,
+                        /* expect_resolved */ true,
                         "public static",
                         "Ljava/util/Map$Entry;"));
 }
@@ -887,68 +865,85 @@
 TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface2) {
   ASSERT_FALSE(VerifyMethod("InvokeStatic_DeclaredInInterface2"));
   ASSERT_TRUE(HasClass("Ljava/util/AbstractMap$SimpleEntry;", true, "public"));
-  ASSERT_TRUE(HasMethod("direct",
-                        "Ljava/util/AbstractMap$SimpleEntry;",
+  ASSERT_TRUE(HasMethod("Ljava/util/AbstractMap$SimpleEntry;",
                         "comparingByKey",
                         "()Ljava/util/Comparator;",
-                        false));
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) {
   ASSERT_FALSE(VerifyMethod("InvokeStatic_Unresolved1"));
   ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public"));
-  ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) {
   ASSERT_FALSE(VerifyMethod("InvokeStatic_Unresolved2"));
-  ASSERT_TRUE(HasMethod("direct", "LMySSLSocket;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("LMySSLSocket;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) {
   ASSERT_TRUE(VerifyMethod("InvokeDirect_Resolved_DeclaredInReferenced"));
   ASSERT_TRUE(HasClass("Ljava/net/Socket;", true, "public"));
-  ASSERT_TRUE(HasMethod(
-      "direct", "Ljava/net/Socket;", "<init>", "()V", true, "public", "Ljava/net/Socket;"));
+  ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
+                        "<init>",
+                        "()V",
+                        /* expect_resolved */ true,
+                        "public",
+                        "Ljava/net/Socket;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass1) {
   ASSERT_FALSE(VerifyMethod("InvokeDirect_Resolved_DeclaredInSuperclass1"));
   ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public"));
-  ASSERT_TRUE(HasMethod("direct",
-                        "Ljavax/net/ssl/SSLSocket;",
+  ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
                         "checkOldImpl",
                         "()V",
-                        true,
+                        /* expect_resolved */ true,
                         "private",
                         "Ljava/net/Socket;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass2) {
   ASSERT_FALSE(VerifyMethod("InvokeDirect_Resolved_DeclaredInSuperclass2"));
-  ASSERT_TRUE(HasMethod(
-      "direct", "LMySSLSocket;", "checkOldImpl", "()V", true, "private", "Ljava/net/Socket;"));
+  ASSERT_TRUE(HasMethod("LMySSLSocket;",
+                        "checkOldImpl",
+                        "()V",
+                        /* expect_resolved */ true,
+                        "private",
+                        "Ljava/net/Socket;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Unresolved1) {
   ASSERT_FALSE(VerifyMethod("InvokeDirect_Unresolved1"));
   ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public"));
-  ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) {
   ASSERT_FALSE(VerifyMethod("InvokeDirect_Unresolved2"));
-  ASSERT_TRUE(HasMethod("direct", "LMySSLSocket;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("LMySSLSocket;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) {
   ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInReferenced"));
   ASSERT_TRUE(HasClass("Ljava/lang/Throwable;", true, "public"));
-  ASSERT_TRUE(HasMethod("virtual",
-                        "Ljava/lang/Throwable;",
+  ASSERT_TRUE(HasMethod("Ljava/lang/Throwable;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
@@ -958,11 +953,10 @@
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass1) {
   ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInSuperclass1"));
   ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public"));
-  ASSERT_TRUE(HasMethod("virtual",
-                        "Ljava/io/InterruptedIOException;",
+  ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
   // Type dependency on `this` argument.
@@ -971,22 +965,20 @@
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass2) {
   ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInSuperclass2"));
-  ASSERT_TRUE(HasMethod("virtual",
-                        "LMySocketTimeoutException;",
+  ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
                         "getMessage",
                         "()Ljava/lang/String;",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/lang/Throwable;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperinterface) {
   ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInSuperinterface"));
-  ASSERT_TRUE(HasMethod("virtual",
-                        "LMyThreadSet;",
+  ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "size",
                         "()I",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/util/Set;"));
 }
@@ -994,61 +986,59 @@
 TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved1) {
   ASSERT_FALSE(VerifyMethod("InvokeVirtual_Unresolved1"));
   ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public"));
-  ASSERT_TRUE(HasMethod("virtual", "Ljava/io/InterruptedIOException;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) {
   ASSERT_FALSE(VerifyMethod("InvokeVirtual_Unresolved2"));
-  ASSERT_TRUE(HasMethod("virtual", "LMySocketTimeoutException;", "x", "()V", false));
-}
-
-TEST_F(VerifierDepsTest, InvokeVirtual_ActuallyDirect) {
-  ASSERT_FALSE(VerifyMethod("InvokeVirtual_ActuallyDirect"));
-  ASSERT_TRUE(HasMethod("virtual", "LMyThread;", "activeCount", "()I", false));
-  ASSERT_TRUE(HasMethod("direct",
-                        "LMyThread;",
-                        "activeCount",
-                        "()I",
-                        true,
-                        "public static",
-                        "Ljava/lang/Thread;"));
+  ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) {
   ASSERT_TRUE(VerifyMethod("InvokeInterface_Resolved_DeclaredInReferenced"));
   ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public interface"));
-  ASSERT_TRUE(HasMethod("interface",
-                        "Ljava/lang/Runnable;",
+  ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "run",
                         "()V",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/lang/Runnable;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperclass) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Resolved_DeclaredInSuperclass"));
-  ASSERT_TRUE(HasMethod("interface", "LMyThread;", "join", "()V", false));
+  // TODO: Maybe we should not record dependency if the invoke type does not match the lookup type.
+  ASSERT_TRUE(HasMethod("LMyThread;",
+                        "join",
+                        "()V",
+                        /* expect_resolved */ true,
+                        "public",
+                        "Ljava/lang/Thread;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface1) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Resolved_DeclaredInSuperinterface1"));
-  ASSERT_TRUE(HasMethod("interface",
-                        "LMyThreadSet;",
+  // TODO: Maybe we should not record dependency if the invoke type does not match the lookup type.
+  ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "run",
                         "()V",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
-                        "Ljava/lang/Runnable;"));
+                        "Ljava/lang/Thread;"));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface2) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Resolved_DeclaredInSuperinterface2"));
-  ASSERT_TRUE(HasMethod("interface",
-                        "LMyThreadSet;",
+  ASSERT_TRUE(HasMethod("LMyThreadSet;",
                         "isEmpty",
                         "()Z",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/util/Set;"));
 }
@@ -1056,23 +1046,25 @@
 TEST_F(VerifierDepsTest, InvokeInterface_Unresolved1) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved1"));
   ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public interface"));
-  ASSERT_TRUE(HasMethod("interface", "Ljava/lang/Runnable;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
+                        "x",
+                        "()V",
+                        /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) {
   ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved2"));
-  ASSERT_TRUE(HasMethod("interface", "LMyThreadSet;", "x", "()V", false));
+  ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved */ false));
 }
 
 TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) {
   ASSERT_TRUE(VerifyMethod("InvokeSuper_ThisAssignable"));
   ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public interface"));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Runnable;", "Ljava/lang/Thread;", true));
-  ASSERT_TRUE(HasMethod("interface",
-                        "Ljava/lang/Runnable;",
+  ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
                         "run",
                         "()V",
-                        true,
+                        /* expect_resolved */ true,
                         "public",
                         "Ljava/lang/Runnable;"));
 }
@@ -1081,8 +1073,10 @@
   ASSERT_FALSE(VerifyMethod("InvokeSuper_ThisNotAssignable"));
   ASSERT_TRUE(HasClass("Ljava/lang/Integer;", true, "public"));
   ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false));
-  ASSERT_TRUE(HasMethod(
-      "virtual", "Ljava/lang/Integer;", "intValue", "()I", true, "public", "Ljava/lang/Integer;"));
+  ASSERT_TRUE(HasMethod("Ljava/lang/Integer;",
+                        "intValue", "()I",
+                        /* expect_resolved */ true,
+                        "public", "Ljava/lang/Integer;"));
 }
 
 TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) {
@@ -1150,18 +1144,6 @@
   ASSERT_TRUE(HasUnverifiedClass("LMyClassWithNoSuperButFailures;"));
 }
 
-// Returns the next resolution kind in the enum.
-static MethodResolutionKind GetNextResolutionKind(MethodResolutionKind resolution_kind) {
-  if (resolution_kind == kDirectMethodResolution) {
-    return kVirtualMethodResolution;
-  } else if (resolution_kind == kVirtualMethodResolution) {
-    return kInterfaceMethodResolution;
-  } else {
-    DCHECK_EQ(resolution_kind, kInterfaceMethodResolution);
-    return kDirectMethodResolution;
-  }
-}
-
 TEST_F(VerifierDepsTest, VerifyDeps) {
   VerifyDexFile();
 
@@ -1338,131 +1320,82 @@
   }
 
   // Mess up with methods.
-  for (MethodResolutionKind resolution_kind :
-            { kDirectMethodResolution, kVirtualMethodResolution, kInterfaceMethodResolution }) {
-    {
-      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
-      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
-      bool found = false;
-      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
-      for (const auto& entry : *methods) {
-        if (entry.IsResolved()) {
-          methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
-                                                         VerifierDeps::kUnresolvedMarker,
-                                                         entry.GetDeclaringClassIndex()));
-          found = true;
-          break;
-        }
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    std::set<VerifierDeps::MethodResolution>* methods = &deps->methods_;
+    for (const auto& entry : *methods) {
+      if (entry.IsResolved()) {
+        methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                                       VerifierDeps::kUnresolvedMarker,
+                                                       entry.GetDeclaringClassIndex()));
+        found = true;
+        break;
       }
-      ASSERT_TRUE(found);
-      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
-      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
     }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
 
-    {
-      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
-      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
-      bool found = false;
-      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
-      for (const auto& entry : *methods) {
-        if (!entry.IsResolved()) {
-          constexpr dex::StringIndex kStringIndexZero(0);  // We know there is a class there.
-          methods->insert(VerifierDeps::MethodResolution(0 /* we know there is a method there */,
-                                                         VerifierDeps::kUnresolvedMarker - 1,
-                                                         kStringIndexZero));
-          found = true;
-          break;
-        }
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    std::set<VerifierDeps::MethodResolution>* methods = &deps->methods_;
+    for (const auto& entry : *methods) {
+      if (!entry.IsResolved()) {
+        constexpr dex::StringIndex kStringIndexZero(0);  // We know there is a class there.
+        methods->insert(VerifierDeps::MethodResolution(0 /* we know there is a method there */,
+                                                       VerifierDeps::kUnresolvedMarker - 1,
+                                                       kStringIndexZero));
+        found = true;
+        break;
       }
-      ASSERT_TRUE(found);
-      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
-      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
     }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
 
-    {
-      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
-      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
-      bool found = false;
-      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
-      for (const auto& entry : *methods) {
-        if (entry.IsResolved()) {
-          methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
-                                                         entry.GetAccessFlags() - 1,
-                                                         entry.GetDeclaringClassIndex()));
-          found = true;
-          break;
-        }
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    std::set<VerifierDeps::MethodResolution>* methods = &deps->methods_;
+    for (const auto& entry : *methods) {
+      if (entry.IsResolved()) {
+        methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                                       entry.GetAccessFlags() - 1,
+                                                       entry.GetDeclaringClassIndex()));
+        found = true;
+        break;
       }
-      ASSERT_TRUE(found);
-      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
-      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
     }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
+  }
 
-    {
-      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
-      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
-      bool found = false;
-      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
-      for (const auto& entry : *methods) {
-        constexpr dex::StringIndex kNewTypeIndex(0);
-        if (entry.IsResolved() && entry.GetDeclaringClassIndex() != kNewTypeIndex) {
-          methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
-                                                         entry.GetAccessFlags(),
-                                                         kNewTypeIndex));
-          found = true;
-          break;
-        }
+  {
+    VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
+    VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
+    bool found = false;
+    std::set<VerifierDeps::MethodResolution>* methods = &deps->methods_;
+    for (const auto& entry : *methods) {
+      constexpr dex::StringIndex kNewTypeIndex(0);
+      if (entry.IsResolved() && entry.GetDeclaringClassIndex() != kNewTypeIndex) {
+        methods->insert(VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
+                                                       entry.GetAccessFlags(),
+                                                       kNewTypeIndex));
+        found = true;
+        break;
       }
-      ASSERT_TRUE(found);
-      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
-      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
     }
-
-    // The two tests below make sure that fiddling with the method kind
-    // (static, virtual, interface) is detected by `ValidateDependencies`.
-
-    // An interface method lookup can succeed with a virtual method lookup on the same class.
-    // That's OK, as we only want to make sure there is a method being defined with the right
-    // flags. Therefore, polluting the interface methods with virtual methods does not have
-    // to fail verification.
-    if (resolution_kind != kVirtualMethodResolution) {
-      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
-      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
-      bool found = false;
-      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
-      for (const auto& entry : *methods) {
-        if (entry.IsResolved()) {
-          GetMethods(deps, GetNextResolutionKind(resolution_kind))->insert(
-              VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
-                                             entry.GetAccessFlags(),
-                                             entry.GetDeclaringClassIndex()));
-          found = true;
-        }
-      }
-      ASSERT_TRUE(found);
-      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
-      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
-    }
-
-    // See comment above that applies the same way.
-    if (resolution_kind != kInterfaceMethodResolution) {
-      VerifierDeps decoded_deps(dex_files_, ArrayRef<const uint8_t>(buffer));
-      VerifierDeps::DexFileDeps* deps = decoded_deps.GetDexFileDeps(*primary_dex_file_);
-      bool found = false;
-      std::set<VerifierDeps::MethodResolution>* methods = GetMethods(deps, resolution_kind);
-      for (const auto& entry : *methods) {
-        if (entry.IsResolved()) {
-          GetMethods(deps, GetNextResolutionKind(GetNextResolutionKind(resolution_kind)))->insert(
-              VerifierDeps::MethodResolution(entry.GetDexMethodIndex(),
-                                             entry.GetAccessFlags(),
-                                             entry.GetDeclaringClassIndex()));
-          found = true;
-        }
-      }
-      ASSERT_TRUE(found);
-      new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
-      ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
-    }
+    ASSERT_TRUE(found);
+    new_class_loader.Assign(soa.Decode<mirror::ClassLoader>(LoadDex("VerifierDeps")));
+    ASSERT_FALSE(decoded_deps.ValidateDependencies(new_class_loader, soa.Self()));
   }
 }
 
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index 85debe4..e735e2f 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -22,9 +22,9 @@
 #include <memory>
 
 #include "jni.h"
-#include "JniInvocation.h"
-#include "ScopedLocalRef.h"
-#include "toStringArray.h"
+#include "nativehelper/JniInvocation.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/toStringArray.h"
 
 namespace art {
 
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 113bdb5..dadea76 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -76,13 +76,13 @@
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "oat_file.h"
 #include "oat_file_assistant.h"
 #include "oat_writer.h"
 #include "os.h"
 #include "runtime.h"
 #include "runtime_options.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "utils.h"
 #include "vdex_file.h"
@@ -414,16 +414,20 @@
   UsageError("      ");
   UsageError("      The chain is interpreted in the natural 'parent order', meaning that class");
   UsageError("      loader 'i+1' will be the parent of class loader 'i'.");
-  UsageError("      The compilation sources will be added to the classpath of the last class");
-  UsageError("      loader. This allows the compiled dex files to be loaded at runtime in");
-  UsageError("      a class loader that contains other dex files as well (e.g. shared libraries).");
+  UsageError("      The compilation sources will be appended to the classpath of the first class");
+  UsageError("      loader.");
+  UsageError("      ");
+  UsageError("      E.g. if the context is 'PCL[lib1.dex];DLC[lib2.dex]' and ");
+  UsageError("      --dex-file=src.dex then dex2oat will setup a PathClassLoader with classpath ");
+  UsageError("      'lib1.dex:src.dex' and set its parent to a DelegateLastClassLoader with ");
+  UsageError("      classpath 'lib2.dex'.");
   UsageError("      ");
   UsageError("      Note that the compiler will be tolerant if the source dex files specified");
   UsageError("      with --dex-file are found in the classpath. The source dex files will be");
   UsageError("      removed from any class loader's classpath possibly resulting in empty");
   UsageError("      class loaders.");
   UsageError("      ");
-  UsageError("      Example: --classloader-spec=PCL[lib1.dex:lib2.dex];DLC[lib3.dex]");
+  UsageError("      Example: --class-loader-context=PCL[lib1.dex:lib2.dex];DLC[lib3.dex]");
   UsageError("");
   std::cerr << "See log for usage error information\n";
   exit(EXIT_FAILURE);
@@ -1516,18 +1520,15 @@
       return dex2oat::ReturnCode::kOther;
     }
 
-    if (CompilerFilter::IsAnyCompilationEnabled(compiler_options_->GetCompilerFilter())) {
-      // Only modes with compilation require verification results.
-      verification_results_.reset(new VerificationResults(compiler_options_.get()));
-    }
+    // Verification results are null since we don't know if we will need them yet as the compiler
+    // filter may change.
     callbacks_.reset(new QuickCompilerCallbacks(
-        verification_results_.get(),
         IsBootImage() ?
             CompilerCallbacks::CallbackMode::kCompileBootImage :
             CompilerCallbacks::CallbackMode::kCompileApp));
 
     RuntimeArgumentMap runtime_options;
-    if (!PrepareRuntimeOptions(&runtime_options)) {
+    if (!PrepareRuntimeOptions(&runtime_options, callbacks_.get())) {
       return dex2oat::ReturnCode::kOther;
     }
 
@@ -1658,6 +1659,28 @@
 
     dex_files_ = MakeNonOwningPointerVector(opened_dex_files_);
 
+    // If we need to downgrade the compiler-filter for size reasons.
+    if (!IsBootImage() && IsVeryLarge(dex_files_)) {
+      if (!CompilerFilter::IsAsGoodAs(kLargeAppFilter, compiler_options_->GetCompilerFilter())) {
+        LOG(INFO) << "Very large app, downgrading to verify.";
+        // Note: this change won't be reflected in the key-value store, as that had to be
+        //       finalized before loading the dex files. This setup is currently required
+        //       to get the size from the DexFile objects.
+        // TODO: refactor. b/29790079
+        compiler_options_->SetCompilerFilter(kLargeAppFilter);
+      }
+    }
+
+    if (CompilerFilter::IsAnyCompilationEnabled(compiler_options_->GetCompilerFilter())) {
+      // Only modes with compilation require verification results, do this here instead of when we
+      // create the compilation callbacks since the compilation mode may have been changed by the
+      // very large app logic.
+      // Avoiding setting the verification results saves RAM by not adding the dex files later in
+      // the function.
+      verification_results_.reset(new VerificationResults(compiler_options_.get()));
+      callbacks_->SetVerificationResults(verification_results_.get());
+    }
+
     // We had to postpone the swap decision till now, as this is the point when we actually
     // know about the dex files we're going to use.
 
@@ -1674,19 +1697,6 @@
       }
     }
     // Note that dex2oat won't close the swap_fd_. The compiler driver's swap space will do that.
-
-    // If we need to downgrade the compiler-filter for size reasons, do that check now.
-    if (!IsBootImage() && IsVeryLarge(dex_files_)) {
-      if (!CompilerFilter::IsAsGoodAs(kLargeAppFilter, compiler_options_->GetCompilerFilter())) {
-        LOG(INFO) << "Very large app, downgrading to verify.";
-        // Note: this change won't be reflected in the key-value store, as that had to be
-        //       finalized before loading the dex files. This setup is currently required
-        //       to get the size from the DexFile objects.
-        // TODO: refactor. b/29790079
-        compiler_options_->SetCompilerFilter(kLargeAppFilter);
-      }
-    }
-
     if (IsBootImage()) {
       // For boot image, pass opened dex files to the Runtime::Create().
       // Note: Runtime acquires ownership of these dex files.
@@ -1783,7 +1793,7 @@
         for (const DexFile* dex_file : *dex_file_vector) {
           for (const std::string& filter : no_inline_filters) {
             // Use dex_file->GetLocation() rather than dex_file->GetBaseLocation(). This
-            // allows tests to specify <test-dexfile>:classes2.dex if needed but if the
+            // allows tests to specify <test-dexfile>!classes2.dex if needed but if the
             // base location passes the StartsWith() test, so do all extra locations.
             std::string dex_location = dex_file->GetLocation();
             if (filter.find('/') == std::string::npos) {
@@ -2452,7 +2462,8 @@
     }
   }
 
-  bool PrepareRuntimeOptions(RuntimeArgumentMap* runtime_options) {
+  bool PrepareRuntimeOptions(RuntimeArgumentMap* runtime_options,
+                             QuickCompilerCallbacks* callbacks) {
     RuntimeOptions raw_options;
     if (boot_image_filename_.empty()) {
       std::string boot_class_path = "-Xbootclasspath:";
@@ -2470,7 +2481,7 @@
       raw_options.push_back(std::make_pair(runtime_args_[i], nullptr));
     }
 
-    raw_options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+    raw_options.push_back(std::make_pair("compilercallbacks", callbacks));
     raw_options.push_back(
         std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
 
@@ -2542,7 +2553,6 @@
         runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
       }
     }
-    runtime_->GetClassLinker()->FixupDexCaches(runtime_->GetResolutionMethod());
 
     // Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
     // set up.
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index ed1aee6..68ec0b5 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1102,4 +1102,16 @@
   RunTest(context.c_str(), kEmptyClassPathKey, /*expected_success*/ true);
 }
 
+TEST_F(Dex2oatClassLoaderContextTest, ChainContext) {
+  std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+  std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+  std::string context = "PCL[" + GetTestDexFileName("Nested") + "];" +
+      "DLC[" + GetTestDexFileName("MultiDex") + "]";
+  std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "];" +
+      "DLC[" + CreateClassPathWithChecksums(dex_files2) + "]";
+
+  RunTest(context.c_str(), expected_classpath_key.c_str(), true);
+}
+
 }  // namespace art
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 2763c07..5d9e361 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -50,6 +50,803 @@
 
 using android::base::StringPrintf;
 
+namespace {
+
+constexpr size_t kMaxAddressPrint = 5;
+
+enum class ProcessType {
+  kZygote,
+  kRemote
+};
+
+enum class RemoteProcesses {
+  kImageOnly,
+  kZygoteOnly,
+  kImageAndZygote
+};
+
+struct MappingData {
+  // The count of pages that are considered dirty by the OS.
+  size_t dirty_pages = 0;
+  // The count of pages that differ by at least one byte.
+  size_t different_pages = 0;
+  // The count of differing bytes.
+  size_t different_bytes = 0;
+  // The count of differing four-byte units.
+  size_t different_int32s = 0;
+  // The count of pages that have mapping count == 1.
+  size_t private_pages = 0;
+  // The count of private pages that are also dirty.
+  size_t private_dirty_pages = 0;
+  // The count of pages that are marked dirty but do not differ.
+  size_t false_dirty_pages = 0;
+  // Set of the local virtual page indices that are dirty.
+  std::set<size_t> dirty_page_set;
+};
+
+static std::string GetClassDescriptor(mirror::Class* klass)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  CHECK(klass != nullptr);
+
+  std::string descriptor;
+  const char* descriptor_str = klass->GetDescriptor(&descriptor /*out*/);
+
+  return std::string(descriptor_str);
+}
+
+static std::string PrettyFieldValue(ArtField* field, mirror::Object* object)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  std::ostringstream oss;
+  switch (field->GetTypeAsPrimitiveType()) {
+    case Primitive::kPrimNot: {
+      oss << object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+          field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimBoolean: {
+      oss << static_cast<bool>(object->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
+      break;
+    }
+    case Primitive::kPrimByte: {
+      oss << static_cast<int32_t>(object->GetFieldByte<kVerifyNone>(field->GetOffset()));
+      break;
+    }
+    case Primitive::kPrimChar: {
+      oss << object->GetFieldChar<kVerifyNone>(field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimShort: {
+      oss << object->GetFieldShort<kVerifyNone>(field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimInt: {
+      oss << object->GetField32<kVerifyNone>(field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimLong: {
+      oss << object->GetField64<kVerifyNone>(field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimFloat: {
+      oss << object->GetField32<kVerifyNone>(field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimDouble: {
+      oss << object->GetField64<kVerifyNone>(field->GetOffset());
+      break;
+    }
+    case Primitive::kPrimVoid: {
+      oss << "void";
+      break;
+    }
+  }
+  return oss.str();
+}
+
+template <typename K, typename V, typename D>
+static std::vector<std::pair<V, K>> SortByValueDesc(
+    const std::map<K, D> map,
+    std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
+  // Store value->key so that we can use the default sort from pair which
+  // sorts by value first and then key
+  std::vector<std::pair<V, K>> value_key_vector;
+
+  for (const auto& kv_pair : map) {
+    value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
+  }
+
+  // Sort in reverse (descending order)
+  std::sort(value_key_vector.rbegin(), value_key_vector.rend());
+  return value_key_vector;
+}
+
+// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
+// Returned pointer will point to inside of remote_contents.
+template <typename T>
+static T* FixUpRemotePointer(T* remote_ptr,
+                             std::vector<uint8_t>& remote_contents,
+                             const backtrace_map_t& boot_map) {
+  if (remote_ptr == nullptr) {
+    return nullptr;
+  }
+
+  uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);
+
+  CHECK_LE(boot_map.start, remote);
+  CHECK_GT(boot_map.end, remote);
+
+  off_t boot_offset = remote - boot_map.start;
+
+  return reinterpret_cast<T*>(&remote_contents[boot_offset]);
+}
+
+template <typename T>
+static T* RemoteContentsPointerToLocal(T* remote_ptr,
+                                       std::vector<uint8_t>& remote_contents,
+                                       const ImageHeader& image_header) {
+  if (remote_ptr == nullptr) {
+    return nullptr;
+  }
+
+  uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
+  ptrdiff_t boot_offset = remote - &remote_contents[0];
+
+  const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;
+
+  return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
+}
+
+template <typename T> size_t EntrySize(T* entry);
+template<> size_t EntrySize(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+  return object->SizeOf();
+}
+template<> size_t EntrySize(ArtMethod* art_method) REQUIRES_SHARED(Locks::mutator_lock_) {
+  return sizeof(*art_method);
+}
+
+template <typename T>
+static bool EntriesDiffer(T* entry1, T* entry2) REQUIRES_SHARED(Locks::mutator_lock_) {
+  return memcmp(entry1, entry2, EntrySize(entry1)) != 0;
+}
+
+template <typename T>
+struct RegionCommon {
+ public:
+  RegionCommon(std::ostream* os,
+               std::vector<uint8_t>* remote_contents,
+               std::vector<uint8_t>* zygote_contents,
+               const backtrace_map_t& boot_map,
+               const ImageHeader& image_header) :
+    os_(*os),
+    remote_contents_(remote_contents),
+    zygote_contents_(zygote_contents),
+    boot_map_(boot_map),
+    image_header_(image_header),
+    different_entries_(0),
+    dirty_entry_bytes_(0),
+    false_dirty_entry_bytes_(0) {
+    CHECK(remote_contents != nullptr);
+    CHECK(zygote_contents != nullptr);
+  }
+
+  void DumpSamplesAndOffsetCount() {
+    os_ << "      sample object addresses: ";
+    for (size_t i = 0; i < dirty_entries_.size() && i < kMaxAddressPrint; ++i) {
+      T* entry = dirty_entries_[i];
+      os_ << reinterpret_cast<void*>(entry) << ", ";
+    }
+    os_ << "\n";
+    os_ << "      dirty byte +offset:count list = ";
+    std::vector<std::pair<size_t, off_t>> field_dirty_count_sorted =
+        SortByValueDesc<off_t, size_t, size_t>(field_dirty_count_);
+    for (const std::pair<size_t, off_t>& pair : field_dirty_count_sorted) {
+      off_t offset = pair.second;
+      size_t count = pair.first;
+      os_ << "+" << offset << ":" << count << ", ";
+    }
+    os_ << "\n";
+  }
+
+  size_t GetDifferentEntryCount() const { return different_entries_; }
+  size_t GetDirtyEntryBytes() const { return dirty_entry_bytes_; }
+  size_t GetFalseDirtyEntryCount() const { return false_dirty_entries_.size(); }
+  size_t GetFalseDirtyEntryBytes() const { return false_dirty_entry_bytes_; }
+  size_t GetZygoteDirtyEntryCount() const { return zygote_dirty_entries_.size(); }
+
+ protected:
+  bool IsEntryOnDirtyPage(T* entry, const std::set<size_t>& dirty_pages) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    size_t size = EntrySize(entry);
+    size_t page_off = 0;
+    size_t current_page_idx;
+    uintptr_t entry_address = reinterpret_cast<uintptr_t>(entry);
+    // Iterate every page this entry belongs to
+    do {
+      current_page_idx = entry_address / kPageSize + page_off;
+      if (dirty_pages.find(current_page_idx) != dirty_pages.end()) {
+        // This entry is on a dirty page
+        return true;
+      }
+      page_off++;
+    } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));
+    return false;
+  }
+
+  void AddZygoteDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
+    zygote_dirty_entries_.insert(entry);
+  }
+
+  void AddImageDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
+    image_dirty_entries_.insert(entry);
+  }
+
+  void AddFalseDirtyEntry(T* entry) REQUIRES_SHARED(Locks::mutator_lock_) {
+    false_dirty_entries_.push_back(entry);
+    false_dirty_entry_bytes_ += EntrySize(entry);
+  }
+
+  // The output stream to write to.
+  std::ostream& os_;
+  // The byte contents of the remote (image) process' image.
+  std::vector<uint8_t>* remote_contents_;
+  // The byte contents of the zygote process' image.
+  std::vector<uint8_t>* zygote_contents_;
+  const backtrace_map_t& boot_map_;
+  const ImageHeader& image_header_;
+
+  // Count of entries that are different.
+  size_t different_entries_;
+
+  // Local entries that are dirty (differ in at least one byte).
+  size_t dirty_entry_bytes_;
+  std::vector<T*> dirty_entries_;
+
+  // Local entries that are clean, but located on dirty pages.
+  size_t false_dirty_entry_bytes_;
+  std::vector<T*> false_dirty_entries_;
+
+  // Image dirty entries
+  // If zygote_pid_only_ == true, these are shared dirty entries in the zygote.
+  // If zygote_pid_only_ == false, these are private dirty entries in the application.
+  std::set<T*> image_dirty_entries_;
+
+  // Zygote dirty entries (probably private dirty).
+  // We only add entries here if they differed in both the image and the zygote, so
+  // they are probably private dirty.
+  std::set<T*> zygote_dirty_entries_;
+
+  std::map<off_t /* field offset */, size_t /* count */> field_dirty_count_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(RegionCommon);
+};
+
+template <typename T>
+class RegionSpecializedBase : public RegionCommon<T> {
+};
+
+// Region analysis for mirror::Objects
+template<>
+class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object> {
+ public:
+  RegionSpecializedBase(std::ostream* os,
+                        std::vector<uint8_t>* remote_contents,
+                        std::vector<uint8_t>* zygote_contents,
+                        const backtrace_map_t& boot_map,
+                        const ImageHeader& image_header) :
+    RegionCommon<mirror::Object>(os, remote_contents, zygote_contents, boot_map, image_header),
+    os_(*os) { }
+
+  // Validate that `current` points at a plausible mirror::Object: correctly
+  // aligned, non-null class, and (under Baker read barriers) a sane RB state.
+  // CHECK-fails on violation.
+  void CheckEntrySanity(const uint8_t* current) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK_ALIGNED(current, kObjectAlignment);
+    mirror::Object* entry = reinterpret_cast<mirror::Object*>(const_cast<uint8_t*>(current));
+    // Sanity check that we are reading a real mirror::Object
+    CHECK(entry->GetClass() != nullptr) << "Image object at address "
+                                        << entry
+                                        << " has null class";
+    if (kUseBakerReadBarrier) {
+      entry->AssertReadBarrierState();
+    }
+  }
+
+  // Objects are laid out contiguously in the image region; the next entry
+  // starts after this object's size rounded up to kObjectAlignment.
+  mirror::Object* GetNextEntry(mirror::Object* entry)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint8_t* next =
+        reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
+    return reinterpret_cast<mirror::Object*>(next);
+  }
+
+  // Called once per visited object, before it is classified clean/dirty.
+  void VisitEntry(mirror::Object* entry)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Unconditionally store the class descriptor in case we need it later
+    mirror::Class* klass = entry->GetClass();
+    class_data_[klass].descriptor = GetClassDescriptor(klass);
+  }
+
+  // Count a clean object against its class's statistics.
+  void AddCleanEntry(mirror::Object* entry)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    class_data_[entry->GetClass()].AddCleanObject();
+  }
+
+  // Record an object that is byte-identical to the remote copy but lives on a
+  // dirty page: once globally (RegionCommon) and once per-class.
+  void AddFalseDirtyEntry(mirror::Object* entry)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    RegionCommon<mirror::Object>::AddFalseDirtyEntry(entry);
+    class_data_[entry->GetClass()].AddFalseDirtyObject(entry);
+  }
+
+  // Record an object that differs from its remote copy. For class objects we
+  // additionally accumulate per-byte-offset dirty counts and keep the local
+  // pointer for the detailed dump.
+  void AddDirtyEntry(mirror::Object* entry, mirror::Object* entry_remote)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    size_t entry_size = EntrySize(entry);
+    ++different_entries_;
+    dirty_entry_bytes_ += entry_size;
+    // Log dirty count and objects for class objects only.
+    mirror::Class* klass = entry->GetClass();
+    if (klass->IsClassClass()) {
+      // Increment counts for the fields that are dirty
+      const uint8_t* current = reinterpret_cast<const uint8_t*>(entry);
+      const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(entry_remote);
+      for (size_t i = 0; i < entry_size; ++i) {
+        if (current[i] != current_remote[i]) {
+          field_dirty_count_[i]++;
+        }
+      }
+      dirty_entries_.push_back(entry);
+    }
+    class_data_[klass].AddDirtyObject(entry, entry_remote);
+  }
+
+  // Print a detailed per-object report of which fields (or array elements)
+  // differ between the local object and the `remote_bytes` copy.
+  void DiffEntryContents(mirror::Object* entry, uint8_t* remote_bytes)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    const char* tabs = "    ";
+    // Attempt to find fields for all dirty bytes.
+    mirror::Class* klass = entry->GetClass();
+    if (entry->IsClass()) {
+      os_ << tabs
+          << "Class " << mirror::Class::PrettyClass(entry->AsClass()) << " " << entry << "\n";
+    } else {
+      os_ << tabs
+          << "Instance of " << mirror::Class::PrettyClass(klass) << " " << entry << "\n";
+    }
+
+    std::unordered_set<ArtField*> dirty_instance_fields;
+    std::unordered_set<ArtField*> dirty_static_fields;
+    // Examine the bytes comprising the Object, computing which fields are dirty
+    // and recording them for later display.  If the Object is an array object,
+    // compute the dirty entries.
+    const uint8_t* entry_bytes = reinterpret_cast<const uint8_t*>(entry);
+    mirror::Object* remote_entry = reinterpret_cast<mirror::Object*>(remote_bytes);
+    for (size_t i = 0, count = entry->SizeOf(); i < count; ++i) {
+      if (entry_bytes[i] != remote_bytes[i]) {
+        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
+        if (field != nullptr) {
+          dirty_instance_fields.insert(field);
+        } else if (entry->IsClass()) {
+          // Class objects also carry static fields after the instance fields.
+          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(entry->AsClass(), i);
+          if (field != nullptr) {
+            dirty_static_fields.insert(field);
+          }
+        }
+        if (field == nullptr) {
+          if (klass->IsArrayClass()) {
+            mirror::Class* component_type = klass->GetComponentType();
+            Primitive::Type primitive_type = component_type->GetPrimitiveType();
+            size_t component_size = Primitive::ComponentSize(primitive_type);
+            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
+            if (i >= data_offset) {
+              os_ << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
+              // Skip to next element to prevent spam.
+              i += component_size - 1;
+              continue;
+            }
+          }
+          os_ << tabs << "No field for byte offset " << i << "\n";
+        }
+      }
+    }
+    // Dump different fields.
+    if (!dirty_instance_fields.empty()) {
+      os_ << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
+      for (ArtField* field : dirty_instance_fields) {
+        os_ << tabs << ArtField::PrettyField(field)
+            << " original=" << PrettyFieldValue(field, entry)
+            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
+      }
+    }
+    if (!dirty_static_fields.empty()) {
+      os_ << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
+      for (ArtField* field : dirty_static_fields) {
+        os_ << tabs << ArtField::PrettyField(field)
+            << " original=" << PrettyFieldValue(field, entry)
+            << " remote=" << PrettyFieldValue(field, remote_entry) << "\n";
+      }
+    }
+    os_ << "\n";
+  }
+
+  // Report dirty-object statistics aggregated by class, most-dirty first.
+  // For java.lang.Class entries, additionally dump per-field dirtiness and the
+  // local vs remote class status.
+  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // vector of pairs (size_t count, Class*)
+    auto dirty_object_class_values =
+        SortByValueDesc<mirror::Class*, size_t, ClassData>(
+            class_data_,
+            [](const ClassData& d) { return d.dirty_object_count; });
+    os_ << "\n" << "  Dirty object count by class:\n";
+    for (const auto& vk_pair : dirty_object_class_values) {
+      size_t dirty_object_count = vk_pair.first;
+      mirror::Class* klass = vk_pair.second;
+      ClassData& class_data = class_data_[klass];
+      size_t object_sizes = class_data.dirty_object_size_in_bytes;
+      float avg_dirty_bytes_per_class =
+          class_data.dirty_object_byte_count * 1.0f / object_sizes;
+      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
+      const std::string& descriptor = class_data.descriptor;
+      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
+          << "objects: " << dirty_object_count << ", "
+          << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
+          << "avg object size: " << avg_object_size << ", "
+          << "class descriptor: '" << descriptor << "'"
+          << ")\n";
+      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
+        DumpSamplesAndOffsetCount();
+        os_ << "      field contents:\n";
+        for (mirror::Object* object : class_data.dirty_objects) {
+          // remote class object
+          auto remote_klass = reinterpret_cast<mirror::Class*>(object);
+          // local class object
+          auto local_klass =
+              RemoteContentsPointerToLocal(remote_klass,
+                                           *RegionCommon<mirror::Object>::remote_contents_,
+                                           RegionCommon<mirror::Object>::image_header_);
+          os_ << "        " << reinterpret_cast<const void*>(object) << " ";
+          os_ << "  class_status (remote): " << remote_klass->GetStatus() << ", ";
+          os_ << "  class_status (local): " << local_klass->GetStatus();
+          os_ << "\n";
+        }
+      }
+    }
+  }
+
+  // Report false-dirty object statistics aggregated by class (objects that are
+  // byte-identical to the remote copy but sit on dirty pages).
+  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // vector of pairs (size_t count, Class*)
+    auto false_dirty_object_class_values =
+        SortByValueDesc<mirror::Class*, size_t, ClassData>(
+            class_data_,
+            [](const ClassData& d) { return d.false_dirty_object_count; });
+    os_ << "\n" << "  False-dirty object count by class:\n";
+    for (const auto& vk_pair : false_dirty_object_class_values) {
+      size_t object_count = vk_pair.first;
+      mirror::Class* klass = vk_pair.second;
+      ClassData& class_data = class_data_[klass];
+      size_t object_sizes = class_data.false_dirty_byte_count;
+      float avg_object_size = object_sizes * 1.0f / object_count;
+      const std::string& descriptor = class_data.descriptor;
+      os_ << "    " << mirror::Class::PrettyClass(klass) << " ("
+          << "objects: " << object_count << ", "
+          << "avg object size: " << avg_object_size << ", "
+          << "total bytes: " << object_sizes << ", "
+          << "class descriptor: '" << descriptor << "'"
+          << ")\n";
+    }
+  }
+
+  // Report clean object counts aggregated by class.
+  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // vector of pairs (size_t count, Class*)
+    auto clean_object_class_values =
+        SortByValueDesc<mirror::Class*, size_t, ClassData>(
+            class_data_,
+            [](const ClassData& d) { return d.clean_object_count; });
+    os_ << "\n" << "  Clean object count by class:\n";
+    for (const auto& vk_pair : clean_object_class_values) {
+      os_ << "    " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
+    }
+  }
+
+ private:
+  // Aggregate and detail class data from an image diff.
+  struct ClassData {
+    size_t dirty_object_count = 0;
+    // Track only the byte-per-byte dirtiness (in bytes)
+    size_t dirty_object_byte_count = 0;
+    // Track the object-by-object dirtiness (in bytes)
+    size_t dirty_object_size_in_bytes = 0;
+    size_t clean_object_count = 0;
+    std::string descriptor;
+    size_t false_dirty_byte_count = 0;
+    size_t false_dirty_object_count = 0;
+    std::vector<mirror::Object*> false_dirty_objects;
+    // Remote pointers to dirty objects
+    std::vector<mirror::Object*> dirty_objects;
+
+    void AddCleanObject() REQUIRES_SHARED(Locks::mutator_lock_) {
+      ++clean_object_count;
+    }
+
+    // Note: stores the REMOTE pointer, to match how DumpDirtyEntries reads it.
+    void AddDirtyObject(mirror::Object* object, mirror::Object* object_remote)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      ++dirty_object_count;
+      dirty_object_byte_count += CountDirtyBytes(object, object_remote);
+      dirty_object_size_in_bytes += EntrySize(object);
+      dirty_objects.push_back(object_remote);
+    }
+
+    void AddFalseDirtyObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+      ++false_dirty_object_count;
+      false_dirty_objects.push_back(object);
+      false_dirty_byte_count += EntrySize(object);
+    }
+
+   private:
+    // Go byte-by-byte and figure out what exactly got dirtied
+    static size_t CountDirtyBytes(mirror::Object* object1, mirror::Object* object2)
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      const uint8_t* cur1 = reinterpret_cast<const uint8_t*>(object1);
+      const uint8_t* cur2 = reinterpret_cast<const uint8_t*>(object2);
+      size_t dirty_bytes = 0;
+      size_t object_size = EntrySize(object1);
+      for (size_t i = 0; i < object_size; ++i) {
+        if (cur1[i] != cur2[i]) {
+          dirty_bytes++;
+        }
+      }
+      return dirty_bytes;
+    }
+  };
+
+  std::ostream& os_;
+  // Per-class aggregates, keyed by the local class pointer.
+  std::map<mirror::Class*, ClassData> class_data_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
+};
+
+// Region analysis for ArtMethods.
+// TODO: most of these need work.
+template<>
+// Inherit publicly (like the mirror::Object specialization above): RegionData<T>
+// reaches RegionCommon<T> members (remote_contents_, dirty_entries_, the
+// Get*Count() accessors, ...) through this base, which is inaccessible through
+// a private base. `class` defaults to private inheritance, so spell it out.
+class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
+ public:
+  RegionSpecializedBase(std::ostream* os,
+                        std::vector<uint8_t>* remote_contents,
+                        std::vector<uint8_t>* zygote_contents,
+                        const backtrace_map_t& boot_map,
+                        const ImageHeader& image_header) :
+    RegionCommon<ArtMethod>(os, remote_contents, zygote_contents, boot_map, image_header),
+    os_(*os) { }
+
+  // No per-entry sanity checking is done for ArtMethods.
+  void CheckEntrySanity(const uint8_t* current ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+  }
+
+  // ArtMethods are laid out contiguously; step by the entry size rounded up
+  // to kObjectAlignment.
+  ArtMethod* GetNextEntry(ArtMethod* entry)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint8_t* next = reinterpret_cast<uint8_t*>(entry) + RoundUp(EntrySize(entry), kObjectAlignment);
+    return reinterpret_cast<ArtMethod*>(next);
+  }
+
+  // Nothing to pre-record per visited ArtMethod.
+  void VisitEntry(ArtMethod* method ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+  }
+
+  void AddFalseDirtyEntry(ArtMethod* method)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    RegionCommon<ArtMethod>::AddFalseDirtyEntry(method);
+  }
+
+  // Clean ArtMethods are not tracked individually.
+  void AddCleanEntry(ArtMethod* method ATTRIBUTE_UNUSED) {
+  }
+
+  // Record a method that differs from its remote copy, accumulating per-byte
+  // offset dirty counts (ArtMethods always log their dirty entries, unlike
+  // objects which only do so for classes).
+  void AddDirtyEntry(ArtMethod* method, ArtMethod* method_remote)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    size_t entry_size = EntrySize(method);
+    ++different_entries_;
+    dirty_entry_bytes_ += entry_size;
+    // Increment counts for the fields that are dirty
+    const uint8_t* current = reinterpret_cast<const uint8_t*>(method);
+    const uint8_t* current_remote = reinterpret_cast<const uint8_t*>(method_remote);
+    // ArtMethods always log their dirty count and entries.
+    for (size_t i = 0; i < entry_size; ++i) {
+      if (current[i] != current_remote[i]) {
+        field_dirty_count_[i]++;
+      }
+    }
+    dirty_entries_.push_back(method);
+  }
+
+  // Per-entry content diffing is not implemented for ArtMethods.
+  void DiffEntryContents(ArtMethod* method ATTRIBUTE_UNUSED,
+                         uint8_t* remote_bytes ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+  }
+
+  void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DumpSamplesAndOffsetCount();
+    os_ << "      field contents:\n";
+    for (ArtMethod* method : dirty_entries_) {
+      // The stored pointer is already an ArtMethod*; no cast needed.
+      // remote class
+      mirror::Class* remote_declaring_class =
+        FixUpRemotePointer(method->GetDeclaringClass(),
+                           *RegionCommon<ArtMethod>::remote_contents_,
+                           RegionCommon<ArtMethod>::boot_map_);
+      // local class
+      mirror::Class* declaring_class =
+        RemoteContentsPointerToLocal(remote_declaring_class,
+                                     *RegionCommon<ArtMethod>::remote_contents_,
+                                     RegionCommon<ArtMethod>::image_header_);
+      DumpOneArtMethod(method, declaring_class, remote_declaring_class);
+    }
+  }
+
+  void DumpFalseDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
+    os_ << "      field contents:\n";
+    for (ArtMethod* method : false_dirty_entries_) {
+      // local class
+      mirror::Class* declaring_class = method->GetDeclaringClass();
+      DumpOneArtMethod(method, declaring_class, nullptr);
+    }
+  }
+
+  // Clean ArtMethods are not tracked, so there is nothing to dump.
+  void DumpCleanEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
+  }
+
+ private:
+  std::ostream& os_;
+
+  // Print one method's JNI/quick entry points, native-ness, and the declaring
+  // class status (remote status only when a remote class is supplied).
+  void DumpOneArtMethod(ArtMethod* art_method,
+                        mirror::Class* declaring_class,
+                        mirror::Class* remote_declaring_class)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
+    os_ << "        " << reinterpret_cast<const void*>(art_method) << " ";
+    os_ << "  entryPointFromJni: "
+        << reinterpret_cast<const void*>(art_method->GetDataPtrSize(pointer_size)) << ", ";
+    os_ << "  entryPointFromQuickCompiledCode: "
+        << reinterpret_cast<const void*>(
+               art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size))
+        << ", ";
+    os_ << "  isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
+    os_ << "  class_status (local): " << declaring_class->GetStatus();
+    if (remote_declaring_class != nullptr) {
+      os_ << ",  class_status (remote): " << remote_declaring_class->GetStatus();
+    }
+    os_ << "\n";
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(RegionSpecializedBase);
+};
+
+// Drives the region analysis for entry type T: walks the image region,
+// classifies each entry as clean / dirty / false-dirty via the specialized
+// base, and prints aggregate and per-entry reports.
+template <typename T>
+class RegionData : public RegionSpecializedBase<T> {
+ public:
+  RegionData(std::ostream* os,
+             std::vector<uint8_t>* remote_contents,
+             std::vector<uint8_t>* zygote_contents,
+             const backtrace_map_t& boot_map,
+             const ImageHeader& image_header) :
+    RegionSpecializedBase<T>(os, remote_contents, zygote_contents, boot_map, image_header),
+    os_(*os) {
+    CHECK(remote_contents != nullptr);
+    CHECK(zygote_contents != nullptr);
+  }
+
+  // Walk over the type T entries in the region between begin_image_ptr and end_image_ptr,
+  // collecting and reporting data regarding dirty, difference, etc.
+  void ProcessRegion(const MappingData& mapping_data,
+                     RemoteProcesses remotes,
+                     const uint8_t* begin_image_ptr,
+                     const uint8_t* end_image_ptr)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Entries start after the (alignment-rounded) ImageHeader.
+    const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+    T* entry = reinterpret_cast<T*>(const_cast<uint8_t*>(current));
+    while (reinterpret_cast<uintptr_t>(entry) < reinterpret_cast<uintptr_t>(end_image_ptr)) {
+      ComputeEntryDirty(entry, begin_image_ptr, mapping_data.dirty_page_set);
+
+      entry = RegionSpecializedBase<T>::GetNextEntry(entry);
+    }
+
+    // Looking at only dirty pages, figure out how many of those bytes belong to dirty entries.
+    // TODO: fix this now that there are multiple regions in a mapping.
+    float true_dirtied_percent =
+        RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);
+
+    // Entry specific statistics.
+    os_ << RegionCommon<T>::GetDifferentEntryCount() << " different entries, \n  "
+        << RegionCommon<T>::GetDirtyEntryBytes() << " different entry [bytes], \n  "
+        << RegionCommon<T>::GetFalseDirtyEntryCount() << " false dirty entries,\n  "
+        << RegionCommon<T>::GetFalseDirtyEntryBytes() << " false dirty entry [bytes], \n  "
+        << true_dirtied_percent << " different entries-vs-total in a dirty page;\n  "
+        << "";
+
+    if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
+      // We only reach this point if both pids were specified.  Furthermore,
+      // entries are only displayed here if they differed in both the image
+      // and the zygote, so they are probably private dirty.
+      CHECK(remotes == RemoteProcesses::kImageAndZygote);
+      os_ << "\n" << "  Zygote dirty entries (probably shared dirty): ";
+      DiffDirtyEntries(ProcessType::kZygote, begin_image_ptr, RegionCommon<T>::zygote_contents_);
+    }
+    os_ << "\n";
+    switch (remotes) {
+      case RemoteProcesses::kZygoteOnly:
+        os_ << "  Zygote shared dirty entries: ";
+        break;
+      case RemoteProcesses::kImageAndZygote:
+        os_ << "  Application dirty entries (private dirty): ";
+        break;
+      case RemoteProcesses::kImageOnly:
+        os_ << "  Application dirty entries (unknown whether private or shared dirty): ";
+        break;
+    }
+    DiffDirtyEntries(ProcessType::kRemote, begin_image_ptr, RegionCommon<T>::remote_contents_);
+    RegionSpecializedBase<T>::DumpDirtyEntries();
+    RegionSpecializedBase<T>::DumpFalseDirtyEntries();
+    RegionSpecializedBase<T>::DumpCleanEntries();
+  }
+
+ private:
+  std::ostream& os_;
+
+  // Print detailed per-entry diffs for either the zygote-dirty or image-dirty
+  // set, comparing each local entry against the corresponding bytes in
+  // `contents` (the remote or zygote memory snapshot).
+  void DiffDirtyEntries(ProcessType process_type,
+                        const uint8_t* begin_image_ptr,
+                        std::vector<uint8_t>* contents)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    os_ << RegionCommon<T>::dirty_entries_.size() << "\n";
+    const std::set<T*>& entries =
+        (process_type == ProcessType::kZygote) ?
+            RegionCommon<T>::zygote_dirty_entries_:
+            RegionCommon<T>::image_dirty_entries_;
+    for (T* entry : entries) {
+      uint8_t* entry_bytes = reinterpret_cast<uint8_t*>(entry);
+      // Same offset into the snapshot as into the local image mapping.
+      ptrdiff_t offset = entry_bytes - begin_image_ptr;
+      uint8_t* remote_bytes = &(*contents)[offset];
+      RegionSpecializedBase<T>::DiffEntryContents(entry, remote_bytes);
+    }
+  }
+
+  // Classify one entry as clean, image-dirty, zygote-dirty, and/or
+  // false-dirty, recording it in the appropriate sets/statistics.
+  void ComputeEntryDirty(T* entry,
+                         const uint8_t* begin_image_ptr,
+                         const std::set<size_t>& dirty_pages)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Set up pointers in the remote and the zygote for comparison.
+    uint8_t* current = reinterpret_cast<uint8_t*>(entry);
+    ptrdiff_t offset = current - begin_image_ptr;
+    T* entry_remote =
+        reinterpret_cast<T*>(const_cast<uint8_t*>(&(*RegionCommon<T>::remote_contents_)[offset]));
+    // Zygote contents may be absent (only one pid specified).
+    const uint8_t* current_zygote =
+        RegionCommon<T>::zygote_contents_->empty() ? nullptr :
+                                                     &(*RegionCommon<T>::zygote_contents_)[offset];
+    T* entry_zygote = reinterpret_cast<T*>(const_cast<uint8_t*>(current_zygote));
+    // Visit and classify entries at the current location.
+    RegionSpecializedBase<T>::VisitEntry(entry);
+    bool different_image_entry = EntriesDiffer(entry, entry_remote);
+    if (different_image_entry) {
+      bool different_zygote_entry = false;
+      if (entry_zygote != nullptr) {
+        different_zygote_entry = EntriesDiffer(entry, entry_zygote);
+      }
+      // Both branches record a dirty entry; they differ only in which set the
+      // entry is attributed to (zygote-dirty vs image-dirty).
+      if (different_zygote_entry) {
+        // Different from zygote.
+        RegionCommon<T>::AddZygoteDirtyEntry(entry);
+        RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
+      } else {
+        // Just different from image.
+        RegionCommon<T>::AddImageDirtyEntry(entry);
+        RegionSpecializedBase<T>::AddDirtyEntry(entry, entry_remote);
+      }
+    } else {
+      RegionSpecializedBase<T>::AddCleanEntry(entry);
+    }
+    if (!different_image_entry && RegionCommon<T>::IsEntryOnDirtyPage(entry, dirty_pages)) {
+      // This entry was either never mutated or got mutated back to the same value.
+      // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
+      RegionSpecializedBase<T>::AddFalseDirtyEntry(entry);
+    }
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(RegionData);
+};
+
+}  // namespace
+
+
 class ImgDiagDumper {
  public:
   explicit ImgDiagDumper(std::ostream* os,
@@ -123,8 +920,6 @@
     CHECK(boot_map_.end >= boot_map_.start);
     boot_map_size_ = boot_map_.end - boot_map_.start;
 
-    pointer_size_ = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
-
     // Open /proc/<image_diff_pid_>/mem and read as remote_contents_.
     std::string image_file_name =
         StringPrintf("/proc/%ld/mem", static_cast<long>(image_diff_pid_));  // NOLINT [runtime/int]
@@ -188,7 +983,7 @@
       return false;
     }
 
-    // Commit the mappings, etc., to the object state.
+    // Commit the mappings, etc.
     proc_maps_ = std::move(tmp_proc_maps);
     remote_contents_ = std::move(tmp_remote_contents);
     zygote_contents_ = std::move(tmp_zygote_contents);
@@ -228,14 +1023,7 @@
     return DumpImageDiffMap();
   }
 
-  bool ComputeDirtyBytes(const uint8_t* image_begin,
-                         size_t* dirty_pages /*out*/,
-                         size_t* different_pages /*out*/,
-                         size_t* different_bytes /*out*/,
-                         size_t* different_int32s /*out*/,
-                         size_t* private_pages /*out*/,
-                         size_t* private_dirty_pages /*out*/,
-                         std::set<size_t>* dirty_page_set_local) {
+  bool ComputeDirtyBytes(const uint8_t* image_begin, MappingData* mapping_data /*out*/) {
     std::ostream& os = *os_;
 
     size_t virtual_page_idx = 0;   // Virtual page number (for an absolute memory address)
@@ -254,7 +1042,7 @@
       uint8_t* remote_ptr = &remote_contents_[offset];
 
       if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {
-        ++*different_pages;
+        mapping_data->different_pages++;
 
         // Count the number of 32-bit integers that are different.
         for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {
@@ -262,7 +1050,7 @@
           const uint32_t* local_ptr_int32 = reinterpret_cast<const uint32_t*>(local_ptr);
 
           if (remote_ptr_int32[i] != local_ptr_int32[i]) {
-            ++*different_int32s;
+            mapping_data->different_int32s++;
           }
         }
       }
@@ -286,7 +1074,7 @@
       page_idx = (offset + page_off_begin) / kPageSize;
       if (*local_ptr != *remote_ptr) {
         // Track number of bytes that are different
-        ++*different_bytes;
+        mapping_data->different_bytes++;
       }
 
       // Independently count the # of dirty pages on the remote side
@@ -307,294 +1095,38 @@
           os << error_msg;
           return false;
         } else if (dirtiness > 0) {
-          ++*dirty_pages;
-          dirty_page_set_local->insert(dirty_page_set_local->end(), virtual_page_idx);
+          mapping_data->dirty_pages++;
+          mapping_data->dirty_page_set.insert(mapping_data->dirty_page_set.end(), virtual_page_idx);
         }
 
         bool is_dirty = dirtiness > 0;
         bool is_private = page_count == 1;
 
         if (page_count == 1) {
-          ++*private_pages;
+          mapping_data->private_pages++;
         }
 
         if (is_dirty && is_private) {
-          ++*private_dirty_pages;
+          mapping_data->private_dirty_pages++;
         }
       }
     }
+    mapping_data->false_dirty_pages = mapping_data->dirty_pages - mapping_data->different_pages;
+    // Print low-level (bytes, int32s, pages) statistics.
+    os << mapping_data->different_bytes << " differing bytes,\n  "
+       << mapping_data->different_int32s << " differing int32s,\n  "
+       << mapping_data->different_pages << " differing pages,\n  "
+       << mapping_data->dirty_pages << " pages are dirty;\n  "
+       << mapping_data->false_dirty_pages << " pages are false dirty;\n  "
+       << mapping_data->private_pages << " pages are private;\n  "
+       << mapping_data->private_dirty_pages << " pages are Private_Dirty\n  ";
+
     return true;
   }
 
-  bool ObjectIsOnDirtyPage(const uint8_t* item,
-                           size_t size,
-                           const std::set<size_t>& dirty_page_set_local) {
-    size_t page_off = 0;
-    size_t current_page_idx;
-    uintptr_t object_address = reinterpret_cast<uintptr_t>(item);
-    // Iterate every page this object belongs to
-    do {
-      current_page_idx = object_address / kPageSize + page_off;
-
-      if (dirty_page_set_local.find(current_page_idx) != dirty_page_set_local.end()) {
-        // This object is on a dirty page
-        return true;
-      }
-
-      page_off++;
-    } while ((current_page_idx * kPageSize) < RoundUp(object_address + size, kObjectAlignment));
-
-    return false;
-  }
-
-  static std::string PrettyFieldValue(ArtField* field, mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    std::ostringstream oss;
-    switch (field->GetTypeAsPrimitiveType()) {
-      case Primitive::kPrimNot: {
-        oss << obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
-            field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimBoolean: {
-        oss << static_cast<bool>(obj->GetFieldBoolean<kVerifyNone>(field->GetOffset()));
-        break;
-      }
-      case Primitive::kPrimByte: {
-        oss << static_cast<int32_t>(obj->GetFieldByte<kVerifyNone>(field->GetOffset()));
-        break;
-      }
-      case Primitive::kPrimChar: {
-        oss << obj->GetFieldChar<kVerifyNone>(field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimShort: {
-        oss << obj->GetFieldShort<kVerifyNone>(field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimInt: {
-        oss << obj->GetField32<kVerifyNone>(field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimLong: {
-        oss << obj->GetField64<kVerifyNone>(field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimFloat: {
-        oss << obj->GetField32<kVerifyNone>(field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimDouble: {
-        oss << obj->GetField64<kVerifyNone>(field->GetOffset());
-        break;
-      }
-      case Primitive::kPrimVoid: {
-        oss << "void";
-        break;
-      }
-    }
-    return oss.str();
-  }
-
-  // Aggregate and detail class data from an image diff.
-  struct ClassData {
-    size_t dirty_object_count = 0;
-
-    // Track only the byte-per-byte dirtiness (in bytes)
-    size_t dirty_object_byte_count = 0;
-
-    // Track the object-by-object dirtiness (in bytes)
-    size_t dirty_object_size_in_bytes = 0;
-
-    size_t clean_object_count = 0;
-
-    std::string descriptor;
-
-    size_t false_dirty_byte_count = 0;
-    size_t false_dirty_object_count = 0;
-    std::vector<const uint8_t*> false_dirty_objects;
-
-    // Remote pointers to dirty objects
-    std::vector<const uint8_t*> dirty_objects;
-  };
-
-  void DiffObjectContents(mirror::Object* obj,
-                          uint8_t* remote_bytes,
-                          std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_) {
-    const char* tabs = "    ";
-    // Attempt to find fields for all dirty bytes.
-    mirror::Class* klass = obj->GetClass();
-    if (obj->IsClass()) {
-      os << tabs << "Class " << mirror::Class::PrettyClass(obj->AsClass()) << " " << obj << "\n";
-    } else {
-      os << tabs << "Instance of " << mirror::Class::PrettyClass(klass) << " " << obj << "\n";
-    }
-
-    std::unordered_set<ArtField*> dirty_instance_fields;
-    std::unordered_set<ArtField*> dirty_static_fields;
-    const uint8_t* obj_bytes = reinterpret_cast<const uint8_t*>(obj);
-    mirror::Object* remote_obj = reinterpret_cast<mirror::Object*>(remote_bytes);
-    for (size_t i = 0, count = obj->SizeOf(); i < count; ++i) {
-      if (obj_bytes[i] != remote_bytes[i]) {
-        ArtField* field = ArtField::FindInstanceFieldWithOffset</*exact*/false>(klass, i);
-        if (field != nullptr) {
-          dirty_instance_fields.insert(field);
-        } else if (obj->IsClass()) {
-          field = ArtField::FindStaticFieldWithOffset</*exact*/false>(obj->AsClass(), i);
-          if (field != nullptr) {
-            dirty_static_fields.insert(field);
-          }
-        }
-        if (field == nullptr) {
-          if (klass->IsArrayClass()) {
-            mirror::Class* component_type = klass->GetComponentType();
-            Primitive::Type primitive_type = component_type->GetPrimitiveType();
-            size_t component_size = Primitive::ComponentSize(primitive_type);
-            size_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value();
-            if (i >= data_offset) {
-              os << tabs << "Dirty array element " << (i - data_offset) / component_size << "\n";
-              // Skip to next element to prevent spam.
-              i += component_size - 1;
-              continue;
-            }
-          }
-          os << tabs << "No field for byte offset " << i << "\n";
-        }
-      }
-    }
-    // Dump different fields. TODO: Dump field contents.
-    if (!dirty_instance_fields.empty()) {
-      os << tabs << "Dirty instance fields " << dirty_instance_fields.size() << "\n";
-      for (ArtField* field : dirty_instance_fields) {
-        os << tabs << ArtField::PrettyField(field)
-           << " original=" << PrettyFieldValue(field, obj)
-           << " remote=" << PrettyFieldValue(field, remote_obj) << "\n";
-      }
-    }
-    if (!dirty_static_fields.empty()) {
-      os << tabs << "Dirty static fields " << dirty_static_fields.size() << "\n";
-      for (ArtField* field : dirty_static_fields) {
-        os << tabs << ArtField::PrettyField(field)
-           << " original=" << PrettyFieldValue(field, obj)
-           << " remote=" << PrettyFieldValue(field, remote_obj) << "\n";
-      }
-    }
-    os << "\n";
-  }
-
-  struct ObjectRegionData {
-    // Count of objects that are different.
-    size_t different_objects = 0;
-
-    // Local objects that are dirty (differ in at least one byte).
-    size_t dirty_object_bytes = 0;
-    std::vector<const uint8_t*>* dirty_objects;
-
-    // Local objects that are clean, but located on dirty pages.
-    size_t false_dirty_object_bytes = 0;
-    std::vector<const uint8_t*> false_dirty_objects;
-
-    // Image dirty objects
-    // If zygote_pid_only_ == true, these are shared dirty objects in the zygote.
-    // If zygote_pid_only_ == false, these are private dirty objects in the application.
-    std::set<const uint8_t*> image_dirty_objects;
-
-    // Zygote dirty objects (probably private dirty).
-    // We only add objects here if they differed in both the image and the zygote, so
-    // they are probably private dirty.
-    std::set<const uint8_t*> zygote_dirty_objects;
-
-    std::map<off_t /* field offset */, size_t /* count */>* field_dirty_count;
-  };
-
-  void ComputeObjectDirty(const uint8_t* current,
-                          const uint8_t* current_remote,
-                          const uint8_t* current_zygote,
-                          ClassData* obj_class_data,
-                          size_t obj_size,
-                          const std::set<size_t>& dirty_page_set_local,
-                          ObjectRegionData* region_data /*out*/) {
-    bool different_image_object = memcmp(current, current_remote, obj_size) != 0;
-    if (different_image_object) {
-      bool different_zygote_object = false;
-      if (!zygote_contents_.empty()) {
-        different_zygote_object = memcmp(current, current_zygote, obj_size) != 0;
-      }
-      if (different_zygote_object) {
-        // Different from zygote.
-        region_data->zygote_dirty_objects.insert(current);
-      } else {
-        // Just different from image.
-        region_data->image_dirty_objects.insert(current);
-      }
-
-      ++region_data->different_objects;
-      region_data->dirty_object_bytes += obj_size;
-
-      ++obj_class_data->dirty_object_count;
-
-      // Go byte-by-byte and figure out what exactly got dirtied
-      size_t dirty_byte_count_per_object = 0;
-      for (size_t i = 0; i < obj_size; ++i) {
-        if (current[i] != current_remote[i]) {
-          dirty_byte_count_per_object++;
-        }
-      }
-      obj_class_data->dirty_object_byte_count += dirty_byte_count_per_object;
-      obj_class_data->dirty_object_size_in_bytes += obj_size;
-      obj_class_data->dirty_objects.push_back(current_remote);
-    } else {
-      ++obj_class_data->clean_object_count;
-    }
-
-    if (different_image_object) {
-      if (region_data->dirty_objects != nullptr) {
-        // print the fields that are dirty
-        for (size_t i = 0; i < obj_size; ++i) {
-          if (current[i] != current_remote[i]) {
-            size_t dirty_count = 0;
-            if (region_data->field_dirty_count->find(i) != region_data->field_dirty_count->end()) {
-              dirty_count = (*region_data->field_dirty_count)[i];
-            }
-            (*region_data->field_dirty_count)[i] = dirty_count + 1;
-          }
-        }
-
-        region_data->dirty_objects->push_back(current);
-      }
-      /*
-       * TODO: Resurrect this stuff in the client when we add ArtMethod iterator.
-      } else {
-        std::string descriptor = GetClassDescriptor(klass);
-        if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) {
-          // this is an ArtMethod
-          ArtMethod* art_method = reinterpret_cast<ArtMethod*>(remote_obj);
-
-          // print the fields that are dirty
-          for (size_t i = 0; i < obj_size; ++i) {
-            if (current[i] != current_remote[i]) {
-              art_method_field_dirty_count[i]++;
-            }
-          }
-
-          art_method_dirty_objects.push_back(art_method);
-        }
-      }
-      */
-    } else if (ObjectIsOnDirtyPage(current, obj_size, dirty_page_set_local)) {
-      // This object was either never mutated or got mutated back to the same value.
-      // TODO: Do I want to distinguish a "different" vs a "dirty" page here?
-      region_data->false_dirty_objects.push_back(current);
-      obj_class_data->false_dirty_objects.push_back(current);
-      region_data->false_dirty_object_bytes += obj_size;
-      obj_class_data->false_dirty_byte_count += obj_size;
-      obj_class_data->false_dirty_object_count += 1;
-    }
-  }
-
   // Look at /proc/$pid/mem and only diff the things from there
   bool DumpImageDiffMap()
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     std::ostream& os = *os_;
     std::string error_msg;
 
@@ -624,384 +1156,37 @@
       // If we wanted even more validation we could map the ImageHeader from the file
     }
 
-    size_t dirty_pages = 0;
-    size_t different_pages = 0;
-    size_t different_bytes = 0;
-    size_t different_int32s = 0;
-    size_t private_pages = 0;
-    size_t private_dirty_pages = 0;
+    MappingData mapping_data;
 
-    // Set of the local virtual page indices that are dirty
-    std::set<size_t> dirty_page_set_local;
-
-    if (!ComputeDirtyBytes(image_begin,
-                           &dirty_pages,
-                           &different_pages,
-                           &different_bytes,
-                           &different_int32s,
-                           &private_pages,
-                           &private_dirty_pages,
-                           &dirty_page_set_local)) {
+    os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
+       << reinterpret_cast<void*>(boot_map_.end) << ") had:\n  ";
+    if (!ComputeDirtyBytes(image_begin, &mapping_data)) {
       return false;
     }
 
-    std::map<mirror::Class*, ClassData> class_data;
+    RegionData<mirror::Object> object_region_data(os_,
+                                                  &remote_contents_,
+                                                  &zygote_contents_,
+                                                  boot_map_,
+                                                  image_header_);
 
-    // Walk each object in the remote image space and compare it against ours
-    std::map<off_t /* field offset */, int /* count */> art_method_field_dirty_count;
-    std::vector<ArtMethod*> art_method_dirty_objects;
-
-    std::map<off_t /* field offset */, size_t /* count */> class_field_dirty_count;
-    std::vector<const uint8_t*> class_dirty_objects;
-
-
-    // Look up remote classes by their descriptor
-    std::map<std::string, mirror::Class*> remote_class_map;
-    // Look up local classes by their descriptor
-    std::map<std::string, mirror::Class*> local_class_map;
-
-    const uint8_t* begin_image_ptr = image_begin_unaligned;
-    const uint8_t* end_image_ptr = image_mirror_end_unaligned;
-
-    ObjectRegionData region_data;
-
-    const uint8_t* current = begin_image_ptr + RoundUp(sizeof(ImageHeader), kObjectAlignment);
-    while (reinterpret_cast<uintptr_t>(current) < reinterpret_cast<uintptr_t>(end_image_ptr)) {
-      CHECK_ALIGNED(current, kObjectAlignment);
-      mirror::Object* obj = reinterpret_cast<mirror::Object*>(const_cast<uint8_t*>(current));
-
-      // Sanity check that we are reading a real object
-      CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
-      if (kUseBakerReadBarrier) {
-        obj->AssertReadBarrierState();
-      }
-
-      mirror::Class* klass = obj->GetClass();
-      size_t obj_size = obj->SizeOf();
-      ClassData& obj_class_data = class_data[klass];
-
-      // Check against the other object and see if they are different
-      ptrdiff_t offset = current - begin_image_ptr;
-      const uint8_t* current_remote = &remote_contents_[offset];
-      const uint8_t* current_zygote =
-          zygote_contents_.empty() ? nullptr : &zygote_contents_[offset];
-
-      if (klass->IsClassClass()) {
-        region_data.field_dirty_count = &class_field_dirty_count;
-        region_data.dirty_objects = &class_dirty_objects;
-      } else {
-        region_data.field_dirty_count = nullptr;
-        region_data.dirty_objects = nullptr;
-      }
-
-
-      ComputeObjectDirty(current,
-                         current_remote,
-                         current_zygote,
-                         &obj_class_data,
-                         obj_size,
-                         dirty_page_set_local,
-                         &region_data);
-
-      // Object specific stuff.
-      std::string descriptor = GetClassDescriptor(klass);
-      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
-        local_class_map[descriptor] = reinterpret_cast<mirror::Class*>(obj);
-        mirror::Object* remote_obj = reinterpret_cast<mirror::Object*>(
-            const_cast<uint8_t*>(current_remote));
-        remote_class_map[descriptor] = reinterpret_cast<mirror::Class*>(remote_obj);
-      }
-
-      // Unconditionally store the class descriptor in case we need it later
-      obj_class_data.descriptor = descriptor;
-
-      current += RoundUp(obj_size, kObjectAlignment);
-    }
-
-    // Looking at only dirty pages, figure out how many of those bytes belong to dirty objects.
-    float true_dirtied_percent = region_data.dirty_object_bytes * 1.0f / (dirty_pages * kPageSize);
-    size_t false_dirty_pages = dirty_pages - different_pages;
-
-    os << "Mapping at [" << reinterpret_cast<void*>(boot_map_.start) << ", "
-       << reinterpret_cast<void*>(boot_map_.end) << ") had: \n  "
-       << different_bytes << " differing bytes, \n  "
-       << different_int32s << " differing int32s, \n  "
-       << region_data.different_objects << " different objects, \n  "
-       << region_data.dirty_object_bytes << " different object [bytes], \n  "
-       << region_data.false_dirty_objects.size() << " false dirty objects,\n  "
-       << region_data.false_dirty_object_bytes << " false dirty object [bytes], \n  "
-       << true_dirtied_percent << " different objects-vs-total in a dirty page;\n  "
-       << different_pages << " different pages; \n  "
-       << dirty_pages << " pages are dirty; \n  "
-       << false_dirty_pages << " pages are false dirty; \n  "
-       << private_pages << " pages are private; \n  "
-       << private_dirty_pages << " pages are Private_Dirty\n  "
-       << "";
-
-    // vector of pairs (int count, Class*)
-    auto dirty_object_class_values = SortByValueDesc<mirror::Class*, int, ClassData>(
-        class_data, [](const ClassData& d) { return d.dirty_object_count; });
-    auto clean_object_class_values = SortByValueDesc<mirror::Class*, int, ClassData>(
-        class_data, [](const ClassData& d) { return d.clean_object_count; });
-
-    if (!region_data.zygote_dirty_objects.empty()) {
-      // We only reach this point if both pids were specified.  Furthermore,
-      // objects are only displayed here if they differed in both the image
-      // and the zygote, so they are probably private dirty.
-      CHECK(image_diff_pid_ > 0 && zygote_diff_pid_ > 0);
-      os << "\n" << "  Zygote dirty objects (probably shared dirty): "
-         << region_data.zygote_dirty_objects.size() << "\n";
-      for (const uint8_t* obj_bytes : region_data.zygote_dirty_objects) {
-        auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(obj_bytes));
-        ptrdiff_t offset = obj_bytes - begin_image_ptr;
-        uint8_t* remote_bytes = &zygote_contents_[offset];
-        DiffObjectContents(obj, remote_bytes, os);
-      }
-    }
-    os << "\n";
+    RemoteProcesses remotes;
     if (zygote_pid_only_) {
-      // image_diff_pid_ is the zygote process.
-      os << "  Zygote shared dirty objects: ";
+      remotes = RemoteProcesses::kZygoteOnly;
+    } else if (zygote_diff_pid_ > 0) {
+      remotes = RemoteProcesses::kImageAndZygote;
     } else {
-      // image_diff_pid_ is actually the image (application) process.
-      if (zygote_diff_pid_ > 0) {
-        os << "  Application dirty objects (private dirty): ";
-      } else {
-        os << "  Application dirty objects (unknown whether private or shared dirty): ";
-      }
-    }
-    os << region_data.image_dirty_objects.size() << "\n";
-    for (const uint8_t* obj_bytes : region_data.image_dirty_objects) {
-      auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(obj_bytes));
-      ptrdiff_t offset = obj_bytes - begin_image_ptr;
-      uint8_t* remote_bytes = &remote_contents_[offset];
-      DiffObjectContents(obj, remote_bytes, os);
+      remotes = RemoteProcesses::kImageOnly;
     }
 
-    os << "\n" << "  Dirty object count by class:\n";
-    for (const auto& vk_pair : dirty_object_class_values) {
-      int dirty_object_count = vk_pair.first;
-      mirror::Class* klass = vk_pair.second;
-      int object_sizes = class_data[klass].dirty_object_size_in_bytes;
-      float avg_dirty_bytes_per_class =
-          class_data[klass].dirty_object_byte_count * 1.0f / object_sizes;
-      float avg_object_size = object_sizes * 1.0f / dirty_object_count;
-      const std::string& descriptor = class_data[klass].descriptor;
-      os << "    " << mirror::Class::PrettyClass(klass) << " ("
-         << "objects: " << dirty_object_count << ", "
-         << "avg dirty bytes: " << avg_dirty_bytes_per_class << ", "
-         << "avg object size: " << avg_object_size << ", "
-         << "class descriptor: '" << descriptor << "'"
-         << ")\n";
-
-      constexpr size_t kMaxAddressPrint = 5;
-      if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) {
-        os << "      sample object addresses: ";
-        for (size_t i = 0; i < art_method_dirty_objects.size() && i < kMaxAddressPrint; ++i) {
-          auto art_method = art_method_dirty_objects[i];
-
-          os << reinterpret_cast<void*>(art_method) << ", ";
-        }
-        os << "\n";
-
-        os << "      dirty byte +offset:count list = ";
-        auto art_method_field_dirty_count_sorted =
-            SortByValueDesc<off_t, int, int>(art_method_field_dirty_count);
-        for (auto pair : art_method_field_dirty_count_sorted) {
-          off_t offset = pair.second;
-          int count = pair.first;
-
-          os << "+" << offset << ":" << count << ", ";
-        }
-
-        os << "\n";
-
-        os << "      field contents:\n";
-        const auto& dirty_objects_list = class_data[klass].dirty_objects;
-        for (const uint8_t* uobj : dirty_objects_list) {
-          auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(uobj));
-          // remote method
-          auto art_method = reinterpret_cast<ArtMethod*>(obj);
-
-          // remote class
-          mirror::Class* remote_declaring_class =
-            FixUpRemotePointer(art_method->GetDeclaringClass(), remote_contents_, boot_map_);
-
-          // local class
-          mirror::Class* declaring_class =
-            RemoteContentsPointerToLocal(remote_declaring_class, remote_contents_, image_header_);
-
-          os << "        " << reinterpret_cast<void*>(obj) << " ";
-          os << "  entryPointFromJni: "
-             << reinterpret_cast<const void*>(
-                    art_method->GetDataPtrSize(pointer_size_)) << ", ";
-          os << "  entryPointFromQuickCompiledCode: "
-             << reinterpret_cast<const void*>(
-                    art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_))
-             << ", ";
-          os << "  isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
-          os << "  class_status (local): " << declaring_class->GetStatus();
-          os << "  class_status (remote): " << remote_declaring_class->GetStatus();
-          os << "\n";
-        }
-      }
-      if (strcmp(descriptor.c_str(), "Ljava/lang/Class;") == 0) {
-        os << "       sample object addresses: ";
-        for (size_t i = 0; i < class_dirty_objects.size() && i < kMaxAddressPrint; ++i) {
-          auto class_ptr = class_dirty_objects[i];
-
-          os << reinterpret_cast<const void*>(class_ptr) << ", ";
-        }
-        os << "\n";
-
-        os << "       dirty byte +offset:count list = ";
-        auto class_field_dirty_count_sorted =
-            SortByValueDesc<off_t, int, size_t>(class_field_dirty_count);
-        for (auto pair : class_field_dirty_count_sorted) {
-          off_t offset = pair.second;
-          int count = pair.first;
-
-          os << "+" << offset << ":" << count << ", ";
-        }
-        os << "\n";
-
-        os << "      field contents:\n";
-        // TODO: templatize this to avoid the awful casts down to uint8_t* and back.
-        const auto& dirty_objects_list = class_data[klass].dirty_objects;
-        for (const uint8_t* uobj : dirty_objects_list) {
-          auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(uobj));
-          // remote class object
-          auto remote_klass = reinterpret_cast<mirror::Class*>(obj);
-
-          // local class object
-          auto local_klass = RemoteContentsPointerToLocal(remote_klass,
-                                                          remote_contents_,
-                                                          image_header_);
-
-          os << "        " << reinterpret_cast<const void*>(obj) << " ";
-          os << "  class_status (remote): " << remote_klass->GetStatus() << ", ";
-          os << "  class_status (local): " << local_klass->GetStatus();
-          os << "\n";
-        }
-      }
-    }
-
-    auto false_dirty_object_class_values = SortByValueDesc<mirror::Class*, int, ClassData>(
-        class_data, [](const ClassData& d) { return d.false_dirty_object_count; });
-
-    os << "\n" << "  False-dirty object count by class:\n";
-    for (const auto& vk_pair : false_dirty_object_class_values) {
-      int object_count = vk_pair.first;
-      mirror::Class* klass = vk_pair.second;
-      int object_sizes = class_data[klass].false_dirty_byte_count;
-      float avg_object_size = object_sizes * 1.0f / object_count;
-      const std::string& descriptor = class_data[klass].descriptor;
-      os << "    " << mirror::Class::PrettyClass(klass) << " ("
-         << "objects: " << object_count << ", "
-         << "avg object size: " << avg_object_size << ", "
-         << "total bytes: " << object_sizes << ", "
-         << "class descriptor: '" << descriptor << "'"
-         << ")\n";
-
-      if (strcmp(descriptor.c_str(), "Ljava/lang/reflect/ArtMethod;") == 0) {
-        // TODO: templatize this to avoid the awful casts down to uint8_t* and back.
-        auto& art_method_false_dirty_objects = class_data[klass].false_dirty_objects;
-
-        os << "      field contents:\n";
-        for (const uint8_t* uobj : art_method_false_dirty_objects) {
-          auto obj = const_cast<mirror::Object*>(reinterpret_cast<const mirror::Object*>(uobj));
-          // local method
-          auto art_method = reinterpret_cast<ArtMethod*>(obj);
-
-          // local class
-          mirror::Class* declaring_class = art_method->GetDeclaringClass();
-
-          os << "        " << reinterpret_cast<const void*>(obj) << " ";
-          os << "  entryPointFromJni: "
-             << reinterpret_cast<const void*>(
-                    art_method->GetDataPtrSize(pointer_size_)) << ", ";
-          os << "  entryPointFromQuickCompiledCode: "
-             << reinterpret_cast<const void*>(
-                    art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_))
-             << ", ";
-          os << "  isNative? " << (art_method->IsNative() ? "yes" : "no") << ", ";
-          os << "  class_status (local): " << declaring_class->GetStatus();
-          os << "\n";
-        }
-      }
-    }
-
-    os << "\n" << "  Clean object count by class:\n";
-    for (const auto& vk_pair : clean_object_class_values) {
-      os << "    " << mirror::Class::PrettyClass(vk_pair.second) << " (" << vk_pair.first << ")\n";
-    }
+    object_region_data.ProcessRegion(mapping_data,
+                                     remotes,
+                                     image_begin_unaligned,
+                                     image_mirror_end_unaligned);
 
     return true;
   }
 
-  // Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
-  // Returned pointer will point to inside of remote_contents.
-  template <typename T>
-  static T* FixUpRemotePointer(T* remote_ptr,
-                               std::vector<uint8_t>& remote_contents,
-                               const backtrace_map_t& boot_map) {
-    if (remote_ptr == nullptr) {
-      return nullptr;
-    }
-
-    uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);
-
-    CHECK_LE(boot_map.start, remote);
-    CHECK_GT(boot_map.end, remote);
-
-    off_t boot_offset = remote - boot_map.start;
-
-    return reinterpret_cast<T*>(&remote_contents[boot_offset]);
-  }
-
-  template <typename T>
-  static T* RemoteContentsPointerToLocal(T* remote_ptr,
-                                         std::vector<uint8_t>& remote_contents,
-                                         const ImageHeader& image_header) {
-    if (remote_ptr == nullptr) {
-      return nullptr;
-    }
-
-    uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
-    ptrdiff_t boot_offset = remote - &remote_contents[0];
-
-    const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;
-
-    return reinterpret_cast<T*>(const_cast<uint8_t*>(local_ptr));
-  }
-
-  static std::string GetClassDescriptor(mirror::Class* klass)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-    CHECK(klass != nullptr);
-
-    std::string descriptor;
-    const char* descriptor_str = klass->GetDescriptor(&descriptor);
-
-    return std::string(descriptor_str);
-  }
-
-  template <typename K, typename V, typename D>
-  static std::vector<std::pair<V, K>> SortByValueDesc(
-      const std::map<K, D> map,
-      std::function<V(const D&)> value_mapper = [](const D& d) { return static_cast<V>(d); }) {
-    // Store value->key so that we can use the default sort from pair which
-    // sorts by value first and then key
-    std::vector<std::pair<V, K>> value_key_vector;
-
-    for (const auto& kv_pair : map) {
-      value_key_vector.push_back(std::make_pair(value_mapper(kv_pair.second), kv_pair.first));
-    }
-
-    // Sort in reverse (descending order)
-    std::sort(value_key_vector.rbegin(), value_key_vector.rend());
-    return value_key_vector;
-  }
-
   static bool GetPageFrameNumber(File* page_map_file,
                                 size_t virtual_page_index,
                                 uint64_t* page_frame_number,
@@ -1142,8 +1327,6 @@
   pid_t zygote_diff_pid_;  // Dump image diff against zygote boot.art if pid is non-negative
   bool zygote_pid_only_;  // The user only specified a pid for the zygote.
 
-  // Pointer size constant for object fields, etc.
-  PointerSize pointer_size_;
   // BacktraceMap used for finding the memory mapping of the image file.
   std::unique_ptr<BacktraceMap> proc_maps_;
   // Boot image mapping.
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 066c66a..99168c9 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -34,14 +34,15 @@
 #include "art_method-inl.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
-#include "class_linker.h"
 #include "class_linker-inl.h"
+#include "class_linker.h"
 #include "debug/elf_debug_writer.h"
 #include "debug/method_debug_info.h"
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
 #include "disassembler.h"
 #include "elf_builder.h"
+#include "gc/accounting/space_bitmap-inl.h"
 #include "gc/space/image_space.h"
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
@@ -56,13 +57,13 @@
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "oat.h"
 #include "oat_file-inl.h"
 #include "oat_file_manager.h"
 #include "os.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "stack.h"
 #include "stack_map.h"
 #include "string_reference.h"
@@ -1930,9 +1931,12 @@
           }
         }
       }
+      auto dump_visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+        DumpObject(obj);
+      };
       ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
       // Dump the normal objects before ArtMethods.
-      image_space_.GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+      image_space_.GetLiveBitmap()->Walk(dump_visitor);
       indent_os << "\n";
       // TODO: Dump fields.
       // Dump methods after.
@@ -1941,7 +1945,7 @@
                                           image_space_.Begin(),
                                           image_header_.GetPointerSize());
       // Dump the large objects separately.
-      heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+      heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(dump_visitor);
       indent_os << "\n";
     }
     os << "STATS:\n" << std::flush;
@@ -2156,20 +2160,18 @@
     return oat_code_begin + GetQuickOatCodeSize(m);
   }
 
-  static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+  void DumpObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
-    DCHECK(arg != nullptr);
-    ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
-    if (!state->InDumpSpace(obj)) {
+    if (!InDumpSpace(obj)) {
       return;
     }
 
     size_t object_bytes = obj->SizeOf();
     size_t alignment_bytes = RoundUp(object_bytes, kObjectAlignment) - object_bytes;
-    state->stats_.object_bytes += object_bytes;
-    state->stats_.alignment_bytes += alignment_bytes;
+    stats_.object_bytes += object_bytes;
+    stats_.alignment_bytes += alignment_bytes;
 
-    std::ostream& os = state->vios_.Stream();
+    std::ostream& os = vios_.Stream();
 
     mirror::Class* obj_class = obj->GetClass();
     if (obj_class->IsArrayClass()) {
@@ -2186,9 +2188,9 @@
     } else {
       os << StringPrintf("%p: %s\n", obj, obj_class->PrettyDescriptor().c_str());
     }
-    ScopedIndentation indent1(&state->vios_);
+    ScopedIndentation indent1(&vios_);
     DumpFields(os, obj, obj_class);
-    const PointerSize image_pointer_size = state->image_header_.GetPointerSize();
+    const PointerSize image_pointer_size = image_header_.GetPointerSize();
     if (obj->IsObjectArray()) {
       auto* obj_array = obj->AsObjectArray<mirror::Object>();
       for (int32_t i = 0, length = obj_array->GetLength(); i < length; i++) {
@@ -2215,32 +2217,31 @@
       mirror::Class* klass = obj->AsClass();
       if (klass->NumStaticFields() != 0) {
         os << "STATICS:\n";
-        ScopedIndentation indent2(&state->vios_);
+        ScopedIndentation indent2(&vios_);
         for (ArtField& field : klass->GetSFields()) {
           PrintField(os, &field, field.GetDeclaringClass());
         }
       }
     } else {
-      auto it = state->dex_caches_.find(obj);
-      if (it != state->dex_caches_.end()) {
+      auto it = dex_caches_.find(obj);
+      if (it != dex_caches_.end()) {
         auto* dex_cache = down_cast<mirror::DexCache*>(obj);
-        const auto& field_section = state->image_header_.GetImageSection(
+        const auto& field_section = image_header_.GetImageSection(
             ImageHeader::kSectionArtFields);
-        const auto& method_section = state->image_header_.GetMethodsSection();
+        const auto& method_section = image_header_.GetMethodsSection();
         size_t num_methods = dex_cache->NumResolvedMethods();
         if (num_methods != 0u) {
           os << "Methods (size=" << num_methods << "):\n";
-          ScopedIndentation indent2(&state->vios_);
-          auto* resolved_methods = dex_cache->GetResolvedMethods();
+          ScopedIndentation indent2(&vios_);
+          mirror::MethodDexCacheType* resolved_methods = dex_cache->GetResolvedMethods();
           for (size_t i = 0, length = dex_cache->NumResolvedMethods(); i < length; ++i) {
-            auto* elem = mirror::DexCache::GetElementPtrSize(resolved_methods,
-                                                             i,
-                                                             image_pointer_size);
+            ArtMethod* elem = mirror::DexCache::GetNativePairPtrSize(
+                resolved_methods, i, image_pointer_size).object;
             size_t run = 0;
             for (size_t j = i + 1;
-                 j != length && elem == mirror::DexCache::GetElementPtrSize(resolved_methods,
-                                                                            j,
-                                                                            image_pointer_size);
+                 j != length &&
+                 elem == mirror::DexCache::GetNativePairPtrSize(
+                     resolved_methods, j, image_pointer_size).object;
                  ++j) {
               ++run;
             }
@@ -2254,7 +2255,7 @@
             if (elem == nullptr) {
               msg = "null";
             } else if (method_section.Contains(
-                reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin())) {
+                reinterpret_cast<uint8_t*>(elem) - image_space_.Begin())) {
               msg = reinterpret_cast<ArtMethod*>(elem)->PrettyMethod();
             } else {
               msg = "<not in method section>";
@@ -2265,10 +2266,10 @@
         size_t num_fields = dex_cache->NumResolvedFields();
         if (num_fields != 0u) {
           os << "Fields (size=" << num_fields << "):\n";
-          ScopedIndentation indent2(&state->vios_);
+          ScopedIndentation indent2(&vios_);
           auto* resolved_fields = dex_cache->GetResolvedFields();
           for (size_t i = 0, length = dex_cache->NumResolvedFields(); i < length; ++i) {
-            auto* elem = mirror::DexCache::GetNativePairPtrSize(
+            ArtField* elem = mirror::DexCache::GetNativePairPtrSize(
                 resolved_fields, i, image_pointer_size).object;
             size_t run = 0;
             for (size_t j = i + 1;
@@ -2288,7 +2289,7 @@
             if (elem == nullptr) {
               msg = "null";
             } else if (field_section.Contains(
-                reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin())) {
+                reinterpret_cast<uint8_t*>(elem) - image_space_.Begin())) {
               msg = reinterpret_cast<ArtField*>(elem)->PrettyField();
             } else {
               msg = "<not in field section>";
@@ -2299,7 +2300,7 @@
         size_t num_types = dex_cache->NumResolvedTypes();
         if (num_types != 0u) {
           os << "Types (size=" << num_types << "):\n";
-          ScopedIndentation indent2(&state->vios_);
+          ScopedIndentation indent2(&vios_);
           auto* resolved_types = dex_cache->GetResolvedTypes();
           for (size_t i = 0; i < num_types; ++i) {
             auto pair = resolved_types[i].load(std::memory_order_relaxed);
@@ -2331,7 +2332,7 @@
       }
     }
     std::string temp;
-    state->stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
+    stats_.Update(obj_class->GetDescriptor(&temp), object_bytes);
   }
 
   void DumpMethod(ArtMethod* method, std::ostream& indent_os)
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 149960e..1ee2fbd 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -535,17 +535,18 @@
       orig_dex_cache->FixupResolvedTypes(RelocatedCopyOf(orig_types),
                                          RelocatedPointerVisitor(this));
     }
-    ArtMethod** orig_methods = orig_dex_cache->GetResolvedMethods();
-    ArtMethod** relocated_methods = RelocatedAddressOfPointer(orig_methods);
+    mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods();
+    mirror::MethodDexCacheType* relocated_methods = RelocatedAddressOfPointer(orig_methods);
     copy_dex_cache->SetField64<false>(
         mirror::DexCache::ResolvedMethodsOffset(),
         static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_methods)));
     if (orig_methods != nullptr) {
-      ArtMethod** copy_methods = RelocatedCopyOf(orig_methods);
+      mirror::MethodDexCacheType* copy_methods = RelocatedCopyOf(orig_methods);
       for (size_t j = 0, num = orig_dex_cache->NumResolvedMethods(); j != num; ++j) {
-        ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, j, pointer_size);
-        ArtMethod* copy = RelocatedAddressOfPointer(orig);
-        mirror::DexCache::SetElementPtrSize(copy_methods, j, copy, pointer_size);
+        mirror::MethodDexCachePair orig =
+            mirror::DexCache::GetNativePairPtrSize(orig_methods, j, pointer_size);
+        mirror::MethodDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
+        mirror::DexCache::SetNativePairPtrSize(copy_methods, j, copy, pointer_size);
       }
     }
     mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
@@ -614,7 +615,10 @@
     TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
     // Walk the bitmap.
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    bitmap_->Walk(PatchOat::BitmapCallback, this);
+    auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      VisitObject(obj);
+    };
+    bitmap_->Walk(visitor);
   }
   return true;
 }
@@ -638,7 +642,7 @@
   copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
 }
 
-// Called by BitmapCallback
+// Called by PatchImage.
 void PatchOat::VisitObject(mirror::Object* object) {
   mirror::Object* copy = RelocatedCopyOf(object);
   CHECK(copy != nullptr);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index e15a6bc..182ce94 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -79,11 +79,6 @@
   static bool ReplaceOatFileWithSymlink(const std::string& input_oat_filename,
                                         const std::string& output_oat_filename);
 
-  static void BitmapCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
-  }
-
   void VisitObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupMethod(ArtMethod* object, ArtMethod* copy)
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 838ae40..dd98f51 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -129,6 +129,10 @@
 #undef FRAME_SIZE_SAVE_REFS_AND_ARGS
 static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
 #undef FRAME_SIZE_SAVE_EVERYTHING
+#undef BAKER_MARK_INTROSPECTION_REGISTER_COUNT
+#undef BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE
+#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET
+#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE
 }  // namespace mips
 
 namespace mips64 {
@@ -141,6 +145,10 @@
 #undef FRAME_SIZE_SAVE_REFS_AND_ARGS
 static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
 #undef FRAME_SIZE_SAVE_EVERYTHING
+#undef BAKER_MARK_INTROSPECTION_REGISTER_COUNT
+#undef BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE
+#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET
+#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE
 }  // namespace mips64
 
 namespace x86 {
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 8384460..0942356 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -279,10 +279,9 @@
     return false;
   }
   const ArmInstructionSetFeatures* other_as_arm = other->AsArmInstructionSetFeatures();
-
-  return (has_div_ || (has_div_ == other_as_arm->has_div_))
-      && (has_atomic_ldrd_strd_ || (has_atomic_ldrd_strd_ == other_as_arm->has_atomic_ldrd_strd_))
-      && (has_armv8a_ || (has_armv8a_ == other_as_arm->has_armv8a_));
+  return (has_div_ || !other_as_arm->has_div_)
+      && (has_atomic_ldrd_strd_ || !other_as_arm->has_atomic_ldrd_strd_)
+      && (has_armv8a_ || !other_as_arm->has_armv8a_);
 }
 
 uint32_t ArmInstructionSetFeatures::AsBitmap() const {
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 0de5905..375768e 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1585,31 +1585,98 @@
      *
      * Note that this stub writes to r0, r4, and r12.
      */
+    .extern artLookupResolvedMethod
 ENTRY art_quick_imt_conflict_trampoline
-    ldr r4, [sp, #0]  // Load referrer
-    ldr r4, [r4, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_32]   // Load dex cache methods array
-    ldr r12, [r4, r12, lsl #POINTER_SIZE_SHIFT]  // Load interface method
-    ldr r0, [r0, #ART_METHOD_JNI_OFFSET_32]  // Load ImtConflictTable
-    ldr r4, [r0]  // Load first entry in ImtConflictTable.
+    push    {r1-r2}
+    .cfi_adjust_cfa_offset (2 * 4)
+    .cfi_rel_offset r1, 0
+    .cfi_rel_offset r2, 4
+    ldr     r4, [sp, #(2 * 4)]  // Load referrer.
+    ubfx    r1, r12, #0, #METHOD_DEX_CACHE_HASH_BITS  // Calculate DexCache method slot index.
+    ldr     r4, [r4, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_32]   // Load dex cache methods array
+    add     r4, r4, r1, lsl #(POINTER_SIZE_SHIFT + 1)  // Load DexCache method slot address.
+    ldr     r2, [r0, #ART_METHOD_JNI_OFFSET_32]  // Load ImtConflictTable
+
+// FIXME: Configure the build to use the faster code when appropriate.
+//        Currently we fall back to the slower version.
+#if HAS_ATOMIC_LDRD
+    ldrd    r0, r1, [r4]
+#else
+    push    {r3}
+    .cfi_adjust_cfa_offset 4
+    .cfi_rel_offset r3, 0
+.Limt_conflict_trampoline_retry_load:
+    ldrexd  r0, r1, [r4]
+    strexd  r3, r0, r1, [r4]
+    cmp     r3, #0
+    bne     .Limt_conflict_trampoline_retry_load
+    pop     {r3}
+    .cfi_adjust_cfa_offset -4
+    .cfi_restore r3
+#endif
+
+    ldr     r4, [r2]  // Load first entry in ImtConflictTable.
+    cmp     r1, r12   // Compare method index to see if we had a DexCache method hit.
+    bne     .Limt_conflict_trampoline_dex_cache_miss
 .Limt_table_iterate:
-    cmp r4, r12
+    cmp     r4, r0
     // Branch if found. Benchmarks have shown doing a branch here is better.
-    beq .Limt_table_found
+    beq     .Limt_table_found
     // If the entry is null, the interface method is not in the ImtConflictTable.
-    cbz r4, .Lconflict_trampoline
+    cbz     r4, .Lconflict_trampoline
     // Iterate over the entries of the ImtConflictTable.
-    ldr r4, [r0, #(2 * __SIZEOF_POINTER__)]!
+    ldr     r4, [r2, #(2 * __SIZEOF_POINTER__)]!
     b .Limt_table_iterate
 .Limt_table_found:
     // We successfully hit an entry in the table. Load the target method
     // and jump to it.
-    ldr r0, [r0, #__SIZEOF_POINTER__]
-    ldr pc, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]
+    ldr     r0, [r2, #__SIZEOF_POINTER__]
+    .cfi_remember_state
+    pop     {r1-r2}
+    .cfi_adjust_cfa_offset -(2 * 4)
+    .cfi_restore r1
+    .cfi_restore r2
+    ldr     pc, [r0, #ART_METHOD_QUICK_CODE_OFFSET_32]
+    .cfi_restore_state
 .Lconflict_trampoline:
     // Call the runtime stub to populate the ImtConflictTable and jump to the
     // resolved method.
-    mov r0, r12  // Load interface method
+    .cfi_remember_state
+    pop     {r1-r2}
+    .cfi_adjust_cfa_offset -(2 * 4)
+    .cfi_restore r1
+    .cfi_restore r2
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
+    .cfi_restore_state
+.Limt_conflict_trampoline_dex_cache_miss:
+    // We're not creating a proper runtime method frame here,
+    // artLookupResolvedMethod() is not allowed to walk the stack.
+
+    // Save ImtConflictTable (r2), remaining arg (r3), first entry (r4), return address (lr).
+    push    {r2-r4, lr}
+    .cfi_adjust_cfa_offset (4 * 4)
+    .cfi_rel_offset r3, 4
+    .cfi_rel_offset lr, 12
+    // Save FPR args.
+    vpush   {d0-d7}
+    .cfi_adjust_cfa_offset (8 * 8)
+
+    mov     r0, ip                      // Pass method index.
+    ldr     r1, [sp, #(8 * 8 + 6 * 4)]  // Pass referrer.
+    bl      artLookupResolvedMethod     // (uint32_t method_index, ArtMethod* referrer)
+
+    // Restore FPR args.
+    vpop    {d0-d7}
+    .cfi_adjust_cfa_offset -(8 * 8)
+    // Restore ImtConflictTable (r2), remaining arg (r3), first entry (r4), return address (lr).
+    pop     {r2-r4, lr}
+    .cfi_adjust_cfa_offset -(4 * 4)
+    .cfi_restore r3
+    .cfi_restore lr
+
+    cmp     r0, #0                  // If the method wasn't resolved,
+    beq     .Lconflict_trampoline   //   skip the lookup and go to artInvokeInterfaceTrampoline().
+    b       .Limt_table_iterate
 END art_quick_imt_conflict_trampoline
 
     .extern artQuickResolutionTrampoline
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index e097a33..d15f5b8 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2052,17 +2052,28 @@
      * x0 is the conflict ArtMethod.
      * xIP1 is a hidden argument that holds the target interface method's dex method index.
      *
-     * Note that this stub writes to xIP0, xIP1, and x0.
+     * Note that this stub writes to xIP0, xIP1, x13-x15, and x0.
      */
-    .extern artInvokeInterfaceTrampoline
+    .extern artLookupResolvedMethod
 ENTRY art_quick_imt_conflict_trampoline
     ldr xIP0, [sp, #0]  // Load referrer
+    ubfx x15, xIP1, #0, #METHOD_DEX_CACHE_HASH_BITS  // Calculate DexCache method slot index.
     ldr xIP0, [xIP0, #ART_METHOD_DEX_CACHE_METHODS_OFFSET_64]   // Load dex cache methods array
-    ldr xIP0, [xIP0, xIP1, lsl #POINTER_SIZE_SHIFT]  // Load interface method
+    add xIP0, xIP0, x15, lsl #(POINTER_SIZE_SHIFT + 1)  // Load DexCache method slot address.
+
+    // Relaxed atomic load x14:x15 from the dex cache slot.
+.Limt_conflict_trampoline_retry_load:
+    ldxp x14, x15, [xIP0]
+    stxp w13, x14, x15, [xIP0]
+    cbnz w13, .Limt_conflict_trampoline_retry_load
+
+    cmp x15, xIP1       // Compare method index to see if we had a DexCache method hit.
+    bne .Limt_conflict_trampoline_dex_cache_miss
+.Limt_conflict_trampoline_have_interface_method:
     ldr xIP1, [x0, #ART_METHOD_JNI_OFFSET_64]  // Load ImtConflictTable
     ldr x0, [xIP1]  // Load first entry in ImtConflictTable.
 .Limt_table_iterate:
-    cmp x0, xIP0
+    cmp x0, x14
     // Branch if found. Benchmarks have shown doing a branch here is better.
     beq .Limt_table_found
     // If the entry is null, the interface method is not in the ImtConflictTable.
@@ -2079,8 +2090,46 @@
 .Lconflict_trampoline:
     // Call the runtime stub to populate the ImtConflictTable and jump to the
     // resolved method.
-    mov x0, xIP0  // Load interface method
+    mov x0, x14  // Load interface method
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
+.Limt_conflict_trampoline_dex_cache_miss:
+    // We're not creating a proper runtime method frame here,
+    // artLookupResolvedMethod() is not allowed to walk the stack.
+
+    // Save GPR args and return address, allocate space for FPR args, align stack.
+    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, (8 * 8 + 8 * 8 + 8 + 8)
+    SAVE_TWO_REGS x2, x3, 16
+    SAVE_TWO_REGS x4, x5, 32
+    SAVE_TWO_REGS x6, x7, 48
+    SAVE_REG      xLR, (8 * 8 + 8 * 8 + 8)
+
+    // Save FPR args.
+    stp d0, d1, [sp, #64]
+    stp d2, d3, [sp, #80]
+    stp d4, d5, [sp, #96]
+    stp d6, d7, [sp, #112]
+
+    mov x0, xIP1                            // Pass method index.
+    ldr x1, [sp, #(8 * 8 + 8 * 8 + 8 + 8)]  // Pass referrer.
+    bl artLookupResolvedMethod              // (uint32_t method_index, ArtMethod* referrer)
+    mov x14, x0   // Move the interface method to x14 where the loop above expects it.
+
+    // Restore FPR args.
+    ldp d0, d1, [sp, #64]
+    ldp d2, d3, [sp, #80]
+    ldp d4, d5, [sp, #96]
+    ldp d6, d7, [sp, #112]
+
+    // Restore GPR args and return address.
+    RESTORE_REG      xLR, (8 * 8 + 8 * 8 + 8)
+    RESTORE_TWO_REGS x2, x3, 16
+    RESTORE_TWO_REGS x4, x5, 32
+    RESTORE_TWO_REGS x6, x7, 48
+    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, (8 * 8 + 8 * 8 + 8 + 8)
+
+    // If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
+    cbz x14, .Lconflict_trampoline
+    b .Limt_conflict_trampoline_have_interface_method
 END art_quick_imt_conflict_trampoline
 
 ENTRY art_quick_resolution_trampoline
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 948b06c..50095ae 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -127,6 +127,13 @@
 #endif  // USE_HEAP_POISONING
 .endm
 
+// Byte size of the instructions (un)poisoning heap references.
+#ifdef USE_HEAP_POISONING
+#define HEAP_POISON_INSTR_SIZE 4
+#else
+#define HEAP_POISON_INSTR_SIZE 0
+#endif  // USE_HEAP_POISONING
+
 // Based on contents of creg select the minimum integer
 // At the end of the macro the original value of creg is lost
 .macro MINint dreg,rreg,sreg,creg
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 7437774..9d8572f 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -24,4 +24,24 @@
 #define FRAME_SIZE_SAVE_REFS_AND_ARGS 112
 #define FRAME_SIZE_SAVE_EVERYTHING 256
 
+// &art_quick_read_barrier_mark_introspection is the first of many entry points:
+//   21 entry points for long field offsets, large array indices and variable array indices
+//     (see macro BRB_FIELD_LONG_OFFSET_ENTRY)
+//   21 entry points for short field offsets and small array indices
+//     (see macro BRB_FIELD_SHORT_OFFSET_ENTRY)
+//   21 entry points for GC roots
+//     (see macro BRB_GC_ROOT_ENTRY)
+
+// There are as many entry points of each kind as there are registers that
+// can hold a reference: V0-V1, A0-A3, T0-T7, S2-S8.
+#define BAKER_MARK_INTROSPECTION_REGISTER_COUNT 21
+
+#define BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE (8 * 4)  // 8 instructions in
+                                                                 // BRB_FIELD_*_OFFSET_ENTRY.
+
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET \
+    (2 * BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE)
+
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE (4 * 4)  // 4 instructions in BRB_GC_ROOT_ENTRY.
+
 #endif  // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 9978da5..3010246 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -16,6 +16,7 @@
 
 #include <string.h>
 
+#include "arch/mips/asm_support_mips.h"
 #include "atomic.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
@@ -59,6 +60,10 @@
 extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
 extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
 
+extern "C" mirror::Object* art_quick_read_barrier_mark_introspection(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots(mirror::Object*);
+extern "C" void art_quick_read_barrier_mark_introspection_end_of_entries(void);
+
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
 extern int32_t CmplDouble(double a, double b);
@@ -87,6 +92,23 @@
 extern "C" int64_t __moddi3(int64_t, int64_t);
 
 void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
+  intptr_t introspection_field_array_entries_size =
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots) -
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection);
+  static_assert(
+      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET == 2 *
+          BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE,
+      "Expecting equal");
+  DCHECK_EQ(introspection_field_array_entries_size,
+            BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET);
+  intptr_t introspection_gc_root_entries_size =
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_end_of_entries) -
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots);
+  DCHECK_EQ(introspection_gc_root_entries_size,
+            BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE);
+  qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_introspection : nullptr;
+  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg00),
+                "Non-direct C stub marked direct.");
   qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
   static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
                 "Non-direct C stub marked direct.");
@@ -416,9 +438,6 @@
   // Cannot use the following registers to pass arguments:
   // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
   // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
-  qpoints->pReadBarrierMarkReg00 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg00),
-                "Non-direct C stub marked direct.");
   qpoints->pReadBarrierMarkReg15 = nullptr;
   static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg15),
                 "Non-direct C stub marked direct.");
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 00e3d67..974e876 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2066,6 +2066,10 @@
      * Note that this stub writes to a0, t7 and t8.
      */
 ENTRY art_quick_imt_conflict_trampoline
+// FIXME: The DexCache method array has been changed to hash-based cache with eviction.
+// We need a relaxed atomic load of a 64-bit location to try and load the method
+// and call artQuickResolutionTrampoline() if the index does not match.
+#if 0
     lw      $t8, 0($sp)                                      # Load referrer.
     lw      $t8, ART_METHOD_DEX_CACHE_METHODS_OFFSET_32($t8) # Load dex cache methods array.
     sll     $t7, $t7, POINTER_SIZE_SHIFT                     # Calculate offset.
@@ -2095,6 +2099,9 @@
 .Lconflict_trampoline:
     # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
     move    $a0, $t7                                         # Load interface method.
+#else
+    move   $a0, $zero
+#endif
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
 END art_quick_imt_conflict_trampoline
 
@@ -2721,6 +2728,385 @@
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
 // RA (register 31) is reserved.
 
+// Caller code:
+// Short constant offset/index:
+// R2:                           | R6:
+//  lw      $t9, pReadBarrierMarkReg00
+//  beqz    $t9, skip_call       |  beqzc   $t9, skip_call
+//  addiu   $t9, $t9, thunk_disp |  nop
+//  jalr    $t9                  |  jialc   $t9, thunk_disp
+//  nop                          |
+// skip_call:                    | skip_call:
+//  lw      `out`, ofs(`obj`)    |  lw      `out`, ofs(`obj`)
+// [subu    `out`, $zero, `out`] | [subu    `out`, $zero, `out`]  # Unpoison reference.
+.macro BRB_FIELD_SHORT_OFFSET_ENTRY obj
+1:
+    # Explicit null check. May be redundant (for array elements or when the field
+    # offset is larger than the page size, 4KB).
+    # $ra will be adjusted to point to lw's stack map when throwing NPE.
+    beqz    \obj, .Lintrospection_throw_npe
+#if defined(_MIPS_ARCH_MIPS32R6)
+    lapc    $gp, .Lintrospection_exits                  # $gp = address of .Lintrospection_exits.
+#else
+    addiu   $gp, $t9, (.Lintrospection_exits - 1b)      # $gp = address of .Lintrospection_exits.
+#endif
+    .set push
+    .set noat
+    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
+    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
+                                                                # to sign bit.
+    bltz    $at, .Lintrospection_field_array            # If gray, load reference, mark.
+    move    $t8, \obj                                   # Move `obj` to $t8 for common code.
+    .set pop
+    jalr    $zero, $ra                                  # Otherwise, load-load barrier and return.
+    sync
+.endm
+
+// Caller code (R2):
+// Long constant offset/index:   | Variable index:
+//  lw      $t9, pReadBarrierMarkReg00
+//  lui     $t8, ofs_hi          |  sll     $t8, `index`, 2
+//  beqz    $t9, skip_call       |  beqz    $t9, skip_call
+//  addiu   $t9, $t9, thunk_disp |  addiu   $t9, $t9, thunk_disp
+//  jalr    $t9                  |  jalr    $t9
+// skip_call:                    | skip_call:
+//  addu    $t8, $t8, `obj`      |  addu    $t8, $t8, `obj`
+//  lw      `out`, ofs_lo($t8)   |  lw      `out`, ofs($t8)
+// [subu    `out`, $zero, `out`] | [subu    `out`, $zero, `out`]  # Unpoison reference.
+//
+// Caller code (R6):
+// Long constant offset/index:   | Variable index:
+//  lw      $t9, pReadBarrierMarkReg00
+//  beqz    $t9, skip_call       |  beqz    $t9, skip_call
+//  aui     $t8, `obj`, ofs_hi   |  lsa     $t8, `index`, `obj`, 2
+//  jialc   $t9, thunk_disp      |  jialc   $t9, thunk_disp
+// skip_call:                    | skip_call:
+//  lw      `out`, ofs_lo($t8)   |  lw      `out`, ofs($t8)
+// [subu    `out`, $zero, `out`] | [subu    `out`, $zero, `out`]  # Unpoison reference.
+.macro BRB_FIELD_LONG_OFFSET_ENTRY obj
+1:
+    # No explicit null check for variable indices or large constant indices/offsets
+    # as it must have been done earlier.
+#if defined(_MIPS_ARCH_MIPS32R6)
+    lapc    $gp, .Lintrospection_exits                  # $gp = address of .Lintrospection_exits.
+#else
+    addiu   $gp, $t9, (.Lintrospection_exits - 1b)      # $gp = address of .Lintrospection_exits.
+#endif
+    .set push
+    .set noat
+    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
+    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
+                                                                # to sign bit.
+    bltz    $at, .Lintrospection_field_array            # If gray, load reference, mark.
+    nop
+    .set pop
+    jalr    $zero, $ra                                  # Otherwise, load-load barrier and return.
+    sync
+    break                                               # Padding to 8 instructions.
+.endm
+
+.macro BRB_GC_ROOT_ENTRY root
+1:
+#if defined(_MIPS_ARCH_MIPS32R6)
+    lapc    $gp, .Lintrospection_exit_\root             # $gp = exit point address.
+#else
+    addiu   $gp, $t9, (.Lintrospection_exit_\root - 1b)  # $gp = exit point address.
+#endif
+    bnez    \root, .Lintrospection_common
+    move    $t8, \root                                  # Move reference to $t8 for common code.
+    jalr    $zero, $ra                                  # Return if null.
+    # The next instruction (from the following BRB_GC_ROOT_ENTRY) fills the delay slot.
+    # This instruction has no effect (actual NOP for the last entry; otherwise changes $gp,
+    # which is unused after that anyway).
+.endm
+
+.macro BRB_FIELD_EXIT out
+.Lintrospection_exit_\out:
+    jalr    $zero, $ra
+    move    \out, $t8                                   # Return reference in expected register.
+.endm
+
+.macro BRB_FIELD_EXIT_BREAK
+    break
+    break
+.endm
+
+ENTRY_NO_GP art_quick_read_barrier_mark_introspection
+    # Entry points for offsets/indices not fitting into int16_t and for variable indices.
+    BRB_FIELD_LONG_OFFSET_ENTRY $v0
+    BRB_FIELD_LONG_OFFSET_ENTRY $v1
+    BRB_FIELD_LONG_OFFSET_ENTRY $a0
+    BRB_FIELD_LONG_OFFSET_ENTRY $a1
+    BRB_FIELD_LONG_OFFSET_ENTRY $a2
+    BRB_FIELD_LONG_OFFSET_ENTRY $a3
+    BRB_FIELD_LONG_OFFSET_ENTRY $t0
+    BRB_FIELD_LONG_OFFSET_ENTRY $t1
+    BRB_FIELD_LONG_OFFSET_ENTRY $t2
+    BRB_FIELD_LONG_OFFSET_ENTRY $t3
+    BRB_FIELD_LONG_OFFSET_ENTRY $t4
+    BRB_FIELD_LONG_OFFSET_ENTRY $t5
+    BRB_FIELD_LONG_OFFSET_ENTRY $t6
+    BRB_FIELD_LONG_OFFSET_ENTRY $t7
+    BRB_FIELD_LONG_OFFSET_ENTRY $s2
+    BRB_FIELD_LONG_OFFSET_ENTRY $s3
+    BRB_FIELD_LONG_OFFSET_ENTRY $s4
+    BRB_FIELD_LONG_OFFSET_ENTRY $s5
+    BRB_FIELD_LONG_OFFSET_ENTRY $s6
+    BRB_FIELD_LONG_OFFSET_ENTRY $s7
+    BRB_FIELD_LONG_OFFSET_ENTRY $s8
+
+    # Entry points for offsets/indices fitting into int16_t.
+    BRB_FIELD_SHORT_OFFSET_ENTRY $v0
+    BRB_FIELD_SHORT_OFFSET_ENTRY $v1
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a0
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a1
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a2
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a3
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t0
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t1
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t2
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t3
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t4
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t5
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t6
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t7
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s2
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s3
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s4
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s5
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s6
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s7
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s8
+
+    .global art_quick_read_barrier_mark_introspection_gc_roots
+art_quick_read_barrier_mark_introspection_gc_roots:
+    # Entry points for GC roots.
+    BRB_GC_ROOT_ENTRY $v0
+    BRB_GC_ROOT_ENTRY $v1
+    BRB_GC_ROOT_ENTRY $a0
+    BRB_GC_ROOT_ENTRY $a1
+    BRB_GC_ROOT_ENTRY $a2
+    BRB_GC_ROOT_ENTRY $a3
+    BRB_GC_ROOT_ENTRY $t0
+    BRB_GC_ROOT_ENTRY $t1
+    BRB_GC_ROOT_ENTRY $t2
+    BRB_GC_ROOT_ENTRY $t3
+    BRB_GC_ROOT_ENTRY $t4
+    BRB_GC_ROOT_ENTRY $t5
+    BRB_GC_ROOT_ENTRY $t6
+    BRB_GC_ROOT_ENTRY $t7
+    BRB_GC_ROOT_ENTRY $s2
+    BRB_GC_ROOT_ENTRY $s3
+    BRB_GC_ROOT_ENTRY $s4
+    BRB_GC_ROOT_ENTRY $s5
+    BRB_GC_ROOT_ENTRY $s6
+    BRB_GC_ROOT_ENTRY $s7
+    BRB_GC_ROOT_ENTRY $s8
+    .global art_quick_read_barrier_mark_introspection_end_of_entries
+art_quick_read_barrier_mark_introspection_end_of_entries:
+    nop                         # Fill the delay slot of the last BRB_GC_ROOT_ENTRY.
+
+.Lintrospection_throw_npe:
+    b       art_quick_throw_null_pointer_exception
+    addiu   $ra, $ra, 4         # Skip lw, make $ra point to lw's stack map.
+
+    .set push
+    .set noat
+
+    // Fields and array elements.
+
+.Lintrospection_field_array:
+    // Get the field/element address using $t8 and the offset from the lw instruction.
+    lh      $at, 0($ra)         # $ra points to lw: $at = field/element offset.
+    addiu   $ra, $ra, 4 + HEAP_POISON_INSTR_SIZE  # Skip lw(+subu).
+    addu    $t8, $t8, $at       # $t8 = field/element address.
+
+    // Calculate the address of the exit point, store it in $gp and load the reference into $t8.
+    lb      $at, (-HEAP_POISON_INSTR_SIZE - 2)($ra)   # $ra-HEAP_POISON_INSTR_SIZE-4 points to
+                                                      # "lw `out`, ...".
+    andi    $at, $at, 31        # Extract `out` from lw.
+    sll     $at, $at, 3         # Multiply `out` by the exit point size (BRB_FIELD_EXIT* macros).
+
+    lw      $t8, 0($t8)         # $t8 = reference.
+    UNPOISON_HEAP_REF $t8
+
+    // Return if null reference.
+    bnez    $t8, .Lintrospection_common
+    addu    $gp, $gp, $at       # $gp = address of the exit point.
+
+    // Early return through the exit point.
+.Lintrospection_return_early:
+    jalr    $zero, $gp          # Move $t8 to `out` and return.
+    nop
+
+    // Code common for GC roots, fields and array elements.
+
+.Lintrospection_common:
+    // Check lock word for mark bit, if marked return.
+    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET($t8)
+    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
+    bltz    $at, .Lintrospection_return_early
+#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
+    // The below code depends on the lock word state being in the highest bits
+    // and the "forwarding address" state having all bits set.
+#error "Unexpected lock word state shift or forwarding address state value."
+#endif
+    // Test that both the forwarding state bits are 1.
+    sll     $at, $t9, 1
+    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
+    bgez    $at, .Lintrospection_mark
+    nop
+
+    .set pop
+
+    // Shift left by the forwarding address shift. This clears out the state bits since they are
+    // in the top 2 bits of the lock word.
+    jalr    $zero, $gp          # Move $t8 to `out` and return.
+    sll     $t8, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+
+.Lintrospection_mark:
+    // Partially set up the stack frame preserving only $ra.
+    addiu   $sp, $sp, -160      # Includes 16 bytes of space for argument registers $a0-$a3.
+    .cfi_adjust_cfa_offset 160
+    sw      $ra, 156($sp)
+    .cfi_rel_offset 31, 156
+
+    // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
+    bal     1f
+    sw      $gp, 152($sp)       # Preserve the exit point address.
+1:
+    .cpload $ra
+
+    // Finalize the stack frame and call.
+    sw      $t7, 148($sp)
+    .cfi_rel_offset 15, 148
+    sw      $t6, 144($sp)
+    .cfi_rel_offset 14, 144
+    sw      $t5, 140($sp)
+    .cfi_rel_offset 13, 140
+    sw      $t4, 136($sp)
+    .cfi_rel_offset 12, 136
+    sw      $t3, 132($sp)
+    .cfi_rel_offset 11, 132
+    sw      $t2, 128($sp)
+    .cfi_rel_offset 10, 128
+    sw      $t1, 124($sp)
+    .cfi_rel_offset 9, 124
+    sw      $t0, 120($sp)
+    .cfi_rel_offset 8, 120
+    sw      $a3, 116($sp)
+    .cfi_rel_offset 7, 116
+    sw      $a2, 112($sp)
+    .cfi_rel_offset 6, 112
+    sw      $a1, 108($sp)
+    .cfi_rel_offset 5, 108
+    sw      $a0, 104($sp)
+    .cfi_rel_offset 4, 104
+    sw      $v1, 100($sp)
+    .cfi_rel_offset 3, 100
+    sw      $v0, 96($sp)
+    .cfi_rel_offset 2, 96
+
+    la      $t9, artReadBarrierMark
+
+    sdc1    $f18, 88($sp)
+    sdc1    $f16, 80($sp)
+    sdc1    $f14, 72($sp)
+    sdc1    $f12, 64($sp)
+    sdc1    $f10, 56($sp)
+    sdc1    $f8,  48($sp)
+    sdc1    $f6,  40($sp)
+    sdc1    $f4,  32($sp)
+    sdc1    $f2,  24($sp)
+    sdc1    $f0,  16($sp)
+
+    jalr    $t9                 # $v0 <- artReadBarrierMark(reference)
+    move    $a0, $t8            # Pass reference in $a0.
+    move    $t8, $v0
+
+    lw      $ra, 156($sp)
+    .cfi_restore 31
+    lw      $gp, 152($sp)       # $gp = address of the exit point.
+    lw      $t7, 148($sp)
+    .cfi_restore 15
+    lw      $t6, 144($sp)
+    .cfi_restore 14
+    lw      $t5, 140($sp)
+    .cfi_restore 13
+    lw      $t4, 136($sp)
+    .cfi_restore 12
+    lw      $t3, 132($sp)
+    .cfi_restore 11
+    lw      $t2, 128($sp)
+    .cfi_restore 10
+    lw      $t1, 124($sp)
+    .cfi_restore 9
+    lw      $t0, 120($sp)
+    .cfi_restore 8
+    lw      $a3, 116($sp)
+    .cfi_restore 7
+    lw      $a2, 112($sp)
+    .cfi_restore 6
+    lw      $a1, 108($sp)
+    .cfi_restore 5
+    lw      $a0, 104($sp)
+    .cfi_restore 4
+    lw      $v1, 100($sp)
+    .cfi_restore 3
+    lw      $v0, 96($sp)
+    .cfi_restore 2
+
+    ldc1    $f18, 88($sp)
+    ldc1    $f16, 80($sp)
+    ldc1    $f14, 72($sp)
+    ldc1    $f12, 64($sp)
+    ldc1    $f10, 56($sp)
+    ldc1    $f8,  48($sp)
+    ldc1    $f6,  40($sp)
+    ldc1    $f4,  32($sp)
+    ldc1    $f2,  24($sp)
+    ldc1    $f0,  16($sp)
+
+    // Return through the exit point.
+    jalr    $zero, $gp          # Move $t8 to `out` and return.
+    addiu   $sp, $sp, 160
+    .cfi_adjust_cfa_offset -160
+
+.Lintrospection_exits:
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT $v0
+    BRB_FIELD_EXIT $v1
+    BRB_FIELD_EXIT $a0
+    BRB_FIELD_EXIT $a1
+    BRB_FIELD_EXIT $a2
+    BRB_FIELD_EXIT $a3
+    BRB_FIELD_EXIT $t0
+    BRB_FIELD_EXIT $t1
+    BRB_FIELD_EXIT $t2
+    BRB_FIELD_EXIT $t3
+    BRB_FIELD_EXIT $t4
+    BRB_FIELD_EXIT $t5
+    BRB_FIELD_EXIT $t6
+    BRB_FIELD_EXIT $t7
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT $s2
+    BRB_FIELD_EXIT $s3
+    BRB_FIELD_EXIT $s4
+    BRB_FIELD_EXIT $s5
+    BRB_FIELD_EXIT $s6
+    BRB_FIELD_EXIT $s7
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT $s8
+    BRB_FIELD_EXIT_BREAK
+END art_quick_read_barrier_mark_introspection
+
 .extern artInvokePolymorphic
 ENTRY art_quick_invoke_polymorphic
     SETUP_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index ef82bd2..a6b249a 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -83,6 +83,13 @@
 #endif  // USE_HEAP_POISONING
 .endm
 
+// Byte size of the instructions (un)poisoning heap references.
+#ifdef USE_HEAP_POISONING
+#define HEAP_POISON_INSTR_SIZE 8
+#else
+#define HEAP_POISON_INSTR_SIZE 0
+#endif  // USE_HEAP_POISONING
+
 // Based on contents of creg select the minimum integer
 // At the end of the macro the original value of creg is lost
 .macro MINint dreg,rreg,sreg,creg
diff --git a/runtime/arch/mips64/asm_support_mips64.h b/runtime/arch/mips64/asm_support_mips64.h
index 9063d20..7185da5 100644
--- a/runtime/arch/mips64/asm_support_mips64.h
+++ b/runtime/arch/mips64/asm_support_mips64.h
@@ -28,4 +28,24 @@
 // $f0-$f31, $at, $v0-$v1, $a0-$a7, $t0-$t3, $s0-$s7, $t8-$t9, $gp, $s8, $ra + padding + method*
 #define FRAME_SIZE_SAVE_EVERYTHING 496
 
+// &art_quick_read_barrier_mark_introspection is the first of many entry points:
+//   20 entry points for long field offsets, large array indices and variable array indices
+//     (see macro BRB_FIELD_LONG_OFFSET_ENTRY)
+//   20 entry points for short field offsets and small array indices
+//     (see macro BRB_FIELD_SHORT_OFFSET_ENTRY)
+//   20 entry points for GC roots
+//     (see macro BRB_GC_ROOT_ENTRY)
+
+// There are as many entry points of each kind as there are registers that
+// can hold a reference: V0-V1, A0-A7, T0-T2, S2-S8.
+#define BAKER_MARK_INTROSPECTION_REGISTER_COUNT 20
+
+#define BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE (8 * 4)  // 8 instructions in
+                                                                 // BRB_FIELD_*_OFFSET_ENTRY.
+
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET \
+    (2 * BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE)
+
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE (4 * 4)  // 4 instructions in BRB_GC_ROOT_ENTRY.
+
 #endif  // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 007f7b3..5e58827 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -17,6 +17,7 @@
 #include <math.h>
 #include <string.h>
 
+#include "arch/mips64/asm_support_mips64.h"
 #include "atomic.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/quick/quick_alloc_entrypoints.h"
@@ -59,6 +60,10 @@
 extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
 extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
 
+extern "C" mirror::Object* art_quick_read_barrier_mark_introspection(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots(mirror::Object*);
+extern "C" void art_quick_read_barrier_mark_introspection_end_of_entries(void);
+
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
 extern int32_t CmplDouble(double a, double b);
@@ -88,6 +93,21 @@
 
 // No read barrier entrypoints for marking registers.
 void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
+  intptr_t introspection_field_array_entries_size =
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots) -
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection);
+  static_assert(
+      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET == 2 *
+          BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE,
+      "Expecting equal");
+  DCHECK_EQ(introspection_field_array_entries_size,
+            BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET);
+  intptr_t introspection_gc_root_entries_size =
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_end_of_entries) -
+      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots);
+  DCHECK_EQ(introspection_gc_root_entries_size,
+            BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE);
+  qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_introspection : nullptr;
   qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
   qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
   qpoints->pReadBarrierMarkReg03 = is_active ? art_quick_read_barrier_mark_reg03 : nullptr;
@@ -173,7 +193,6 @@
   // Cannot use the following registers to pass arguments:
   // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
   // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
-  qpoints->pReadBarrierMarkReg00 = nullptr;
   qpoints->pReadBarrierMarkReg14 = nullptr;
   qpoints->pReadBarrierMarkReg15 = nullptr;
   qpoints->pReadBarrierMarkReg16 = nullptr;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index d427fe3..bcb315f 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -847,7 +847,7 @@
     dla  $t9, artThrowNullPointerExceptionFromSignal
     jalr $zero, $t9                 # artThrowNullPointerExceptionFromSignal(uinptr_t, Thread*)
     move $a1, rSELF                 # pass Thread::Current
-END art_quick_throw_null_pointer_exception
+END art_quick_throw_null_pointer_exception_from_signal
 
     /*
      * Called by managed code to create and deliver an ArithmeticException
@@ -1989,6 +1989,10 @@
      * Mote that this stub writes to a0, t0 and t1.
      */
 ENTRY art_quick_imt_conflict_trampoline
+// FIXME: The DexCache method array has been changed to hash-based cache with eviction.
+// We need a relaxed atomic load of a 128-bit location to try and load the method
+// and call artQuickResolutionTrampoline() if the index does not match.
+#if 0
     ld      $t1, 0($sp)                                      # Load referrer.
     ld      $t1, ART_METHOD_DEX_CACHE_METHODS_OFFSET_64($t1) # Load dex cache methods array.
     dsll    $t0, $t0, POINTER_SIZE_SHIFT                     # Calculate offset.
@@ -2017,6 +2021,9 @@
 .Lconflict_trampoline:
     # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
     move   $a0, $t0                                          # Load interface method.
+#else
+    move   $a0, $zero
+#endif
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
 END art_quick_imt_conflict_trampoline
 
@@ -2567,6 +2574,375 @@
 READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
 // RA (register 31) is reserved.
 
+// Caller code:
+// Short constant offset/index:
+//  ld      $t9, pReadBarrierMarkReg00
+//  beqzc   $t9, skip_call
+//  nop
+//  jialc   $t9, thunk_disp
+// skip_call:
+//  lwu     `out`, ofs(`obj`)
+// [dsubu   `out`, $zero, `out`
+//  dext    `out`, `out`, 0, 32]  # Unpoison reference.
+.macro BRB_FIELD_SHORT_OFFSET_ENTRY obj
+    # Explicit null check. May be redundant (for array elements or when the field
+    # offset is larger than the page size, 4KB).
+    # $ra will be adjusted to point to lwu's stack map when throwing NPE.
+    beqzc   \obj, .Lintrospection_throw_npe
+    lapc    $t3, .Lintrospection_exits                  # $t3 = address of .Lintrospection_exits.
+    .set push
+    .set noat
+    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
+    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
+                                                                # to sign bit.
+    bltz    $at, .Lintrospection_field_array            # If gray, load reference, mark.
+    move    $t8, \obj                                   # Move `obj` to $t8 for common code.
+    .set pop
+    jalr    $zero, $ra                                  # Otherwise, load-load barrier and return.
+    sync
+.endm
+
+// Caller code:
+// Long constant offset/index:   | Variable index:
+//  ld      $t9, pReadBarrierMarkReg00
+//  beqz    $t9, skip_call       |  beqz    $t9, skip_call
+//  daui    $t8, `obj`, ofs_hi   |  dlsa    $t8, `index`, `obj`, 2
+//  jialc   $t9, thunk_disp      |  jialc   $t9, thunk_disp
+// skip_call:                    | skip_call:
+//  lwu     `out`, ofs_lo($t8)   |  lwu     `out`, ofs($t8)
+// [dsubu   `out`, $zero, `out`  | [dsubu   `out`, $zero, `out`
+//  dext    `out`, `out`, 0, 32] |  dext    `out`, `out`, 0, 32]  # Unpoison reference.
+.macro BRB_FIELD_LONG_OFFSET_ENTRY obj
+    # No explicit null check for variable indices or large constant indices/offsets
+    # as it must have been done earlier.
+    lapc    $t3, .Lintrospection_exits                  # $t3 = address of .Lintrospection_exits.
+    .set push
+    .set noat
+    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
+    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
+                                                                # to sign bit.
+    bltzc   $at, .Lintrospection_field_array            # If gray, load reference, mark.
+    .set pop
+    sync                                                # Otherwise, load-load barrier and return.
+    jic     $ra, 0
+    break                                               # Padding to 8 instructions.
+    break
+.endm
+
+.macro BRB_GC_ROOT_ENTRY root
+    lapc    $t3, .Lintrospection_exit_\root             # $t3 = exit point address.
+    bnez    \root, .Lintrospection_common
+    move    $t8, \root                                  # Move reference to $t8 for common code.
+    jic     $ra, 0                                      # Return if null.
+.endm
+
+.macro BRB_FIELD_EXIT out
+.Lintrospection_exit_\out:
+    jalr    $zero, $ra
+    move    \out, $t8                                   # Return reference in expected register.
+.endm
+
+.macro BRB_FIELD_EXIT_BREAK
+    break
+    break
+.endm
+
+ENTRY_NO_GP art_quick_read_barrier_mark_introspection
+    # Entry points for offsets/indices not fitting into int16_t and for variable indices.
+    BRB_FIELD_LONG_OFFSET_ENTRY $v0
+    BRB_FIELD_LONG_OFFSET_ENTRY $v1
+    BRB_FIELD_LONG_OFFSET_ENTRY $a0
+    BRB_FIELD_LONG_OFFSET_ENTRY $a1
+    BRB_FIELD_LONG_OFFSET_ENTRY $a2
+    BRB_FIELD_LONG_OFFSET_ENTRY $a3
+    BRB_FIELD_LONG_OFFSET_ENTRY $a4
+    BRB_FIELD_LONG_OFFSET_ENTRY $a5
+    BRB_FIELD_LONG_OFFSET_ENTRY $a6
+    BRB_FIELD_LONG_OFFSET_ENTRY $a7
+    BRB_FIELD_LONG_OFFSET_ENTRY $t0
+    BRB_FIELD_LONG_OFFSET_ENTRY $t1
+    BRB_FIELD_LONG_OFFSET_ENTRY $t2
+    BRB_FIELD_LONG_OFFSET_ENTRY $s2
+    BRB_FIELD_LONG_OFFSET_ENTRY $s3
+    BRB_FIELD_LONG_OFFSET_ENTRY $s4
+    BRB_FIELD_LONG_OFFSET_ENTRY $s5
+    BRB_FIELD_LONG_OFFSET_ENTRY $s6
+    BRB_FIELD_LONG_OFFSET_ENTRY $s7
+    BRB_FIELD_LONG_OFFSET_ENTRY $s8
+
+    # Entry points for offsets/indices fitting into int16_t.
+    BRB_FIELD_SHORT_OFFSET_ENTRY $v0
+    BRB_FIELD_SHORT_OFFSET_ENTRY $v1
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a0
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a1
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a2
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a3
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a4
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a5
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a6
+    BRB_FIELD_SHORT_OFFSET_ENTRY $a7
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t0
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t1
+    BRB_FIELD_SHORT_OFFSET_ENTRY $t2
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s2
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s3
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s4
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s5
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s6
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s7
+    BRB_FIELD_SHORT_OFFSET_ENTRY $s8
+
+    .global art_quick_read_barrier_mark_introspection_gc_roots
+art_quick_read_barrier_mark_introspection_gc_roots:
+    # Entry points for GC roots.
+    BRB_GC_ROOT_ENTRY $v0
+    BRB_GC_ROOT_ENTRY $v1
+    BRB_GC_ROOT_ENTRY $a0
+    BRB_GC_ROOT_ENTRY $a1
+    BRB_GC_ROOT_ENTRY $a2
+    BRB_GC_ROOT_ENTRY $a3
+    BRB_GC_ROOT_ENTRY $a4
+    BRB_GC_ROOT_ENTRY $a5
+    BRB_GC_ROOT_ENTRY $a6
+    BRB_GC_ROOT_ENTRY $a7
+    BRB_GC_ROOT_ENTRY $t0
+    BRB_GC_ROOT_ENTRY $t1
+    BRB_GC_ROOT_ENTRY $t2
+    BRB_GC_ROOT_ENTRY $s2
+    BRB_GC_ROOT_ENTRY $s3
+    BRB_GC_ROOT_ENTRY $s4
+    BRB_GC_ROOT_ENTRY $s5
+    BRB_GC_ROOT_ENTRY $s6
+    BRB_GC_ROOT_ENTRY $s7
+    BRB_GC_ROOT_ENTRY $s8
+    .global art_quick_read_barrier_mark_introspection_end_of_entries
+art_quick_read_barrier_mark_introspection_end_of_entries:
+
+.Lintrospection_throw_npe:
+    b       art_quick_throw_null_pointer_exception
+    daddiu  $ra, $ra, 4         # Skip lwu, make $ra point to lwu's stack map.
+
+    .set push
+    .set noat
+
+    // Fields and array elements.
+
+.Lintrospection_field_array:
+    // Get the field/element address using $t8 and the offset from the lwu instruction.
+    lh      $at, 0($ra)         # $ra points to lwu: $at = low 16 bits of field/element offset.
+    daddiu  $ra, $ra, 4 + HEAP_POISON_INSTR_SIZE   # Skip lwu(+dsubu+dext).
+    daddu   $t8, $t8, $at       # $t8 = field/element address.
+
+    // Calculate the address of the exit point, store it in $t3 and load the reference into $t8.
+    lb      $at, (-HEAP_POISON_INSTR_SIZE - 2)($ra)   # $ra-HEAP_POISON_INSTR_SIZE-4 points to
+                                                      # "lwu `out`, ...".
+    andi    $at, $at, 31        # Extract `out` from lwu.
+
+    lwu     $t8, 0($t8)         # $t8 = reference.
+    UNPOISON_HEAP_REF $t8
+
+    // Return if null reference.
+    bnez    $t8, .Lintrospection_common
+    dlsa    $t3, $at, $t3, 3    # $t3 = address of the exit point
+                                # (BRB_FIELD_EXIT* macro is 8 bytes).
+
+    // Early return through the exit point.
+.Lintrospection_return_early:
+    jic     $t3, 0              # Move $t8 to `out` and return.
+
+    // Code common for GC roots, fields and array elements.
+
+.Lintrospection_common:
+    // Check lock word for mark bit, if marked return.
+    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET($t8)
+    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
+    bltzc   $at, .Lintrospection_return_early
+#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
+    // The below code depends on the lock word state being in the highest bits
+    // and the "forwarding address" state having all bits set.
+#error "Unexpected lock word state shift or forwarding address state value."
+#endif
+    // Test that both the forwarding state bits are 1.
+    sll     $at, $t9, 1
+    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
+    bgezc   $at, .Lintrospection_mark
+
+    .set pop
+
+    // Shift left by the forwarding address shift. This clears out the state bits since they are
+    // in the top 2 bits of the lock word.
+    sll     $t8, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+    jalr    $zero, $t3          # Move $t8 to `out` and return.
+    dext    $t8, $t8, 0, 32     # Make sure the address is zero-extended.
+
+.Lintrospection_mark:
+    // Partially set up the stack frame preserving only $ra.
+    daddiu  $sp, $sp, -320
+    .cfi_adjust_cfa_offset 320
+    sd      $ra, 312($sp)
+    .cfi_rel_offset 31, 312
+
+    // Set up $gp, clobbering $ra.
+    lapc    $ra, 1f
+1:
+    .cpsetup $ra, 304, 1b       # Save old $gp in 304($sp).
+
+    // Finalize the stack frame and call.
+    sd      $t3, 296($sp)       # Preserve the exit point address.
+    sd      $t2, 288($sp)
+    .cfi_rel_offset 14, 288
+    sd      $t1, 280($sp)
+    .cfi_rel_offset 13, 280
+    sd      $t0, 272($sp)
+    .cfi_rel_offset 12, 272
+    sd      $a7, 264($sp)
+    .cfi_rel_offset 11, 264
+    sd      $a6, 256($sp)
+    .cfi_rel_offset 10, 256
+    sd      $a5, 248($sp)
+    .cfi_rel_offset 9, 248
+    sd      $a4, 240($sp)
+    .cfi_rel_offset 8, 240
+    sd      $a3, 232($sp)
+    .cfi_rel_offset 7, 232
+    sd      $a2, 224($sp)
+    .cfi_rel_offset 6, 224
+    sd      $a1, 216($sp)
+    .cfi_rel_offset 5, 216
+    sd      $a0, 208($sp)
+    .cfi_rel_offset 4, 208
+    sd      $v1, 200($sp)
+    .cfi_rel_offset 3, 200
+    sd      $v0, 192($sp)
+    .cfi_rel_offset 2, 192
+
+    dla     $t9, artReadBarrierMark
+
+    sdc1    $f23, 184($sp)
+    sdc1    $f22, 176($sp)
+    sdc1    $f21, 168($sp)
+    sdc1    $f20, 160($sp)
+    sdc1    $f19, 152($sp)
+    sdc1    $f18, 144($sp)
+    sdc1    $f17, 136($sp)
+    sdc1    $f16, 128($sp)
+    sdc1    $f15, 120($sp)
+    sdc1    $f14, 112($sp)
+    sdc1    $f13, 104($sp)
+    sdc1    $f12,  96($sp)
+    sdc1    $f11,  88($sp)
+    sdc1    $f10,  80($sp)
+    sdc1    $f9,   72($sp)
+    sdc1    $f8,   64($sp)
+    sdc1    $f7,   56($sp)
+    sdc1    $f6,   48($sp)
+    sdc1    $f5,   40($sp)
+    sdc1    $f4,   32($sp)
+    sdc1    $f3,   24($sp)
+    sdc1    $f2,   16($sp)
+    sdc1    $f1,    8($sp)
+    sdc1    $f0,    0($sp)
+
+    jalr    $t9                 # $v0 <- artReadBarrierMark(reference)
+    move    $a0, $t8            # Pass reference in $a0.
+    move    $t8, $v0
+
+    ld      $ra, 312($sp)
+    .cfi_restore 31
+    .cpreturn                   # Restore old $gp from 304($sp).
+    ld      $t3, 296($sp)       # $t3 = address of the exit point.
+    ld      $t2, 288($sp)
+    .cfi_restore 14
+    ld      $t1, 280($sp)
+    .cfi_restore 13
+    ld      $t0, 272($sp)
+    .cfi_restore 12
+    ld      $a7, 264($sp)
+    .cfi_restore 11
+    ld      $a6, 256($sp)
+    .cfi_restore 10
+    ld      $a5, 248($sp)
+    .cfi_restore 9
+    ld      $a4, 240($sp)
+    .cfi_restore 8
+    ld      $a3, 232($sp)
+    .cfi_restore 7
+    ld      $a2, 224($sp)
+    .cfi_restore 6
+    ld      $a1, 216($sp)
+    .cfi_restore 5
+    ld      $a0, 208($sp)
+    .cfi_restore 4
+    ld      $v1, 200($sp)
+    .cfi_restore 3
+    ld      $v0, 192($sp)
+    .cfi_restore 2
+
+    ldc1    $f23, 184($sp)
+    ldc1    $f22, 176($sp)
+    ldc1    $f21, 168($sp)
+    ldc1    $f20, 160($sp)
+    ldc1    $f19, 152($sp)
+    ldc1    $f18, 144($sp)
+    ldc1    $f17, 136($sp)
+    ldc1    $f16, 128($sp)
+    ldc1    $f15, 120($sp)
+    ldc1    $f14, 112($sp)
+    ldc1    $f13, 104($sp)
+    ldc1    $f12,  96($sp)
+    ldc1    $f11,  88($sp)
+    ldc1    $f10,  80($sp)
+    ldc1    $f9,   72($sp)
+    ldc1    $f8,   64($sp)
+    ldc1    $f7,   56($sp)
+    ldc1    $f6,   48($sp)
+    ldc1    $f5,   40($sp)
+    ldc1    $f4,   32($sp)
+    ldc1    $f3,   24($sp)
+    ldc1    $f2,   16($sp)
+    ldc1    $f1,    8($sp)
+    ldc1    $f0,    0($sp)
+
+    // Return through the exit point.
+    jalr    $zero, $t3          # Move $t8 to `out` and return.
+    daddiu  $sp, $sp, 320
+    .cfi_adjust_cfa_offset -320
+
+.Lintrospection_exits:
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT $v0
+    BRB_FIELD_EXIT $v1
+    BRB_FIELD_EXIT $a0
+    BRB_FIELD_EXIT $a1
+    BRB_FIELD_EXIT $a2
+    BRB_FIELD_EXIT $a3
+    BRB_FIELD_EXIT $a4
+    BRB_FIELD_EXIT $a5
+    BRB_FIELD_EXIT $a6
+    BRB_FIELD_EXIT $a7
+    BRB_FIELD_EXIT $t0
+    BRB_FIELD_EXIT $t1
+    BRB_FIELD_EXIT $t2
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT $s2
+    BRB_FIELD_EXIT $s3
+    BRB_FIELD_EXIT $s4
+    BRB_FIELD_EXIT $s5
+    BRB_FIELD_EXIT $s6
+    BRB_FIELD_EXIT $s7
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT_BREAK
+    BRB_FIELD_EXIT $s8
+    BRB_FIELD_EXIT_BREAK
+END art_quick_read_barrier_mark_introspection
+
 .extern artInvokePolymorphic
 ENTRY art_quick_invoke_polymorphic
     SETUP_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index cc0bdf2..ea5a90d 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -230,6 +230,19 @@
       (has_POPCNT_ == other_as_x86->has_POPCNT_);
 }
 
+bool X86InstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
+  if (GetInstructionSet() != other->GetInstructionSet()) {
+    return false;
+  }
+  const X86InstructionSetFeatures* other_as_x86 = other->AsX86InstructionSetFeatures();
+  return (has_SSSE3_ || !other_as_x86->has_SSSE3_) &&
+      (has_SSE4_1_ || !other_as_x86->has_SSE4_1_) &&
+      (has_SSE4_2_ || !other_as_x86->has_SSE4_2_) &&
+      (has_AVX_ || !other_as_x86->has_AVX_) &&
+      (has_AVX2_ || !other_as_x86->has_AVX2_) &&
+      (has_POPCNT_ || !other_as_x86->has_POPCNT_);
+}
+
 uint32_t X86InstructionSetFeatures::AsBitmap() const {
   return (has_SSSE3_ ? kSsse3Bitfield : 0) |
       (has_SSE4_1_ ? kSse4_1Bitfield : 0) |
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index eb8a710..56cb07e 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -29,12 +29,11 @@
  public:
   // Process a CPU variant string like "atom" or "nehalem" and create InstructionSetFeatures.
   static X86FeaturesUniquePtr FromVariant(const std::string& variant,
-                                                                      std::string* error_msg,
-                                                                      bool x86_64 = false);
+                                          std::string* error_msg,
+                                          bool x86_64 = false);
 
   // Parse a bitmap and create an InstructionSetFeatures.
-  static X86FeaturesUniquePtr FromBitmap(uint32_t bitmap,
-                                                                     bool x86_64 = false);
+  static X86FeaturesUniquePtr FromBitmap(uint32_t bitmap, bool x86_64 = false);
 
   // Turn C pre-processor #defines into the equivalent instruction set features.
   static X86FeaturesUniquePtr FromCppDefines(bool x86_64 = false);
@@ -52,6 +51,8 @@
 
   bool Equals(const InstructionSetFeatures* other) const OVERRIDE;
 
+  bool HasAtLeast(const InstructionSetFeatures* other) const OVERRIDE;
+
   virtual InstructionSet GetInstructionSet() const OVERRIDE {
     return kX86;
   }
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 031b36b..48d2de9 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1780,35 +1780,90 @@
      */
 DEFINE_FUNCTION art_quick_imt_conflict_trampoline
     PUSH EDI
-    movl 8(%esp), %edi // Load referrer
-    movl ART_METHOD_DEX_CACHE_METHODS_OFFSET_32(%edi), %edi   // Load dex cache methods array
+    PUSH ESI
+    PUSH EDX
+    movl 16(%esp), %edi         // Load referrer.
+    movl ART_METHOD_DEX_CACHE_METHODS_OFFSET_32(%edi), %edi   // Load dex cache methods array.
     pushl ART_METHOD_JNI_OFFSET_32(%eax)  // Push ImtConflictTable.
     CFI_ADJUST_CFA_OFFSET(4)
-    movd %xmm7, %eax              // get target method index stored in xmm7
-    movl 0(%edi, %eax, __SIZEOF_POINTER__), %edi  // Load interface method
-    popl %eax  // Pop ImtConflictTable.
+    movd %xmm7, %eax            // Get target method index stored in xmm7.
+    movl %eax, %esi             // Remember method index in ESI.
+    andl LITERAL(METHOD_DEX_CACHE_SIZE_MINUS_ONE), %eax  // Calculate DexCache method slot index.
+    leal 0(%edi, %eax, 2 * __SIZEOF_POINTER__), %edi  // Load DexCache method slot address.
+    mov %ecx, %edx              // Make EDX:EAX == ECX:EBX so that LOCK CMPXCHG8B makes no changes.
+    mov %ebx, %eax              // (The actual value does not matter.)
+    lock cmpxchg8b (%edi)       // Relaxed atomic load EDX:EAX from the dex cache slot.
+    popl %edi                   // Pop ImtConflictTable.
     CFI_ADJUST_CFA_OFFSET(-4)
+    cmp %edx, %esi              // Compare method index to see if we had a DexCache method hit.
+    jne .Limt_conflict_trampoline_dex_cache_miss
 .Limt_table_iterate:
-    cmpl %edi, 0(%eax)
+    cmpl %eax, 0(%edi)
     jne .Limt_table_next_entry
     // We successfully hit an entry in the table. Load the target method
     // and jump to it.
+    movl __SIZEOF_POINTER__(%edi), %eax
+    CFI_REMEMBER_STATE
+    POP EDX
+    POP ESI
     POP EDI
-    movl __SIZEOF_POINTER__(%eax), %eax
     jmp *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)
+    CFI_RESTORE_STATE
 .Limt_table_next_entry:
     // If the entry is null, the interface method is not in the ImtConflictTable.
-    cmpl LITERAL(0), 0(%eax)
+    cmpl LITERAL(0), 0(%edi)
     jz .Lconflict_trampoline
     // Iterate over the entries of the ImtConflictTable.
-    addl LITERAL(2 * __SIZEOF_POINTER__), %eax
+    addl LITERAL(2 * __SIZEOF_POINTER__), %edi
     jmp .Limt_table_iterate
 .Lconflict_trampoline:
     // Call the runtime stub to populate the ImtConflictTable and jump to the
     // resolved method.
-    movl %edi, %eax  // Load interface method
+    CFI_REMEMBER_STATE
+    POP EDX
+    POP ESI
     POP EDI
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
+    CFI_RESTORE_STATE
+.Limt_conflict_trampoline_dex_cache_miss:
+    // We're not creating a proper runtime method frame here,
+    // artLookupResolvedMethod() is not allowed to walk the stack.
+
+    // Save core register args; EDX is already saved.
+    PUSH ebx
+    PUSH ecx
+
+    // Save FPR args.
+    subl MACRO_LITERAL(32), %esp
+    CFI_ADJUST_CFA_OFFSET(32)
+    movsd %xmm0, 0(%esp)
+    movsd %xmm1, 8(%esp)
+    movsd %xmm2, 16(%esp)
+    movsd %xmm3, 24(%esp)
+
+    pushl 32+8+16(%esp)         // Pass referrer.
+    CFI_ADJUST_CFA_OFFSET(4)
+    pushl %esi                  // Pass method index.
+    CFI_ADJUST_CFA_OFFSET(4)
+    call SYMBOL(artLookupResolvedMethod)  // (uint32_t method_index, ArtMethod* referrer)
+    addl LITERAL(8), %esp       // Pop arguments.
+    CFI_ADJUST_CFA_OFFSET(-8)
+
+    // Restore FPR args.
+    movsd 0(%esp), %xmm0
+    movsd 8(%esp), %xmm1
+    movsd 16(%esp), %xmm2
+    movsd 24(%esp), %xmm3
+    addl MACRO_LITERAL(32), %esp
+    CFI_ADJUST_CFA_OFFSET(-32)
+
+    // Restore core register args.
+    POP ecx
+    POP ebx
+
+    cmp LITERAL(0), %eax        // If the method wasn't resolved,
+    je .Lconflict_trampoline    //   skip the lookup and go to artInvokeInterfaceTrampoline().
+    jmp .Limt_table_iterate
 END_FUNCTION art_quick_imt_conflict_trampoline
 
 DEFINE_FUNCTION art_quick_resolution_trampoline
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ad06873..0a9199e 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1641,17 +1641,29 @@
     int3
     int3
 #else
-    movq __SIZEOF_POINTER__(%rsp), %r10 // Load referrer
-    movq ART_METHOD_DEX_CACHE_METHODS_OFFSET_64(%r10), %r10   // Load dex cache methods array
-    movq 0(%r10, %rax, __SIZEOF_POINTER__), %r10 // Load interface method
+    movq __SIZEOF_POINTER__(%rsp), %r10 // Load referrer.
+    movq ART_METHOD_DEX_CACHE_METHODS_OFFSET_64(%r10), %r10   // Load dex cache methods array.
+    mov %eax, %r11d  // Remember method index in R11.
+    andl LITERAL(METHOD_DEX_CACHE_SIZE_MINUS_ONE), %eax  // Calculate DexCache method slot index.
+    shll LITERAL(1), %eax       // Multiply by 2 as entries have size 2 * __SIZEOF_POINTER__.
+    leaq 0(%r10, %rax, __SIZEOF_POINTER__), %r10 // Load DexCache method slot address.
+    PUSH rdx                    // Preserve RDX as we need to clobber it by LOCK CMPXCHG16B.
+    mov %rcx, %rdx              // Make RDX:RAX == RCX:RBX so that LOCK CMPXCHG16B makes no changes.
+    mov %rbx, %rax              // (The actual value does not matter.)
+    lock cmpxchg16b (%r10)      // Relaxed atomic load RDX:RAX from the dex cache slot.
     movq ART_METHOD_JNI_OFFSET_64(%rdi), %rdi  // Load ImtConflictTable
+    cmp %rdx, %r11              // Compare method index to see if we had a DexCache method hit.
+    jne .Limt_conflict_trampoline_dex_cache_miss
 .Limt_table_iterate:
-    cmpq %r10, 0(%rdi)
+    cmpq %rax, 0(%rdi)
     jne .Limt_table_next_entry
     // We successfully hit an entry in the table. Load the target method
     // and jump to it.
     movq __SIZEOF_POINTER__(%rdi), %rdi
+    CFI_REMEMBER_STATE
+    POP rdx
     jmp *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi)
+    CFI_RESTORE_STATE
 .Limt_table_next_entry:
     // If the entry is null, the interface method is not in the ImtConflictTable.
     cmpq LITERAL(0), 0(%rdi)
@@ -1662,8 +1674,66 @@
 .Lconflict_trampoline:
     // Call the runtime stub to populate the ImtConflictTable and jump to the
     // resolved method.
-    movq %r10, %rdi  // Load interface method
+    CFI_REMEMBER_STATE
+    POP rdx
+    movq %rax, %rdi  // Load interface method
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
+    CFI_RESTORE_STATE
+.Limt_conflict_trampoline_dex_cache_miss:
+    // We're not creating a proper runtime method frame here,
+    // artLookupResolvedMethod() is not allowed to walk the stack.
+
+    // Save GPR args and ImtConflictTable; RDX is already saved.
+    PUSH r9   // Quick arg 5.
+    PUSH r8   // Quick arg 4.
+    PUSH rsi  // Quick arg 1.
+    PUSH rcx  // Quick arg 3.
+    PUSH rdi  // ImtConflictTable
+    // Save FPR args and callee-saves, align stack to 16B.
+    subq MACRO_LITERAL(12 * 8 + 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(12 * 8 + 8)
+    movq %xmm0, 0(%rsp)
+    movq %xmm1, 8(%rsp)
+    movq %xmm2, 16(%rsp)
+    movq %xmm3, 24(%rsp)
+    movq %xmm4, 32(%rsp)
+    movq %xmm5, 40(%rsp)
+    movq %xmm6, 48(%rsp)
+    movq %xmm7, 56(%rsp)
+    movq %xmm12, 64(%rsp)  // XMM12-15 are callee-save in ART compiled code ABI
+    movq %xmm13, 72(%rsp)  // but caller-save in native ABI.
+    movq %xmm14, 80(%rsp)
+    movq %xmm15, 88(%rsp)
+
+    movq %r11, %rdi             // Pass method index.
+    movq 12 * 8 + 8 + 6 * 8 + 8(%rsp), %rsi   // Pass referrer.
+    call SYMBOL(artLookupResolvedMethod)  // (uint32_t method_index, ArtMethod* referrer)
+
+    // Restore FPRs.
+    movq 0(%rsp), %xmm0
+    movq 8(%rsp), %xmm1
+    movq 16(%rsp), %xmm2
+    movq 24(%rsp), %xmm3
+    movq 32(%rsp), %xmm4
+    movq 40(%rsp), %xmm5
+    movq 48(%rsp), %xmm6
+    movq 56(%rsp), %xmm7
+    movq 64(%rsp), %xmm12
+    movq 72(%rsp), %xmm13
+    movq 80(%rsp), %xmm14
+    movq 88(%rsp), %xmm15
+    addq MACRO_LITERAL(12 * 8 + 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(-(12 * 8 + 8))
+    // Restore ImtConflictTable and GPR args.
+    POP rdi
+    POP rcx
+    POP rsi
+    POP r8
+    POP r9
+
+    cmp LITERAL(0), %rax        // If the method wasn't resolved,
+    je .Lconflict_trampoline    //   skip the lookup and go to artInvokeInterfaceTrampoline().
+    jmp .Limt_table_iterate
 #endif  // __APPLE__
 END_FUNCTION art_quick_imt_conflict_trampoline
 
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 40d7e5c..4300544 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -102,20 +102,21 @@
   return GetDexMethodIndexUnchecked();
 }
 
-inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(PointerSize pointer_size) {
-  return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
-                                       pointer_size);
+inline mirror::MethodDexCacheType* ArtMethod::GetDexCacheResolvedMethods(PointerSize pointer_size) {
+  return GetNativePointer<mirror::MethodDexCacheType*>(DexCacheResolvedMethodsOffset(pointer_size),
+                                                       pointer_size);
 }
 
 inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index,
                                                        PointerSize pointer_size) {
   // NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
   // without accessing the DexCache and we don't want to do that in release build.
-  DCHECK_LT(method_index,
-            GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
-  ArtMethod* method = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
-                                                          method_index,
-                                                          pointer_size);
+  DCHECK_LT(method_index, GetInterfaceMethodIfProxy(pointer_size)->GetDexFile()->NumMethodIds());
+  uint32_t slot_idx = method_index % mirror::DexCache::kDexCacheMethodCacheSize;
+  DCHECK_LT(slot_idx, GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
+  mirror::MethodDexCachePair pair = mirror::DexCache::GetNativePairPtrSize(
+      GetDexCacheResolvedMethods(pointer_size), slot_idx, pointer_size);
+  ArtMethod* method = pair.GetObjectForIndex(method_index);
   if (LIKELY(method != nullptr)) {
     auto* declaring_class = method->GetDeclaringClass();
     if (LIKELY(declaring_class == nullptr || !declaring_class->IsErroneous())) {
@@ -130,29 +131,29 @@
                                                  PointerSize pointer_size) {
   // NOTE: Unchecked, i.e. not throwing AIOOB. We don't even know the length here
   // without accessing the DexCache and we don't want to do that in release build.
-  DCHECK_LT(method_index,
-            GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
+  DCHECK_LT(method_index, GetInterfaceMethodIfProxy(pointer_size)->GetDexFile()->NumMethodIds());
   DCHECK(new_method == nullptr || new_method->GetDeclaringClass() != nullptr);
-  mirror::DexCache::SetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
-                                      method_index,
-                                      new_method,
-                                      pointer_size);
+  uint32_t slot_idx = method_index % mirror::DexCache::kDexCacheMethodCacheSize;
+  DCHECK_LT(slot_idx, GetInterfaceMethodIfProxy(pointer_size)->GetDexCache()->NumResolvedMethods());
+  mirror::MethodDexCachePair pair(new_method, method_index);
+  mirror::DexCache::SetNativePairPtrSize(
+      GetDexCacheResolvedMethods(pointer_size), slot_idx, pair, pointer_size);
 }
 
 inline bool ArtMethod::HasDexCacheResolvedMethods(PointerSize pointer_size) {
   return GetDexCacheResolvedMethods(pointer_size) != nullptr;
 }
 
-inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod** other_cache,
-                                                      PointerSize pointer_size) {
-  return GetDexCacheResolvedMethods(pointer_size) == other_cache;
-}
-
 inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size) {
   return GetDexCacheResolvedMethods(pointer_size) ==
       other->GetDexCacheResolvedMethods(pointer_size);
 }
 
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(mirror::MethodDexCacheType* other_cache,
+                                                      PointerSize pointer_size) {
+  return GetDexCacheResolvedMethods(pointer_size) == other_cache;
+}
+
 inline mirror::Class* ArtMethod::GetClassFromTypeIndex(dex::TypeIndex type_idx, bool resolve) {
   // TODO: Refactor this function into two functions, Resolve...() and Lookup...()
   // so that we can properly annotate it with no-suspension possible / suspension possible.
@@ -381,17 +382,21 @@
   if (LIKELY(!IsProxyMethod())) {
     return this;
   }
-  ArtMethod* interface_method = mirror::DexCache::GetElementPtrSize(
-      GetDexCacheResolvedMethods(pointer_size),
-      GetDexMethodIndex(),
-      pointer_size);
-  DCHECK(interface_method != nullptr);
-  DCHECK_EQ(interface_method,
-            Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this));
+  uint32_t method_index = GetDexMethodIndex();
+  uint32_t slot_idx = method_index % mirror::DexCache::kDexCacheMethodCacheSize;
+  mirror::MethodDexCachePair pair = mirror::DexCache::GetNativePairPtrSize(
+      GetDexCacheResolvedMethods(pointer_size), slot_idx, pointer_size);
+  ArtMethod* interface_method = pair.GetObjectForIndex(method_index);
+  if (LIKELY(interface_method != nullptr)) {
+    DCHECK_EQ(interface_method, Runtime::Current()->GetClassLinker()->FindMethodForProxy(this));
+  } else {
+    interface_method = Runtime::Current()->GetClassLinker()->FindMethodForProxy(this);
+    DCHECK(interface_method != nullptr);
+  }
   return interface_method;
 }
 
-inline void ArtMethod::SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
+inline void ArtMethod::SetDexCacheResolvedMethods(mirror::MethodDexCacheType* new_dex_cache_methods,
                                                   PointerSize pointer_size) {
   SetNativePointer(DexCacheResolvedMethodsOffset(pointer_size),
                    new_dex_cache_methods,
@@ -462,14 +467,8 @@
     if (UNLIKELY(klass->IsProxyClass())) {
       // For normal methods, dex cache shortcuts will be visited through the declaring class.
       // However, for proxies we need to keep the interface method alive, so we visit its roots.
-      ArtMethod* interface_method = mirror::DexCache::GetElementPtrSize(
-          GetDexCacheResolvedMethods(pointer_size),
-          GetDexMethodIndex(),
-          pointer_size);
+      ArtMethod* interface_method = GetInterfaceMethodIfProxy(pointer_size);
       DCHECK(interface_method != nullptr);
-      DCHECK_EQ(interface_method,
-                Runtime::Current()->GetClassLinker()->FindMethodForProxy<kReadBarrierOption>(
-                    klass, this));
       interface_method->VisitRoots(visitor, pointer_size);
     }
   }
@@ -483,8 +482,8 @@
   if (old_class != new_class) {
     SetDeclaringClass(new_class);
   }
-  ArtMethod** old_methods = GetDexCacheResolvedMethods(pointer_size);
-  ArtMethod** new_methods = visitor(old_methods);
+  mirror::MethodDexCacheType* old_methods = GetDexCacheResolvedMethods(pointer_size);
+  mirror::MethodDexCacheType* new_methods = visitor(old_methods);
   if (old_methods != new_methods) {
     SetDexCacheResolvedMethods(new_methods, pointer_size);
   }
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 45dd596..d8984e8 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -67,9 +67,10 @@
     return this;
   } else {
     mirror::Class* declaring_class = GetDeclaringClass();
-    ArtMethod* ret = declaring_class->FindDeclaredVirtualMethod(declaring_class->GetDexCache(),
-                                                                GetDexMethodIndex(),
-                                                                pointer_size);
+    DCHECK(declaring_class->IsInterface());
+    ArtMethod* ret = declaring_class->FindInterfaceMethod(declaring_class->GetDexCache(),
+                                                          GetDexMethodIndex(),
+                                                          pointer_size);
     DCHECK(ret != nullptr);
     return ret;
   }
@@ -215,11 +216,8 @@
   } else {
     // Method didn't override superclass method so search interfaces
     if (IsProxyMethod()) {
-      result = mirror::DexCache::GetElementPtrSize(GetDexCacheResolvedMethods(pointer_size),
-                                                   GetDexMethodIndex(),
-                                                   pointer_size);
-      CHECK_EQ(result,
-               Runtime::Current()->GetClassLinker()->FindMethodForProxy(GetDeclaringClass(), this));
+      result = GetInterfaceMethodIfProxy(pointer_size);
+      DCHECK(result != nullptr);
     } else {
       mirror::IfTable* iftable = GetDeclaringClass()->GetIfTable();
       for (size_t i = 0; i < iftable->Count() && result == nullptr; i++) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 4b3e8ef..511ac83 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -53,6 +53,10 @@
 template <typename MirrorType> class ObjectArray;
 class PointerArray;
 class String;
+
+template <typename T> struct NativeDexCachePair;
+using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
+using MethodDexCacheType = std::atomic<MethodDexCachePair>;
 }  // namespace mirror
 
 class ArtMethod FINAL {
@@ -352,7 +356,7 @@
     dex_method_index_ = new_idx;
   }
 
-  ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(PointerSize pointer_size)
+  ALWAYS_INLINE mirror::MethodDexCacheType* GetDexCacheResolvedMethods(PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
   ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
                                                      PointerSize pointer_size)
@@ -362,13 +366,14 @@
                                                ArtMethod* new_method,
                                                PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
+  ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::MethodDexCacheType* new_dex_cache_methods,
                                                 PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
   bool HasDexCacheResolvedMethods(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
   bool HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, PointerSize pointer_size)
+  bool HasSameDexCacheResolvedMethods(mirror::MethodDexCacheType* other_cache,
+                                      PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the Class* from the type index into this method's dex cache.
@@ -714,7 +719,7 @@
   // Must be the last fields in the method.
   struct PtrSizedFields {
     // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
-    ArtMethod** dex_cache_resolved_methods_;
+    mirror::MethodDexCacheType* dex_cache_resolved_methods_;
 
     // Pointer to JNI function registered to this method, or a function to resolve the JNI function,
     // or the profiling data for non-native methods, or an ImtConflictTable, or the
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 25dd1a3..09eae40 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -187,7 +187,7 @@
 template<typename T>
 class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
  public:
-  Atomic<T>() : std::atomic<T>(0) { }
+  Atomic<T>() : std::atomic<T>(T()) { }
 
   explicit Atomic<T>(T value) : std::atomic<T>(value) { }
 
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index a472b67..03dda12 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -233,8 +233,27 @@
     for (int i = kLockLevelCount - 1; i >= 0; --i) {
       if (i != level_) {
         BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
-        // We expect waits to happen while holding the thread list suspend thread lock.
-        if (held_mutex != nullptr) {
+        // We allow the thread to wait even if the user_code_suspension_lock_ is held so long as we
+        // are some thread's resume_cond_ (level_ == kThreadSuspendCountLock). This just means that
+        // gc or some other internal process is suspending the thread while it is trying to suspend
+        // some other thread. So long as the current thread is not being suspended by a
+        // SuspendReason::kForUserCode (which needs the user_code_suspension_lock_ to clear) this is
+        // fine.
+        if (held_mutex == Locks::user_code_suspension_lock_ && level_ == kThreadSuspendCountLock) {
+          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
+          // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
+          // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
+          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
+            return self->GetUserCodeSuspendCount() != 0;
+          };
+          if (is_suspending_for_user_code()) {
+            LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
+                      << "(level " << LockLevel(i) << ") while performing wait on "
+                      << "\"" << name_ << "\" (level " << level_ << ") "
+                      << "with SuspendReason::kForUserCode pending suspensions";
+            bad_mutexes_held = true;
+          }
+        } else if (held_mutex != nullptr) {
           LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                      << "(level " << LockLevel(i) << ") while performing wait on "
                      << "\"" << name_ << "\" (level " << level_ << ")";
@@ -243,7 +262,7 @@
       }
     }
     if (gAborting == 0) {  // Avoid recursive aborts.
-      CHECK(!bad_mutexes_held);
+      CHECK(!bad_mutexes_held) << this;
     }
   }
 }
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 3c51f52..9a73697 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -90,33 +90,129 @@
   return resolved_type.Ptr();
 }
 
-inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) {
-  ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx, image_pointer_size_);
-  if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
-    return nullptr;
+template <bool kThrowOnError, typename ClassGetter>
+inline bool ClassLinker::CheckInvokeClassMismatch(ObjPtr<mirror::DexCache> dex_cache,
+                                                  InvokeType type,
+                                                  ClassGetter class_getter) {
+  switch (type) {
+    case kStatic:
+    case kSuper:
+      break;
+    case kInterface: {
+      // We have to check whether the method id really belongs to an interface (dex static bytecode
+      // constraints A15, A16). Otherwise you must not invoke-interface on it.
+      ObjPtr<mirror::Class> klass = class_getter();
+      if (UNLIKELY(!klass->IsInterface())) {
+        if (kThrowOnError) {
+          ThrowIncompatibleClassChangeError(klass,
+                                            "Found class %s, but interface was expected",
+                                            klass->PrettyDescriptor().c_str());
+        }
+        return true;
+      }
+      break;
+    }
+    case kDirect:
+      if (dex_cache->GetDexFile()->GetVersion() >= DexFile::kDefaultMethodsVersion) {
+        break;
+      }
+      FALLTHROUGH_INTENDED;
+    case kVirtual: {
+      // Similarly, invoke-virtual (and invoke-direct without default methods) must reference
+      // a non-interface class (dex static bytecode constraint A24, A25).
+      ObjPtr<mirror::Class> klass = class_getter();
+      if (UNLIKELY(klass->IsInterface())) {
+        if (kThrowOnError) {
+          ThrowIncompatibleClassChangeError(klass,
+                                            "Found interface %s, but class was expected",
+                                            klass->PrettyDescriptor().c_str());
+        }
+        return true;
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unreachable - invocation type: " << type;
+      UNREACHABLE();
   }
-  return resolved_method;
+  return false;
 }
 
-inline mirror::Class* ClassLinker::ResolveReferencedClassOfMethod(
-    uint32_t method_idx,
-    Handle<mirror::DexCache> dex_cache,
-    Handle<mirror::ClassLoader> class_loader) {
-  // NB: We cannot simply use `GetResolvedMethod(method_idx, ...)->GetDeclaringClass()`. This is
-  // because if we did so than an invoke-super could be incorrectly dispatched in cases where
-  // GetMethodId(method_idx).class_idx_ refers to a non-interface, non-direct-superclass
-  // (super*-class?) of the referrer and the direct superclass of the referrer contains a concrete
-  // implementation of the method. If this class's implementation of the method is copied from an
-  // interface (either miranda, default or conflict) we would incorrectly assume that is what we
-  // want to invoke on, instead of the 'concrete' implementation that the direct superclass
-  // contains.
-  const DexFile* dex_file = dex_cache->GetDexFile();
-  const DexFile::MethodId& method = dex_file->GetMethodId(method_idx);
-  ObjPtr<mirror::Class> resolved_type = dex_cache->GetResolvedType(method.class_idx_);
-  if (UNLIKELY(resolved_type == nullptr)) {
-    resolved_type = ResolveType(*dex_file, method.class_idx_, dex_cache, class_loader);
+template <bool kThrow>
+inline bool ClassLinker::CheckInvokeClassMismatch(ObjPtr<mirror::DexCache> dex_cache,
+                                                  InvokeType type,
+                                                  uint32_t method_idx,
+                                                  ObjPtr<mirror::ClassLoader> class_loader) {
+  return CheckInvokeClassMismatch<kThrow>(
+      dex_cache,
+      type,
+      [this, dex_cache, method_idx, class_loader]() REQUIRES_SHARED(Locks::mutator_lock_) {
+        const DexFile& dex_file = *dex_cache->GetDexFile();
+        const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+        ObjPtr<mirror::Class> klass =
+            LookupResolvedType(dex_file, method_id.class_idx_, dex_cache, class_loader);
+        DCHECK(klass != nullptr);
+        return klass;
+      });
+}
+
+inline ArtMethod* ClassLinker::LookupResolvedMethod(uint32_t method_idx,
+                                                    ObjPtr<mirror::DexCache> dex_cache,
+                                                    ObjPtr<mirror::ClassLoader> class_loader) {
+  PointerSize pointer_size = image_pointer_size_;
+  ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, pointer_size);
+  if (resolved == nullptr) {
+    const DexFile& dex_file = *dex_cache->GetDexFile();
+    const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
+    ObjPtr<mirror::Class> klass = LookupResolvedType(method_id.class_idx_, dex_cache, class_loader);
+    if (klass != nullptr) {
+      if (klass->IsInterface()) {
+        resolved = klass->FindInterfaceMethod(dex_cache, method_idx, pointer_size);
+      } else {
+        resolved = klass->FindClassMethod(dex_cache, method_idx, pointer_size);
+      }
+      if (resolved != nullptr) {
+        dex_cache->SetResolvedMethod(method_idx, resolved, pointer_size);
+      }
+    }
   }
-  return resolved_type.Ptr();
+  return resolved;
+}
+
+template <InvokeType type, ClassLinker::ResolveMode kResolveMode>
+inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) {
+  DCHECK(referrer != nullptr);
+  // Note: The referrer can be a Proxy constructor. In that case, we need to do the
+  // lookup in the context of the original method from where it steals the code.
+  // However, we delay the GetInterfaceMethodIfProxy() until needed.
+  DCHECK(!referrer->IsProxyMethod() || referrer->IsConstructor());
+  ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx, image_pointer_size_);
+  if (resolved_method == nullptr) {
+    return nullptr;
+  }
+  DCHECK(!resolved_method->IsRuntimeMethod());
+  if (kResolveMode == ResolveMode::kCheckICCEAndIAE) {
+    referrer = referrer->GetInterfaceMethodIfProxy(image_pointer_size_);
+    // Check if the invoke type matches the class type.
+    ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
+    ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
+    if (CheckInvokeClassMismatch</* kThrow */ false>(dex_cache, type, method_idx, class_loader)) {
+      return nullptr;
+    }
+    // Check access.
+    ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
+    if (!referring_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
+                                                  resolved_method,
+                                                  dex_cache,
+                                                  method_idx)) {
+      return nullptr;
+    }
+    // Check if the invoke type matches the method type.
+    if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
+      return nullptr;
+    }
+  }
+  return resolved_method;
 }
 
 template <ClassLinker::ResolveMode kResolveMode>
@@ -124,9 +220,16 @@
                                              uint32_t method_idx,
                                              ArtMethod* referrer,
                                              InvokeType type) {
-  ArtMethod* resolved_method = GetResolvedMethod(method_idx, referrer);
+  DCHECK(referrer != nullptr);
+  // Note: The referrer can be a Proxy constructor. In that case, we need to do the
+  // lookup in the context of the original method from where it steals the code.
+  // However, we delay the GetInterfaceMethodIfProxy() until needed.
+  DCHECK(!referrer->IsProxyMethod() || referrer->IsConstructor());
   Thread::PoisonObjectPointersIfDebug();
+  ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx, image_pointer_size_);
+  DCHECK(resolved_method == nullptr || !resolved_method->IsRuntimeMethod());
   if (UNLIKELY(resolved_method == nullptr)) {
+    referrer = referrer->GetInterfaceMethodIfProxy(image_pointer_size_);
     ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
     StackHandleScope<2> hs(self);
     Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer->GetDexCache()));
@@ -138,6 +241,33 @@
                                                   h_class_loader,
                                                   referrer,
                                                   type);
+  } else if (kResolveMode == ResolveMode::kCheckICCEAndIAE) {
+    referrer = referrer->GetInterfaceMethodIfProxy(image_pointer_size_);
+    // Check if the invoke type matches the class type.
+    ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
+    ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
+    if (CheckInvokeClassMismatch</* kThrow */ true>(dex_cache, type, method_idx, class_loader)) {
+      DCHECK(Thread::Current()->IsExceptionPending());
+      return nullptr;
+    }
+    // Check access.
+    ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
+    if (!referring_class->CheckResolvedMethodAccess(resolved_method->GetDeclaringClass(),
+                                                    resolved_method,
+                                                    dex_cache,
+                                                    method_idx,
+                                                    type)) {
+      DCHECK(Thread::Current()->IsExceptionPending());
+      return nullptr;
+    }
+    // Check if the invoke type matches the method type.
+    if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
+      ThrowIncompatibleClassChangeError(type,
+                                        resolved_method->GetInvokeType(),
+                                        resolved_method,
+                                        referrer);
+      return nullptr;
+    }
   }
   // Note: We cannot check here to see whether we added the method to the cache. It
   //       might be an erroneous class, which results in it being hidden from us.
@@ -182,35 +312,6 @@
   return klass.Ptr();
 }
 
-template<ReadBarrierOption kReadBarrierOption>
-ArtMethod* ClassLinker::FindMethodForProxy(ObjPtr<mirror::Class> proxy_class,
-                                           ArtMethod* proxy_method) {
-  DCHECK(proxy_class->IsProxyClass());
-  DCHECK(proxy_method->IsProxyMethod());
-  {
-    Thread* const self = Thread::Current();
-    ReaderMutexLock mu(self, *Locks::dex_lock_);
-    // Locate the dex cache of the original interface/Object
-    for (const DexCacheData& data : dex_caches_) {
-      if (!self->IsJWeakCleared(data.weak_root) &&
-          proxy_method->HasSameDexCacheResolvedMethods(data.resolved_methods,
-                                                       image_pointer_size_)) {
-        ObjPtr<mirror::DexCache> dex_cache =
-            ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-        if (dex_cache != nullptr) {
-          ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
-              proxy_method->GetDexMethodIndex(), image_pointer_size_);
-          CHECK(resolved_method != nullptr);
-          return resolved_method;
-        }
-      }
-    }
-  }
-  LOG(FATAL) << "Didn't find dex cache for " << proxy_class->PrettyClass() << " "
-      << proxy_method->PrettyMethod();
-  UNREACHABLE();
-}
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_CLASS_LINKER_INL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 199fb46..5863d91 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -45,6 +45,7 @@
 #include "base/value_object.h"
 #include "cha.h"
 #include "class_linker-inl.h"
+#include "class_loader_utils.h"
 #include "class_table-inl.h"
 #include "compiler_callbacks.h"
 #include "debugger.h"
@@ -52,14 +53,15 @@
 #include "entrypoints/entrypoint_utils.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "experimental_flags.h"
-#include "gc_root-inl.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap-inl.h"
 #include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap-visit-objects-inl.h"
 #include "gc/heap.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space-inl.h"
+#include "gc_root-inl.h"
 #include "handle_scope-inl.h"
 #include "image-inl.h"
 #include "imt_conflict_table.h"
@@ -74,37 +76,37 @@
 #include "leb128.h"
 #include "linear_alloc.h"
 #include "mirror/call_site.h"
-#include "mirror/class.h"
 #include "mirror/class-inl.h"
+#include "mirror/class.h"
 #include "mirror/class_ext.h"
 #include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
 #include "mirror/dex_cache-inl.h"
+#include "mirror/dex_cache.h"
 #include "mirror/emulated_stack_frame.h"
 #include "mirror/field.h"
 #include "mirror/iftable-inl.h"
 #include "mirror/method.h"
-#include "mirror/method_type.h"
 #include "mirror/method_handle_impl.h"
 #include "mirror/method_handles_lookup.h"
+#include "mirror/method_type.h"
 #include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "mirror/object_array-inl.h"
 #include "mirror/proxy.h"
 #include "mirror/reference-inl.h"
 #include "mirror/stack_trace_element.h"
 #include "mirror/string-inl.h"
 #include "native/dalvik_system_DexFile.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "oat.h"
-#include "oat_file.h"
 #include "oat_file-inl.h"
+#include "oat_file.h"
 #include "oat_file_assistant.h"
 #include "oat_file_manager.h"
 #include "object_lock.h"
 #include "os.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
 #include "thread_list.h"
@@ -148,8 +150,8 @@
     return false;
   }
 
-  ArtMethod* exception_init_method = exception_class->FindDeclaredDirectMethod(
-      "<init>", "(Ljava/lang/String;)V", class_linker->GetImagePointerSize());
+  ArtMethod* exception_init_method = exception_class->FindConstructor(
+      "(Ljava/lang/String;)V", class_linker->GetImagePointerSize());
   return exception_init_method != nullptr;
 }
 
@@ -863,24 +865,6 @@
   bool error;
 };
 
-static void CheckTrampolines(mirror::Object* obj, void* arg) NO_THREAD_SAFETY_ANALYSIS {
-  if (obj->IsClass()) {
-    ObjPtr<mirror::Class> klass = obj->AsClass();
-    TrampolineCheckData* d = reinterpret_cast<TrampolineCheckData*>(arg);
-    for (ArtMethod& m : klass->GetMethods(d->pointer_size)) {
-      const void* entrypoint = m.GetEntryPointFromQuickCompiledCodePtrSize(d->pointer_size);
-      if (entrypoint == d->quick_resolution_trampoline ||
-          entrypoint == d->quick_imt_conflict_trampoline ||
-          entrypoint == d->quick_generic_jni_trampoline ||
-          entrypoint == d->quick_to_interpreter_bridge_trampoline) {
-        d->m = &m;
-        d->error = true;
-        return;
-      }
-    }
-  }
-}
-
 bool ClassLinker::InitFromBootImage(std::string* error_msg) {
   VLOG(startup) << __FUNCTION__ << " entering";
   CHECK(!init_done_);
@@ -945,7 +929,24 @@
         data.quick_generic_jni_trampoline = ith_quick_generic_jni_trampoline;
         data.quick_to_interpreter_bridge_trampoline = ith_quick_to_interpreter_bridge_trampoline;
         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-        spaces[i]->GetLiveBitmap()->Walk(CheckTrampolines, &data);
+        auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+          if (obj->IsClass()) {
+            ObjPtr<mirror::Class> klass = obj->AsClass();
+            for (ArtMethod& m : klass->GetMethods(data.pointer_size)) {
+              const void* entrypoint =
+                  m.GetEntryPointFromQuickCompiledCodePtrSize(data.pointer_size);
+              if (entrypoint == data.quick_resolution_trampoline ||
+                  entrypoint == data.quick_imt_conflict_trampoline ||
+                  entrypoint == data.quick_generic_jni_trampoline ||
+                  entrypoint == data.quick_to_interpreter_bridge_trampoline) {
+                data.m = &m;
+                data.error = true;
+                return;
+              }
+            }
+          }
+        };
+        spaces[i]->GetLiveBitmap()->Walk(visitor);
         if (data.error) {
           ArtMethod* m = data.m;
           LOG(ERROR) << "Found a broken ArtMethod: " << ArtMethod::PrettyMethod(m);
@@ -1113,7 +1114,8 @@
 
   virtual void Visit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
     const bool is_copied = method->IsCopied();
-    ArtMethod** resolved_methods = method->GetDexCacheResolvedMethods(kRuntimePointerSize);
+    mirror::MethodDexCacheType* resolved_methods =
+        method->GetDexCacheResolvedMethods(kRuntimePointerSize);
     if (resolved_methods != nullptr) {
       bool in_image_space = false;
       if (kIsDebugBuild || is_copied) {
@@ -1283,6 +1285,25 @@
   }
 }
 
+template <typename T>
+static void CopyNativeDexCachePairs(std::atomic<mirror::NativeDexCachePair<T>>* src,
+                                    size_t count,
+                                    std::atomic<mirror::NativeDexCachePair<T>>* dst,
+                                    PointerSize pointer_size) {
+  DCHECK_NE(count, 0u);
+  DCHECK(mirror::DexCache::GetNativePairPtrSize(src, 0, pointer_size).object != nullptr ||
+         mirror::DexCache::GetNativePairPtrSize(src, 0, pointer_size).index != 0u);
+  for (size_t i = 0; i < count; ++i) {
+    DCHECK_EQ(mirror::DexCache::GetNativePairPtrSize(dst, i, pointer_size).index, 0u);
+    DCHECK(mirror::DexCache::GetNativePairPtrSize(dst, i, pointer_size).object == nullptr);
+    mirror::NativeDexCachePair<T> source =
+        mirror::DexCache::GetNativePairPtrSize(src, i, pointer_size);
+    if (source.index != 0u || source.object != nullptr) {
+      mirror::DexCache::SetNativePairPtrSize(dst, i, source, pointer_size);
+    }
+  }
+}
+
 // new_class_set is the set of classes that were read from the class table section in the image.
 // If there was no class table section, it is null.
 // Note: using a class here to avoid having to make ClassLinker internals public.
@@ -1362,7 +1383,10 @@
         if (dex_file->NumTypeIds() < num_types) {
           num_types = dex_file->NumTypeIds();
         }
-        const size_t num_methods = dex_file->NumMethodIds();
+        size_t num_methods = mirror::DexCache::kDexCacheMethodCacheSize;
+        if (dex_file->NumMethodIds() < num_methods) {
+          num_methods = dex_file->NumMethodIds();
+        }
         size_t num_fields = mirror::DexCache::kDexCacheFieldCacheSize;
         if (dex_file->NumFieldIds() < num_fields) {
           num_fields = dex_file->NumFieldIds();
@@ -1395,37 +1419,18 @@
           dex_cache->SetResolvedTypes(types);
         }
         if (num_methods != 0u) {
-          ArtMethod** const methods = reinterpret_cast<ArtMethod**>(
-              raw_arrays + layout.MethodsOffset());
-          ArtMethod** const image_resolved_methods = dex_cache->GetResolvedMethods();
-          for (size_t j = 0; kIsDebugBuild && j < num_methods; ++j) {
-            DCHECK(methods[j] == nullptr);
-          }
-          CopyNonNull(image_resolved_methods,
-                      num_methods,
-                      methods,
-                      [] (const ArtMethod* method) {
-                          return method == nullptr;
-                      });
+          mirror::MethodDexCacheType* const image_resolved_methods =
+              dex_cache->GetResolvedMethods();
+          mirror::MethodDexCacheType* const methods =
+              reinterpret_cast<mirror::MethodDexCacheType*>(raw_arrays + layout.MethodsOffset());
+          CopyNativeDexCachePairs(image_resolved_methods, num_methods, methods, image_pointer_size);
           dex_cache->SetResolvedMethods(methods);
         }
         if (num_fields != 0u) {
           mirror::FieldDexCacheType* const image_resolved_fields = dex_cache->GetResolvedFields();
           mirror::FieldDexCacheType* const fields =
               reinterpret_cast<mirror::FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
-          for (size_t j = 0; j < num_fields; ++j) {
-            DCHECK_EQ(mirror::DexCache::GetNativePairPtrSize(fields, j, image_pointer_size).index,
-                      0u);
-            DCHECK(mirror::DexCache::GetNativePairPtrSize(fields, j, image_pointer_size).object ==
-                   nullptr);
-            mirror::DexCache::SetNativePairPtrSize(
-                fields,
-                j,
-                mirror::DexCache::GetNativePairPtrSize(image_resolved_fields,
-                                                       j,
-                                                       image_pointer_size),
-                image_pointer_size);
-          }
+          CopyNativeDexCachePairs(image_resolved_fields, num_fields, fields, image_pointer_size);
           dex_cache->SetResolvedFields(fields);
         }
         if (num_method_types != 0u) {
@@ -1620,59 +1625,55 @@
   static void CheckObjects(gc::Heap* heap, ClassLinker* class_linker)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ImageSanityChecks isc(heap, class_linker);
-    heap->VisitObjects(ImageSanityChecks::SanityCheckObjectsCallback, &isc);
+    auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      DCHECK(obj != nullptr);
+      CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
+      CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
+      if (obj->IsClass()) {
+        auto klass = obj->AsClass();
+        for (ArtField& field : klass->GetIFields()) {
+          CHECK_EQ(field.GetDeclaringClass(), klass);
+        }
+        for (ArtField& field : klass->GetSFields()) {
+          CHECK_EQ(field.GetDeclaringClass(), klass);
+        }
+        const auto pointer_size = isc.pointer_size_;
+        for (auto& m : klass->GetMethods(pointer_size)) {
+          isc.SanityCheckArtMethod(&m, klass);
+        }
+        auto* vtable = klass->GetVTable();
+        if (vtable != nullptr) {
+          isc.SanityCheckArtMethodPointerArray(vtable, nullptr);
+        }
+        if (klass->ShouldHaveImt()) {
+          ImTable* imt = klass->GetImt(pointer_size);
+          for (size_t i = 0; i < ImTable::kSize; ++i) {
+            isc.SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr);
+          }
+        }
+        if (klass->ShouldHaveEmbeddedVTable()) {
+          for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
+            isc.SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr);
+          }
+        }
+        mirror::IfTable* iftable = klass->GetIfTable();
+        for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
+          if (iftable->GetMethodArrayCount(i) > 0) {
+            isc.SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr);
+          }
+        }
+      }
+    };
+    heap->VisitObjects(visitor);
   }
 
-  static void CheckPointerArray(gc::Heap* heap,
-                                ClassLinker* class_linker,
-                                ArtMethod** arr,
-                                size_t size)
+  static void CheckArtMethodDexCacheArray(gc::Heap* heap,
+                                          ClassLinker* class_linker,
+                                          mirror::MethodDexCacheType* arr,
+                                          size_t size)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ImageSanityChecks isc(heap, class_linker);
-    isc.SanityCheckArtMethodPointerArray(arr, size);
-  }
-
-  static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(obj != nullptr);
-    CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
-    CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
-    if (obj->IsClass()) {
-      ImageSanityChecks* isc = reinterpret_cast<ImageSanityChecks*>(arg);
-
-      auto klass = obj->AsClass();
-      for (ArtField& field : klass->GetIFields()) {
-        CHECK_EQ(field.GetDeclaringClass(), klass);
-      }
-      for (ArtField& field : klass->GetSFields()) {
-        CHECK_EQ(field.GetDeclaringClass(), klass);
-      }
-      const auto pointer_size = isc->pointer_size_;
-      for (auto& m : klass->GetMethods(pointer_size)) {
-        isc->SanityCheckArtMethod(&m, klass);
-      }
-      auto* vtable = klass->GetVTable();
-      if (vtable != nullptr) {
-        isc->SanityCheckArtMethodPointerArray(vtable, nullptr);
-      }
-      if (klass->ShouldHaveImt()) {
-        ImTable* imt = klass->GetImt(pointer_size);
-        for (size_t i = 0; i < ImTable::kSize; ++i) {
-          isc->SanityCheckArtMethod(imt->Get(i, pointer_size), nullptr);
-        }
-      }
-      if (klass->ShouldHaveEmbeddedVTable()) {
-        for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
-          isc->SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr);
-        }
-      }
-      mirror::IfTable* iftable = klass->GetIfTable();
-      for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
-        if (iftable->GetMethodArrayCount(i) > 0) {
-          isc->SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr);
-        }
-      }
-    }
+    isc.SanityCheckArtMethodDexCacheArray(arr, size);
   }
 
  private:
@@ -1727,7 +1728,7 @@
     }
   }
 
-  void SanityCheckArtMethodPointerArray(ArtMethod** arr, size_t size)
+  void SanityCheckArtMethodDexCacheArray(mirror::MethodDexCacheType* arr, size_t size)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK_EQ(arr != nullptr, size != 0u);
     if (arr != nullptr) {
@@ -1743,7 +1744,8 @@
       CHECK(contains);
     }
     for (size_t j = 0; j < size; ++j) {
-      ArtMethod* method = mirror::DexCache::GetElementPtrSize(arr, j, pointer_size_);
+      auto pair = mirror::DexCache::GetNativePairPtrSize(arr, j, pointer_size_);
+      ArtMethod* method = pair.object;
       // expected_class == null means we are a dex cache.
       if (method != nullptr) {
         SanityCheckArtMethod(method, nullptr);
@@ -1854,10 +1856,10 @@
       }
     } else {
       if (kSanityCheckObjects) {
-        ImageSanityChecks::CheckPointerArray(heap,
-                                             this,
-                                             dex_cache->GetResolvedMethods(),
-                                             dex_cache->NumResolvedMethods());
+        ImageSanityChecks::CheckArtMethodDexCacheArray(heap,
+                                                       this,
+                                                       dex_cache->GetResolvedMethods(),
+                                                       dex_cache->NumResolvedMethods());
       }
       // Register dex files, keep track of existing ones that are conflicts.
       AppendToBootClassPath(*dex_file.get(), dex_cache);
@@ -2485,27 +2487,6 @@
   return ClassPathEntry(nullptr, nullptr);
 }
 
-// Returns true if the given class loader is either a PathClassLoader or a DexClassLoader.
-// (they both have the same behaviour with respect to class lockup order)
-static bool IsPathOrDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                   Handle<mirror::ClassLoader> class_loader)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  mirror::Class* class_loader_class = class_loader->GetClass();
-  return
-      (class_loader_class ==
-          soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader)) ||
-      (class_loader_class ==
-          soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader));
-}
-
-static bool IsDelegateLastClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
-                                      Handle<mirror::ClassLoader> class_loader)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  mirror::Class* class_loader_class = class_loader->GetClass();
-  return class_loader_class ==
-      soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DelegateLastClassLoader);
-}
-
 bool ClassLinker::FindClassInBaseDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                                 Thread* self,
                                                 const char* descriptor,
@@ -3746,20 +3727,6 @@
   return DexCacheData();
 }
 
-void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
-  Thread* const self = Thread::Current();
-  ReaderMutexLock mu(self, *Locks::dex_lock_);
-  for (const DexCacheData& data : dex_caches_) {
-    if (!self->IsJWeakCleared(data.weak_root)) {
-      ObjPtr<mirror::DexCache> dex_cache = ObjPtr<mirror::DexCache>::DownCast(
-          self->DecodeJObject(data.weak_root));
-      if (dex_cache != nullptr) {
-        dex_cache->Fixup(resolution_method, image_pointer_size_);
-      }
-    }
-  }
-}
-
 mirror::Class* ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
   ObjPtr<mirror::Class> klass =
       AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
@@ -4662,10 +4629,8 @@
 
   // Find the <init>(InvocationHandler)V method. The exact method offset varies depending
   // on which front-end compiler was used to build the libcore DEX files.
-  ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->
-      FindDeclaredDirectMethod("<init>",
-                               "(Ljava/lang/reflect/InvocationHandler;)V",
-                               image_pointer_size_);
+  ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->FindConstructor(
+      "(Ljava/lang/reflect/InvocationHandler;)V", image_pointer_size_);
   DCHECK(proxy_constructor != nullptr)
       << "Could not find <init> method in java.lang.reflect.Proxy";
 
@@ -4677,8 +4642,9 @@
   // code_ too)
   DCHECK(out != nullptr);
   out->CopyFrom(proxy_constructor, image_pointer_size_);
-  // Make this constructor public and fix the class to be our Proxy version
+  // Make this constructor public and fix the class to be our Proxy version.
   // Mark kAccCompileDontBother so that we don't take JIT samples for the method. b/62349349
+  // Note that the compiler calls a ResolveMethod() overload that does not handle a Proxy referrer.
   out->SetAccessFlags((out->GetAccessFlags() & ~kAccProtected) |
                       kAccPublic |
                       kAccCompileDontBother);
@@ -6917,7 +6883,8 @@
       // Check that there are no stale methods are in the dex cache array.
       auto* resolved_methods = klass_->GetDexCache()->GetResolvedMethods();
       for (size_t i = 0, count = klass_->GetDexCache()->NumResolvedMethods(); i < count; ++i) {
-        auto* m = mirror::DexCache::GetElementPtrSize(resolved_methods, i, pointer_size);
+        auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_methods, i, pointer_size);
+        ArtMethod* m = pair.object;
         CHECK(move_table_.find(m) == move_table_.end() ||
               // The original versions of copied methods will still be present so allow those too.
               // Note that if the first check passes this might fail to GetDeclaringClass().
@@ -7393,10 +7360,8 @@
   // defaults. This means we don't need to do any trickery when creating the Miranda methods, since
   // they will already be null. This has the additional benefit that the declarer of a miranda
   // method will actually declare an abstract method.
-  for (size_t i = ifcount; i != 0; ) {
+  for (size_t i = ifcount; i != 0u; ) {
     --i;
-
-    DCHECK_GE(i, 0u);
     DCHECK_LT(i, ifcount);
 
     size_t num_methods = iftable->GetInterface(i)->NumDeclaredVirtualMethods();
@@ -7977,201 +7942,96 @@
                                       ArtMethod* referrer,
                                       InvokeType type) {
   DCHECK(dex_cache != nullptr);
+  DCHECK(referrer == nullptr || !referrer->IsProxyMethod());
   // Check for hit in the dex cache.
-  ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, image_pointer_size_);
+  PointerSize pointer_size = image_pointer_size_;
+  ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, pointer_size);
   Thread::PoisonObjectPointersIfDebug();
-  if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
+  DCHECK(resolved == nullptr || !resolved->IsRuntimeMethod());
+  bool valid_dex_cache_method = resolved != nullptr;
+  if (kResolveMode == ResolveMode::kNoChecks && valid_dex_cache_method) {
+    // We have a valid method from the DexCache and no checks to perform.
     DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex();
-    if (kResolveMode == ClassLinker::kForceICCECheck) {
-      if (resolved->CheckIncompatibleClassChange(type)) {
-        ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer);
-        return nullptr;
-      }
-    }
     return resolved;
   }
-  // Fail, get the declaring class.
   const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
-  ObjPtr<mirror::Class> klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
-  if (klass == nullptr) {
+  ObjPtr<mirror::Class> klass = nullptr;
+  if (valid_dex_cache_method) {
+    // We have a valid method from the DexCache but we need to perform ICCE and IAE checks.
+    DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex();
+    klass = LookupResolvedType(dex_file, method_id.class_idx_, dex_cache.Get(), class_loader.Get());
+    DCHECK(klass != nullptr);
+  } else {
+    // The method was not in the DexCache, resolve the declaring class.
+    klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
+    if (klass == nullptr) {
+      DCHECK(Thread::Current()->IsExceptionPending());
+      return nullptr;
+    }
+  }
+
+  // Check if the invoke type matches the class type.
+  if (kResolveMode == ResolveMode::kCheckICCEAndIAE &&
+      CheckInvokeClassMismatch</* kThrow */ true>(
+          dex_cache.Get(), type, [klass]() { return klass; })) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
-  // Scan using method_idx, this saves string compares but will only hit for matching dex
-  // caches/files.
-  switch (type) {
-    case kDirect:  // Fall-through.
-    case kStatic:
-      resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-      DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
-      break;
-    case kInterface:
-      // We have to check whether the method id really belongs to an interface (dex static bytecode
-      // constraint A15). Otherwise you must not invoke-interface on it.
-      //
-      // This is not symmetric to A12-A14 (direct, static, virtual), as using FindInterfaceMethod
-      // assumes that the given type is an interface, and will check the interface table if the
-      // method isn't declared in the class. So it may find an interface method (usually by name
-      // in the handling below, but we do the constraint check early). In that case,
-      // CheckIncompatibleClassChange will succeed (as it is called on an interface method)
-      // unexpectedly.
-      // Example:
-      //    interface I {
-      //      foo()
-      //    }
-      //    class A implements I {
-      //      ...
-      //    }
-      //    class B extends A {
-      //      ...
-      //    }
-      //    invoke-interface B.foo
-      //      -> FindInterfaceMethod finds I.foo (interface method), not A.foo (miranda method)
-      if (UNLIKELY(!klass->IsInterface())) {
-        ThrowIncompatibleClassChangeError(klass,
-                                          "Found class %s, but interface was expected",
-                                          klass->PrettyDescriptor().c_str());
-        return nullptr;
-      } else {
-        resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-        DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
-      }
-      break;
-    case kSuper:
-      if (klass->IsInterface()) {
-        resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-      } else {
-        resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-      }
-      break;
-    case kVirtual:
-      resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-      break;
-    default:
-      LOG(FATAL) << "Unreachable - invocation type: " << type;
-      UNREACHABLE();
-  }
-  if (resolved == nullptr) {
-    // Search by name, which works across dex files.
-    const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
-    const Signature signature = dex_file.GetMethodSignature(method_id);
-    switch (type) {
-      case kDirect:  // Fall-through.
-      case kStatic:
-        resolved = klass->FindDirectMethod(name, signature, image_pointer_size_);
-        DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
-        break;
-      case kInterface:
-        resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
-        DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
-        break;
-      case kSuper:
-        if (klass->IsInterface()) {
-          resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
-        } else {
-          resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
-        }
-        break;
-      case kVirtual:
-        resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
-        break;
+
+  if (!valid_dex_cache_method) {
+    // Search for the method using dex_cache and method_idx. The Class::Find*Method()
+    // functions can optimize the search if the dex_cache is the same as the DexCache
+    // of the class, with fall-back to name and signature search otherwise.
+    if (klass->IsInterface()) {
+      resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, pointer_size);
+    } else {
+      resolved = klass->FindClassMethod(dex_cache.Get(), method_idx, pointer_size);
+    }
+    DCHECK(resolved == nullptr || resolved->GetDeclaringClassUnchecked() != nullptr);
+    if (resolved != nullptr) {
+      // Be a good citizen and update the dex cache to speed subsequent calls.
+      dex_cache->SetResolvedMethod(method_idx, resolved, pointer_size);
     }
   }
+
+  // Note: We can check for IllegalAccessError only if we have a referrer.
+  if (kResolveMode == ResolveMode::kCheckICCEAndIAE && resolved != nullptr && referrer != nullptr) {
+    ObjPtr<mirror::Class> methods_class = resolved->GetDeclaringClass();
+    ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
+    if (!referring_class->CheckResolvedMethodAccess(methods_class,
+                                                    resolved,
+                                                    dex_cache.Get(),
+                                                    method_idx,
+                                                    type)) {
+      DCHECK(Thread::Current()->IsExceptionPending());
+      return nullptr;
+    }
+  }
+
   // If we found a method, check for incompatible class changes.
-  if (LIKELY(resolved != nullptr && !resolved->CheckIncompatibleClassChange(type))) {
-    // Be a good citizen and update the dex cache to speed subsequent calls.
-    dex_cache->SetResolvedMethod(method_idx, resolved, image_pointer_size_);
+  if (LIKELY(resolved != nullptr) &&
+      LIKELY(kResolveMode == ResolveMode::kNoChecks ||
+             !resolved->CheckIncompatibleClassChange(type))) {
     return resolved;
   } else {
-    // If we had a method, it's an incompatible-class-change error.
+    // If we had a method, or if we can find one with another lookup type,
+    // it's an incompatible-class-change error.
+    if (resolved == nullptr) {
+      if (klass->IsInterface()) {
+        resolved = klass->FindClassMethod(dex_cache.Get(), method_idx, pointer_size);
+      } else {
+        // If there was an interface method with the same signature,
+        // we would have found it also in the "copied" methods.
+        DCHECK(klass->FindInterfaceMethod(dex_cache.Get(), method_idx, pointer_size) == nullptr);
+      }
+    }
     if (resolved != nullptr) {
       ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer);
     } else {
-      // We failed to find the method which means either an access error, an incompatible class
-      // change, or no such method. First try to find the method among direct and virtual methods.
+      // We failed to find the method (using all lookup types), so throw a NoSuchMethodError.
       const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
       const Signature signature = dex_file.GetMethodSignature(method_id);
-      switch (type) {
-        case kDirect:
-        case kStatic:
-          resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
-          // Note: kDirect and kStatic are also mutually exclusive, but in that case we would
-          //       have had a resolved method before, which triggers the "true" branch above.
-          break;
-        case kInterface:
-        case kVirtual:
-        case kSuper:
-          resolved = klass->FindDirectMethod(name, signature, image_pointer_size_);
-          break;
-      }
-
-      // If we found something, check that it can be accessed by the referrer.
-      bool exception_generated = false;
-      if (resolved != nullptr && referrer != nullptr) {
-        ObjPtr<mirror::Class> methods_class = resolved->GetDeclaringClass();
-        ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
-        if (!referring_class->CanAccess(methods_class)) {
-          ThrowIllegalAccessErrorClassForMethodDispatch(referring_class,
-                                                        methods_class,
-                                                        resolved,
-                                                        type);
-          exception_generated = true;
-        } else if (!referring_class->CanAccessMember(methods_class, resolved->GetAccessFlags())) {
-          ThrowIllegalAccessErrorMethod(referring_class, resolved);
-          exception_generated = true;
-        }
-      }
-      if (!exception_generated) {
-        // Otherwise, throw an IncompatibleClassChangeError if we found something, and check
-        // interface methods and throw if we find the method there. If we find nothing, throw a
-        // NoSuchMethodError.
-        switch (type) {
-          case kDirect:
-          case kStatic:
-            if (resolved != nullptr) {
-              ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer);
-            } else {
-              resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
-              if (resolved != nullptr) {
-                ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer);
-              } else {
-                ThrowNoSuchMethodError(type, klass, name, signature);
-              }
-            }
-            break;
-          case kInterface:
-            if (resolved != nullptr) {
-              ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
-            } else {
-              resolved = klass->FindVirtualMethod(name, signature, image_pointer_size_);
-              if (resolved != nullptr) {
-                ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer);
-              } else {
-                ThrowNoSuchMethodError(type, klass, name, signature);
-              }
-            }
-            break;
-          case kSuper:
-            if (resolved != nullptr) {
-              ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
-            } else {
-              ThrowNoSuchMethodError(type, klass, name, signature);
-            }
-            break;
-          case kVirtual:
-            if (resolved != nullptr) {
-              ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer);
-            } else {
-              resolved = klass->FindInterfaceMethod(name, signature, image_pointer_size_);
-              if (resolved != nullptr) {
-                ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer);
-              } else {
-                ThrowNoSuchMethodError(type, klass, name, signature);
-              }
-            }
-            break;
-        }
-      }
+      ThrowNoSuchMethodError(type, klass, name, signature);
     }
     Thread::Current()->AssertPendingException();
     return nullptr;
@@ -8184,27 +8044,23 @@
                                                        Handle<mirror::ClassLoader> class_loader) {
   ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx, image_pointer_size_);
   Thread::PoisonObjectPointersIfDebug();
-  if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
+  if (resolved != nullptr) {
+    DCHECK(!resolved->IsRuntimeMethod());
     DCHECK(resolved->GetDeclaringClassUnchecked() != nullptr) << resolved->GetDexMethodIndex();
     return resolved;
   }
   // Fail, get the declaring class.
   const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
-  ObjPtr<mirror::Class> klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
+  ObjPtr<mirror::Class> klass =
+      ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
   if (klass == nullptr) {
     Thread::Current()->AssertPendingException();
     return nullptr;
   }
   if (klass->IsInterface()) {
-    LOG(FATAL) << "ResolveAmbiguousMethod: unexpected method in interface: "
-               << klass->PrettyClass();
-    return nullptr;
-  }
-
-  // Search both direct and virtual methods
-  resolved = klass->FindDirectMethod(dex_cache.Get(), method_idx, image_pointer_size_);
-  if (resolved == nullptr) {
-    resolved = klass->FindVirtualMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+    resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, image_pointer_size_);
+  } else {
+    resolved = klass->FindClassMethod(dex_cache.Get(), method_idx, image_pointer_size_);
   }
 
   return resolved;
@@ -8519,19 +8375,19 @@
     case DexFile::MethodHandleType::kInvokeStatic: {
       kind = mirror::MethodHandle::Kind::kInvokeStatic;
       receiver_count = 0;
-      target_method = ResolveMethod<kNoICCECheckForCache>(self,
-                                                          method_handle.field_or_method_idx_,
-                                                          referrer,
-                                                          InvokeType::kStatic);
+      target_method = ResolveMethod<ResolveMode::kNoChecks>(self,
+                                                            method_handle.field_or_method_idx_,
+                                                            referrer,
+                                                            InvokeType::kStatic);
       break;
     }
     case DexFile::MethodHandleType::kInvokeInstance: {
       kind = mirror::MethodHandle::Kind::kInvokeVirtual;
       receiver_count = 1;
-      target_method = ResolveMethod<kNoICCECheckForCache>(self,
-                                                          method_handle.field_or_method_idx_,
-                                                          referrer,
-                                                          InvokeType::kVirtual);
+      target_method = ResolveMethod<ResolveMode::kNoChecks>(self,
+                                                            method_handle.field_or_method_idx_,
+                                                            referrer,
+                                                            InvokeType::kVirtual);
       break;
     }
     case DexFile::MethodHandleType::kInvokeConstructor: {
@@ -8539,10 +8395,10 @@
       // are special cased later in this method.
       kind = mirror::MethodHandle::Kind::kInvokeTransform;
       receiver_count = 0;
-      target_method = ResolveMethod<kNoICCECheckForCache>(self,
-                                                          method_handle.field_or_method_idx_,
-                                                          referrer,
-                                                          InvokeType::kDirect);
+      target_method = ResolveMethod<ResolveMode::kNoChecks>(self,
+                                                            method_handle.field_or_method_idx_,
+                                                            referrer,
+                                                            InvokeType::kDirect);
       break;
     }
     case DexFile::MethodHandleType::kInvokeDirect: {
@@ -8565,16 +8421,16 @@
 
       if (target_method->IsPrivate()) {
         kind = mirror::MethodHandle::Kind::kInvokeDirect;
-        target_method = ResolveMethod<kNoICCECheckForCache>(self,
-                                                            method_handle.field_or_method_idx_,
-                                                            referrer,
-                                                            InvokeType::kDirect);
+        target_method = ResolveMethod<ResolveMode::kNoChecks>(self,
+                                                              method_handle.field_or_method_idx_,
+                                                              referrer,
+                                                              InvokeType::kDirect);
       } else {
         kind = mirror::MethodHandle::Kind::kInvokeSuper;
-        target_method = ResolveMethod<kNoICCECheckForCache>(self,
-                                                            method_handle.field_or_method_idx_,
-                                                            referrer,
-                                                            InvokeType::kSuper);
+        target_method = ResolveMethod<ResolveMode::kNoChecks>(self,
+                                                              method_handle.field_or_method_idx_,
+                                                              referrer,
+                                                              InvokeType::kSuper);
         if (UNLIKELY(target_method == nullptr)) {
           break;
         }
@@ -8590,10 +8446,10 @@
     case DexFile::MethodHandleType::kInvokeInterface: {
       kind = mirror::MethodHandle::Kind::kInvokeInterface;
       receiver_count = 1;
-      target_method = ResolveMethod<kNoICCECheckForCache>(self,
-                                                          method_handle.field_or_method_idx_,
-                                                          referrer,
-                                                          InvokeType::kInterface);
+      target_method = ResolveMethod<ResolveMode::kNoChecks>(self,
+                                                            method_handle.field_or_method_idx_,
+                                                            referrer,
+                                                            InvokeType::kInterface);
       break;
     }
   }
@@ -9209,15 +9065,62 @@
                              ifcount * mirror::IfTable::kMax));
 }
 
+ArtMethod* ClassLinker::FindMethodForProxy(ArtMethod* proxy_method) {
+  DCHECK(proxy_method->IsProxyMethod());
+  {
+    uint32_t method_index = proxy_method->GetDexMethodIndex();
+    PointerSize pointer_size = image_pointer_size_;
+    Thread* const self = Thread::Current();
+    ReaderMutexLock mu(self, *Locks::dex_lock_);
+    // Locate the dex cache of the original interface/Object
+    for (const DexCacheData& data : dex_caches_) {
+      if (!self->IsJWeakCleared(data.weak_root) &&
+          proxy_method->HasSameDexCacheResolvedMethods(data.resolved_methods, pointer_size)) {
+        ObjPtr<mirror::DexCache> dex_cache =
+            ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
+        if (dex_cache != nullptr) {
+          // Lookup up the method. Instead of going through LookupResolvedMethod()
+          // and thus LookupResolvedType(), use the ClassTable from the DexCacheData.
+          ArtMethod* resolved_method = dex_cache->GetResolvedMethod(method_index, pointer_size);
+          if (resolved_method == nullptr) {
+            const DexFile::MethodId& method_id = data.dex_file->GetMethodId(method_index);
+            ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(method_id.class_idx_);
+            if (klass == nullptr) {
+              const char* descriptor = data.dex_file->StringByTypeIdx(method_id.class_idx_);
+              klass = data.class_table->Lookup(descriptor, ComputeModifiedUtf8Hash(descriptor));
+              DCHECK(klass != nullptr);
+              dex_cache->SetResolvedType(method_id.class_idx_, klass);
+            }
+            if (klass->IsInterface()) {
+              resolved_method = klass->FindInterfaceMethod(dex_cache, method_index, pointer_size);
+            } else {
+              DCHECK(
+                  klass == WellKnownClasses::ToClass(WellKnownClasses::java_lang_reflect_Proxy) ||
+                  klass == WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object));
+              resolved_method = klass->FindClassMethod(dex_cache, method_index, pointer_size);
+            }
+            CHECK(resolved_method != nullptr);
+            dex_cache->SetResolvedMethod(method_index, resolved_method, pointer_size);
+          }
+          return resolved_method;
+        }
+      }
+    }
+  }
+  // Note: Do not use proxy_method->PrettyMethod() as it can call back here.
+  LOG(FATAL) << "Didn't find dex cache for " << proxy_method->GetDeclaringClass()->PrettyClass();
+  UNREACHABLE();
+}
+
 // Instantiate ResolveMethod.
-template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
+template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
     const DexFile& dex_file,
     uint32_t method_idx,
     Handle<mirror::DexCache> dex_cache,
     Handle<mirror::ClassLoader> class_loader,
     ArtMethod* referrer,
     InvokeType type);
-template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
     const DexFile& dex_file,
     uint32_t method_idx,
     Handle<mirror::DexCache> dex_cache,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 864d37f..4a99c66 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -55,6 +55,9 @@
   class MethodType;
   template<class T> class ObjectArray;
   class StackTraceElement;
+  template <typename T> struct NativeDexCachePair;
+  using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
+  using MethodDexCacheType = std::atomic<MethodDexCachePair>;
 }  // namespace mirror
 
 class ClassTable;
@@ -281,12 +284,18 @@
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
   // Determine whether a dex cache result should be trusted, or an IncompatibleClassChangeError
-  // check should be performed even after a hit.
-  enum ResolveMode {  // private.
-    kNoICCECheckForCache,
-    kForceICCECheck
+  // check and IllegalAccessError check should be performed even after a hit.
+  enum class ResolveMode {  // private.
+    kNoChecks,
+    kCheckICCEAndIAE
   };
 
+  // Look up a previously resolved method with the given index.
+  ArtMethod* LookupResolvedMethod(uint32_t method_idx,
+                                  ObjPtr<mirror::DexCache> dex_cache,
+                                  ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Resolve a method with a given ID from the DexFile, storing the
   // result in DexCache. The ClassLinker and ClassLoader are used as
   // in ResolveType. What is unique is the method type argument which
@@ -302,17 +311,10 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
+  template <InvokeType type, ResolveMode kResolveMode>
   ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // This returns the class referred to by GetMethodId(method_idx).class_idx_. This might be
-  // different then the declaring class of the resolved method due to copied
-  // miranda/default/conflict methods.
-  mirror::Class* ResolveReferencedClassOfMethod(uint32_t method_idx,
-                                                Handle<mirror::DexCache> dex_cache,
-                                                Handle<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
   template <ResolveMode kResolveMode>
   ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -430,9 +432,6 @@
   ClassTable* FindClassTable(Thread* self, ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES(!Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void FixupDexCaches(ArtMethod* resolution_method)
-      REQUIRES(!Locks::dex_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
   LengthPrefixedArray<ArtField>* AllocArtFieldArray(Thread* self,
                                                     LinearAlloc* allocator,
@@ -482,8 +481,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   std::string GetDescriptorForProxy(ObjPtr<mirror::Class> proxy_class)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ArtMethod* FindMethodForProxy(ObjPtr<mirror::Class> proxy_class, ArtMethod* proxy_method)
+  ArtMethod* FindMethodForProxy(ArtMethod* proxy_method)
       REQUIRES(!Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -699,7 +697,7 @@
     // jweak decode that triggers read barriers (and mark them alive unnecessarily and mess with
     // class unloading.)
     const DexFile* dex_file;
-    ArtMethod** resolved_methods;
+    mirror::MethodDexCacheType* resolved_methods;
     // Identify the associated class loader's class table. This is used to make sure that
     // the Java call to native DexCache.setResolvedType() inserts the resolved type in that
     // class table. It is also used to make sure we don't register the same dex cache with
@@ -1205,6 +1203,23 @@
                              bool* new_conflict,
                              ArtMethod** imt) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Check invoke type against the referenced class. Throws IncompatibleClassChangeError
+  // (if `kThrowOnError`) and returns true on mismatch (kInterface on a non-interface class,
+  // kVirtual on interface, kDefault on interface for dex files not supporting default methods),
+  // otherwise returns false.
+  template <bool kThrowOnError, typename ClassGetter>
+  static bool CheckInvokeClassMismatch(ObjPtr<mirror::DexCache> dex_cache,
+                                       InvokeType type,
+                                       ClassGetter class_getter)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  // Helper that feeds the above function with `ClassGetter` doing `LookupResolvedType()`.
+  template <bool kThrow>
+  bool CheckInvokeClassMismatch(ObjPtr<mirror::DexCache> dex_cache,
+                                InvokeType type,
+                                uint32_t method_idx,
+                                ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   std::vector<const DexFile*> boot_class_path_;
   std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
 
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 03cc6c5..39d77f0 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -440,14 +440,6 @@
     }
     TestRootVisitor visitor;
     class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots);
-    // Verify the dex cache has resolution methods in all resolved method slots
-    ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(Thread::Current(), dex);
-    auto* resolved_methods = dex_cache->GetResolvedMethods();
-    for (size_t i = 0, num_methods = dex_cache->NumResolvedMethods(); i != num_methods; ++i) {
-      EXPECT_TRUE(
-          mirror::DexCache::GetElementPtrSize(resolved_methods, i, kRuntimePointerSize) != nullptr)
-          << dex.GetLocation() << " i=" << i;
-    }
   }
 
   class TestRootVisitor : public SingleRootVisitor {
@@ -1121,7 +1113,7 @@
   // Static final primitives that are initialized by a compile-time constant
   // expression resolve to a copy of a constant value from the constant pool.
   // So <clinit> should be null.
-  ArtMethod* clinit = statics->FindDirectMethod("<clinit>", "()V", kRuntimePointerSize);
+  ArtMethod* clinit = statics->FindClassMethod("<clinit>", "()V", kRuntimePointerSize);
   EXPECT_TRUE(clinit == nullptr);
 
   EXPECT_EQ(9U, statics->NumStaticFields());
@@ -1208,24 +1200,30 @@
   EXPECT_TRUE(J->IsAssignableFrom(B.Get()));
 
   const Signature void_sig = I->GetDexCache()->GetDexFile()->CreateSignature("()V");
-  ArtMethod* Ii = I->FindVirtualMethod("i", void_sig, kRuntimePointerSize);
-  ArtMethod* Jj1 = J->FindVirtualMethod("j1", void_sig, kRuntimePointerSize);
-  ArtMethod* Jj2 = J->FindVirtualMethod("j2", void_sig, kRuntimePointerSize);
+  ArtMethod* Ii = I->FindClassMethod("i", void_sig, kRuntimePointerSize);
+  ArtMethod* Jj1 = J->FindClassMethod("j1", void_sig, kRuntimePointerSize);
+  ArtMethod* Jj2 = J->FindClassMethod("j2", void_sig, kRuntimePointerSize);
   ArtMethod* Kj1 = K->FindInterfaceMethod("j1", void_sig, kRuntimePointerSize);
   ArtMethod* Kj2 = K->FindInterfaceMethod("j2", void_sig, kRuntimePointerSize);
   ArtMethod* Kk = K->FindInterfaceMethod("k", void_sig, kRuntimePointerSize);
-  ArtMethod* Ai = A->FindVirtualMethod("i", void_sig, kRuntimePointerSize);
-  ArtMethod* Aj1 = A->FindVirtualMethod("j1", void_sig, kRuntimePointerSize);
-  ArtMethod* Aj2 = A->FindVirtualMethod("j2", void_sig, kRuntimePointerSize);
+  ArtMethod* Ai = A->FindClassMethod("i", void_sig, kRuntimePointerSize);
+  ArtMethod* Aj1 = A->FindClassMethod("j1", void_sig, kRuntimePointerSize);
+  ArtMethod* Aj2 = A->FindClassMethod("j2", void_sig, kRuntimePointerSize);
   ASSERT_TRUE(Ii != nullptr);
+  ASSERT_FALSE(Ii->IsDirect());
   ASSERT_TRUE(Jj1 != nullptr);
+  ASSERT_FALSE(Jj1->IsDirect());
   ASSERT_TRUE(Jj2 != nullptr);
+  ASSERT_FALSE(Jj2->IsDirect());
   ASSERT_TRUE(Kj1 != nullptr);
   ASSERT_TRUE(Kj2 != nullptr);
   ASSERT_TRUE(Kk != nullptr);
   ASSERT_TRUE(Ai != nullptr);
+  ASSERT_FALSE(Ai->IsDirect());
   ASSERT_TRUE(Aj1 != nullptr);
+  ASSERT_FALSE(Aj1->IsDirect());
   ASSERT_TRUE(Aj2 != nullptr);
+  ASSERT_FALSE(Aj2->IsDirect());
   EXPECT_NE(Ii, Ai);
   EXPECT_NE(Jj1, Aj1);
   EXPECT_NE(Jj2, Aj2);
@@ -1266,7 +1264,10 @@
       hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
   mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LStaticsFromCode;", class_loader);
   ArtMethod* clinit = klass->FindClassInitializer(kRuntimePointerSize);
-  ArtMethod* getS0 = klass->FindDirectMethod("getS0", "()Ljava/lang/Object;", kRuntimePointerSize);
+  ArtMethod* getS0 =
+      klass->FindClassMethod("getS0", "()Ljava/lang/Object;", kRuntimePointerSize);
+  ASSERT_TRUE(getS0 != nullptr);
+  ASSERT_TRUE(getS0->IsStatic());
   const DexFile::TypeId* type_id = dex_file->FindTypeId("LStaticsFromCode;");
   ASSERT_TRUE(type_id != nullptr);
   dex::TypeIndex type_idx = dex_file->GetIndexForTypeId(*type_id);
@@ -1489,9 +1490,12 @@
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LMethodTypes;", class_loader)));
   class_linker_->EnsureInitialized(soa.Self(), method_types, true, true);
 
-  ArtMethod* method1 = method_types->FindVirtualMethod("method1",
-                                                       "(Ljava/lang/String;)Ljava/lang/String;",
-                                                       kRuntimePointerSize);
+  ArtMethod* method1 = method_types->FindClassMethod(
+      "method1",
+      "(Ljava/lang/String;)Ljava/lang/String;",
+      kRuntimePointerSize);
+  ASSERT_TRUE(method1 != nullptr);
+  ASSERT_FALSE(method1->IsDirect());
 
   const DexFile& dex_file = *(method1->GetDexFile());
   Handle<mirror::DexCache> dex_cache = hs.NewHandle(
@@ -1522,10 +1526,12 @@
 
   // Resolve the MethodType associated with a different method signature
   // and assert it's different.
-  ArtMethod* method2 = method_types->FindVirtualMethod(
+  ArtMethod* method2 = method_types->FindClassMethod(
       "method2",
       "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;",
       kRuntimePointerSize);
+  ASSERT_TRUE(method2 != nullptr);
+  ASSERT_FALSE(method2->IsDirect());
   const DexFile::MethodId& method2_id = dex_file.GetMethodId(method2->GetDexMethodIndex());
   Handle<mirror::MethodType> method2_type = hs.NewHandle(
       class_linker_->ResolveMethodType(dex_file, method2_id.proto_idx_, dex_cache, class_loader));
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 2bed1d5..eab3b86 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -16,14 +16,20 @@
 
 #include "class_loader_context.h"
 
+#include "art_field-inl.h"
 #include "base/dchecked_vector.h"
 #include "base/stl_util.h"
 #include "class_linker.h"
+#include "class_loader_utils.h"
 #include "dex_file.h"
+#include "handle_scope-inl.h"
+#include "jni_internal.h"
 #include "oat_file_assistant.h"
+#include "obj_ptr-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
+#include "well_known_classes.h"
 
 namespace art {
 
@@ -38,7 +44,29 @@
 ClassLoaderContext::ClassLoaderContext()
     : special_shared_library_(false),
       dex_files_open_attempted_(false),
-      dex_files_open_result_(false) {}
+      dex_files_open_result_(false),
+      owns_the_dex_files_(true) {}
+
+ClassLoaderContext::ClassLoaderContext(bool owns_the_dex_files)
+    : special_shared_library_(false),
+      dex_files_open_attempted_(true),
+      dex_files_open_result_(true),
+      owns_the_dex_files_(owns_the_dex_files) {}
+
+ClassLoaderContext::~ClassLoaderContext() {
+  if (!owns_the_dex_files_) {
+    // If the context does not own the dex/oat files release the unique pointers to
+    // make sure we do not de-allocate them.
+    for (ClassLoaderInfo& info : class_loader_chain_) {
+      for (std::unique_ptr<OatFile>& oat_file : info.opened_oat_files) {
+        oat_file.release();
+      }
+      for (std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
+        dex_file.release();
+      }
+    }
+  }
+}
 
 std::unique_ptr<ClassLoaderContext> ClassLoaderContext::Create(const std::string& spec) {
   std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext());
@@ -277,24 +305,41 @@
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
 
-  std::vector<const DexFile*> class_path_files;
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
 
-  // TODO(calin): Transition period: assume we only have a classloader until
-  // the oat file assistant implements the full class loader check.
-  if (!class_loader_chain_.empty()) {
-    CHECK_EQ(1u, class_loader_chain_.size());
-    CHECK_EQ(kPathClassLoader, class_loader_chain_[0].type);
-    class_path_files = MakeNonOwningPointerVector(class_loader_chain_[0].opened_dex_files);
+  if (class_loader_chain_.empty()) {
+    return class_linker->CreatePathClassLoader(self, compilation_sources);
   }
 
-  // Classpath: first the class-path given; then the dex files we'll compile.
-  // Thus we'll resolve the class-path first.
-  class_path_files.insert(class_path_files.end(),
-                          compilation_sources.begin(),
-                          compilation_sources.end());
+  // Create the class loaders starting from the top most parent (the one on the last position
+  // in the chain) but omit the first class loader which will contain the compilation_sources and
+  // needs special handling.
+  jobject current_parent = nullptr;  // the starting parent is the BootClassLoader.
+  for (size_t i = class_loader_chain_.size() - 1; i > 0; i--) {
+    std::vector<const DexFile*> class_path_files = MakeNonOwningPointerVector(
+        class_loader_chain_[i].opened_dex_files);
+    current_parent = class_linker->CreateWellKnownClassLoader(
+        self,
+        class_path_files,
+        GetClassLoaderClass(class_loader_chain_[i].type),
+        current_parent);
+  }
 
-  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-  return class_linker->CreatePathClassLoader(self, class_path_files);
+  // We set up all the parents. Move on to create the first class loader.
+  // Its classpath comes first, followed by compilation sources. This ensures that whenever
+  // we need to resolve classes from it the classpath elements come first.
+
+  std::vector<const DexFile*> first_class_loader_classpath = MakeNonOwningPointerVector(
+      class_loader_chain_[0].opened_dex_files);
+  first_class_loader_classpath.insert(first_class_loader_classpath.end(),
+                                    compilation_sources.begin(),
+                                    compilation_sources.end());
+
+  return class_linker->CreateWellKnownClassLoader(
+      self,
+      first_class_loader_classpath,
+      GetClassLoaderClass(class_loader_chain_[0].type),
+      current_parent);
 }
 
 std::vector<const DexFile*> ClassLoaderContext::FlattenOpenedDexFiles() const {
@@ -325,36 +370,285 @@
       << "attempt=" << dex_files_open_attempted_ << ", result=" << dex_files_open_result_;
 }
 
-bool ClassLoaderContext::DecodePathClassLoaderContextFromOatFileKey(
-    const std::string& context_spec,
-    std::vector<std::string>* out_classpath,
-    std::vector<uint32_t>* out_checksums,
-    bool* out_is_special_shared_library) {
-  ClassLoaderContext context;
-  if (!context.Parse(context_spec, /*parse_checksums*/ true)) {
-    LOG(ERROR) << "Invalid class loader context: " << context_spec;
+// Collects the dex files from the give Java dex_file object. Only the dex files with
+// at least 1 class are collected. If a null java_dex_file is passed this method does nothing.
+static bool CollectDexFilesFromJavaDexFile(ObjPtr<mirror::Object> java_dex_file,
+                                           ArtField* const cookie_field,
+                                           std::vector<const DexFile*>* out_dex_files)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (java_dex_file == nullptr) {
+    return true;
+  }
+  // On the Java side, the dex files are stored in the cookie field.
+  mirror::LongArray* long_array = cookie_field->GetObject(java_dex_file)->AsLongArray();
+  if (long_array == nullptr) {
+    // This should never happen so log a warning.
+    LOG(ERROR) << "Unexpected null cookie";
+    return false;
+  }
+  int32_t long_array_size = long_array->GetLength();
+  // Index 0 from the long array stores the oat file. The dex files start at index 1.
+  for (int32_t j = 1; j < long_array_size; ++j) {
+    const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
+        long_array->GetWithoutChecks(j)));
+    if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
+      // TODO(calin): It's unclear why the dex files with no classes are skipped here and when
+      // cp_dex_file can be null.
+      out_dex_files->push_back(cp_dex_file);
+    }
+  }
+  return true;
+}
+
+// Collects all the dex files loaded by the given class loader.
+// Returns true for success or false if an unexpected state is discovered (e.g. a null dex cookie,
+// a null list of dex elements or a null dex element).
+static bool CollectDexFilesFromSupportedClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                                    Handle<mirror::ClassLoader> class_loader,
+                                                    std::vector<const DexFile*>* out_dex_files)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+  CHECK(IsPathOrDexClassLoader(soa, class_loader) || IsDelegateLastClassLoader(soa, class_loader));
+
+  // All supported class loaders inherit from BaseDexClassLoader.
+  // We need to get the DexPathList and loop through it.
+  ArtField* const cookie_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* const dex_file_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+  ObjPtr<mirror::Object> dex_path_list =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
+          GetObject(class_loader.Get());
+  CHECK(cookie_field != nullptr);
+  CHECK(dex_file_field != nullptr);
+  if (dex_path_list == nullptr) {
+    // This may be null if the current class loader is under construction and it does not
+    // have its fields setup yet.
+    return true;
+  }
+  // DexPathList has an array dexElements of Elements[] which each contain a dex file.
+  ObjPtr<mirror::Object> dex_elements_obj =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
+          GetObject(dex_path_list);
+  // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
+  // at the mCookie which is a DexFile vector.
+  if (dex_elements_obj == nullptr) {
+    // TODO(calin): It's unclear if we should just assert here. For now be prepared for the worse
+    // and assume we have no elements.
+    return true;
+  } else {
+    StackHandleScope<1> hs(soa.Self());
+    Handle<mirror::ObjectArray<mirror::Object>> dex_elements(
+        hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>()));
+    for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+      mirror::Object* element = dex_elements->GetWithoutChecks(i);
+      if (element == nullptr) {
+        // Should never happen, log an error and break.
+        // TODO(calin): It's unclear if we should just assert here.
+        // This code was propagated to oat_file_manager from the class linker where it would
+        // throw a NPE. For now, return false which will mark this class loader as unsupported.
+        LOG(ERROR) << "Unexpected null in the dex element list";
+        return false;
+      }
+      ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
+      if (!CollectDexFilesFromJavaDexFile(dex_file, cookie_field, out_dex_files)) {
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+static bool GetDexFilesFromDexElementsArray(
+    ScopedObjectAccessAlreadyRunnable& soa,
+    Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
+    std::vector<const DexFile*>* out_dex_files) REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(dex_elements != nullptr);
+
+  ArtField* const cookie_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
+  ArtField* const dex_file_field =
+      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
+  ObjPtr<mirror::Class> const element_class = soa.Decode<mirror::Class>(
+      WellKnownClasses::dalvik_system_DexPathList__Element);
+  ObjPtr<mirror::Class> const dexfile_class = soa.Decode<mirror::Class>(
+      WellKnownClasses::dalvik_system_DexFile);
+
+  for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
+    mirror::Object* element = dex_elements->GetWithoutChecks(i);
+    // We can hit a null element here because this is invoked with a partially filled dex_elements
+    // array from DexPathList. DexPathList will open each dex sequentially, each time passing the
+    // list of dex files which were opened before.
+    if (element == nullptr) {
+      continue;
+    }
+
+    // We support this being dalvik.system.DexPathList$Element and dalvik.system.DexFile.
+    // TODO(calin): Code caried over oat_file_manager: supporting both classes seem to be
+    // a historical glitch. All the java code opens dex files using an array of Elements.
+    ObjPtr<mirror::Object> dex_file;
+    if (element_class == element->GetClass()) {
+      dex_file = dex_file_field->GetObject(element);
+    } else if (dexfile_class == element->GetClass()) {
+      dex_file = element;
+    } else {
+      LOG(ERROR) << "Unsupported element in dex_elements: "
+                 << mirror::Class::PrettyClass(element->GetClass());
+      return false;
+    }
+
+    if (!CollectDexFilesFromJavaDexFile(dex_file, cookie_field, out_dex_files)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Adds the `class_loader` info to the `context`.
+// The dex file present in `dex_elements` array (if not null) will be added at the end of
+// the classpath.
+// This method is recursive (w.r.t. the class loader parent) and will stop once it reaches the
+// BootClassLoader. Note that the class loader chain is expected to be short.
+bool ClassLoaderContext::AddInfoToContextFromClassLoader(
+      ScopedObjectAccessAlreadyRunnable& soa,
+      Handle<mirror::ClassLoader> class_loader,
+      Handle<mirror::ObjectArray<mirror::Object>> dex_elements)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (ClassLinker::IsBootClassLoader(soa, class_loader.Get())) {
+    // Nothing to do for the boot class loader as we don't add its dex files to the context.
+    return true;
+  }
+
+  ClassLoaderContext::ClassLoaderType type;
+  if (IsPathOrDexClassLoader(soa, class_loader)) {
+    type = kPathClassLoader;
+  } else if (IsDelegateLastClassLoader(soa, class_loader)) {
+    type = kDelegateLastClassLoader;
+  } else {
+    LOG(WARNING) << "Unsupported class loader";
     return false;
   }
 
-  *out_is_special_shared_library = context.special_shared_library_;
-  if (context.special_shared_library_) {
+  // Inspect the class loader for its dex files.
+  std::vector<const DexFile*> dex_files_loaded;
+  CollectDexFilesFromSupportedClassLoader(soa, class_loader, &dex_files_loaded);
+
+  // If we have a dex_elements array extract its dex elements now.
+  // This is used in two situations:
+  //   1) when a new ClassLoader is created DexPathList will open each dex file sequentially
+  //      passing the list of already open dex files each time. This ensures that we see the
+  //      correct context even if the ClassLoader under construction is not fully build.
+  //   2) when apk splits are loaded on the fly, the framework will load their dex files by
+  //      appending them to the current class loader. When the new code paths are loaded in
+  //      BaseDexClassLoader, the paths already present in the class loader will be passed
+  //      in the dex_elements array.
+  if (dex_elements != nullptr) {
+    GetDexFilesFromDexElementsArray(soa, dex_elements, &dex_files_loaded);
+  }
+
+  class_loader_chain_.push_back(ClassLoaderContext::ClassLoaderInfo(type));
+  ClassLoaderInfo& info = class_loader_chain_.back();
+  for (const DexFile* dex_file : dex_files_loaded) {
+    info.classpath.push_back(dex_file->GetLocation());
+    info.checksums.push_back(dex_file->GetLocationChecksum());
+    info.opened_dex_files.emplace_back(dex_file);
+  }
+
+  // We created the ClassLoaderInfo for the current loader. Move on to its parent.
+
+  StackHandleScope<1> hs(Thread::Current());
+  Handle<mirror::ClassLoader> parent = hs.NewHandle(class_loader->GetParent());
+
+  // Note that dex_elements array is null here. The elements are considered to be part of the
+  // current class loader and are not passed to the parents.
+  ScopedNullHandle<mirror::ObjectArray<mirror::Object>> null_dex_elements;
+  return AddInfoToContextFromClassLoader(soa, parent, null_dex_elements);
+}
+
+std::unique_ptr<ClassLoaderContext> ClassLoaderContext::CreateContextForClassLoader(
+    jobject class_loader,
+    jobjectArray dex_elements) {
+  CHECK(class_loader != nullptr);
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<2> hs(soa.Self());
+  Handle<mirror::ClassLoader> h_class_loader =
+      hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader));
+  Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
+      hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
+
+  std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files*/ false));
+  if (result->AddInfoToContextFromClassLoader(soa, h_class_loader, h_dex_elements)) {
+    return result;
+  } else {
+    return nullptr;
+  }
+}
+
+bool ClassLoaderContext::VerifyClassLoaderContextMatch(const std::string& context_spec) {
+  ClassLoaderContext expected_context;
+  if (!expected_context.Parse(context_spec, /*parse_checksums*/ true)) {
+    LOG(WARNING) << "Invalid class loader context: " << context_spec;
+    return false;
+  }
+
+  if (expected_context.special_shared_library_) {
     return true;
   }
 
-  if (context.class_loader_chain_.empty()) {
-    return true;
+  if (expected_context.class_loader_chain_.size() != class_loader_chain_.size()) {
+    LOG(WARNING) << "ClassLoaderContext size mismatch. expected="
+        << expected_context.class_loader_chain_.size()
+        << ", actual=" << class_loader_chain_.size();
+    return false;
   }
 
-  // TODO(calin): assert that we only have a PathClassLoader until the logic for
-  // checking the context covers all case.
-  CHECK_EQ(1u, context.class_loader_chain_.size());
-  const ClassLoaderInfo& info = context.class_loader_chain_[0];
-  CHECK_EQ(kPathClassLoader, info.type);
-  DCHECK_EQ(info.classpath.size(), info.checksums.size());
+  for (size_t i = 0; i < class_loader_chain_.size(); i++) {
+    const ClassLoaderInfo& info = class_loader_chain_[i];
+    const ClassLoaderInfo& expected_info = expected_context.class_loader_chain_[i];
+    if (info.type != expected_info.type) {
+      LOG(WARNING) << "ClassLoaderContext type mismatch for position " << i
+          << ". expected=" << GetClassLoaderTypeName(expected_info.type)
+          << ", found=" << GetClassLoaderTypeName(info.type);
+      return false;
+    }
+    if (info.classpath.size() != expected_info.classpath.size()) {
+      LOG(WARNING) << "ClassLoaderContext classpath size mismatch for position " << i
+            << ". expected=" << expected_info.classpath.size()
+            << ", found=" << info.classpath.size();
+      return false;
+    }
 
-  *out_classpath = info.classpath;
-  *out_checksums = info.checksums;
+    DCHECK_EQ(info.classpath.size(), info.checksums.size());
+    DCHECK_EQ(expected_info.classpath.size(), expected_info.checksums.size());
+
+    for (size_t k = 0; k < info.classpath.size(); k++) {
+      if (info.classpath[k] != expected_info.classpath[k]) {
+        LOG(WARNING) << "ClassLoaderContext classpath element mismatch for position " << i
+            << ". expected=" << expected_info.classpath[k]
+            << ", found=" << info.classpath[k];
+        return false;
+      }
+      if (info.checksums[k] != expected_info.checksums[k]) {
+        LOG(WARNING) << "ClassLoaderContext classpath element checksum mismatch for position " << i
+            << ". expected=" << expected_info.checksums[k]
+            << ", found=" << info.checksums[k];
+        return false;
+      }
+    }
+  }
   return true;
 }
+
+jclass ClassLoaderContext::GetClassLoaderClass(ClassLoaderType type) {
+  switch (type) {
+    case kPathClassLoader: return WellKnownClasses::dalvik_system_PathClassLoader;
+    case kDelegateLastClassLoader: return WellKnownClasses::dalvik_system_DelegateLastClassLoader;
+    case kInvalidClassLoader: break;  // will fail after the switch.
+  }
+  LOG(FATAL) << "Invalid class loader type " << type;
+  UNREACHABLE();
+}
+
 }  // namespace art
 
diff --git a/runtime/class_loader_context.h b/runtime/class_loader_context.h
index 9727a3b..37dd02b 100644
--- a/runtime/class_loader_context.h
+++ b/runtime/class_loader_context.h
@@ -22,7 +22,9 @@
 
 #include "arch/instruction_set.h"
 #include "base/dchecked_vector.h"
-#include "jni.h"
+#include "handle_scope.h"
+#include "mirror/class_loader.h"
+#include "scoped_thread_state_change.h"
 
 namespace art {
 
@@ -35,6 +37,8 @@
   // Creates an empty context (with no class loaders).
   ClassLoaderContext();
 
+  ~ClassLoaderContext();
+
   // Opens requested class path files and appends them to ClassLoaderInfo::opened_dex_files.
   // If the dex files have been stripped, the method opens them from their oat files which are added
   // to ClassLoaderInfo::opened_oat_files. The 'classpath_dir' argument specifies the directory to
@@ -56,11 +60,22 @@
   bool RemoveLocationsFromClassPaths(const dchecked_vector<std::string>& compilation_sources);
 
   // Creates the entire class loader hierarchy according to the current context.
-  // The compilation sources are appended to the classpath of the top class loader
-  // (i.e the class loader whose parent is the BootClassLoader).
-  // Should only be called if OpenDexFiles() returned true.
+  // Returns the first class loader from the chain.
+  //
+  // For example: if the context was built from the spec
+  // "ClassLoaderType1[ClasspathElem1:ClasspathElem2...];ClassLoaderType2[...]..."
+  // the method returns the class loader corresponding to ClassLoader1. The parent chain will be
+  // ClassLoader1 --> ClassLoader2 --> ... --> BootClassLoader.
+  //
+  // The compilation sources are appended to the classpath of the first class loader (in the above
+  // example ClassLoader1).
+  //
   // If the context is empty, this method only creates a single PathClassLoader with the
   // given compilation_sources.
+  //
+  // Notes:
+  //   1) the objects are not completely set up. Do not use this outside of tests and the compiler.
+  //   2) should only be called before the first call to OpenDexFiles().
   jobject CreateClassLoader(const std::vector<const DexFile*>& compilation_sources) const;
 
   // Encodes the context as a string suitable to be added in oat files.
@@ -74,24 +89,35 @@
   // Should only be called if OpenDexFiles() returned true.
   std::vector<const DexFile*> FlattenOpenedDexFiles() const;
 
+  // Verifies that the current context is identical to the context encoded as `context_spec`.
+  // Identical means:
+  //    - the number and type of the class loaders from the chain matches
+  //    - the class loader from the same position have the same classpath
+  //      (the order and checksum of the dex files matches)
+  bool VerifyClassLoaderContextMatch(const std::string& context_spec);
+
   // Creates the class loader context from the given string.
   // The format: ClassLoaderType1[ClasspathElem1:ClasspathElem2...];ClassLoaderType2[...]...
   // ClassLoaderType is either "PCL" (PathClassLoader) or "DLC" (DelegateLastClassLoader).
   // ClasspathElem is the path of dex/jar/apk file.
+  //
+  // The spec represents a class loader chain with the natural interpretation:
+  // ClassLoader1 has ClassLoader2 as parent which has ClassLoader3 as a parent and so on.
+  // The last class loader is assumed to have the BootClassLoader as a parent.
+  //
   // Note that we allowed class loaders with an empty class path in order to support a custom
   // class loader for the source dex files.
   static std::unique_ptr<ClassLoaderContext> Create(const std::string& spec);
 
-  // Decodes the class loader context stored in the oat file with EncodeContextForOatFile.
-  // Returns true if the format matches, or false otherwise. If the return is true, the out
-  // arguments will contain the classpath dex files, their checksums and whether or not the
-  // context is a special shared library.
-  // The method asserts that the context is made out of only one PathClassLoader.
-  static bool DecodePathClassLoaderContextFromOatFileKey(
-      const std::string& context_spec,
-      std::vector<std::string>* out_classpath,
-      std::vector<uint32_t>* out_checksums,
-      bool* out_is_special_shared_library);
+  // Creates a context for the given class_loader and dex_elements.
+  // The method will walk the parent chain starting from `class_loader` and add their dex files
+  // to the current class loaders chain. The `dex_elements` will be added at the end of the
+  // classpath belonging to the `class_loader` argument.
+  // The ownership of the opened dex files will be retained by the given `class_loader`.
+  // If there are errors in processing the class loader chain (e.g. unsupported elements) the
+  // method returns null.
+  static std::unique_ptr<ClassLoaderContext> CreateContextForClassLoader(jobject class_loader,
+                                                                         jobjectArray dex_elements);
 
  private:
   enum ClassLoaderType {
@@ -118,6 +144,13 @@
     explicit ClassLoaderInfo(ClassLoaderType cl_type) : type(cl_type) {}
   };
 
+  // Constructs an empty context.
+  // `owns_the_dex_files` specifies whether or not the context will own the opened dex files
+  // present in the class loader chain. If `owns_the_dex_files` is true then OpenDexFiles cannot
+  // be called on this context (dex_files_open_attempted_ and dex_files_open_result_ will be set
+  // to true as well)
+  explicit ClassLoaderContext(bool owns_the_dex_files);
+
   // Reads the class loader spec in place and returns true if the spec is valid and the
   // compilation context was constructed.
   bool Parse(const std::string& spec, bool parse_checksums = false);
@@ -129,6 +162,19 @@
                             ClassLoaderType class_loader_type,
                             bool parse_checksums = false);
 
+  // CHECKs that the dex files were opened (OpenDexFiles was called and set dex_files_open_result_
+  // to true). Aborts if not. The `calling_method` is used in the log message to identify the source
+  // of the call.
+  void CheckDexFilesOpened(const std::string& calling_method) const;
+
+  // Adds the `class_loader` info to the context.
+  // The dex file present in `dex_elements` array (if not null) will be added at the end of
+  // the classpath.
+  bool AddInfoToContextFromClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                       Handle<mirror::ClassLoader> class_loader,
+                                       Handle<mirror::ObjectArray<mirror::Object>> dex_elements)
+  REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Extracts the class loader type from the given spec.
   // Return ClassLoaderContext::kInvalidClassLoader if the class loader type is not
   // recognized.
@@ -138,8 +184,8 @@
   // The returned format can be used when parsing a context spec.
   static const char* GetClassLoaderTypeName(ClassLoaderType type);
 
-  // CHECKs that the dex files were opened (OpenDexFiles was called). Aborts if not.
-  void CheckDexFilesOpened(const std::string& calling_method) const;
+  // Returns the WellKnownClass for the given class loader type.
+  static jclass GetClassLoaderClass(ClassLoaderType type);
 
   // The class loader chain represented as a vector.
+  // The parent of class_loader_chain_[i] is class_loader_chain_[i + 1].
@@ -158,6 +204,13 @@
   // The result of the last OpenDexFiles() operation.
   bool dex_files_open_result_;
 
+  // Whether or not the context owns the opened dex and oat files.
+  // If true, the opened dex files will be de-allocated when the context is destructed.
+  // If false, the objects will continue to be alive.
+  // Note that for convenience the opened dex/oat files are stored as unique pointers
+  // which will release their ownership in the destructor based on this flag.
+  const bool owns_the_dex_files_;
+
   friend class ClassLoaderContextTest;
 
   DISALLOW_COPY_AND_ASSIGN(ClassLoaderContext);
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 03eb0e4..a87552d 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -45,18 +45,32 @@
 
   void VerifyClassLoaderPCL(ClassLoaderContext* context,
                             size_t index,
-                            std::string classpath) {
+                            const std::string& classpath) {
     VerifyClassLoaderInfo(
         context, index, ClassLoaderContext::kPathClassLoader, classpath);
   }
 
   void VerifyClassLoaderDLC(ClassLoaderContext* context,
                             size_t index,
-                            std::string classpath) {
+                            const std::string& classpath) {
     VerifyClassLoaderInfo(
         context, index, ClassLoaderContext::kDelegateLastClassLoader, classpath);
   }
 
+  void VerifyClassLoaderPCLFromTestDex(ClassLoaderContext* context,
+                                       size_t index,
+                                       const std::string& test_name) {
+    VerifyClassLoaderFromTestDex(
+        context, index, ClassLoaderContext::kPathClassLoader, test_name);
+  }
+
+  void VerifyClassLoaderDLCFromTestDex(ClassLoaderContext* context,
+                                       size_t index,
+                                       const std::string& test_name) {
+    VerifyClassLoaderFromTestDex(
+        context, index, ClassLoaderContext::kDelegateLastClassLoader, test_name);
+  }
+
   void VerifyOpenDexFiles(
       ClassLoaderContext* context,
       size_t index,
@@ -83,11 +97,49 @@
     }
   }
 
+  std::unique_ptr<ClassLoaderContext> CreateContextForClassLoader(jobject class_loader) {
+    return ClassLoaderContext::CreateContextForClassLoader(class_loader, nullptr);
+  }
+
+  std::unique_ptr<ClassLoaderContext> ParseContextWithChecksums(const std::string& context_spec) {
+    std::unique_ptr<ClassLoaderContext> context(new ClassLoaderContext());
+    if (!context->Parse(context_spec, /*parse_checksums*/ true)) {
+      return nullptr;
+    }
+    return context;
+  }
+
+  void VerifyContextForClassLoader(ClassLoaderContext* context) {
+    ASSERT_TRUE(context != nullptr);
+    ASSERT_TRUE(context->dex_files_open_attempted_);
+    ASSERT_TRUE(context->dex_files_open_result_);
+    ASSERT_FALSE(context->owns_the_dex_files_);
+    ASSERT_FALSE(context->special_shared_library_);
+  }
+
+  void VerifyClassLoaderDexFiles(ScopedObjectAccess& soa,
+                                 Handle<mirror::ClassLoader> class_loader,
+                                 jclass type,
+                                 std::vector<const DexFile*>& expected_dex_files)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    ASSERT_TRUE(class_loader->GetClass() == soa.Decode<mirror::Class>(type));
+
+    std::vector<const DexFile*> class_loader_dex_files = GetDexFiles(soa, class_loader);
+    ASSERT_EQ(expected_dex_files.size(), class_loader_dex_files.size());
+
+    for (size_t i = 0; i < expected_dex_files.size(); i++) {
+      ASSERT_EQ(expected_dex_files[i]->GetLocation(),
+                class_loader_dex_files[i]->GetLocation());
+      ASSERT_EQ(expected_dex_files[i]->GetLocationChecksum(),
+                class_loader_dex_files[i]->GetLocationChecksum());
+    }
+  }
+
  private:
   void VerifyClassLoaderInfo(ClassLoaderContext* context,
                              size_t index,
                              ClassLoaderContext::ClassLoaderType type,
-                             std::string classpath) {
+                             const std::string& classpath) {
     ASSERT_TRUE(context != nullptr);
     ASSERT_GT(context->class_loader_chain_.size(), index);
     ClassLoaderContext::ClassLoaderInfo& info = context->class_loader_chain_[index];
@@ -96,6 +148,18 @@
     Split(classpath, ':', &expected_classpath);
     ASSERT_EQ(expected_classpath, info.classpath);
   }
+
+  void VerifyClassLoaderFromTestDex(ClassLoaderContext* context,
+                                    size_t index,
+                                    ClassLoaderContext::ClassLoaderType type,
+                                    const std::string& test_name) {
+    std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles(test_name.c_str());
+    std::vector<std::vector<std::unique_ptr<const DexFile>>*> all_dex_files;
+    all_dex_files.push_back(&dex_files);
+
+    VerifyClassLoaderInfo(context, index, type, GetTestDexFileName(test_name.c_str()));
+    VerifyOpenDexFiles(context, index, all_dex_files);
+  }
 };
 
 TEST_F(ClassLoaderContextTest, ParseValidContextPCL) {
@@ -200,7 +264,7 @@
 
   ScopedObjectAccess soa(Thread::Current());
 
-  StackHandleScope<2> hs(soa.Self());
+  StackHandleScope<1> hs(soa.Self());
   Handle<mirror::ClassLoader> class_loader = hs.NewHandle(
       soa.Decode<mirror::ClassLoader>(jclass_loader));
 
@@ -209,25 +273,17 @@
   ASSERT_TRUE(class_loader->GetParent()->GetClass() ==
       soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
 
-
-  std::vector<const DexFile*> class_loader_dex_files = GetDexFiles(jclass_loader);
-  ASSERT_EQ(classpath_dex.size() + compilation_sources.size(), class_loader_dex_files.size());
-
-  // The classpath dex files must come first.
-  for (size_t i = 0; i < classpath_dex.size(); i++) {
-    ASSERT_EQ(classpath_dex[i]->GetLocation(),
-              class_loader_dex_files[i]->GetLocation());
-    ASSERT_EQ(classpath_dex[i]->GetLocationChecksum(),
-              class_loader_dex_files[i]->GetLocationChecksum());
+  // For the first class loader the class path dex files must come first and then the
+  // compilation sources.
+  std::vector<const DexFile*> expected_classpath = MakeNonOwningPointerVector(classpath_dex);
+  for (auto& dex : compilation_sources_raw) {
+    expected_classpath.push_back(dex);
   }
 
-  // The compilation dex files must come second.
-  for (size_t i = 0, k = classpath_dex.size(); i < compilation_sources.size(); i++, k++) {
-    ASSERT_EQ(compilation_sources[i]->GetLocation(),
-              class_loader_dex_files[k]->GetLocation());
-    ASSERT_EQ(compilation_sources[i]->GetLocationChecksum(),
-              class_loader_dex_files[k]->GetLocationChecksum());
-  }
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            expected_classpath);
 }
 
 TEST_F(ClassLoaderContextTest, CreateClassLoaderWithEmptyContext) {
@@ -244,28 +300,90 @@
 
   ScopedObjectAccess soa(Thread::Current());
 
-  StackHandleScope<2> hs(soa.Self());
+  StackHandleScope<1> hs(soa.Self());
   Handle<mirror::ClassLoader> class_loader = hs.NewHandle(
       soa.Decode<mirror::ClassLoader>(jclass_loader));
 
-  ASSERT_TRUE(class_loader->GetClass() ==
-      soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader));
+  // An empty context should create a single PathClassLoader with only the compilation sources.
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            compilation_sources_raw);
   ASSERT_TRUE(class_loader->GetParent()->GetClass() ==
       soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
-
-
-  std::vector<const DexFile*> class_loader_dex_files = GetDexFiles(jclass_loader);
-
-  // The compilation sources should be the only files present in the class loader
-  ASSERT_EQ(compilation_sources.size(), class_loader_dex_files.size());
-  for (size_t i = 0; i < compilation_sources.size(); i++) {
-    ASSERT_EQ(compilation_sources[i]->GetLocation(),
-        class_loader_dex_files[i]->GetLocation());
-    ASSERT_EQ(compilation_sources[i]->GetLocationChecksum(),
-        class_loader_dex_files[i]->GetLocationChecksum());
-  }
 }
 
+TEST_F(ClassLoaderContextTest, CreateClassLoaderWithComplexChain) {
+  // Setup the context.
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_a = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_b = OpenTestDexFiles("ForClassLoaderB");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_c = OpenTestDexFiles("ForClassLoaderC");
+  std::vector<std::unique_ptr<const DexFile>> classpath_dex_d = OpenTestDexFiles("ForClassLoaderD");
+
+  std::string context_spec =
+      "PCL[" + CreateClassPath(classpath_dex_a) + ":" + CreateClassPath(classpath_dex_b) + "];" +
+      "DLC[" + CreateClassPath(classpath_dex_c) + "];" +
+      "PCL[" + CreateClassPath(classpath_dex_d) + "]";
+
+  std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_spec);
+  ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, ""));
+
+  // Setup the compilation sources.
+  std::vector<std::unique_ptr<const DexFile>> compilation_sources = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> compilation_sources_raw =
+      MakeNonOwningPointerVector(compilation_sources);
+
+  // Create the class loader.
+  jobject jclass_loader = context->CreateClassLoader(compilation_sources_raw);
+  ASSERT_TRUE(jclass_loader != nullptr);
+
+  // Verify the class loader.
+  ScopedObjectAccess soa(Thread::Current());
+
+  StackHandleScope<3> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader_1 = hs.NewHandle(
+      soa.Decode<mirror::ClassLoader>(jclass_loader));
+
+  // Verify the first class loader
+
+  // For the first class loader the class path dex files must come first and then the
+  // compilation sources.
+  std::vector<const DexFile*> class_loader_1_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_a);
+  for (auto& dex : classpath_dex_b) {
+    class_loader_1_dex_files.push_back(dex.get());
+  }
+  for (auto& dex : compilation_sources_raw) {
+    class_loader_1_dex_files.push_back(dex);
+  }
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_1,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_1_dex_files);
+
+  // Verify the second class loader
+  Handle<mirror::ClassLoader> class_loader_2 = hs.NewHandle(class_loader_1->GetParent());
+  std::vector<const DexFile*> class_loader_2_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_c);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_2,
+                            WellKnownClasses::dalvik_system_DelegateLastClassLoader,
+                            class_loader_2_dex_files);
+
+  // Verify the third class loader
+  Handle<mirror::ClassLoader> class_loader_3 = hs.NewHandle(class_loader_2->GetParent());
+  std::vector<const DexFile*> class_loader_3_dex_files =
+      MakeNonOwningPointerVector(classpath_dex_d);
+  VerifyClassLoaderDexFiles(soa,
+                            class_loader_3,
+                            WellKnownClasses::dalvik_system_PathClassLoader,
+                            class_loader_3_dex_files);
+  // The last class loader should have the BootClassLoader as a parent.
+  ASSERT_TRUE(class_loader_3->GetParent()->GetClass() ==
+      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+}
+
+
 TEST_F(ClassLoaderContextTest, RemoveSourceLocations) {
   std::unique_ptr<ClassLoaderContext> context =
       ClassLoaderContext::Create("PCL[a.dex]");
@@ -292,46 +410,90 @@
   std::vector<std::unique_ptr<const DexFile>> dex1 = OpenTestDexFiles("Main");
   std::vector<std::unique_ptr<const DexFile>> dex2 = OpenTestDexFiles("MyClass");
   std::string encoding = context->EncodeContextForOatFile("");
-  std::string expected_encoding = "PCL[" +
-      dex1[0]->GetLocation() + "*" + std::to_string(dex1[0]->GetLocationChecksum()) + ":" +
-      dex2[0]->GetLocation() + "*" + std::to_string(dex2[0]->GetLocationChecksum()) + "]";
+  std::string expected_encoding = "PCL[" + CreateClassPathWithChecksums(dex1) + ":" +
+      CreateClassPathWithChecksums(dex2) + "]";
   ASSERT_EQ(expected_encoding, context->EncodeContextForOatFile(""));
 }
 
-TEST_F(ClassLoaderContextTest, DecodeOatFileKey) {
-  std::string oat_file_encoding = "PCL[a.dex*123:b.dex*456]";
-  std::vector<std::string> classpath;
-  std::vector<uint32_t> checksums;
-  bool is_special_shared_library;
-  bool result = ClassLoaderContext::DecodePathClassLoaderContextFromOatFileKey(
-      oat_file_encoding,
-      &classpath,
-      &checksums,
-      &is_special_shared_library);
-  ASSERT_TRUE(result);
-  ASSERT_FALSE(is_special_shared_library);
-  ASSERT_EQ(2u, classpath.size());
-  ASSERT_EQ(2u, checksums.size());
-  ASSERT_EQ("a.dex", classpath[0]);
-  ASSERT_EQ(123u, checksums[0]);
-  ASSERT_EQ("b.dex", classpath[1]);
-  ASSERT_EQ(456u, checksums[1]);
+// TODO(calin) add a test which creates the context for a class loader together with dex_elements.
+TEST_F(ClassLoaderContextTest, CreateContextForClassLoader) {
+  // The chain is
+  //    ClassLoaderA (PathClassLoader)
+  //       ^
+  //       |
+  //    ClassLoaderB (DelegateLastClassLoader)
+  //       ^
+  //       |
+  //    ClassLoaderC (PathClassLoader)
+  //       ^
+  //       |
+  //    ClassLoaderD (DelegateLastClassLoader)
+
+  jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
+  jobject class_loader_b = LoadDexInDelegateLastClassLoader("ForClassLoaderB", class_loader_a);
+  jobject class_loader_c = LoadDexInPathClassLoader("ForClassLoaderC", class_loader_b);
+  jobject class_loader_d = LoadDexInDelegateLastClassLoader("ForClassLoaderD", class_loader_c);
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader_d);
+
+  VerifyContextForClassLoader(context.get());
+  VerifyContextSize(context.get(), 4);
+
+  VerifyClassLoaderDLCFromTestDex(context.get(), 0, "ForClassLoaderD");
+  VerifyClassLoaderPCLFromTestDex(context.get(), 1, "ForClassLoaderC");
+  VerifyClassLoaderDLCFromTestDex(context.get(), 2, "ForClassLoaderB");
+  VerifyClassLoaderPCLFromTestDex(context.get(), 3, "ForClassLoaderA");
 }
 
-TEST_F(ClassLoaderContextTest, DecodeOatFileKeySpecialLibrary) {
-  std::string oat_file_encoding = "&";
-  std::vector<std::string> classpath;
-  std::vector<uint32_t> checksums;
-  bool is_special_shared_library;
-  bool result = ClassLoaderContext::DecodePathClassLoaderContextFromOatFileKey(
-      oat_file_encoding,
-      &classpath,
-      &checksums,
-      &is_special_shared_library);
-  ASSERT_TRUE(result);
-  ASSERT_TRUE(is_special_shared_library);
-  ASSERT_TRUE(classpath.empty());
-  ASSERT_TRUE(checksums.empty());
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatch) {
+  std::string context_spec = "PCL[a.dex*123:b.dex*456];DLC[c.dex*890]";
+  std::unique_ptr<ClassLoaderContext> context = ParseContextWithChecksums(context_spec);
+
+  VerifyContextSize(context.get(), 2);
+  VerifyClassLoaderPCL(context.get(), 0, "a.dex:b.dex");
+  VerifyClassLoaderDLC(context.get(), 1, "c.dex");
+
+  ASSERT_TRUE(context->VerifyClassLoaderContextMatch(context_spec));
+
+  std::string wrong_class_loader_type = "PCL[a.dex*123:b.dex*456];PCL[c.dex*890]";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_class_loader_type));
+
+  std::string wrong_class_loader_order = "DLC[c.dex*890];PCL[a.dex*123:b.dex*456]";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_class_loader_order));
+
+  std::string wrong_classpath_order = "PCL[b.dex*456:a.dex*123];DLC[c.dex*890]";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_classpath_order));
+
+  std::string wrong_checksum = "PCL[a.dex*999:b.dex*456];DLC[c.dex*890]";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_checksum));
+
+  std::string wrong_extra_class_loader = "PCL[a.dex*123:b.dex*456];DLC[c.dex*890];PCL[d.dex*321]";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_extra_class_loader));
+
+  std::string wrong_extra_classpath = "PCL[a.dex*123:b.dex*456];DLC[c.dex*890:d.dex*321]";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_extra_classpath));
+
+  std::string wrong_spec = "PCL[a.dex*999:b.dex*456];DLC[";
+  ASSERT_FALSE(context->VerifyClassLoaderContextMatch(wrong_spec));
+}
+
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchAfterEncoding) {
+  jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
+  jobject class_loader_b = LoadDexInDelegateLastClassLoader("ForClassLoaderB", class_loader_a);
+  jobject class_loader_c = LoadDexInPathClassLoader("ForClassLoaderC", class_loader_b);
+  jobject class_loader_d = LoadDexInDelegateLastClassLoader("ForClassLoaderD", class_loader_c);
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader_d);
+
+  ASSERT_TRUE(context->VerifyClassLoaderContextMatch(context->EncodeContextForOatFile("")));
+}
+
+TEST_F(ClassLoaderContextTest, VerifyClassLoaderContextMatchAfterEncodingMultidex) {
+  jobject class_loader = LoadDexInPathClassLoader("MultiDex", nullptr);
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader);
+
+  ASSERT_TRUE(context->VerifyClassLoaderContextMatch(context->EncodeContextForOatFile("")));
 }
 
 }  // namespace art
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
new file mode 100644
index 0000000..d160a51
--- /dev/null
+++ b/runtime/class_loader_utils.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CLASS_LOADER_UTILS_H_
+#define ART_RUNTIME_CLASS_LOADER_UTILS_H_
+
+#include "handle_scope.h"
+#include "mirror/class_loader.h"
+#include "scoped_thread_state_change-inl.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+// Returns true if the given class loader is either a PathClassLoader or a DexClassLoader.
+// (they both have the same behaviour with respect to class lookup order)
+static bool IsPathOrDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                   Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Class* class_loader_class = class_loader->GetClass();
+  return
+      (class_loader_class ==
+          soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader)) ||
+      (class_loader_class ==
+          soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader));
+}
+
+static bool IsDelegateLastClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                      Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  mirror::Class* class_loader_class = class_loader->GetClass();
+  return class_loader_class ==
+      soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DelegateLastClassLoader);
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_CLASS_LOADER_UTILS_H_
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 659c7e4..7e762c3 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -20,30 +20,30 @@
 #include <dirent.h>
 #include <dlfcn.h>
 #include <fcntl.h>
-#include <ScopedLocalRef.h>
+#include "nativehelper/ScopedLocalRef.h"
 #include <stdlib.h>
 
 #include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
 #include "android-base/stringprintf.h"
 
 #include "art_field-inl.h"
-#include "base/macros.h"
 #include "base/logging.h"
+#include "base/macros.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
 #include "compiler_callbacks.h"
 #include "dex_file-inl.h"
-#include "gc_root-inl.h"
 #include "gc/heap.h"
+#include "gc_root-inl.h"
 #include "gtest/gtest.h"
 #include "handle_scope-inl.h"
 #include "interpreter/unstarted_runtime.h"
 #include "java_vm_ext.h"
 #include "jni_internal.h"
+#include "mem_map.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
-#include "mem_map.h"
 #include "native/dalvik_system_DexFile.h"
 #include "noop_compiler_callbacks.h"
 #include "os.h"
@@ -425,7 +425,6 @@
   PostRuntimeCreate();
   runtime_.reset(Runtime::Current());
   class_linker_ = runtime_->GetClassLinker();
-  class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
 
   // Runtime::Create acquired the mutator_lock_ that is normally given away when we
   // Runtime::Start, give it away now and then switch to a more managable ScopedObjectAccess.
@@ -589,18 +588,24 @@
 }
 
 std::vector<const DexFile*> CommonRuntimeTestImpl::GetDexFiles(jobject jclass_loader) {
-  std::vector<const DexFile*> ret;
-
   ScopedObjectAccess soa(Thread::Current());
 
-  StackHandleScope<2> hs(soa.Self());
+  StackHandleScope<1> hs(soa.Self());
   Handle<mirror::ClassLoader> class_loader = hs.NewHandle(
       soa.Decode<mirror::ClassLoader>(jclass_loader));
+  return GetDexFiles(soa, class_loader);
+}
 
-  DCHECK_EQ(class_loader->GetClass(),
-            soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader));
-  DCHECK_EQ(class_loader->GetParent()->GetClass(),
-            soa.Decode<mirror::Class>(WellKnownClasses::java_lang_BootClassLoader));
+std::vector<const DexFile*> CommonRuntimeTestImpl::GetDexFiles(
+    ScopedObjectAccess& soa,
+    Handle<mirror::ClassLoader> class_loader) {
+  std::vector<const DexFile*> ret;
+
+  DCHECK(
+      (class_loader->GetClass() ==
+          soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader)) ||
+      (class_loader->GetClass() ==
+          soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DelegateLastClassLoader)));
 
   // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
   // We need to get the DexPathList and loop through it.
@@ -618,6 +623,7 @@
     // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
     // at the mCookie which is a DexFile vector.
     if (dex_elements_obj != nullptr) {
+      StackHandleScope<1> hs(soa.Self());
       Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
           hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
       for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
@@ -757,6 +763,82 @@
   return location;
 }
 
+std::string CommonRuntimeTestImpl::CreateClassPath(
+    const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+  CHECK(!dex_files.empty());
+  std::string classpath = dex_files[0]->GetLocation();
+  for (size_t i = 1; i < dex_files.size(); i++) {
+    classpath += ":" + dex_files[i]->GetLocation();
+  }
+  return classpath;
+}
+
+std::string CommonRuntimeTestImpl::CreateClassPathWithChecksums(
+    const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+  CHECK(!dex_files.empty());
+  std::string classpath = dex_files[0]->GetLocation() + "*" +
+      std::to_string(dex_files[0]->GetLocationChecksum());
+  for (size_t i = 1; i < dex_files.size(); i++) {
+    classpath += ":" + dex_files[i]->GetLocation() + "*" +
+        std::to_string(dex_files[i]->GetLocationChecksum());
+  }
+  return classpath;
+}
+
+void CommonRuntimeTestImpl::FillHeap(Thread* self,
+                                     ClassLinker* class_linker,
+                                     VariableSizedHandleScope* handle_scope) {
+  DCHECK(handle_scope != nullptr);
+
+  Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
+
+  // Class java.lang.Object.
+  Handle<mirror::Class> c(handle_scope->NewHandle(
+      class_linker->FindSystemClass(self, "Ljava/lang/Object;")));
+  // Array helps to fill memory faster.
+  Handle<mirror::Class> ca(handle_scope->NewHandle(
+      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
+
+  // Start allocating with ~128K
+  size_t length = 128 * KB;
+  while (length > 40) {
+    const int32_t array_length = length / 4;  // Object[] has elements of size 4.
+    MutableHandle<mirror::Object> h(handle_scope->NewHandle<mirror::Object>(
+        mirror::ObjectArray<mirror::Object>::Alloc(self, ca.Get(), array_length)));
+    if (self->IsExceptionPending() || h == nullptr) {
+      self->ClearException();
+
+      // Try a smaller length
+      length = length / 2;
+      // Use at most a quarter the reported free space.
+      size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
+      if (length * 4 > mem) {
+        length = mem / 4;
+      }
+    }
+  }
+
+  // Allocate simple objects until allocation fails.
+  while (!self->IsExceptionPending()) {
+    handle_scope->NewHandle<mirror::Object>(c->AllocObject(self));
+  }
+  self->ClearException();
+}
+
+void CommonRuntimeTestImpl::SetUpRuntimeOptionsForFillHeap(RuntimeOptions *options) {
+  // Use a smaller heap
+  bool found = false;
+  for (std::pair<std::string, const void*>& pair : *options) {
+    if (pair.first.find("-Xmx") == 0) {
+      pair.first = "-Xmx4M";  // Smallest we can go.
+      found = true;
+    }
+  }
+  if (!found) {
+    options->emplace_back("-Xmx4M", nullptr);
+  }
+}
+
 CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
   vm_->SetCheckJniAbortHook(Hook, &actual_);
 }
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index fcf3a31..74bc0b2 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -28,6 +28,7 @@
 // TODO: Add inl file and avoid including inl.
 #include "obj_ptr-inl.h"
 #include "os.h"
+#include "scoped_thread_state_change-inl.h"
 
 namespace art {
 
@@ -43,6 +44,8 @@
 class JavaVMExt;
 class Runtime;
 typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+class Thread;
+class VariableSizedHandleScope;
 
 uint8_t* DecodeBase64(const char* src, size_t* dst_size);
 
@@ -104,6 +107,14 @@
   // Retuerns the filename for a test dex (i.e. XandY or ManyMethods).
   std::string GetTestDexFileName(const char* name) const;
 
+  // A helper function to fill the heap.
+  static void FillHeap(Thread* self,
+                       ClassLinker* class_linker,
+                       VariableSizedHandleScope* handle_scope)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  // A helper to set up a small heap (4M) to make FillHeap faster.
+  static void SetUpRuntimeOptionsForFillHeap(RuntimeOptions *options);
+
  protected:
   // Allow subclases such as CommonCompilerTest to add extra options.
   virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
@@ -159,9 +170,12 @@
   const DexFile* java_lang_dex_file_;
   std::vector<const DexFile*> boot_class_path_;
 
-  // Get the dex files from a PathClassLoader. This in order of the dex elements and their dex
-  // arrays.
+  // Get the dex files from a PathClassLoader or DelegateLastClassLoader.
+  // This only looks into the current class loader and does not recurse into the parents.
   std::vector<const DexFile*> GetDexFiles(jobject jclass_loader);
+  std::vector<const DexFile*> GetDexFiles(ScopedObjectAccess& soa,
+                                          Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get the first dex file from a PathClassLoader. Will abort if it is null.
   const DexFile* GetFirstDexFile(jobject jclass_loader);
@@ -176,6 +190,15 @@
   // initializers, initialize well-known classes, and creates the heap thread pool.
   virtual void FinalizeSetup();
 
+  // Creates the class path string for the given dex files (the list of dex file locations
+  // separated by ':').
+  std::string CreateClassPath(
+      const std::vector<std::unique_ptr<const DexFile>>& dex_files);
+  // Same as CreateClassPath but adds the dex file checksum after each location. The separator
+  // is '*'.
+  std::string CreateClassPathWithChecksums(
+      const std::vector<std::unique_ptr<const DexFile>>& dex_files);
+
  private:
   static std::string GetCoreFileLocation(const char* suffix);
 
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 6758d75..a46f531 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -19,7 +19,6 @@
 #include <sstream>
 
 #include "android-base/stringprintf.h"
-#include "ScopedLocalRef.h"
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
@@ -32,6 +31,7 @@
 #include "mirror/method_type.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "obj_ptr-inl.h"
 #include "thread.h"
 #include "verifier/method_verifier.h"
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 0f15e8b..5a87ae8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -30,8 +30,8 @@
 #include "base/enums.h"
 #include "base/strlcpy.h"
 #include "base/time_utils.h"
-#include "class_linker.h"
 #include "class_linker-inl.h"
+#include "class_linker.h"
 #include "dex_file-inl.h"
 #include "dex_file_annotations.h"
 #include "dex_instruction.h"
@@ -39,6 +39,7 @@
 #include "gc/accounting/card_table-inl.h"
 #include "gc/allocation_record.h"
 #include "gc/scoped_gc_critical_section.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
 #include "handle_scope-inl.h"
@@ -46,19 +47,19 @@
 #include "jdwp/object_registry.h"
 #include "jni_internal.h"
 #include "jvalue-inl.h"
-#include "mirror/class.h"
 #include "mirror/class-inl.h"
+#include "mirror/class.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "mirror/throwable.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedPrimitiveArray.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedPrimitiveArray.h"
 #include "stack.h"
 #include "thread_list.h"
 #include "utf.h"
@@ -4813,13 +4814,6 @@
   DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
 };
 
-static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
-    REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
-  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
-  HeapChunkContext::HeapChunkJavaCallback(
-      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
-}
-
 void Dbg::DdmSendHeapSegments(bool native) {
   Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
   Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
@@ -4839,6 +4833,12 @@
 
   // Send a series of heap segment chunks.
   HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
+  auto bump_pointer_space_visitor = [&](mirror::Object* obj)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+    const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
+    HeapChunkContext::HeapChunkJavaCallback(
+        obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
+  };
   if (native) {
     UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
   } else {
@@ -4861,7 +4861,7 @@
       } else if (space->IsBumpPointerSpace()) {
         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         context.SetChunkOverhead(0);
-        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+        space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
         HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
       } else if (space->IsRegionSpace()) {
         heap->IncrementDisableMovingGC(self);
@@ -4870,7 +4870,7 @@
           ScopedSuspendAll ssa(__FUNCTION__);
           ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
           context.SetChunkOverhead(0);
-          space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
+          space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
           HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
         }
         heap->DecrementDisableMovingGC(self);
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index 41db4d8..b163cdb 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -181,19 +181,18 @@
   if (lhs_shorty.find('L', 1) != StringPiece::npos) {
     const DexFile::TypeList* params = dex_file_->GetProtoParameters(*proto_id_);
     const DexFile::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_);
-    // Both lists are empty or have contents, or else shorty is broken.
-    DCHECK_EQ(params == nullptr, rhs_params == nullptr);
-    if (params != nullptr) {
-      uint32_t params_size = params->Size();
-      DCHECK_EQ(params_size, rhs_params->Size());  // Parameter list size must match.
-      for (uint32_t i = 0; i < params_size; ++i) {
-        const DexFile::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
-        const DexFile::TypeId& rhs_param_id =
-            rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
-        if (!DexFileStringEquals(dex_file_, param_id.descriptor_idx_,
-                                 rhs.dex_file_, rhs_param_id.descriptor_idx_)) {
-          return false;  // Parameter type mismatch.
-        }
+    // We found a reference parameter in the matching shorty, so both lists must be non-empty.
+    DCHECK(params != nullptr);
+    DCHECK(rhs_params != nullptr);
+    uint32_t params_size = params->Size();
+    DCHECK_EQ(params_size, rhs_params->Size());  // Parameter list size must match.
+    for (uint32_t i = 0; i < params_size; ++i) {
+      const DexFile::TypeId& param_id = dex_file_->GetTypeId(params->GetTypeItem(i).type_idx_);
+      const DexFile::TypeId& rhs_param_id =
+          rhs.dex_file_->GetTypeId(rhs_params->GetTypeItem(i).type_idx_);
+      if (!DexFileStringEquals(dex_file_, param_id.descriptor_idx_,
+                               rhs.dex_file_, rhs_param_id.descriptor_idx_)) {
+        return false;  // Parameter type mismatch.
       }
     }
   }
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index eb3b210..990ab11 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -62,11 +62,11 @@
   static const uint16_t kDexNoIndex16 = 0xFFFF;
 
   // The separator character in MultiDex locations.
-  static constexpr char kMultiDexSeparator = ':';
+  static constexpr char kMultiDexSeparator = '!';
 
   // A string version of the previous. This is a define so that we can merge string literals in the
   // preprocessor.
-  #define kMultiDexSeparatorString ":"
+  #define kMultiDexSeparatorString "!"
 
   // Raw header_item.
   struct Header {
@@ -499,7 +499,7 @@
     return GetBaseLocation(location.c_str());
   }
 
-  // Returns the ':classes*.dex' part of the dex location. Returns an empty
+  // Returns the '!classes*.dex' part of the dex location. Returns an empty
   // string if there is no multidex suffix for the given location.
   // The kMultiDexSeparator is included in the returned suffix.
   static std::string GetMultiDexSuffix(const std::string& location) {
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 78d5c5f..1a73062 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -535,9 +535,9 @@
   std::string dex_location_str = "/system/app/framework.jar";
   const char* dex_location = dex_location_str.c_str();
   ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexLocation(0, dex_location));
-  ASSERT_EQ("/system/app/framework.jar:classes2.dex",
+  ASSERT_EQ("/system/app/framework.jar!classes2.dex",
             DexFile::GetMultiDexLocation(1, dex_location));
-  ASSERT_EQ("/system/app/framework.jar:classes101.dex",
+  ASSERT_EQ("/system/app/framework.jar!classes101.dex",
             DexFile::GetMultiDexLocation(100, dex_location));
 }
 
@@ -563,11 +563,11 @@
 
 TEST(DexFileUtilsTest, GetBaseLocationAndMultiDexSuffix) {
   EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar"));
-  EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar:classes2.dex"));
-  EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar:classes8.dex"));
+  EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar!classes2.dex"));
+  EXPECT_EQ("/foo/bar/baz.jar", DexFile::GetBaseLocation("/foo/bar/baz.jar!classes8.dex"));
   EXPECT_EQ("", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar"));
-  EXPECT_EQ(":classes2.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar:classes2.dex"));
-  EXPECT_EQ(":classes8.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar:classes8.dex"));
+  EXPECT_EQ("!classes2.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar!classes2.dex"));
+  EXPECT_EQ("!classes8.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar!classes8.dex"));
 }
 
 TEST_F(DexFileTest, ZipOpenClassesPresent) {
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 6547299..a6c5d6c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -50,6 +50,8 @@
                                     const InlineInfoEncoding& encoding,
                                     uint8_t inlining_depth)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(!outer_method->IsObsolete());
+
   // This method is being used by artQuickResolutionTrampoline, before it sets up
   // the passed parameters in a GC friendly way. Therefore we must never be
   // suspended while executing it.
@@ -78,10 +80,12 @@
   }
 
   // Lookup the declaring class of the inlined method.
-  const DexFile* dex_file = caller->GetDexFile();
+  ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
+  const DexFile* dex_file = dex_cache->GetDexFile();
   const DexFile::MethodId& method_id = dex_file->GetMethodId(method_index);
   ArtMethod* inlined_method = caller->GetDexCacheResolvedMethod(method_index, kRuntimePointerSize);
-  if (inlined_method != nullptr && !inlined_method->IsRuntimeMethod()) {
+  if (inlined_method != nullptr) {
+    DCHECK(!inlined_method->IsRuntimeMethod());
     return inlined_method;
   }
   const char* descriptor = dex_file->StringByTypeIdx(method_id.class_idx_);
@@ -90,25 +94,17 @@
   mirror::ClassLoader* class_loader = caller->GetDeclaringClass()->GetClassLoader();
   mirror::Class* klass = class_linker->LookupClass(self, descriptor, class_loader);
   if (klass == nullptr) {
-      LOG(FATAL) << "Could not find an inlined method from an .oat file: "
-                 << "the class " << descriptor << " was not found in the class loader of "
-                 << caller->PrettyMethod() << ". "
-                 << "This must be due to playing wrongly with class loaders";
+    LOG(FATAL) << "Could not find an inlined method from an .oat file: the class " << descriptor
+               << " was not found in the class loader of " << caller->PrettyMethod() << ". "
+               << "This must be due to playing wrongly with class loaders";
   }
 
-  // Lookup the method.
-  const char* method_name = dex_file->GetMethodName(method_id);
-  const Signature signature = dex_file->GetMethodSignature(method_id);
-
-  inlined_method = klass->FindDeclaredDirectMethod(method_name, signature, kRuntimePointerSize);
+  inlined_method = klass->FindClassMethod(dex_cache, method_index, kRuntimePointerSize);
   if (inlined_method == nullptr) {
-    inlined_method = klass->FindDeclaredVirtualMethod(method_name, signature, kRuntimePointerSize);
-    if (inlined_method == nullptr) {
-      LOG(FATAL) << "Could not find an inlined method from an .oat file: "
-                 << "the class " << descriptor << " does not have "
-                 << method_name << signature << " declared. "
-                 << "This must be due to duplicate classes or playing wrongly with class loaders";
-    }
+    LOG(FATAL) << "Could not find an inlined method from an .oat file: the class " << descriptor
+               << " does not have " << dex_file->GetMethodName(method_id)
+               << dex_file->GetMethodSignature(method_id) << " declared. "
+               << "This must be due to duplicate classes or playing wrongly with class loaders";
   }
   caller->SetDexCacheResolvedMethod(method_index, inlined_method, kRuntimePointerSize);
 
@@ -444,39 +440,20 @@
                                      ArtMethod* referrer,
                                      Thread* self) {
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-  ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, referrer);
-  if (resolved_method == nullptr) {
+  constexpr ClassLinker::ResolveMode resolve_mode =
+      access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+                   : ClassLinker::ResolveMode::kNoChecks;
+  ArtMethod* resolved_method;
+  if (type == kStatic) {
+    resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+  } else {
     StackHandleScope<1> hs(self);
-    ObjPtr<mirror::Object> null_this = nullptr;
-    HandleWrapperObjPtr<mirror::Object> h_this(
-        hs.NewHandleWrapper(type == kStatic ? &null_this : this_object));
-    constexpr ClassLinker::ResolveMode resolve_mode =
-        access_check ? ClassLinker::kForceICCECheck
-                     : ClassLinker::kNoICCECheckForCache;
+    HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
     resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
   }
-  // Resolution and access check.
   if (UNLIKELY(resolved_method == nullptr)) {
     DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
     return nullptr;  // Failure.
-  } else if (access_check) {
-    mirror::Class* methods_class = resolved_method->GetDeclaringClass();
-    bool can_access_resolved_method =
-        referrer->GetDeclaringClass()->CheckResolvedMethodAccess(methods_class,
-                                                                 resolved_method,
-                                                                 referrer->GetDexCache(),
-                                                                 method_idx,
-                                                                 type);
-    if (UNLIKELY(!can_access_resolved_method)) {
-      DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
-      return nullptr;  // Failure.
-    }
-    // Incompatible class change should have been handled in resolve method.
-    if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) {
-      ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method,
-                                        referrer);
-      return nullptr;  // Failure.
-    }
   }
   // Next, null pointer check.
   if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
@@ -690,24 +667,14 @@
   }
   ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
   ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
-  ArtMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
+  constexpr ClassLinker::ResolveMode resolve_mode = access_check
+      ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+      : ClassLinker::ResolveMode::kNoChecks;
+  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  ArtMethod* resolved_method = linker->GetResolvedMethod<type, resolve_mode>(method_idx, referrer);
   if (UNLIKELY(resolved_method == nullptr)) {
     return nullptr;
   }
-  if (access_check) {
-    // Check for incompatible class change errors and access.
-    bool icce = resolved_method->CheckIncompatibleClassChange(type);
-    if (UNLIKELY(icce)) {
-      return nullptr;
-    }
-    ObjPtr<mirror::Class> methods_class = resolved_method->GetDeclaringClass();
-    if (UNLIKELY(!referring_class->CanAccess(methods_class) ||
-                 !referring_class->CanAccessMember(methods_class,
-                                                   resolved_method->GetAccessFlags()))) {
-      // Potential illegal access, may need to refine the method's class.
-      return nullptr;
-    }
-  }
   if (type == kInterface) {  // Most common form of slow path dispatch.
     return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method,
                                                                   kRuntimePointerSize);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 36885d8..3061365 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1182,7 +1182,7 @@
     HandleWrapper<mirror::Object> h_receiver(
         hs.NewHandleWrapper(virtual_or_interface ? &receiver : &dummy));
     DCHECK_EQ(caller->GetDexFile(), called_method.dex_file);
-    called = linker->ResolveMethod<ClassLinker::kForceICCECheck>(
+    called = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
         self, called_method.dex_method_index, caller, invoke_type);
 
     // Update .bss entry in oat file if any.
@@ -1235,8 +1235,11 @@
         Handle<mirror::ClassLoader> class_loader(
             hs.NewHandle(caller->GetDeclaringClass()->GetClassLoader()));
         // TODO Maybe put this into a mirror::Class function.
-        mirror::Class* ref_class = linker->ResolveReferencedClassOfMethod(
-            called_method.dex_method_index, dex_cache, class_loader);
+        ObjPtr<mirror::Class> ref_class = linker->LookupResolvedType(
+            *dex_cache->GetDexFile(),
+            dex_cache->GetDexFile()->GetMethodId(called_method.dex_method_index).class_idx_,
+            dex_cache.Get(),
+            class_loader.Get());
         if (ref_class->IsInterface()) {
           called = ref_class->FindVirtualMethodForInterfaceSuper(called, kRuntimePointerSize);
         } else {
@@ -2458,6 +2461,21 @@
   return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
 }
 
+// Helper function for art_quick_imt_conflict_trampoline to look up the interface method.
+extern "C" ArtMethod* artLookupResolvedMethod(uint32_t method_index, ArtMethod* referrer)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
+  DCHECK(!referrer->IsProxyMethod());
+  ArtMethod* result = Runtime::Current()->GetClassLinker()->LookupResolvedMethod(
+      method_index, referrer->GetDexCache(), referrer->GetClassLoader());
+  DCHECK(result == nullptr ||
+         result->GetDeclaringClass()->IsInterface() ||
+         result->GetDeclaringClass() ==
+             WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object))
+      << result->PrettyMethod();
+  return result;
+}
+
 // Determine target of interface dispatch. The interface method and this object are known non-null.
 // The interface method is the method returned by the dex cache in the conflict trampoline.
 extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
@@ -2465,7 +2483,6 @@
                                                       Thread* self,
                                                       ArtMethod** sp)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  CHECK(interface_method != nullptr);
   ObjPtr<mirror::Object> this_object(raw_this_object);
   ScopedQuickEntrypointChecks sqec(self);
   StackHandleScope<1> hs(self);
@@ -2475,7 +2492,8 @@
   ArtMethod* method = nullptr;
   ImTable* imt = cls->GetImt(kRuntimePointerSize);
 
-  if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
+  if (LIKELY(interface_method != nullptr)) {
+    DCHECK_NE(interface_method->GetDexMethodIndex(), DexFile::kDexNoIndex);
     // If the interface method is already resolved, look whether we have a match in the
     // ImtConflictTable.
     ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
@@ -2502,9 +2520,7 @@
       return GetTwoWordFailureValue();  // Failure.
     }
   } else {
-    // The interface method is unresolved, so look it up in the dex file of the caller.
-    DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
-
+    // The interface method is unresolved, so resolve it in the dex file of the caller.
     // Fetch the dex_method_idx of the target interface method from the caller.
     uint32_t dex_method_idx;
     uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
@@ -2622,10 +2638,8 @@
 
   // Resolve method - it's either MethodHandle.invoke() or MethodHandle.invokeExact().
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
-  ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::kForceICCECheck>(self,
-                                                                                   inst->VRegB(),
-                                                                                   caller_method,
-                                                                                   kVirtual);
+  ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+      self, inst->VRegB(), caller_method, kVirtual);
   DCHECK((resolved_method ==
           jni::DecodeArtMethod(WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact)) ||
          (resolved_method ==
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 8fcc87d..edf2e5b 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -26,7 +26,7 @@
 namespace accounting {
 
 template <typename Visitor>
-inline void HeapBitmap::Visit(const Visitor& visitor) {
+inline void HeapBitmap::Visit(Visitor&& visitor) {
   for (const auto& bitmap : continuous_space_bitmaps_) {
     bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
   }
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index a5d59bf..1d729ff 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -71,15 +71,6 @@
   large_object_bitmaps_.erase(it);
 }
 
-void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
-  for (const auto& bitmap : continuous_space_bitmaps_) {
-    bitmap->Walk(callback, arg);
-  }
-  for (const auto& bitmap : large_object_bitmaps_) {
-    bitmap->Walk(callback, arg);
-  }
-}
-
 }  // namespace accounting
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 7097f87..36426e9 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -47,11 +47,8 @@
   ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
   LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
 
-  void Walk(ObjectCallback* callback, void* arg)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
   template <typename Visitor>
-  void Visit(const Visitor& visitor)
+  ALWAYS_INLINE void Visit(Visitor&& visitor)
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 57c290e..2901995 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -27,6 +27,7 @@
 #include "gc/space/space.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "object_callbacks.h"
 #include "space_bitmap-inl.h"
 #include "thread-current-inl.h"
 
@@ -383,7 +384,7 @@
   }
 }
 
-void ModUnionTableReferenceCache::VisitObjects(ObjectCallback* callback, void* arg) {
+void ModUnionTableReferenceCache::VisitObjects(ObjectCallback callback, void* arg) {
   CardTable* const card_table = heap_->GetCardTable();
   ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
   for (uint8_t* card : cleared_cards_) {
@@ -550,7 +551,7 @@
       0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor);
 }
 
-void ModUnionTableCardCache::VisitObjects(ObjectCallback* callback, void* arg) {
+void ModUnionTableCardCache::VisitObjects(ObjectCallback callback, void* arg) {
   card_bitmap_->VisitSetBits(
       0,
       RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize,
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 591365f..9e261fd 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -21,21 +21,25 @@
 #include "base/allocator.h"
 #include "card_table.h"
 #include "globals.h"
-#include "object_callbacks.h"
+#include "mirror/object_reference.h"
 #include "safe_map.h"
 
 #include <set>
 #include <vector>
 
 namespace art {
+
 namespace mirror {
 class Object;
 }  // namespace mirror
 
+class MarkObjectVisitor;
+
 namespace gc {
 namespace space {
   class ContinuousSpace;
 }  // namespace space
+
 class Heap;
 
 namespace accounting {
@@ -44,6 +48,9 @@
 // cleared between GC phases, reducing the number of dirty cards that need to be scanned.
 class ModUnionTable {
  public:
+  // A callback for visiting an object in the heap.
+  using ObjectCallback = void (*)(mirror::Object*, void*);
+
   typedef std::set<uint8_t*, std::less<uint8_t*>,
                    TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
   typedef MemoryRangeBitmap<CardTable::kCardSize> CardBitmap;
@@ -72,7 +79,7 @@
   virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;
 
   // Visit all of the objects that may contain references to other spaces.
-  virtual void VisitObjects(ObjectCallback* callback, void* arg) = 0;
+  virtual void VisitObjects(ObjectCallback callback, void* arg) = 0;
 
   // Verification, sanity checks that we don't have clean cards which conflict with out cached data
   // for said cards. Exclusive lock is required since verify sometimes uses
@@ -124,7 +131,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::heap_bitmap_lock_);
 
-  virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+  virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -171,7 +178,7 @@
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
+  virtual void VisitObjects(ObjectCallback callback, void* arg) OVERRIDE
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 9feaf41..b37dd96 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -62,8 +62,9 @@
 }
 
 template<size_t kAlignment> template<typename Visitor>
-inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
-                                                      const Visitor& visitor) const {
+inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
+                                                      uintptr_t visit_end,
+                                                      Visitor&& visitor) const {
   DCHECK_LE(visit_begin, visit_end);
 #if 0
   for (uintptr_t i = visit_begin; i < visit_end; i += kAlignment) {
@@ -155,6 +156,26 @@
 #endif
 }
 
+template<size_t kAlignment> template<typename Visitor>
+void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
+  CHECK(bitmap_begin_ != nullptr);
+
+  uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
+  Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
+  for (uintptr_t i = 0; i <= end; ++i) {
+    uintptr_t w = bitmap_begin[i].LoadRelaxed();
+    if (w != 0) {
+      uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+      do {
+        const size_t shift = CTZ(w);
+        mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+        visitor(obj);
+        w ^= (static_cast<uintptr_t>(1)) << shift;
+      } while (w != 0);
+    }
+  }
+}
+
 template<size_t kAlignment> template<bool kSetBit>
 inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
   uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index eb9f039..317e2fc 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -137,27 +137,6 @@
 }
 
 template<size_t kAlignment>
-void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
-  CHECK(bitmap_begin_ != nullptr);
-  CHECK(callback != nullptr);
-
-  uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
-  Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
-  for (uintptr_t i = 0; i <= end; ++i) {
-    uintptr_t w = bitmap_begin[i].LoadRelaxed();
-    if (w != 0) {
-      uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
-      do {
-        const size_t shift = CTZ(w);
-        mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
-        (*callback)(obj, arg);
-        w ^= (static_cast<uintptr_t>(1)) << shift;
-      } while (w != 0);
-    }
-  }
-}
-
-template<size_t kAlignment>
 void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitmap,
                                         const SpaceBitmap<kAlignment>& mark_bitmap,
                                         uintptr_t sweep_begin, uintptr_t sweep_end,
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 889f57b..2fe6394 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -34,9 +34,6 @@
 }  // namespace mirror
 class MemMap;
 
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
 namespace gc {
 namespace accounting {
 
@@ -108,8 +105,6 @@
     return index < bitmap_size_ / sizeof(intptr_t);
   }
 
-  void VisitRange(uintptr_t base, uintptr_t max, ObjectCallback* callback, void* arg) const;
-
   class ClearVisitor {
    public:
     explicit ClearVisitor(SpaceBitmap* const bitmap)
@@ -134,13 +129,14 @@
   // TODO: Use lock annotations when clang is fixed.
   // REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
   template <typename Visitor>
-  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
+  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
       NO_THREAD_SAFETY_ANALYSIS;
 
   // Visits set bits in address order.  The callback is not permitted to change the bitmap bits or
   // max during the traversal.
-  void Walk(ObjectCallback* callback, void* arg)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_);
+  template <typename Visitor>
+  void Walk(Visitor&& visitor)
+      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Walk through the bitmaps in increasing address order, and find the object pointers that
   // correspond to garbage objects.  Call <callback> zero or more times with lists of these object
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8d3c62f..9d672b1 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -583,23 +583,22 @@
   ObjPtr<mirror::Object> const holder_;
 };
 
-void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
-  auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
-  // Objects not on dirty or aged cards should never have references to newly allocated regions.
-  if (collector->heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
-    VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
-    obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
-        visitor,
-        visitor);
-  }
-}
-
 void ConcurrentCopying::VerifyNoMissingCardMarks() {
+  auto visitor = [&](mirror::Object* obj)
+      REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!mark_stack_lock_) {
+    // Objects not on dirty or aged cards should never have references to newly allocated regions.
+    if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) {
+      VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj);
+      obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+          internal_visitor, internal_visitor);
+    }
+  };
   TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
-  region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
+  region_space_->Walk(visitor);
   {
     ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
+    heap_->GetLiveBitmap()->Visit(visitor);
   }
 }
 
@@ -1212,34 +1211,6 @@
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
- public:
-  explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
-      : collector_(collector) {}
-  void operator()(mirror::Object* obj) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjectCallback(obj, collector_);
-  }
-  static void ObjectCallback(mirror::Object* obj, void *arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    CHECK(obj != nullptr);
-    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
-    space::RegionSpace* region_space = collector->RegionSpace();
-    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
-    VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
-    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
-        visitor,
-        visitor);
-    if (kUseBakerReadBarrier) {
-      CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
-          << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
-    }
-  }
-
- private:
-  ConcurrentCopying* const collector_;
-};
-
 // Verify there's no from-space references left after the marking phase.
 void ConcurrentCopying::VerifyNoFromSpaceReferences() {
   Thread* self = Thread::Current();
@@ -1252,7 +1223,21 @@
       CHECK(!thread->GetIsGcMarking());
     }
   }
-  VerifyNoFromSpaceRefsObjectVisitor visitor(this);
+
+  auto verify_no_from_space_refs_visitor = [&](mirror::Object* obj)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK(obj != nullptr);
+    space::RegionSpace* region_space = RegionSpace();
+    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
+    VerifyNoFromSpaceRefsFieldVisitor visitor(this);
+    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+        visitor,
+        visitor);
+    if (kUseBakerReadBarrier) {
+      CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+          << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
+    }
+  };
   // Roots.
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -1260,11 +1245,11 @@
     Runtime::Current()->VisitRoots(&ref_visitor);
   }
   // The to-space.
-  region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
+  region_space_->WalkToSpace(verify_no_from_space_refs_visitor);
   // Non-moving spaces.
   {
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    heap_->GetMarkBitmap()->Visit(visitor);
+    heap_->GetMarkBitmap()->Visit(verify_no_from_space_refs_visitor);
   }
   // The alloc stack.
   {
@@ -1275,7 +1260,7 @@
       if (obj != nullptr && obj->GetClass() != nullptr) {
         // TODO: need to call this only if obj is alive?
         ref_visitor(obj);
-        visitor(obj);
+        verify_no_from_space_refs_visitor(obj);
       }
     }
   }
@@ -1337,31 +1322,6 @@
   ConcurrentCopying* const collector_;
 };
 
-class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
- public:
-  explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
-      : collector_(collector) {}
-  void operator()(mirror::Object* obj) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjectCallback(obj, collector_);
-  }
-  static void ObjectCallback(mirror::Object* obj, void *arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    CHECK(obj != nullptr);
-    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
-    space::RegionSpace* region_space = collector->RegionSpace();
-    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
-    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
-    AssertToSpaceInvariantFieldVisitor visitor(collector);
-    obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
-        visitor,
-        visitor);
-  }
-
- private:
-  ConcurrentCopying* const collector_;
-};
-
 class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
  public:
   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
@@ -1599,8 +1559,14 @@
     region_space_->AddLiveBytes(to_ref, alloc_size);
   }
   if (ReadBarrier::kEnableToSpaceInvariantChecks) {
-    AssertToSpaceInvariantObjectVisitor visitor(this);
-    visitor(to_ref);
+    CHECK(to_ref != nullptr);
+    space::RegionSpace* region_space = RegionSpace();
+    CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space";
+    AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref);
+    AssertToSpaceInvariantFieldVisitor visitor(this);
+    to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+        visitor,
+        visitor);
   }
 }
 
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 7b4340e..ab60990 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -181,9 +181,6 @@
   void VerifyGrayImmuneObjects()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
-  static void VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg)
-      REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
   void VerifyNoMissingCardMarks()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
@@ -348,7 +345,6 @@
   class ActivateReadBarrierEntrypointsCallback;
   class ActivateReadBarrierEntrypointsCheckpoint;
   class AssertToSpaceInvariantFieldVisitor;
-  class AssertToSpaceInvariantObjectVisitor;
   class AssertToSpaceInvariantRefsVisitor;
   class ClearBlackPtrsVisitor;
   class ComputeUnevacFromSpaceLiveRatioVisitor;
@@ -365,7 +361,6 @@
   class ThreadFlipVisitor;
   class VerifyGrayImmuneObjectsVisitor;
   class VerifyNoFromSpaceRefsFieldVisitor;
-  class VerifyNoFromSpaceRefsObjectVisitor;
   class VerifyNoFromSpaceRefsVisitor;
   class VerifyNoMissingCardMarkVisitor;
 
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
new file mode 100644
index 0000000..b6ccb277
--- /dev/null
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+#define ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
+
+#include "heap.h"
+
+#include "base/mutex-inl.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/space/bump_pointer_space-walk-inl.h"
+#include "gc/space/region_space-inl.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+
+// Visit objects when threads aren't suspended. If concurrent moving
+// GC, disable moving GC and suspend threads and then visit objects.
+template <typename Visitor>
+inline void Heap::VisitObjects(Visitor&& visitor) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertSharedHeld(self);
+  DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
+  if (IsGcConcurrentAndMoving()) {
+    // Concurrent moving GC. Just suspending threads isn't sufficient
+    // because a collection isn't one big pause and we could suspend
+    // threads in the middle (between phases) of a concurrent moving
+    // collection where it's not easily known which objects are alive
+    // (both the region space and the non-moving space) or which
+    // copies of objects to visit, and the to-space invariant could be
+    // easily broken. Visit objects while GC isn't running by using
+    // IncrementDisableMovingGC() and threads are suspended.
+    IncrementDisableMovingGC(self);
+    {
+      ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
+      ScopedSuspendAll ssa(__FUNCTION__);
+      VisitObjectsInternalRegionSpace(visitor);
+      VisitObjectsInternal(visitor);
+    }
+    DecrementDisableMovingGC(self);
+  } else {
+    // Since concurrent moving GC has thread suspension, also poison ObjPtr in the normal case to
+    // catch bugs.
+    self->PoisonObjectPointers();
+    // GCs can move objects, so don't allow this.
+    ScopedAssertNoThreadSuspension ants("Visiting objects");
+    DCHECK(region_space_ == nullptr);
+    VisitObjectsInternal(visitor);
+    self->PoisonObjectPointers();
+  }
+}
+
+template <typename Visitor>
+inline void Heap::VisitObjectsPaused(Visitor&& visitor) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  VisitObjectsInternalRegionSpace(visitor);
+  VisitObjectsInternal(visitor);
+}
+
+// Visit objects in the region spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternalRegionSpace(Visitor&& visitor) {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  if (region_space_ != nullptr) {
+    DCHECK(IsGcConcurrentAndMoving());
+    if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
+      // Exclude the pre-zygote fork time where the semi-space collector
+      // calls VerifyHeapReferences() as part of the zygote compaction
+      // which then would call here without the moving GC disabled,
+      // which is fine.
+      bool is_thread_running_gc = false;
+      if (kIsDebugBuild) {
+        MutexLock mu(self, *gc_complete_lock_);
+        is_thread_running_gc = self == thread_running_gc_;
+      }
+      // If we are not the thread running the GC in a GC-exclusive region, then moving GC
+      // must be disabled.
+      DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
+    }
+    region_space_->Walk(visitor);
+  }
+}
+
+// Visit objects in the other spaces.
+template <typename Visitor>
+inline void Heap::VisitObjectsInternal(Visitor&& visitor) {
+  if (bump_pointer_space_ != nullptr) {
+    // Visit objects in bump pointer space.
+    bump_pointer_space_->Walk(visitor);
+  }
+  // TODO: Switch to standard begin and end to use a range-based loop.
+  for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
+    mirror::Object* const obj = it->AsMirrorPtr();
+
+    mirror::Class* kls = nullptr;
+    if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
+      // Below invariant is safe regardless of what space the Object is in.
+      // For speed reasons, only perform it when Rosalloc could possibly be used.
+      // (Disabled for read barriers because it never uses Rosalloc).
+      // (See the DCHECK in RosAllocSpace constructor).
+      if (!kUseReadBarrier) {
+        // Rosalloc has a race in allocation. Objects can be written into the allocation
+        // stack before their header writes are visible to this thread.
+        // See b/28790624 for more details.
+        //
+        // obj.class will either be pointing to a valid Class*, or it will point
+        // to a rosalloc free buffer.
+        //
+        // If it's pointing to a valid Class* then that Class's Class will be the
+        // ClassClass (whose Class is itself).
+        //
+        // A rosalloc free buffer will point to another rosalloc free buffer
+        // (or to null), and never to itself.
+        //
+        // Either way dereferencing while its not-null is safe because it will
+        // always point to another valid pointer or to null.
+        mirror::Class* klsClass = kls->GetClass();
+
+        if (klsClass == nullptr) {
+          continue;
+        } else if (klsClass->GetClass() != klsClass) {
+          continue;
+        }
+      } else {
+        // Ensure the invariant is not broken for non-rosalloc cases.
+        DCHECK(Heap::rosalloc_space_ == nullptr)
+            << "unexpected rosalloc with read barriers";
+        DCHECK(kls->GetClass() != nullptr)
+            << "invalid object: class does not have a class";
+        DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
+            << "invalid object: class's class is not ClassClass";
+      }
+
+      // Avoid the race condition caused by the object not yet being written into the allocation
+      // stack or the class not yet being written in the object. Or, if
+      // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
+      visitor(obj);
+    }
+  }
+  {
+    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    GetLiveBitmap()->Visit<Visitor>(visitor);
+  }
+}
+
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_HEAP_VISIT_OBJECTS_INL_H_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ad4c0d5..f1685b2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -37,6 +37,7 @@
 #include "cutils/sched_policy.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
+#include "entrypoints/quick/quick_alloc_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap-inl.h"
 #include "gc/accounting/mod_union_table-inl.h"
@@ -61,27 +62,27 @@
 #include "gc/space/zygote_space.h"
 #include "gc/task_processor.h"
 #include "gc/verification.h"
-#include "entrypoints/quick/quick_alloc_entrypoints.h"
 #include "gc_pause_listener.h"
 #include "gc_root.h"
+#include "handle_scope-inl.h"
 #include "heap-inl.h"
+#include "heap-visit-objects-inl.h"
 #include "image.h"
 #include "intern_table.h"
 #include "java_vm_ext.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
-#include "obj_ptr-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/reference-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "obj_ptr-inl.h"
 #include "os.h"
 #include "reflection.h"
 #include "runtime.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
-#include "handle_scope-inl.h"
 #include "thread_list.h"
 #include "verify_object-inl.h"
 #include "well_known_classes.h"
@@ -905,134 +906,6 @@
   }
 }
 
-// Visit objects when threads aren't suspended. If concurrent moving
-// GC, disable moving GC and suspend threads and then visit objects.
-void Heap::VisitObjects(ObjectCallback callback, void* arg) {
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertSharedHeld(self);
-  DCHECK(!Locks::mutator_lock_->IsExclusiveHeld(self)) << "Call VisitObjectsPaused() instead";
-  if (IsGcConcurrentAndMoving()) {
-    // Concurrent moving GC. Just suspending threads isn't sufficient
-    // because a collection isn't one big pause and we could suspend
-    // threads in the middle (between phases) of a concurrent moving
-    // collection where it's not easily known which objects are alive
-    // (both the region space and the non-moving space) or which
-    // copies of objects to visit, and the to-space invariant could be
-    // easily broken. Visit objects while GC isn't running by using
-    // IncrementDisableMovingGC() and threads are suspended.
-    IncrementDisableMovingGC(self);
-    {
-      ScopedThreadSuspension sts(self, kWaitingForVisitObjects);
-      ScopedSuspendAll ssa(__FUNCTION__);
-      VisitObjectsInternalRegionSpace(callback, arg);
-      VisitObjectsInternal(callback, arg);
-    }
-    DecrementDisableMovingGC(self);
-  } else {
-    // Since concurrent moving GC has thread suspension, also poison ObjPtr the normal case to
-    // catch bugs.
-    self->PoisonObjectPointers();
-    // GCs can move objects, so don't allow this.
-    ScopedAssertNoThreadSuspension ants("Visiting objects");
-    DCHECK(region_space_ == nullptr);
-    VisitObjectsInternal(callback, arg);
-    self->PoisonObjectPointers();
-  }
-}
-
-// Visit objects when threads are already suspended.
-void Heap::VisitObjectsPaused(ObjectCallback callback, void* arg) {
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertExclusiveHeld(self);
-  VisitObjectsInternalRegionSpace(callback, arg);
-  VisitObjectsInternal(callback, arg);
-}
-
-// Visit objects in the region spaces.
-void Heap::VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) {
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertExclusiveHeld(self);
-  if (region_space_ != nullptr) {
-    DCHECK(IsGcConcurrentAndMoving());
-    if (!zygote_creation_lock_.IsExclusiveHeld(self)) {
-      // Exclude the pre-zygote fork time where the semi-space collector
-      // calls VerifyHeapReferences() as part of the zygote compaction
-      // which then would call here without the moving GC disabled,
-      // which is fine.
-      bool is_thread_running_gc = false;
-      if (kIsDebugBuild) {
-        MutexLock mu(self, *gc_complete_lock_);
-        is_thread_running_gc = self == thread_running_gc_;
-      }
-      // If we are not the thread running the GC on in a GC exclusive region, then moving GC
-      // must be disabled.
-      DCHECK(is_thread_running_gc || IsMovingGCDisabled(self));
-    }
-    region_space_->Walk(callback, arg);
-  }
-}
-
-// Visit objects in the other spaces.
-void Heap::VisitObjectsInternal(ObjectCallback callback, void* arg) {
-  if (bump_pointer_space_ != nullptr) {
-    // Visit objects in bump pointer space.
-    bump_pointer_space_->Walk(callback, arg);
-  }
-  // TODO: Switch to standard begin and end to use ranged a based loop.
-  for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
-    mirror::Object* const obj = it->AsMirrorPtr();
-
-    mirror::Class* kls = nullptr;
-    if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
-      // Below invariant is safe regardless of what space the Object is in.
-      // For speed reasons, only perform it when Rosalloc could possibly be used.
-      // (Disabled for read barriers because it never uses Rosalloc).
-      // (See the DCHECK in RosAllocSpace constructor).
-      if (!kUseReadBarrier) {
-        // Rosalloc has a race in allocation. Objects can be written into the allocation
-        // stack before their header writes are visible to this thread.
-        // See b/28790624 for more details.
-        //
-        // obj.class will either be pointing to a valid Class*, or it will point
-        // to a rosalloc free buffer.
-        //
-        // If it's pointing to a valid Class* then that Class's Class will be the
-        // ClassClass (whose Class is itself).
-        //
-        // A rosalloc free buffer will point to another rosalloc free buffer
-        // (or to null), and never to itself.
-        //
-        // Either way dereferencing while its not-null is safe because it will
-        // always point to another valid pointer or to null.
-        mirror::Class* klsClass = kls->GetClass();
-
-        if (klsClass == nullptr) {
-          continue;
-        } else if (klsClass->GetClass() != klsClass) {
-          continue;
-        }
-      } else {
-        // Ensure the invariant is not broken for non-rosalloc cases.
-        DCHECK(Heap::rosalloc_space_ == nullptr)
-            << "unexpected rosalloc with read barriers";
-        DCHECK(kls->GetClass() != nullptr)
-            << "invalid object: class does not have a class";
-        DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
-            << "invalid object: class's class is not ClassClass";
-      }
-
-      // Avoid the race condition caused by the object not yet being written into the allocation
-      // stack or the class not yet being written in the object. Or, if
-      // kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
-      callback(obj, arg);
-    }
-  }
-  {
-    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    GetLiveBitmap()->Walk(callback, arg);
-  }
-}
-
 void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
   space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
   space::ContinuousSpace* space2 = non_moving_space_;
@@ -1639,13 +1512,17 @@
   }
 }
 
-void Heap::VerificationCallback(mirror::Object* obj, void* arg) {
-  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
-}
-
 void Heap::VerifyHeap() {
   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
+  auto visitor = [&](mirror::Object* obj) {
+    VerifyObjectBody(obj);
+  };
+  // Technically we need the mutator lock here to call Visit. However, VerifyObjectBody is already
+  // NO_THREAD_SAFETY_ANALYSIS.
+  auto no_thread_safety_analysis = [&]() NO_THREAD_SAFETY_ANALYSIS {
+    GetLiveBitmap()->Visit(visitor);
+  };
+  no_thread_safety_analysis();
 }
 
 void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
@@ -1918,138 +1795,84 @@
   return GetBytesFreedEver() + GetBytesAllocated();
 }
 
-class InstanceCounter {
- public:
-  InstanceCounter(const std::vector<Handle<mirror::Class>>& classes,
-                  bool use_is_assignable_from,
-                  uint64_t* counts)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
-
-  static void Callback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
-    mirror::Class* instance_class = obj->GetClass();
-    CHECK(instance_class != nullptr);
-    for (size_t i = 0; i < instance_counter->classes_.size(); ++i) {
-      ObjPtr<mirror::Class> klass = instance_counter->classes_[i].Get();
-      if (instance_counter->use_is_assignable_from_) {
-        if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
-          ++instance_counter->counts_[i];
-        }
-      } else if (instance_class == klass) {
-        ++instance_counter->counts_[i];
-      }
-    }
-  }
-
- private:
-  const std::vector<Handle<mirror::Class>>& classes_;
-  bool use_is_assignable_from_;
-  uint64_t* const counts_;
-  DISALLOW_COPY_AND_ASSIGN(InstanceCounter);
-};
-
 void Heap::CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                           bool use_is_assignable_from,
                           uint64_t* counts) {
-  InstanceCounter counter(classes, use_is_assignable_from, counts);
-  VisitObjects(InstanceCounter::Callback, &counter);
-}
-
-class InstanceCollector {
- public:
-  InstanceCollector(VariableSizedHandleScope& scope,
-                    Handle<mirror::Class> c,
-                    int32_t max_count,
-                    std::vector<Handle<mirror::Object>>& instances)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : scope_(scope),
-        class_(c),
-        max_count_(max_count),
-        instances_(instances) {}
-
-  static void Callback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    DCHECK(arg != nullptr);
-    InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
-    if (obj->GetClass() == instance_collector->class_.Get()) {
-      if (instance_collector->max_count_ == 0 ||
-          instance_collector->instances_.size() < instance_collector->max_count_) {
-        instance_collector->instances_.push_back(instance_collector->scope_.NewHandle(obj));
+  auto instance_counter = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+    mirror::Class* instance_class = obj->GetClass();
+    CHECK(instance_class != nullptr);
+    for (size_t i = 0; i < classes.size(); ++i) {
+      ObjPtr<mirror::Class> klass = classes[i].Get();
+      if (use_is_assignable_from) {
+        if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
+          ++counts[i];
+        }
+      } else if (instance_class == klass) {
+        ++counts[i];
       }
     }
-  }
-
- private:
-  VariableSizedHandleScope& scope_;
-  Handle<mirror::Class> const class_;
-  const uint32_t max_count_;
-  std::vector<Handle<mirror::Object>>& instances_;
-  DISALLOW_COPY_AND_ASSIGN(InstanceCollector);
-};
-
-void Heap::GetInstances(VariableSizedHandleScope& scope,
-                        Handle<mirror::Class> c,
-                        int32_t max_count,
-                        std::vector<Handle<mirror::Object>>& instances) {
-  InstanceCollector collector(scope, c, max_count, instances);
-  VisitObjects(&InstanceCollector::Callback, &collector);
+  };
+  VisitObjects(instance_counter);
 }
 
-class ReferringObjectsFinder {
- public:
-  ReferringObjectsFinder(VariableSizedHandleScope& scope,
-                         Handle<mirror::Object> object,
-                         int32_t max_count,
-                         std::vector<Handle<mirror::Object>>& referring_objects)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : scope_(scope),
-        object_(object),
-        max_count_(max_count),
-        referring_objects_(referring_objects) {}
-
-  static void Callback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
-  }
-
-  // For bitmap Visit.
-  // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
-  // annotalysis on visitors.
-  void operator()(ObjPtr<mirror::Object> o) const NO_THREAD_SAFETY_ANALYSIS {
-    o->VisitReferences(*this, VoidFunctor());
-  }
-
-  // For Object::VisitReferences.
-  void operator()(ObjPtr<mirror::Object> obj,
-                  MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
-    if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
-      referring_objects_.push_back(scope_.NewHandle(obj));
+void Heap::GetInstances(VariableSizedHandleScope& scope,
+                        Handle<mirror::Class> h_class,
+                        int32_t max_count,
+                        std::vector<Handle<mirror::Object>>& instances) {
+  DCHECK_GE(max_count, 0);
+  auto instance_collector = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (obj->GetClass() == h_class.Get()) {
+      if (max_count == 0 || instances.size() < static_cast<size_t>(max_count)) {
+        instances.push_back(scope.NewHandle(obj));
+      }
     }
-  }
-
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-      const {}
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
- private:
-  VariableSizedHandleScope& scope_;
-  Handle<mirror::Object> const object_;
-  const uint32_t max_count_;
-  std::vector<Handle<mirror::Object>>& referring_objects_;
-  DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
-};
+  };
+  VisitObjects(instance_collector);
+}
 
 void Heap::GetReferringObjects(VariableSizedHandleScope& scope,
                                Handle<mirror::Object> o,
                                int32_t max_count,
                                std::vector<Handle<mirror::Object>>& referring_objects) {
+  class ReferringObjectsFinder {
+   public:
+    ReferringObjectsFinder(VariableSizedHandleScope& scope_in,
+                           Handle<mirror::Object> object_in,
+                           int32_t max_count_in,
+                           std::vector<Handle<mirror::Object>>& referring_objects_in)
+        REQUIRES_SHARED(Locks::mutator_lock_)
+        : scope_(scope_in),
+          object_(object_in),
+          max_count_(max_count_in),
+          referring_objects_(referring_objects_in) {}
+
+    // For Object::VisitReferences.
+    void operator()(ObjPtr<mirror::Object> obj,
+                    MemberOffset offset,
+                    bool is_static ATTRIBUTE_UNUSED) const
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
+      if (ref == object_.Get() && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
+        referring_objects_.push_back(scope_.NewHandle(obj));
+      }
+    }
+
+    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+        const {}
+    void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+   private:
+    VariableSizedHandleScope& scope_;
+    Handle<mirror::Object> const object_;
+    const uint32_t max_count_;
+    std::vector<Handle<mirror::Object>>& referring_objects_;
+    DISALLOW_COPY_AND_ASSIGN(ReferringObjectsFinder);
+  };
   ReferringObjectsFinder finder(scope, o, max_count, referring_objects);
-  VisitObjects(&ReferringObjectsFinder::Callback, &finder);
+  auto referring_objects_finder = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+    obj->VisitReferences(finder, VoidFunctor());
+  };
+  VisitObjects(referring_objects_finder);
 }
 
 void Heap::CollectGarbage(bool clear_soft_references) {
@@ -2357,24 +2180,25 @@
         bin_mark_bitmap_(nullptr),
         is_running_on_memory_tool_(is_running_on_memory_tool) {}
 
-  void BuildBins(space::ContinuousSpace* space) {
+  void BuildBins(space::ContinuousSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
     bin_live_bitmap_ = space->GetLiveBitmap();
     bin_mark_bitmap_ = space->GetMarkBitmap();
-    BinContext context;
-    context.prev_ = reinterpret_cast<uintptr_t>(space->Begin());
-    context.collector_ = this;
+    uintptr_t prev = reinterpret_cast<uintptr_t>(space->Begin());
     WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     // Note: This requires traversing the space in increasing order of object addresses.
-    bin_live_bitmap_->Walk(Callback, reinterpret_cast<void*>(&context));
+    auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
+      size_t bin_size = object_addr - prev;
+      // Add the bin spanning from the end of the previous object to the start of the current
+      // object.
+      AddBin(bin_size, prev);
+      prev = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
+    };
+    bin_live_bitmap_->Walk(visitor);
     // Add the last bin which spans after the last object to the end of the space.
-    AddBin(reinterpret_cast<uintptr_t>(space->End()) - context.prev_, context.prev_);
+    AddBin(reinterpret_cast<uintptr_t>(space->End()) - prev, prev);
   }
 
  private:
-  struct BinContext {
-    uintptr_t prev_;  // The end of the previous object.
-    ZygoteCompactingCollector* collector_;
-  };
   // Maps from bin sizes to locations.
   std::multimap<size_t, uintptr_t> bins_;
   // Live bitmap of the space which contains the bins.
@@ -2383,18 +2207,6 @@
   accounting::ContinuousSpaceBitmap* bin_mark_bitmap_;
   const bool is_running_on_memory_tool_;
 
-  static void Callback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(arg != nullptr);
-    BinContext* context = reinterpret_cast<BinContext*>(arg);
-    ZygoteCompactingCollector* collector = context->collector_;
-    uintptr_t object_addr = reinterpret_cast<uintptr_t>(obj);
-    size_t bin_size = object_addr - context->prev_;
-    // Add the bin consisting of the end of the previous object to the start of the current object.
-    collector->AddBin(bin_size, context->prev_);
-    context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kObjectAlignment);
-  }
-
   void AddBin(size_t size, uintptr_t position) {
     if (is_running_on_memory_tool_) {
       MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size);
@@ -2935,7 +2747,7 @@
 class VerifyReferenceVisitor : public SingleRootVisitor {
  public:
   VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
 
   size_t GetFailureCount() const {
@@ -3089,8 +2901,7 @@
   VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
       : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
 
-  void operator()(mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+  void operator()(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Note: we are verifying the references in obj but not obj itself, this is because obj must
     // be live or else how did we find it in the live bitmap?
     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3098,12 +2909,6 @@
     obj->VisitReferences(visitor, visitor);
   }
 
-  static void VisitCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
-    visitor->operator()(obj);
-  }
-
   void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3175,7 +2980,7 @@
   // 2. Allocated during the GC (pre sweep GC verification).
   // We don't want to verify the objects in the live stack since they themselves may be
   // pointing to dead objects if they are not reachable.
-  VisitObjectsPaused(VerifyObjectVisitor::VisitCallback, &visitor);
+  VisitObjectsPaused(visitor);
   // Verify the roots:
   visitor.VerifyRoots();
   if (visitor.GetFailureCount() > 0) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9e55081..e172d2d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
 #include "allocator_type.h"
 #include "arch/instruction_set.h"
 #include "atomic.h"
+#include "base/mutex.h"
 #include "base/time_utils.h"
 #include "gc/gc_cause.h"
 #include "gc/collector/gc_type.h"
@@ -51,9 +52,6 @@
 class TimingLogger;
 class VariableSizedHandleScope;
 
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
 namespace mirror {
   class Class;
   class Object;
@@ -250,10 +248,12 @@
   }
 
   // Visit all of the live objects in the heap.
-  void VisitObjects(ObjectCallback callback, void* arg)
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
-  void VisitObjectsPaused(ObjectCallback callback, void* arg)
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
@@ -1007,9 +1007,6 @@
 
   size_t GetPercentFree();
 
-  static void VerificationCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_);
-
   // Swap the allocation stack with the live stack.
   void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1051,10 +1048,12 @@
   // Trim 0 pages at the end of reference tables.
   void TrimIndirectReferenceTables(Thread* self);
 
-  void VisitObjectsInternal(ObjectCallback callback, void* arg)
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
-  void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
   void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 52da763..42b31ab 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -22,10 +22,10 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/reference-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "object_callbacks.h"
 #include "reference_processor-inl.h"
 #include "reflection.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "task_processor.h"
 #include "utils.h"
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
new file mode 100644
index 0000000..5d05ea2
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
+
+#include "bump_pointer_space.h"
+
+#include "base/bit_utils.h"
+#include "mirror/object-inl.h"
+#include "thread-current-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+template <typename Visitor>
+inline void BumpPointerSpace::Walk(Visitor&& visitor) {
+  uint8_t* pos = Begin();
+  uint8_t* end = End();
+  uint8_t* main_end = pos;
+  // Internal indirection w/ NO_THREAD_SAFETY_ANALYSIS. Optimally, we'd like to have an annotation
+  // like
+  //   REQUIRES_AS(visitor.operator(mirror::Object*))
+  // on Walk to expose the interprocedural nature of locks here without having to duplicate the
+  // function.
+  //
+  // NO_THREAD_SAFETY_ANALYSIS is a workaround. The problem with the workaround of course is that
+  // it doesn't complain at the callsite. However, that is strictly not worse than the
+  // ObjectCallback version it replaces.
+  auto no_thread_safety_analysis_visit = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+    visitor(obj);
+  };
+
+  {
+    MutexLock mu(Thread::Current(), block_lock_);
+    // If we have 0 blocks then we need to update the main header since we have bump pointer style
+    // allocation into an unbounded region (actually bounded by Capacity()).
+    if (num_blocks_ == 0) {
+      UpdateMainBlock();
+    }
+    main_end = Begin() + main_block_size_;
+    if (num_blocks_ == 0) {
+      // We don't have any other blocks, this means someone else may be allocating into the main
+      // block. In this case, we don't want to try and visit the other blocks after the main block
+      // since these could actually be part of the main block.
+      end = main_end;
+    }
+  }
+  // Walk all of the objects in the main block first.
+  while (pos < main_end) {
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+    // No read barrier because obj may not be a valid object.
+    if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
+      // There is a race condition where a thread has just allocated an object but not set the
+      // class. We can't know the size of this object, so we don't visit it and exit the function
+      // since there are guaranteed to be no other blocks.
+      return;
+    } else {
+      no_thread_safety_analysis_visit(obj);
+      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
+    }
+  }
+  // Walk the other blocks (currently only TLABs).
+  while (pos < end) {
+    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
+    size_t block_size = header->size_;
+    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects start.
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+    const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+    CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
+    // We don't know how many objects are allocated in the current block. When we hit a null class
+    // assume it's the end. TODO: Have a thread update the header when it flushes the block?
+    // No read barrier because obj may not be a valid object.
+    while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+      no_thread_safety_analysis_visit(obj);
+      obj = GetNextObject(obj);
+    }
+    pos += block_size;
+  }
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index bb1ede1..5d91f4b 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -153,58 +153,6 @@
   return storage;
 }
 
-void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) {
-  uint8_t* pos = Begin();
-  uint8_t* end = End();
-  uint8_t* main_end = pos;
-  {
-    MutexLock mu(Thread::Current(), block_lock_);
-    // If we have 0 blocks then we need to update the main header since we have bump pointer style
-    // allocation into an unbounded region (actually bounded by Capacity()).
-    if (num_blocks_ == 0) {
-      UpdateMainBlock();
-    }
-    main_end = Begin() + main_block_size_;
-    if (num_blocks_ == 0) {
-      // We don't have any other blocks, this means someone else may be allocating into the main
-      // block. In this case, we don't want to try and visit the other blocks after the main block
-      // since these could actually be part of the main block.
-      end = main_end;
-    }
-  }
-  // Walk all of the objects in the main block first.
-  while (pos < main_end) {
-    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
-    // No read barrier because obj may not be a valid object.
-    if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
-      // There is a race condition where a thread has just allocated an object but not set the
-      // class. We can't know the size of this object, so we don't visit it and exit the function
-      // since there is guaranteed to be not other blocks.
-      return;
-    } else {
-      callback(obj, arg);
-      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
-    }
-  }
-  // Walk the other blocks (currently only TLABs).
-  while (pos < end) {
-    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
-    size_t block_size = header->size_;
-    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects
-    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
-    const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
-    CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
-    // We don't know how many objects are allocated in the current block. When we hit a null class
-    // assume its the end. TODO: Have a thread update the header when it flushes the block?
-    // No read barrier because obj may not be a valid object.
-    while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
-      callback(obj, arg);
-      obj = GetNextObject(obj);
-    }
-    pos += block_size;
-  }
-}
-
 accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCallback() {
   UNIMPLEMENTED(FATAL);
   UNREACHABLE();
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 566dc5d..4197d0c 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -25,9 +25,6 @@
 class Object;
 }
 
-// Same as in object_callbacks.h. Just avoid the include.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
 namespace gc {
 
 namespace collector {
@@ -149,8 +146,10 @@
   }
 
   // Go through all of the blocks and visit the continuous objects.
-  void Walk(ObjectCallback* callback, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
+  template <typename Visitor>
+  ALWAYS_INLINE void Walk(Visitor&& visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!block_lock_);
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
 
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 3ae382e..fe0d35f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1268,17 +1268,19 @@
           }
           dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter);
         }
-        ArtMethod** methods = dex_cache->GetResolvedMethods();
+        mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods();
         if (methods != nullptr) {
-          ArtMethod** new_methods = fixup_adapter.ForwardObject(methods);
+          mirror::MethodDexCacheType* new_methods = fixup_adapter.ForwardObject(methods);
           if (methods != new_methods) {
             dex_cache->SetResolvedMethods(new_methods);
           }
           for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
-            ArtMethod* orig = mirror::DexCache::GetElementPtrSize(new_methods, j, pointer_size);
+            auto pair = mirror::DexCache::GetNativePairPtrSize(new_methods, j, pointer_size);
+            ArtMethod* orig = pair.object;
             ArtMethod* copy = fixup_adapter.ForwardObject(orig);
             if (orig != copy) {
-              mirror::DexCache::SetElementPtrSize(new_methods, j, copy, pointer_size);
+              pair.object = copy;
+              mirror::DexCache::SetNativePairPtrSize(new_methods, j, pair, pointer_size);
             }
           }
         }
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index a186f4c..1154620 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -191,10 +191,14 @@
   VLOG(heap) << "Size " << GetMemMap()->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
-  // Remap the tail.
+  // Remap the tail. Pass MAP_PRIVATE since we don't want to share the same ashmem as the zygote
+  // space.
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(), alloc_space_name,
-                                                          PROT_READ | PROT_WRITE, &error_msg));
+  std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(End(),
+                                                          alloc_space_name,
+                                                          PROT_READ | PROT_WRITE,
+                                                          MAP_PRIVATE,
+                                                          &error_msg));
   CHECK(mem_map.get() != nullptr) << error_msg;
   void* allocator = CreateAllocator(End(), starting_size_, initial_size_, capacity,
                                     low_memory_mode);
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 2fba4a8..a3b53b4 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -184,8 +184,8 @@
   return bytes;
 }
 
-template<bool kToSpaceOnly>
-void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
+template<bool kToSpaceOnly, typename Visitor>
+void RegionSpace::WalkInternal(Visitor&& visitor) {
   // TODO: MutexLock on region_lock_ won't work due to lock order
   // issues (the classloader classes lock and the monitor lock). We
   // call this with threads suspended.
@@ -201,7 +201,7 @@
       DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
       DCHECK(obj->GetClass() != nullptr);
-      callback(obj, arg);
+      visitor(obj);
     } else if (r->IsLargeTail()) {
       // Do nothing.
     } else {
@@ -215,14 +215,12 @@
         GetLiveBitmap()->VisitMarkedRange(
             reinterpret_cast<uintptr_t>(pos),
             reinterpret_cast<uintptr_t>(top),
-            [callback, arg](mirror::Object* obj) {
-          callback(obj, arg);
-        });
+            visitor);
       } else {
         while (pos < top) {
           mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
           if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
-            callback(obj, arg);
+            visitor(obj);
             pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
           } else {
             break;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 6412158..77d76fb 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -17,7 +17,8 @@
 #ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
 #define ART_RUNTIME_GC_SPACE_REGION_SPACE_H_
 
-#include "object_callbacks.h"
+#include "base/macros.h"
+#include "base/mutex.h"
 #include "space.h"
 #include "thread.h"
 
@@ -152,14 +153,14 @@
   }
 
   // Go through all of the blocks and visit the continuous objects.
-  void Walk(ObjectCallback* callback, void* arg)
-      REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<false>(callback, arg);
+  template <typename Visitor>
+  ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) {
+    WalkInternal<false /* kToSpaceOnly */>(visitor);
   }
-
-  void WalkToSpace(ObjectCallback* callback, void* arg)
+  template <typename Visitor>
+  ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
       REQUIRES(Locks::mutator_lock_) {
-    WalkInternal<true>(callback, arg);
+    WalkInternal<true>(visitor);
   }
 
   accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
@@ -247,8 +248,8 @@
  private:
   RegionSpace(const std::string& name, MemMap* mem_map);
 
-  template<bool kToSpaceOnly>
-  void WalkInternal(ObjectCallback* callback, void* arg) NO_THREAD_SAFETY_ANALYSIS;
+  template<bool kToSpaceOnly, typename Visitor>
+  ALWAYS_INLINE void WalkInternal(Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
 
   class Region {
    public:
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 06e4704..acfd889 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -78,6 +78,10 @@
 DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
 #define STRING_DEX_CACHE_ELEMENT_SIZE 8
 DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
+#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 1023
+DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheMethodCacheSize - 1)))
+#define METHOD_DEX_CACHE_HASH_BITS 10
+DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))))
 #define CARD_TABLE_CARD_SHIFT 0xa
 DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>(art::gc::accounting::CardTable::kCardShift)))
 #define MIN_LARGE_OBJECT_THRESHOLD 0x3000
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index ec860c7..f428bc2 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -52,6 +52,7 @@
 #include "gc/allocation_record.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
 #include "gc/space/space.h"
 #include "globals.h"
 #include "jdwp/jdwp.h"
@@ -485,13 +486,6 @@
   }
 
  private:
-  static void VisitObjectCallback(mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(obj != nullptr);
-    DCHECK(arg != nullptr);
-    reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
-  }
-
   void DumpHeapObject(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -534,8 +528,11 @@
     simple_roots_.clear();
     runtime->VisitRoots(this);
     runtime->VisitImageRoots(this);
-    runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, this);
-
+    auto dump_object = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+      DCHECK(obj != nullptr);
+      DumpHeapObject(obj);
+    };
+    runtime->GetHeap()->VisitObjectsPaused(dump_object);
     output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_END, kHprofTime);
     output_->EndRecord();
   }
diff --git a/runtime/image.cc b/runtime/image.cc
index ac36d7c..7d0a709 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '5', '\0' };  // Fix DexCache fields.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '6', '\0' };  // Hash-based methods array.
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 2a601c9..9e9fa71 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -16,6 +16,7 @@
 
 #include "instrumentation.h"
 
+#include "art_method-inl.h"
 #include "base/enums.h"
 #include "common_runtime_test.h"
 #include "common_throws.h"
@@ -484,10 +485,11 @@
   Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
   mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
   ASSERT_TRUE(klass != nullptr);
-  ArtMethod* method = klass->FindDeclaredDirectMethod("returnReference",
-                                                      "()Ljava/lang/Object;",
-                                                      kRuntimePointerSize);
+  ArtMethod* method =
+      klass->FindClassMethod("returnReference", "()Ljava/lang/Object;", kRuntimePointerSize);
   ASSERT_TRUE(method != nullptr);
+  ASSERT_TRUE(method->IsDirect());
+  ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodExited,
             /*event_method*/ method,
             /*event_field*/ nullptr,
@@ -503,10 +505,10 @@
   Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
   mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
   ASSERT_TRUE(klass != nullptr);
-  ArtMethod* method = klass->FindDeclaredDirectMethod("returnPrimitive",
-                                                      "()I",
-                                                      kRuntimePointerSize);
+  ArtMethod* method = klass->FindClassMethod("returnPrimitive", "()I", kRuntimePointerSize);
   ASSERT_TRUE(method != nullptr);
+  ASSERT_TRUE(method->IsDirect());
+  ASSERT_TRUE(method->GetDeclaringClass() == klass);
   TestEvent(instrumentation::Instrumentation::kMethodExited,
             /*event_method*/ method,
             /*event_field*/ nullptr,
@@ -583,9 +585,11 @@
   Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
   mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
   ASSERT_TRUE(klass != nullptr);
-  ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
-                                                                    kRuntimePointerSize);
+  ArtMethod* method_to_deoptimize =
+      klass->FindClassMethod("instanceMethod", "()V", kRuntimePointerSize);
   ASSERT_TRUE(method_to_deoptimize != nullptr);
+  ASSERT_TRUE(method_to_deoptimize->IsDirect());
+  ASSERT_TRUE(method_to_deoptimize->GetDeclaringClass() == klass);
 
   EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
   EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize));
@@ -630,9 +634,11 @@
   Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
   mirror::Class* klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
   ASSERT_TRUE(klass != nullptr);
-  ArtMethod* method_to_deoptimize = klass->FindDeclaredDirectMethod("instanceMethod", "()V",
-                                                                    kRuntimePointerSize);
+  ArtMethod* method_to_deoptimize =
+      klass->FindClassMethod("instanceMethod", "()V", kRuntimePointerSize);
   ASSERT_TRUE(method_to_deoptimize != nullptr);
+  ASSERT_TRUE(method_to_deoptimize->IsDirect());
+  ASSERT_TRUE(method_to_deoptimize->GetDeclaringClass() == klass);
 
   EXPECT_FALSE(instr->AreAllMethodsDeoptimized());
   EXPECT_FALSE(instr->IsDeoptimized(method_to_deoptimize));
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 85cf73b..9cb74f7 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -27,8 +27,8 @@
 #include "jvalue-inl.h"
 #include "mirror/string-inl.h"
 #include "mterp/mterp.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "stack.h"
 #include "thread-inl.h"
 #include "unstarted_runtime.h"
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 0687b75..be2d34d 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -593,10 +593,8 @@
   }
 
   ArtMethod* invoke_method =
-      class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(self,
-                                                                invoke_method_idx,
-                                                                shadow_frame.GetMethod(),
-                                                                kVirtual);
+      class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+          self, invoke_method_idx, shadow_frame.GetMethod(), kVirtual);
 
   // There is a common dispatch method for method handles that takes
   // arguments either from a range or an array of arguments depending
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 152cce4..2c72821 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -27,7 +27,6 @@
 #include <unordered_map>
 
 #include "android-base/stringprintf.h"
-#include "ScopedLocalRef.h"
 
 #include "art_method-inl.h"
 #include "base/casts.h"
@@ -48,6 +47,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "nth_caller_visitor.h"
 #include "reflection.h"
 #include "thread-inl.h"
@@ -265,7 +265,7 @@
   bool ok = false;
   auto* cl = Runtime::Current()->GetClassLinker();
   if (cl->EnsureInitialized(self, h_klass, true, true)) {
-    auto* cons = h_klass->FindDeclaredDirectMethod("<init>", "()V", cl->GetImagePointerSize());
+    auto* cons = h_klass->FindConstructor("()V", cl->GetImagePointerSize());
     if (cons != nullptr) {
       Handle<mirror::Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
       CHECK(h_obj != nullptr);  // We don't expect OOM at compile-time.
@@ -591,8 +591,7 @@
   }
 
   auto* cl = Runtime::Current()->GetClassLinker();
-  ArtMethod* constructor = h_class->FindDeclaredDirectMethod(
-      "<init>", "([B)V", cl->GetImagePointerSize());
+  ArtMethod* constructor = h_class->FindConstructor("([B)V", cl->GetImagePointerSize());
   if (constructor == nullptr) {
     AbortTransactionOrFail(self, "Could not find ByteArrayInputStream constructor");
     return;
@@ -1010,8 +1009,7 @@
   Handle<mirror::Class> h_class(hs.NewHandle(klass));
   Handle<mirror::Object> h_obj(hs.NewHandle(h_class->AllocObject(self)));
   if (h_obj != nullptr) {
-    ArtMethod* init_method = h_class->FindDirectMethod(
-        "<init>", "()V", class_linker->GetImagePointerSize());
+    ArtMethod* init_method = h_class->FindConstructor("()V", class_linker->GetImagePointerSize());
     if (init_method == nullptr) {
       AbortTransactionOrFail(self, "Could not find <init> for %s", class_descriptor);
       return nullptr;
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index c2ef724..3461a65 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -387,8 +387,9 @@
   ScopedObjectAccess soa(self);
   mirror::Class* klass = mirror::String::GetJavaLangString();
   ArtMethod* method =
-      klass->FindDeclaredDirectMethod("<init>", "(Ljava/lang/String;)V",
-                                      Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+      klass->FindConstructor("(Ljava/lang/String;)V",
+                             Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+  ASSERT_TRUE(method != nullptr);
 
   // create instruction data for invoke-direct {v0, v1} of method with fake index
   uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
@@ -966,12 +967,14 @@
     ASSERT_TRUE(floating_decimal != nullptr);
     ASSERT_TRUE(class_linker->EnsureInitialized(self, floating_decimal, true, true));
 
-    ArtMethod* caller_method = floating_decimal->FindDeclaredDirectMethod(
+    ArtMethod* caller_method = floating_decimal->FindClassMethod(
         "getBinaryToASCIIBuffer",
         "()Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;",
         class_linker->GetImagePointerSize());
     // floating_decimal->DumpClass(LOG_STREAM(ERROR), mirror::Class::kDumpClassFullDetail);
     ASSERT_TRUE(caller_method != nullptr);
+    ASSERT_TRUE(caller_method->IsDirect());
+    ASSERT_TRUE(caller_method->GetDeclaringClass() == floating_decimal.Get());
     ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
     shadow_frame->SetLink(caller_frame);
 
@@ -1020,10 +1023,12 @@
   ASSERT_TRUE(double_class != nullptr);
   ASSERT_TRUE(class_linker->EnsureInitialized(self, double_class, true, true));
 
-  ArtMethod* method = double_class->FindDeclaredDirectMethod("toString",
-                                                             "(D)Ljava/lang/String;",
-                                                             class_linker->GetImagePointerSize());
+  ArtMethod* method = double_class->FindClassMethod("toString",
+                                                    "(D)Ljava/lang/String;",
+                                                    class_linker->GetImagePointerSize());
   ASSERT_TRUE(method != nullptr);
+  ASSERT_TRUE(method->IsDirect());
+  ASSERT_TRUE(method->GetDeclaringClass() == double_class.Get());
 
   // create instruction data for invoke-direct {v0, v1} of method with fake index
   uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
@@ -1179,8 +1184,8 @@
       boot_cp.Assign(boot_cp_class->AllocObject(self)->AsClassLoader());
       CHECK(boot_cp != nullptr);
 
-      ArtMethod* boot_cp_init = boot_cp_class->FindDeclaredDirectMethod(
-          "<init>", "()V", class_linker->GetImagePointerSize());
+      ArtMethod* boot_cp_init = boot_cp_class->FindConstructor(
+          "()V", class_linker->GetImagePointerSize());
       CHECK(boot_cp_init != nullptr);
 
       JValue result;
@@ -1333,8 +1338,8 @@
   Handle<mirror::String> input = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "abd"));
 
   // Find the constructor.
-  ArtMethod* throw_cons = throw_class->FindDeclaredDirectMethod(
-      "<init>", "(Ljava/lang/String;)V", class_linker->GetImagePointerSize());
+  ArtMethod* throw_cons = throw_class->FindConstructor(
+      "(Ljava/lang/String;)V", class_linker->GetImagePointerSize());
   ASSERT_TRUE(throw_cons != nullptr);
   Handle<mirror::Constructor> cons;
   if (class_linker->GetImagePointerSize() == PointerSize::k64) {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 2ad3b29..267f9fd 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -34,17 +34,17 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "nativebridge/native_bridge.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "nativeloader/native_loader.h"
 #include "object_callbacks.h"
 #include "parsed_options.h"
 #include "runtime-inl.h"
 #include "runtime_options.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "sigchain.h"
-#include "ti/agent.h"
 #include "thread-inl.h"
 #include "thread_list.h"
+#include "ti/agent.h"
 
 namespace art {
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 969a570..7abf52e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -20,6 +20,7 @@
 
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/logging.h"
 #include "base/memory_tool.h"
 #include "debugger.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
@@ -45,6 +46,11 @@
 // At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
 static constexpr int kJitPoolThreadPthreadPriority = 9;
 
+// Different compilation threshold constants. These can be overridden on the command line.
+static constexpr size_t kJitDefaultCompileThreshold           = 10000;  // Non-debug default.
+static constexpr size_t kJitStressDefaultCompileThreshold     = 100;    // Fast-debug build.
+static constexpr size_t kJitSlowStressDefaultCompileThreshold = 2;      // Slow-debug build.
+
 // JIT compiler
 void* Jit::jit_library_handle_= nullptr;
 void* Jit::jit_compiler_handle_ = nullptr;
@@ -54,6 +60,11 @@
 void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
 bool Jit::generate_debug_info_ = false;
 
+struct StressModeHelper {
+  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
+};
+DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);
+
 JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
   auto* jit_options = new JitOptions;
   jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
@@ -67,7 +78,16 @@
   jit_options->profile_saver_options_ =
       options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
 
-  jit_options->compile_threshold_ = options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+  if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
+    jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
+  } else {
+    jit_options->compile_threshold_ =
+        kIsDebugBuild
+            ? (StressModeHelper::kSlowMode
+                   ? kJitSlowStressDefaultCompileThreshold
+                   : kJitStressDefaultCompileThreshold)
+            : kJitDefaultCompileThreshold;
+  }
   if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) {
     LOG(FATAL) << "Method compilation threshold is above its internal limit.";
   }
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index f898d41..51e49ec 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -48,8 +48,6 @@
 
 class Jit {
  public:
-  static constexpr bool kStressMode = kIsDebugBuild;
-  static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000;
   static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
   static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
   // How frequently should the interpreter check to see if OSR compilation is ready.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1c36bde..3bee560 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -47,9 +47,13 @@
 static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
 static constexpr int kProtData = PROT_READ | PROT_WRITE;
 static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+static constexpr int kProtReadOnly = PROT_READ;
+static constexpr int kProtNone = PROT_NONE;
 
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+static constexpr size_t kMinMapSpacingPages = 1;
+static constexpr size_t kMaxMapSpacingPages = 128;
 
 #define CHECKED_MPROTECT(memory, size, prot)                \
   do {                                                      \
@@ -60,19 +64,52 @@
     }                                                       \
   } while (false)                                           \
 
+static MemMap* SplitMemMap(MemMap* existing_map,
+                           const char* name,
+                           size_t split_offset,
+                           int split_prot,
+                           std::string* error_msg,
+                           bool use_ashmem,
+                           unique_fd* shmem_fd = nullptr) {
+  std::string error_str;
+  uint8_t* divider = existing_map->Begin() + split_offset;
+  MemMap* new_map = existing_map->RemapAtEnd(divider,
+                                             name,
+                                             split_prot,
+                                             MAP_SHARED,
+                                             &error_str,
+                                             use_ashmem,
+                                             shmem_fd);
+  if (new_map == nullptr) {
+    std::ostringstream oss;
+    oss << "Failed to create spacing for " << name << ": "
+        << error_str << " offset=" << split_offset;
+    *error_msg = oss.str();
+    return nullptr;
+  }
+  return new_map;
+}
+
 JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
                                    size_t max_capacity,
                                    bool generate_debug_info,
                                    std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
-  CHECK_GE(max_capacity, initial_capacity);
+  CHECK_GT(max_capacity, initial_capacity);
+  CHECK_GE(max_capacity - kMaxMapSpacingPages * kPageSize, initial_capacity);
 
-  // Generating debug information is mostly for using the 'perf' tool, which does
-  // not work with ashmem.
-  bool use_ashmem = !generate_debug_info;
+  // Generating debug information is for using the Linux perf tool on
+  // host which does not work with ashmem.
+  // Also, target linux does not support ashmem.
+  bool use_ashmem = !generate_debug_info && !kIsTargetLinux;
+
   // With 'perf', we want a 1-1 mapping between an address and a method.
   bool garbage_collect_code = !generate_debug_info;
 
+  // We only use two mappings (separating rw from rx) if we are able to use ashmem.
+  // See the above comment for debug information and not using ashmem.
+  bool use_two_mappings = use_ashmem;
+
   // We need to have 32 bit offsets from method headers in code cache which point to things
   // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
   // Ensure we're below 1 GB to be safe.
@@ -109,30 +146,114 @@
   initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
   max_capacity = RoundDown(max_capacity, 2 * kPageSize);
 
-  // Data cache is 1 / 2 of the map.
-  // TODO: Make this variable?
-  size_t data_size = max_capacity / 2;
-  size_t code_size = max_capacity - data_size;
-  DCHECK_EQ(code_size + data_size, max_capacity);
-  uint8_t* divider = data_map->Begin() + data_size;
+  // Create a region for JIT data and executable code. This will be
+  // laid out as:
+  //
+  //          +----------------+ --------------------
+  //          :                : ^                  ^
+  //          :  post_code_map : | post_code_size   |
+  //          :   [padding]    : v                  |
+  //          +----------------+ -                  |
+  //          |                | ^                  |
+  //          |   code_map     | | code_size        |
+  //          |   [JIT Code]   | v                  |
+  //          +----------------+ -                  | total_mapping_size
+  //          :                : ^                  |
+  //          :  pre_code_map  : | pre_code_size    |
+  //          :   [padding]    : v                  |
+  //          +----------------+ -                  |
+  //          |                | ^                  |
+  //          |    data_map    | | data_size        |
+  //          |   [Jit Data]   | v                  v
+  //          +----------------+ --------------------
+  //
+  // The padding regions - pre_code_map and post_code_map - exist to
+  // put some random distance between the writable JIT code mapping
+  // and the executable mapping. The padding is discarded at the end
+  // of this function.
+  size_t total_mapping_size = kMaxMapSpacingPages * kPageSize;
+  size_t data_size = RoundUp((max_capacity - total_mapping_size) / 2, kPageSize);
+  size_t pre_code_size =
+      GetRandomNumber(kMinMapSpacingPages, kMaxMapSpacingPages) * kPageSize;
+  size_t code_size = max_capacity - total_mapping_size - data_size;
+  size_t post_code_size = total_mapping_size - pre_code_size;
+  DCHECK_EQ(code_size + data_size + total_mapping_size, max_capacity);
 
-  MemMap* code_map =
-      data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str, use_ashmem);
-  if (code_map == nullptr) {
-    std::ostringstream oss;
-    oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
-    *error_msg = oss.str();
+  // Create pre-code padding region after data region, discarded after
+  // code and data regions are set-up.
+  std::unique_ptr<MemMap> pre_code_map(SplitMemMap(data_map.get(),
+                                                   "jit-code-cache-padding",
+                                                   data_size,
+                                                   kProtNone,
+                                                   error_msg,
+                                                   use_ashmem));
+  if (pre_code_map == nullptr) {
     return nullptr;
   }
-  DCHECK_EQ(code_map->Begin(), divider);
+  DCHECK_EQ(data_map->Size(), data_size);
+  DCHECK_EQ(pre_code_map->Size(), pre_code_size + code_size + post_code_size);
+
+  // Create code region.
+  unique_fd writable_code_fd;
+  std::unique_ptr<MemMap> code_map(SplitMemMap(pre_code_map.get(),
+                                               "jit-code-cache",
+                                               pre_code_size,
+                                               use_two_mappings ? kProtCode : kProtAll,
+                                               error_msg,
+                                               use_ashmem,
+                                               &writable_code_fd));
+  if (code_map == nullptr) {
+    return nullptr;
+  }
+  DCHECK_EQ(pre_code_map->Size(), pre_code_size);
+  DCHECK_EQ(code_map->Size(), code_size + post_code_size);
+
+  // Padding after code region, discarded after code and data regions
+  // are set-up.
+  std::unique_ptr<MemMap> post_code_map(SplitMemMap(code_map.get(),
+                                                    "jit-code-cache-padding",
+                                                    code_size,
+                                                    kProtNone,
+                                                    error_msg,
+                                                    use_ashmem));
+  if (post_code_map == nullptr) {
+    return nullptr;
+  }
+  DCHECK_EQ(code_map->Size(), code_size);
+  DCHECK_EQ(post_code_map->Size(), post_code_size);
+
+  std::unique_ptr<MemMap> writable_code_map;
+  if (use_two_mappings) {
+    // Allocate the R/W view.
+    writable_code_map.reset(MemMap::MapFile(code_size,
+                                            kProtData,
+                                            MAP_SHARED,
+                                            writable_code_fd.get(),
+                                            /* start */ 0,
+                                            /* low_4gb */ true,
+                                            "jit-writable-code",
+                                            &error_str));
+    if (writable_code_map == nullptr) {
+      std::ostringstream oss;
+      oss << "Failed to create writable code cache: " << error_str << " size=" << code_size;
+      *error_msg = oss.str();
+      return nullptr;
+    }
+  }
   data_size = initial_capacity / 2;
   code_size = initial_capacity - data_size;
   DCHECK_EQ(code_size + data_size, initial_capacity);
-  return new JitCodeCache(
-      code_map, data_map.release(), code_size, data_size, max_capacity, garbage_collect_code);
+  return new JitCodeCache(writable_code_map.release(),
+                          code_map.release(),
+                          data_map.release(),
+                          code_size,
+                          data_size,
+                          max_capacity,
+                          garbage_collect_code);
 }
 
-JitCodeCache::JitCodeCache(MemMap* code_map,
+JitCodeCache::JitCodeCache(MemMap* writable_code_map,
+                           MemMap* executable_code_map,
                            MemMap* data_map,
                            size_t initial_code_capacity,
                            size_t initial_data_capacity,
@@ -141,8 +262,9 @@
     : lock_("Jit code cache", kJitCodeCacheLock),
       lock_cond_("Jit code cache condition variable", lock_),
       collection_in_progress_(false),
-      code_map_(code_map),
       data_map_(data_map),
+      executable_code_map_(executable_code_map),
+      writable_code_map_(writable_code_map),
       max_capacity_(max_capacity),
       current_capacity_(initial_code_capacity + initial_data_capacity),
       code_end_(initial_code_capacity),
@@ -162,7 +284,8 @@
       inline_cache_cond_("Jit inline cache condition variable", lock_) {
 
   DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
-  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/);
+  MemMap* writable_map = GetWritableMemMap();
+  code_mspace_ = create_mspace_with_base(writable_map->Begin(), code_end_, false /*locked*/);
   data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_end_, false /*locked*/);
 
   if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
@@ -171,7 +294,10 @@
 
   SetFootprintLimit(current_capacity_);
 
-  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+  if (writable_code_map_ != nullptr) {
+    CHECKED_MPROTECT(writable_code_map_->Begin(), writable_code_map_->Size(), kProtReadOnly);
+  }
+  CHECKED_MPROTECT(executable_code_map_->Begin(), executable_code_map_->Size(), kProtCode);
   CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
 
   VLOG(jit) << "Created jit code cache: initial data size="
@@ -181,7 +307,7 @@
 }
 
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return code_map_->Begin() <= ptr && ptr < code_map_->End();
+  return executable_code_map_->Begin() <= ptr && ptr < executable_code_map_->End();
 }
 
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
@@ -194,27 +320,96 @@
   return false;
 }
 
+/* This method is only for CHECK/DCHECK that pointers are within to a region. */
+static bool IsAddressInMap(const void* addr,
+                           const MemMap* mem_map,
+                           const char* check_name) {
+  if (addr == nullptr || mem_map->HasAddress(addr)) {
+    return true;
+  }
+  LOG(ERROR) << "Is" << check_name << "Address " << addr
+             << " not in [" << reinterpret_cast<void*>(mem_map->Begin())
+             << ", " << reinterpret_cast<void*>(mem_map->Begin() + mem_map->Size()) << ")";
+  return false;
+}
+
+bool JitCodeCache::IsDataAddress(const void* raw_addr) const {
+  return IsAddressInMap(raw_addr, data_map_.get(), "Data");
+}
+
+bool JitCodeCache::IsExecutableAddress(const void* raw_addr) const {
+  return IsAddressInMap(raw_addr, executable_code_map_.get(), "Executable");
+}
+
+bool JitCodeCache::IsWritableAddress(const void* raw_addr) const {
+  return IsAddressInMap(raw_addr, GetWritableMemMap(), "Writable");
+}
+
+// Convert one address within the source map to the same offset within the destination map.
+static void* ConvertAddress(const void* source_address,
+                            const MemMap* source_map,
+                            const MemMap* destination_map) {
+  DCHECK(source_map->HasAddress(source_address)) << source_address;
+  ptrdiff_t offset = reinterpret_cast<const uint8_t*>(source_address) - source_map->Begin();
+  uintptr_t address = reinterpret_cast<uintptr_t>(destination_map->Begin()) + offset;
+  return reinterpret_cast<void*>(address);
+}
+
+template <typename T>
+T* JitCodeCache::ToExecutableAddress(T* writable_address) const {
+  CHECK(IsWritableAddress(writable_address));
+  if (writable_address == nullptr) {
+    return nullptr;
+  }
+  void* executable_address = ConvertAddress(writable_address,
+                                            GetWritableMemMap(),
+                                            executable_code_map_.get());
+  CHECK(IsExecutableAddress(executable_address));
+  return reinterpret_cast<T*>(executable_address);
+}
+
+void* JitCodeCache::ToWritableAddress(const void* executable_address) const {
+  CHECK(IsExecutableAddress(executable_address));
+  if (executable_address == nullptr) {
+    return nullptr;
+  }
+  void* writable_address = ConvertAddress(executable_address,
+                                          executable_code_map_.get(),
+                                          GetWritableMemMap());
+  CHECK(IsWritableAddress(writable_address));
+  return writable_address;
+}
+
 class ScopedCodeCacheWrite : ScopedTrace {
  public:
-  explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
-      : ScopedTrace("ScopedCodeCacheWrite"),
-        code_map_(code_map),
-        only_for_tlb_shootdown_(only_for_tlb_shootdown) {
+  explicit ScopedCodeCacheWrite(JitCodeCache* code_cache, bool only_for_tlb_shootdown = false)
+      : ScopedTrace("ScopedCodeCacheWrite") {
     ScopedTrace trace("mprotect all");
-    CHECKED_MPROTECT(
-        code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtAll);
+    int prot_to_start_writing = kProtAll;
+    if (code_cache->writable_code_map_ == nullptr) {
+      // If there is only one mapping, use the executable mapping and toggle between rwx and rx.
+      prot_to_start_writing = kProtAll;
+      prot_to_stop_writing_ = kProtCode;
+    } else {
+      // If there are two mappings, use the writable mapping and toggle between rw and r.
+      prot_to_start_writing = kProtData;
+      prot_to_stop_writing_ = kProtReadOnly;
+    }
+    writable_map_ = code_cache->GetWritableMemMap();
+    // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
+    // one page.
+    size_ = only_for_tlb_shootdown ? kPageSize : writable_map_->Size();
+    CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_start_writing);
   }
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CHECKED_MPROTECT(
-        code_map_->Begin(), only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(), kProtCode);
+    CHECKED_MPROTECT(writable_map_->Begin(), size_, prot_to_stop_writing_);
   }
- private:
-  MemMap* const code_map_;
 
-  // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
-  // one page.
-  const bool only_for_tlb_shootdown_;
+ private:
+  int prot_to_stop_writing_;
+  MemMap* writable_map_;
+  size_t size_;
 
   DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
 };
@@ -324,8 +519,10 @@
   }
 }
 
-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
+uint8_t* JitCodeCache::GetRootTable(const void* code_ptr, uint32_t* number_of_roots) {
+  CHECK(IsExecutableAddress(code_ptr));
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+  // GetOptimizedCodeInfoPtr uses offsets relative to the EXECUTABLE address.
   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
   uint32_t roots = GetNumberOfRoots(data);
   if (number_of_roots != nullptr) {
@@ -370,6 +567,8 @@
 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), lock_);
   for (const auto& entry : method_code_map_) {
+    // GetRootTable takes an EXECUTABLE address.
+    CHECK(IsExecutableAddress(entry.first));
     uint32_t number_of_roots = 0;
     uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
     GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
@@ -407,17 +606,19 @@
   }
 }
 
-void JitCodeCache::FreeCode(const void* code_ptr) {
-  uintptr_t allocation = FromCodeToAllocation(code_ptr);
+void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
+  CHECK(IsExecutableAddress(code_ptr));
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
   DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
+  // GetRootTable takes an EXECUTABLE address.
   FreeData(GetRootTable(code_ptr));
-  FreeCode(reinterpret_cast<uint8_t*>(allocation));
+  FreeRawCode(reinterpret_cast<uint8_t*>(FromCodeToAllocation(code_ptr)));
 }
 
 void JitCodeCache::FreeAllMethodHeaders(
     const std::unordered_set<OatQuickMethodHeader*>& method_headers) {
+  // method_headers are expected to be in the executable region.
   {
     MutexLock mu(Thread::Current(), *Locks::cha_lock_);
     Runtime::Current()->GetClassHierarchyAnalysis()
@@ -429,9 +630,9 @@
   // so it's possible for the same method_header to start representing
   // different compile code.
   MutexLock mu(Thread::Current(), lock_);
-  ScopedCodeCacheWrite scc(code_map_.get());
+  ScopedCodeCacheWrite scc(this);
   for (const OatQuickMethodHeader* method_header : method_headers) {
-    FreeCode(method_header->GetCode());
+    FreeCodeAndData(method_header->GetCode());
   }
 }
 
@@ -448,9 +649,10 @@
     // with the classlinker_classes_lock_ held, and suspending ourselves could
     // lead to a deadlock.
     {
-      ScopedCodeCacheWrite scc(code_map_.get());
+      ScopedCodeCacheWrite scc(this);
       for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
         if (alloc.ContainsUnsafe(it->second)) {
+          CHECK(IsExecutableAddress(OatQuickMethodHeader::FromCodePointer(it->first)));
           method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
           it = method_code_map_.erase(it);
         } else {
@@ -542,6 +744,115 @@
   method->SetCounter(std::min(jit_warmup_threshold - 1, 1));
 }
 
+#ifdef __aarch64__
+
+static void FlushJitCodeCacheRange(uint8_t* code_ptr,
+                                   uint8_t* writable_ptr,
+                                   size_t code_size) {
+  // Cache maintenance instructions can cause permission faults when a
+  // page is not present (e.g. swapped out or not backed). These
+  // faults should be handled by the kernel, but a bug in some Linux
+  // kernels may surface these permission faults to user-land which
+  // does not currently deal with them (b/63885946). To work around
+  // this, we read a value from each page to fault it in before
+  // attempting to perform cache maintenance operations.
+  //
+  // For reference, this behavior is caused by this commit:
+  // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+
+  // The cache-line size could be probed from the CPU, but assuming
+  // a conservative lower bound is safe even for CPUs that have
+  // different cache-line sizes for big and little cores.
+  static const uintptr_t kSafeCacheLineSize = 32;
+
+  // Ensure stores are present in L1 data cache.
+  __asm __volatile("dsb ish" ::: "memory");
+
+  volatile uint8_t mutant;
+
+  // Push dirty cache-lines out to the point of unification (PoU). The
+  // point of unification is the first point in the cache/memory
+  // hierarchy where the instruction cache and data cache have the
+  // same view of memory. The PoU is where an instruction fetch will
+  // fetch the new code generated by the JIT.
+  //
+  // See: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch11s04.html
+  uintptr_t writable_addr = RoundDown(reinterpret_cast<uintptr_t>(writable_ptr),
+                                      kSafeCacheLineSize);
+  uintptr_t writable_end  = RoundUp(reinterpret_cast<uintptr_t>(writable_ptr) + code_size,
+                                    kSafeCacheLineSize);
+  while (writable_addr < writable_end) {
+    // Read from the cache-line to minimize the chance that a cache
+    // maintenance instruction causes a fault (see kernel bug comment
+    // above).
+    mutant = *reinterpret_cast<const uint8_t*>(writable_addr);
+
+    // Flush cache-line
+    __asm volatile("dc cvau, %0" :: "r"(writable_addr) : "memory");
+    writable_addr += kSafeCacheLineSize;
+  }
+
+  __asm __volatile("dsb ish" ::: "memory");
+
+  uintptr_t code_addr = RoundDown(reinterpret_cast<uintptr_t>(code_ptr), kSafeCacheLineSize);
+  const uintptr_t code_end = RoundUp(reinterpret_cast<uintptr_t>(code_ptr) + code_size,
+                                     kSafeCacheLineSize);
+  while (code_addr < code_end) {
+    // Read from the cache-line to minimize the chance that a cache
+    // maintenance instruction causes a fault (see kernel bug comment
+    // above).
+    mutant = *reinterpret_cast<const uint8_t*>(code_addr);
+
+    // Invalidating the data cache line is only strictly necessary
+    // when the JIT code cache has two mappings (the default). We know
+    // this cache line is clean so this is just invalidating it (using
+    // "dc ivac" would be preferable, but counts as a write and this
+    // memory may not be mapped with write permission).
+    __asm volatile("dc cvau, %0" :: "r"(code_addr) : "memory");
+
+    // Invalidate the instruction cache line to force instructions in
+    // range to be re-fetched following update.
+    __asm volatile("ic ivau, %0" :: "r"(code_addr) : "memory");
+
+    code_addr += kSafeCacheLineSize;
+  }
+
+  // Wait for code cache invalidations to complete.
+  __asm __volatile("dsb ish" ::: "memory");
+
+  // Reset fetched instruction stream.
+  __asm __volatile("isb");
+}
+
+#else  // __aarch64__
+
+static void FlushJitCodeCacheRange(uint8_t* code_ptr,
+                                   uint8_t* writable_ptr,
+                                   size_t code_size) {
+  if (writable_ptr != code_ptr) {
+    // When there are two mappings of the JIT code cache, RX and
+    // RW, flush the RW version first as we've just dirtied the
+    // cache lines with new code. Flushing the RX version first
+    // can cause a permission fault as those addresses are not
+    // writable, but can appear dirty in the cache. There is a lot
+    // of potential subtlety here depending on how the cache is
+    // indexed and tagged.
+    //
+    // Flushing the RX version after the RW version is just
+    // invalidating cachelines in the instruction cache. This is
+    // necessary as the instruction cache will often have a
+    // different set of cache lines present and because the JIT
+    // code cache can start a new function at any boundary within
+    // a cache-line.
+    FlushDataCache(reinterpret_cast<char*>(writable_ptr),
+                   reinterpret_cast<char*>(writable_ptr + code_size));
+  }
+  FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
+                        reinterpret_cast<char*>(code_ptr + code_size));
+}
+
+#endif  // __aarch64__
+
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
@@ -572,35 +883,36 @@
     MutexLock mu(self, lock_);
     WaitForPotentialCollectionToComplete(self);
     {
-      ScopedCodeCacheWrite scc(code_map_.get());
+      ScopedCodeCacheWrite scc(this);
       memory = AllocateCode(total_size);
       if (memory == nullptr) {
         return nullptr;
       }
-      code_ptr = memory + header_size;
+      uint8_t* writable_ptr = memory + header_size;
+      code_ptr = ToExecutableAddress(writable_ptr);
 
-      std::copy(code, code + code_size, code_ptr);
-      method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      new (method_header) OatQuickMethodHeader(
+      std::copy(code, code + code_size, writable_ptr);
+      OatQuickMethodHeader* writable_method_header =
+          OatQuickMethodHeader::FromCodePointer(writable_ptr);
+      // We need to be able to write the OatQuickMethodHeader, so we use writable_method_header.
+      // Otherwise, the offsets encoded in OatQuickMethodHeader are used relative to an executable
+      // address, so we use code_ptr.
+      new (writable_method_header) OatQuickMethodHeader(
           code_ptr - stack_map,
           code_ptr - method_info,
           frame_size_in_bytes,
           core_spill_mask,
           fp_spill_mask,
           code_size);
-      // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
-      // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
-      // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
-      // 6P) stop being supported or their kernels are fixed.
-      //
-      // For reference, this behavior is caused by this commit:
-      // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
-      FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
-                            reinterpret_cast<char*>(code_ptr + code_size));
+
+      FlushJitCodeCacheRange(code_ptr, writable_ptr, code_size);
+
       DCHECK(!Runtime::Current()->IsAotCompiler());
       if (has_should_deoptimize_flag) {
-        method_header->SetHasShouldDeoptimizeFlag();
+        writable_method_header->SetHasShouldDeoptimizeFlag();
       }
+      // All the pointers exported from the cache are executable addresses.
+      method_header = ToExecutableAddress(writable_method_header);
     }
 
     number_of_compilations_++;
@@ -639,13 +951,14 @@
     // but below we still make the compiled code valid for the method.
     MutexLock mu(self, lock_);
     // Fill the root table before updating the entry point.
+    CHECK(IsDataAddress(roots_data));
     DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
     DCHECK_LE(roots_data, stack_map);
     FillRootTable(roots_data, roots);
     {
       // Flush data cache, as compiled code references literals in it.
       // We also need a TLB shootdown to act as memory barrier across cores.
-      ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
+      ScopedCodeCacheWrite ccw(this, /* only_for_tlb_shootdown */ true);
       FlushDataCache(reinterpret_cast<char*>(roots_data),
                      reinterpret_cast<char*>(roots_data + data_size));
     }
@@ -696,11 +1009,11 @@
 
   bool in_cache = false;
   {
-    ScopedCodeCacheWrite ccw(code_map_.get());
+    ScopedCodeCacheWrite ccw(this);
     for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
       if (code_iter->second == method) {
         if (release_memory) {
-          FreeCode(code_iter->first);
+          FreeCodeAndData(code_iter->first);
         }
         code_iter = method_code_map_.erase(code_iter);
         in_cache = true;
@@ -754,10 +1067,10 @@
     profiling_infos_.erase(profile);
   }
   method->SetProfilingInfo(nullptr);
-  ScopedCodeCacheWrite ccw(code_map_.get());
+  ScopedCodeCacheWrite ccw(this);
   for (auto code_iter = method_code_map_.begin(); code_iter != method_code_map_.end();) {
     if (code_iter->second == method) {
-      FreeCode(code_iter->first);
+      FreeCodeAndData(code_iter->first);
       code_iter = method_code_map_.erase(code_iter);
       continue;
     }
@@ -823,6 +1136,7 @@
                              uint8_t* stack_map_data,
                              uint8_t* roots_data) {
   DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
+  CHECK(IsDataAddress(roots_data));
   MutexLock mu(self, lock_);
   FreeData(reinterpret_cast<uint8_t*>(roots_data));
 }
@@ -944,11 +1258,11 @@
 
 void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
   size_t per_space_footprint = new_footprint / 2;
-  DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
+  CHECK(IsAlignedParam(per_space_footprint, kPageSize));
   DCHECK_EQ(per_space_footprint * 2, new_footprint);
   mspace_set_footprint_limit(data_mspace_, per_space_footprint);
   {
-    ScopedCodeCacheWrite scc(code_map_.get());
+    ScopedCodeCacheWrite scc(this);
     mspace_set_footprint_limit(code_mspace_, per_space_footprint);
   }
 }
@@ -1026,8 +1340,8 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(code_map_->Begin()),
-          reinterpret_cast<uintptr_t>(code_map_->Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(executable_code_map_->Begin()),
+          reinterpret_cast<uintptr_t>(executable_code_map_->Begin() + current_capacity_ / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1103,14 +1417,16 @@
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
     MutexLock mu(self, lock_);
-    ScopedCodeCacheWrite scc(code_map_.get());
+    ScopedCodeCacheWrite scc(this);
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       const void* code_ptr = it->first;
+      CHECK(IsExecutableAddress(code_ptr));
       uintptr_t allocation = FromCodeToAllocation(code_ptr);
       if (GetLiveBitmap()->Test(allocation)) {
         ++it;
       } else {
+        CHECK(IsExecutableAddress(it->first));
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
         it = method_code_map_.erase(it);
       }
@@ -1153,6 +1469,7 @@
     for (const auto& it : method_code_map_) {
       ArtMethod* method = it.second;
       const void* code_ptr = it.first;
+      CHECK(IsExecutableAddress(code_ptr));
       const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
         GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
@@ -1178,6 +1495,7 @@
     // Free all profiling infos of methods not compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
+        CHECK(IsDataAddress(info));
         const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
         // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope
         // that the compiled code would not get revived. As mutator threads run concurrently,
@@ -1238,6 +1556,7 @@
   --it;
 
   const void* code_ptr = it->first;
+  CHECK(IsExecutableAddress(code_ptr));
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   if (!method_header->Contains(pc)) {
     return nullptr;
@@ -1320,6 +1639,7 @@
   // store in the ArtMethod's ProfilingInfo pointer.
   QuasiAtomic::ThreadFenceRelease();
 
+  CHECK(IsDataAddress(info));
   method->SetProfilingInfo(info);
   profiling_infos_.push_back(info);
   histogram_profiling_info_memory_use_.AddValue(profile_info_size);
@@ -1332,7 +1652,8 @@
   if (code_mspace_ == mspace) {
     size_t result = code_end_;
     code_end_ += increment;
-    return reinterpret_cast<void*>(result + code_map_->Begin());
+    MemMap* writable_map = GetWritableMemMap();
+    return reinterpret_cast<void*>(result + writable_map->Begin());
   } else {
     DCHECK_EQ(data_mspace_, mspace);
     size_t result = data_end_;
@@ -1484,6 +1805,7 @@
 
 size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) {
   MutexLock mu(Thread::Current(), lock_);
+  CHECK(IsExecutableAddress(ptr));
   return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
 }
 
@@ -1519,22 +1841,27 @@
   size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
   // Ensure the header ends up at expected instruction alignment.
   DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
+  CHECK(IsWritableAddress(result));
   used_memory_for_code_ += mspace_usable_size(result);
   return result;
 }
 
-void JitCodeCache::FreeCode(uint8_t* code) {
-  used_memory_for_code_ -= mspace_usable_size(code);
-  mspace_free(code_mspace_, code);
+void JitCodeCache::FreeRawCode(void* code) {
+  CHECK(IsExecutableAddress(code));
+  void* writable_code = ToWritableAddress(code);
+  used_memory_for_code_ -= mspace_usable_size(writable_code);
+  mspace_free(code_mspace_, writable_code);
 }
 
 uint8_t* JitCodeCache::AllocateData(size_t data_size) {
   void* result = mspace_malloc(data_mspace_, data_size);
+  CHECK(IsDataAddress(reinterpret_cast<uint8_t*>(result)));
   used_memory_for_data_ += mspace_usable_size(result);
   return reinterpret_cast<uint8_t*>(result);
 }
 
 void JitCodeCache::FreeData(uint8_t* data) {
+  CHECK(IsDataAddress(data));
   used_memory_for_data_ -= mspace_usable_size(data);
   mspace_free(data_mspace_, data);
 }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index daa1d61..a062ce4 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -229,6 +229,8 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr);
+
   // The GC needs to disallow the reading of inline caches when it processes them,
   // to avoid having a class being used while it is being deleted.
   void AllowInlineCacheAccess() REQUIRES(!lock_);
@@ -247,9 +249,12 @@
   }
 
  private:
+  friend class ScopedCodeCacheWrite;
+
   // Take ownership of maps.
   JitCodeCache(MemMap* code_map,
                MemMap* data_map,
+               MemMap* writable_code_map,
                size_t initial_code_capacity,
                size_t initial_data_capacity,
                size_t max_capacity,
@@ -292,7 +297,7 @@
       REQUIRES(!Locks::cha_lock_);
 
   // Free in the mspace allocations for `code_ptr`.
-  void FreeCode(const void* code_ptr) REQUIRES(lock_);
+  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
 
   // Number of bytes allocated in the code cache.
   size_t CodeCacheSizeLocked() REQUIRES(lock_);
@@ -325,7 +330,7 @@
   bool CheckLiveCompiledCodeHasProfilingInfo()
       REQUIRES(lock_);
 
-  void FreeCode(uint8_t* code) REQUIRES(lock_);
+  void FreeRawCode(void* code) REQUIRES(lock_);
   uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
   void FreeData(uint8_t* data) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
@@ -335,25 +340,58 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  MemMap* GetWritableMemMap() const {
+    if (writable_code_map_ == nullptr) {
+      // The system required us to map the JIT Code Cache RWX (see
+      // JitCodeCache::Create()).
+      return executable_code_map_.get();
+    } else {
+      // Executable code is mapped RX, and writable code is mapped RW
+      // to the underlying same memory, but at a different address.
+      return writable_code_map_.get();
+    }
+  }
+
+  bool IsDataAddress(const void* raw_addr) const;
+
+  bool IsExecutableAddress(const void* raw_addr) const;
+
+  bool IsWritableAddress(const void* raw_addr) const;
+
+  template <typename T>
+  T* ToExecutableAddress(T* writable_address) const;
+
+  void* ToWritableAddress(const void* executable_address) const;
+
   // Lock for guarding allocations, collections, and the method_code_map_.
   Mutex lock_;
   // Condition to wait on during collection.
   ConditionVariable lock_cond_ GUARDED_BY(lock_);
   // Whether there is a code cache collection in progress.
   bool collection_in_progress_ GUARDED_BY(lock_);
-  // Mem map which holds code.
-  std::unique_ptr<MemMap> code_map_;
+  // JITting methods obviously requires both write and execute permissions on a region of memory.
+  // In the typical (non-debugging) case, we separate the memory mapped view that can write the code
+  // from a view that the runtime uses to execute the code. Having these two views eliminates any
+  // single address region having rwx permissions.  An attacker could still write the writable
+  // address and then execute the executable address. We allocate the mappings with a random
+  // address relationship to each other which makes the attacker need two addresses rather than
+  // just one.  In the debugging case there is no file descriptor to back the
+  // shared memory, and hence we have to use a single mapping.
   // Mem map which holds data (stack maps and profiling info).
   std::unique_ptr<MemMap> data_map_;
+  // Mem map which holds a non-writable view of code for JIT.
+  std::unique_ptr<MemMap> executable_code_map_;
+  // Mem map which holds a non-executable view of code for JIT.
+  std::unique_ptr<MemMap> writable_code_map_;
   // The opaque mspace for allocating code.
   void* code_mspace_ GUARDED_BY(lock_);
   // The opaque mspace for allocating data.
   void* data_mspace_ GUARDED_BY(lock_);
   // Bitmap for collecting code and data.
   std::unique_ptr<CodeCacheBitmap> live_bitmap_;
-  // Holds compiled code associated to the ArtMethod.
+  // Holds non-writable compiled code associated to the ArtMethod.
   SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
-  // Holds osr compiled code associated to the ArtMethod.
+  // Holds non-writable osr compiled code associated to the ArtMethod.
   SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
   // ProfilingInfo objects we have allocated.
   std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index a247b56..45c3792 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -47,9 +47,8 @@
 namespace art {
 
 const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-// Last profile version: Move startup methods to use a bitmap. Also add support for post-startup
-// methods.
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '8', '\0' };
+// Last profile version: update the multidex separator.
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '9', '\0' };
 
 static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
 
@@ -1341,7 +1340,7 @@
 
   os << "ProfileInfo:";
 
-  const std::string kFirstDexFileKeySubstitute = ":classes.dex";
+  const std::string kFirstDexFileKeySubstitute = "!classes.dex";
 
   for (const DexFileData* dex_data : info_) {
     os << "\n";
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index dbad614..927f94b 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -34,12 +34,12 @@
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
 #include "fault_handler.h"
-#include "gc_root.h"
 #include "gc/accounting/card_table-inl.h"
+#include "gc_root.h"
 #include "indirect_reference_table-inl.h"
 #include "interpreter/interpreter.h"
-#include "jni_env_ext.h"
 #include "java_vm_ext.h"
+#include "jni_env_ext.h"
 #include "jvalue-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
@@ -49,12 +49,12 @@
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "mirror/throwable.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "parsed_options.h"
 #include "reflection.h"
 #include "runtime.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "thread.h"
 #include "utf.h"
 #include "well_known_classes.h"
@@ -233,17 +233,10 @@
   }
   ArtMethod* method = nullptr;
   auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-  if (is_static) {
-    method = c->FindDirectMethod(name, sig, pointer_size);
-  } else if (c->IsInterface()) {
+  if (c->IsInterface()) {
     method = c->FindInterfaceMethod(name, sig, pointer_size);
   } else {
-    method = c->FindVirtualMethod(name, sig, pointer_size);
-    if (method == nullptr) {
-      // No virtual method matching the signature.  Search declared
-      // private methods and constructors.
-      method = c->FindDeclaredDirectMethod(name, sig, pointer_size);
-    }
+    method = c->FindClassMethod(name, sig, pointer_size);
   }
   if (method == nullptr || method->IsStatic() != is_static) {
     ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index e1e4f9c..3f00450 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -24,8 +24,8 @@
 #include "java_vm_ext.h"
 #include "jni_env_ext.h"
 #include "mirror/string-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 
 namespace art {
 
@@ -626,9 +626,9 @@
             hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
         mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
         const auto pointer_size = class_linker_->GetImagePointerSize();
-        ArtMethod* method = direct ? c->FindDirectMethod(method_name, method_sig, pointer_size) :
-            c->FindVirtualMethod(method_name, method_sig, pointer_size);
+        ArtMethod* method = c->FindClassMethod(method_name, method_sig, pointer_size);
         ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
+        ASSERT_EQ(direct, method->IsDirect());
         method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
       }
       // Start runtime.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 7b41608..17035dd 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -536,8 +536,13 @@
   }
 }
 
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
-                           std::string* error_msg, bool use_ashmem) {
+MemMap* MemMap::RemapAtEnd(uint8_t* new_end,
+                           const char* tail_name,
+                           int tail_prot,
+                           int sharing_flags,
+                           std::string* error_msg,
+                           bool use_ashmem,
+                           unique_fd* shmem_fd) {
   use_ashmem = use_ashmem && !kIsTargetLinux;
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
@@ -563,14 +568,14 @@
   DCHECK_ALIGNED(tail_base_size, kPageSize);
 
   unique_fd fd;
-  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+  int flags = MAP_ANONYMOUS | sharing_flags;
   if (use_ashmem) {
     // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
     // prefixed "dalvik-".
     std::string debug_friendly_name("dalvik-");
     debug_friendly_name += tail_name;
     fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
-    flags = MAP_PRIVATE | MAP_FIXED;
+    flags = MAP_FIXED | sharing_flags;
     if (fd.get() == -1) {
       *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                 tail_name, strerror(errno));
@@ -604,6 +609,9 @@
                               fd.get());
     return nullptr;
   }
+  if (shmem_fd != nullptr) {
+    shmem_fd->reset(fd.release());
+  }
   return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963..d8908ad 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -25,6 +25,7 @@
 #include <string>
 
 #include "android-base/thread_annotations.h"
+#include "android-base/unique_fd.h"
 
 namespace art {
 
@@ -37,6 +38,8 @@
 #define USE_ART_LOW_4G_ALLOCATOR 0
 #endif
 
+using android::base::unique_fd;
+
 #ifdef __linux__
 static constexpr bool kMadviseZeroes = true;
 #else
@@ -168,11 +171,14 @@
   }
 
   // Unmap the pages at end and remap them to create another memory map.
+  // sharing_flags should be either MAP_PRIVATE or MAP_SHARED.
   MemMap* RemapAtEnd(uint8_t* new_end,
                      const char* tail_name,
                      int tail_prot,
+                     int sharing_flags,
                      std::string* error_msg,
-                     bool use_ashmem = true);
+                     bool use_ashmem = true,
+                     unique_fd* shmem_fd = nullptr);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
       REQUIRES(!MemMap::mem_maps_lock_);
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 5f027b1..8d6bb38 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -74,6 +74,7 @@
     MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                 "MemMapTest_RemapAtEndTest_map1",
                                 PROT_READ | PROT_WRITE,
+                                MAP_PRIVATE,
                                 &error_msg);
     // Check the states of the two maps.
     EXPECT_EQ(m0->Begin(), base0) << error_msg;
@@ -456,6 +457,7 @@
   std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                             "MemMapTest_AlignByTest_map1",
                                             PROT_READ | PROT_WRITE,
+                                            MAP_PRIVATE,
                                             &error_msg));
   uint8_t* base1 = m1->Begin();
   ASSERT_TRUE(base1 != nullptr) << error_msg;
@@ -465,6 +467,7 @@
   std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                             "MemMapTest_AlignByTest_map2",
                                             PROT_READ | PROT_WRITE,
+                                            MAP_PRIVATE,
                                             &error_msg));
   uint8_t* base2 = m2->Begin();
   ASSERT_TRUE(base2 != nullptr) << error_msg;
@@ -474,6 +477,7 @@
   std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                             "MemMapTest_AlignByTest_map1",
                                             PROT_READ | PROT_WRITE,
+                                            MAP_PRIVATE,
                                             &error_msg));
   uint8_t* base3 = m3->Begin();
   ASSERT_TRUE(base3 != nullptr) << error_msg;
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 003cd4e..121c259 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -533,7 +533,11 @@
                                                        PointerSize pointer_size) {
   ObjPtr<Class> declaring_class = method->GetDeclaringClass();
   DCHECK(declaring_class != nullptr) << PrettyClass();
-  DCHECK(declaring_class->IsInterface()) << method->PrettyMethod();
+  if (UNLIKELY(!declaring_class->IsInterface())) {
+    DCHECK(declaring_class->IsObjectClass()) << method->PrettyMethod();
+    DCHECK(method->IsPublic() && !method->IsStatic());
+    return FindVirtualMethodForVirtual(method, pointer_size);
+  }
   DCHECK(!method->IsCopied());
   // TODO cache to improve lookup speed
   const int32_t iftable_count = GetIfTableCount();
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index b0e5b6a..6f70b19 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -396,95 +396,44 @@
   }
 }
 
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
-                                      const StringPiece& signature,
-                                      PointerSize pointer_size) {
-  // Check the current class before checking the interfaces.
-  ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
-  if (method != nullptr) {
-    return method;
-  }
-
-  int32_t iftable_count = GetIfTableCount();
-  ObjPtr<IfTable> iftable = GetIfTable();
-  for (int32_t i = 0; i < iftable_count; ++i) {
-    method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
-    if (method != nullptr) {
-      return method;
+template <typename SignatureType>
+static inline ArtMethod* FindInterfaceMethodWithSignature(ObjPtr<Class> klass,
+                                                          const StringPiece& name,
+                                                          const SignatureType& signature,
+                                                          PointerSize pointer_size)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // If the current class is not an interface, skip the search of its declared methods;
+  // such lookup is used only to distinguish between IncompatibleClassChangeError and
+  // NoSuchMethodError, and the caller has already tried to search methods in the class.
+  if (LIKELY(klass->IsInterface())) {
+    // Search declared methods, both direct and virtual.
+    // (This lookup is used also for invoke-static on interface classes.)
+    for (ArtMethod& method : klass->GetDeclaredMethodsSlice(pointer_size)) {
+      if (method.GetName() == name && method.GetSignature() == signature) {
+        return &method;
+      }
     }
   }
-  return nullptr;
-}
 
-ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
-                                      const Signature& signature,
-                                      PointerSize pointer_size) {
-  // Check the current class before checking the interfaces.
-  ArtMethod* method = FindDeclaredVirtualMethod(name, signature, pointer_size);
-  if (method != nullptr) {
-    return method;
-  }
-
-  int32_t iftable_count = GetIfTableCount();
-  ObjPtr<IfTable> iftable = GetIfTable();
-  for (int32_t i = 0; i < iftable_count; ++i) {
-    method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(name, signature, pointer_size);
-    if (method != nullptr) {
-      return method;
+  // TODO: If there is a unique maximally-specific non-abstract superinterface method,
+  // we should return it, otherwise an arbitrary one can be returned.
+  ObjPtr<IfTable> iftable = klass->GetIfTable();
+  for (int32_t i = 0, iftable_count = iftable->Count(); i < iftable_count; ++i) {
+    ObjPtr<Class> iface = iftable->GetInterface(i);
+    for (ArtMethod& method : iface->GetVirtualMethodsSlice(pointer_size)) {
+      if (method.GetName() == name && method.GetSignature() == signature) {
+        return &method;
+      }
     }
   }
-  return nullptr;
-}
 
-ArtMethod* Class::FindInterfaceMethod(ObjPtr<DexCache> dex_cache,
-                                      uint32_t dex_method_idx,
-                                      PointerSize pointer_size) {
-  // Check the current class before checking the interfaces.
-  ArtMethod* method = FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
-  if (method != nullptr) {
-    return method;
-  }
-
-  int32_t iftable_count = GetIfTableCount();
-  ObjPtr<IfTable> iftable = GetIfTable();
-  for (int32_t i = 0; i < iftable_count; ++i) {
-    method = iftable->GetInterface(i)->FindDeclaredVirtualMethod(
-        dex_cache, dex_method_idx, pointer_size);
-    if (method != nullptr) {
-      return method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
-                                           const StringPiece& signature,
-                                           PointerSize pointer_size) {
-  for (auto& method : GetDirectMethods(pointer_size)) {
-    if (name == method.GetName() && method.GetSignature() == signature) {
-      return &method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindDeclaredDirectMethod(const StringPiece& name,
-                                           const Signature& signature,
-                                           PointerSize pointer_size) {
-  for (auto& method : GetDirectMethods(pointer_size)) {
-    if (name == method.GetName() && signature == method.GetSignature()) {
-      return &method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindDeclaredDirectMethod(ObjPtr<DexCache> dex_cache,
-                                           uint32_t dex_method_idx,
-                                           PointerSize pointer_size) {
-  if (GetDexCache() == dex_cache) {
-    for (auto& method : GetDirectMethods(pointer_size)) {
-      if (method.GetDexMethodIndex() == dex_method_idx) {
+  // Then search for public non-static methods declared in java.lang.Object.
+  if (LIKELY(klass->IsInterface())) {
+    ObjPtr<Class> object_class = klass->GetSuperClass();
+    DCHECK(object_class->IsObjectClass());
+    for (ArtMethod& method : object_class->GetDeclaredMethodsSlice(pointer_size)) {
+      if (method.IsPublic() && !method.IsStatic() &&
+          method.GetName() == name && method.GetSignature() == signature) {
         return &method;
       }
     }
@@ -492,37 +441,220 @@
   return nullptr;
 }
 
-ArtMethod* Class::FindDirectMethod(const StringPiece& name,
-                                   const StringPiece& signature,
-                                   PointerSize pointer_size) {
-  for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
-    ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
-    if (method != nullptr) {
-      return method;
-    }
-  }
-  return nullptr;
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
+                                      const StringPiece& signature,
+                                      PointerSize pointer_size) {
+  return FindInterfaceMethodWithSignature(this, name, signature, pointer_size);
 }
 
-ArtMethod* Class::FindDirectMethod(const StringPiece& name,
-                                   const Signature& signature,
-                                   PointerSize pointer_size) {
-  for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
-    ArtMethod* method = klass->FindDeclaredDirectMethod(name, signature, pointer_size);
-    if (method != nullptr) {
-      return method;
-    }
-  }
-  return nullptr;
+ArtMethod* Class::FindInterfaceMethod(const StringPiece& name,
+                                      const Signature& signature,
+                                      PointerSize pointer_size) {
+  return FindInterfaceMethodWithSignature(this, name, signature, pointer_size);
 }
 
-ArtMethod* Class::FindDirectMethod(ObjPtr<DexCache> dex_cache,
-                                   uint32_t dex_method_idx,
-                                   PointerSize pointer_size) {
-  for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
-    ArtMethod* method = klass->FindDeclaredDirectMethod(dex_cache, dex_method_idx, pointer_size);
-    if (method != nullptr) {
-      return method;
+ArtMethod* Class::FindInterfaceMethod(ObjPtr<DexCache> dex_cache,
+                                      uint32_t dex_method_idx,
+                                      PointerSize pointer_size) {
+  // We always search by name and signature, ignoring the type index in the MethodId.
+  const DexFile& dex_file = *dex_cache->GetDexFile();
+  const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+  StringPiece name = dex_file.StringDataByIdx(method_id.name_idx_);
+  const Signature signature = dex_file.GetMethodSignature(method_id);
+  return FindInterfaceMethod(name, signature, pointer_size);
+}
+
+static inline bool IsInheritedMethod(ObjPtr<mirror::Class> klass,
+                                     ObjPtr<mirror::Class> declaring_class,
+                                     ArtMethod& method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK_EQ(declaring_class, method.GetDeclaringClass());
+  DCHECK_NE(klass, declaring_class);
+  DCHECK(klass->IsArrayClass() ? declaring_class->IsObjectClass()
+                               : klass->IsSubClass(declaring_class));
+  uint32_t access_flags = method.GetAccessFlags();
+  if ((access_flags & (kAccPublic | kAccProtected)) != 0) {
+    return true;
+  }
+  if ((access_flags & kAccPrivate) != 0) {
+    return false;
+  }
+  for (; klass != declaring_class; klass = klass->GetSuperClass()) {
+    if (!klass->IsInSamePackage(declaring_class)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+template <typename SignatureType>
+static inline ArtMethod* FindClassMethodWithSignature(ObjPtr<Class> this_klass,
+                                                      const StringPiece& name,
+                                                      const SignatureType& signature,
+                                                      PointerSize pointer_size)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Search declared methods first.
+  for (ArtMethod& method : this_klass->GetDeclaredMethodsSlice(pointer_size)) {
+    ArtMethod* np_method = method.GetInterfaceMethodIfProxy(pointer_size);
+    if (np_method->GetName() == name && np_method->GetSignature() == signature) {
+      return &method;
+    }
+  }
+
+  // Then search the superclass chain. If we find an inherited method, return it.
+  // If we find a method that's not inherited because of access restrictions,
+  // try to find a method inherited from an interface in copied methods.
+  ObjPtr<Class> klass = this_klass->GetSuperClass();
+  ArtMethod* uninherited_method = nullptr;
+  for (; klass != nullptr; klass = klass->GetSuperClass()) {
+    DCHECK(!klass->IsProxyClass());
+    for (ArtMethod& method : klass->GetDeclaredMethodsSlice(pointer_size)) {
+      if (method.GetName() == name && method.GetSignature() == signature) {
+        if (IsInheritedMethod(this_klass, klass, method)) {
+          return &method;
+        }
+        uninherited_method = &method;
+        break;
+      }
+    }
+    if (uninherited_method != nullptr) {
+      break;
+    }
+  }
+
+  // Then search copied methods.
+  // If we found a method that's not inherited, stop the search in its declaring class.
+  ObjPtr<Class> end_klass = klass;
+  DCHECK_EQ(uninherited_method != nullptr, end_klass != nullptr);
+  klass = this_klass;
+  if (UNLIKELY(klass->IsProxyClass())) {
+    DCHECK(klass->GetCopiedMethodsSlice(pointer_size).empty());
+    klass = klass->GetSuperClass();
+  }
+  for (; klass != end_klass; klass = klass->GetSuperClass()) {
+    DCHECK(!klass->IsProxyClass());
+    for (ArtMethod& method : klass->GetCopiedMethodsSlice(pointer_size)) {
+      if (method.GetName() == name && method.GetSignature() == signature) {
+        return &method;  // No further check needed, copied methods are inherited by definition.
+      }
+    }
+  }
+  return uninherited_method;  // Return the `uninherited_method` if any.
+}
+
+
+ArtMethod* Class::FindClassMethod(const StringPiece& name,
+                                  const StringPiece& signature,
+                                  PointerSize pointer_size) {
+  return FindClassMethodWithSignature(this, name, signature, pointer_size);
+}
+
+ArtMethod* Class::FindClassMethod(const StringPiece& name,
+                                  const Signature& signature,
+                                  PointerSize pointer_size) {
+  return FindClassMethodWithSignature(this, name, signature, pointer_size);
+}
+
+ArtMethod* Class::FindClassMethod(ObjPtr<DexCache> dex_cache,
+                                  uint32_t dex_method_idx,
+                                  PointerSize pointer_size) {
+  // FIXME: Hijacking a proxy class by a custom class loader can break this assumption.
+  DCHECK(!IsProxyClass());
+
+  // First try to find a declared method by dex_method_idx if we have a dex_cache match.
+  ObjPtr<DexCache> this_dex_cache = GetDexCache();
+  if (this_dex_cache == dex_cache) {
+    // Lookup is always performed in the class referenced by the MethodId.
+    DCHECK_EQ(dex_type_idx_, GetDexFile().GetMethodId(dex_method_idx).class_idx_.index_);
+    for (ArtMethod& method : GetDeclaredMethodsSlice(pointer_size)) {
+      if (method.GetDexMethodIndex() == dex_method_idx) {
+        return &method;
+      }
+    }
+  }
+  // If not found, we need to search by name and signature.
+  const DexFile& dex_file = *dex_cache->GetDexFile();
+  const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
+  const Signature signature = dex_file.GetMethodSignature(method_id);
+  StringPiece name;  // Delay strlen() until actually needed.
+  // If we do not have a dex_cache match, try to find the declared method in this class now.
+  if (this_dex_cache != dex_cache && !GetDeclaredMethodsSlice(pointer_size).empty()) {
+    DCHECK(name.empty());
+    name = dex_file.StringDataByIdx(method_id.name_idx_);
+    for (ArtMethod& method : GetDeclaredMethodsSlice(pointer_size)) {
+      if (method.GetName() == name && method.GetSignature() == signature) {
+        return &method;
+      }
+    }
+  }
+
+  // Then search the superclass chain. If we find an inherited method, return it.
+  // If we find a method that's not inherited because of access restrictions,
+  // try to find a method inherited from an interface in copied methods.
+  ArtMethod* uninherited_method = nullptr;
+  ObjPtr<Class> klass = GetSuperClass();
+  for (; klass != nullptr; klass = klass->GetSuperClass()) {
+    ArtMethod* candidate_method = nullptr;
+    ArraySlice<ArtMethod> declared_methods = klass->GetDeclaredMethodsSlice(pointer_size);
+    if (klass->GetDexCache() == dex_cache) {
+      // Matching dex_cache. We cannot compare the `dex_method_idx` anymore because
+      // the type index differs, so compare the name index and proto index.
+      for (ArtMethod& method : declared_methods) {
+        const DexFile::MethodId& cmp_method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+        if (cmp_method_id.name_idx_ == method_id.name_idx_ &&
+            cmp_method_id.proto_idx_ == method_id.proto_idx_) {
+          candidate_method = &method;
+          break;
+        }
+      }
+    } else {
+      if (!declared_methods.empty() && name.empty()) {
+        name = dex_file.StringDataByIdx(method_id.name_idx_);
+      }
+      for (ArtMethod& method : declared_methods) {
+        if (method.GetName() == name && method.GetSignature() == signature) {
+          candidate_method = &method;
+          break;
+        }
+      }
+    }
+    if (candidate_method != nullptr) {
+      if (IsInheritedMethod(this, klass, *candidate_method)) {
+        return candidate_method;
+      } else {
+        uninherited_method = candidate_method;
+        break;
+      }
+    }
+  }
+
+  // Then search copied methods.
+  // If we found a method that's not inherited, stop the search in its declaring class.
+  ObjPtr<Class> end_klass = klass;
+  DCHECK_EQ(uninherited_method != nullptr, end_klass != nullptr);
+  // After we have searched the declared methods of the super-class chain,
+  // search copied methods which can contain methods from interfaces.
+  for (klass = this; klass != end_klass; klass = klass->GetSuperClass()) {
+    ArraySlice<ArtMethod> copied_methods = klass->GetCopiedMethodsSlice(pointer_size);
+    if (!copied_methods.empty() && name.empty()) {
+      name = dex_file.StringDataByIdx(method_id.name_idx_);
+    }
+    for (ArtMethod& method : copied_methods) {
+      if (method.GetName() == name && method.GetSignature() == signature) {
+        return &method;  // No further check needed, copied methods are inherited by definition.
+      }
+    }
+  }
+  return uninherited_method;  // Return the `uninherited_method` if any.
+}
+
+ArtMethod* Class::FindConstructor(const StringPiece& signature, PointerSize pointer_size) {
+  // Internal helper, never called on proxy classes. We can skip GetInterfaceMethodIfProxy().
+  DCHECK(!IsProxyClass());
+  StringPiece name("<init>");
+  for (ArtMethod& method : GetDirectMethodsSliceUnchecked(pointer_size)) {
+    if (method.GetName() == name && method.GetSignature() == signature) {
+      return &method;
     }
   }
   return nullptr;
@@ -539,47 +671,6 @@
   return nullptr;
 }
 
-// TODO These should maybe be changed to be named FindOwnedVirtualMethod or something similar
-// because they do not only find 'declared' methods and will return copied methods. This behavior is
-// desired and correct but the naming can lead to confusion because in the java language declared
-// excludes interface methods which might be found by this.
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
-                                            const StringPiece& signature,
-                                            PointerSize pointer_size) {
-  for (auto& method : GetVirtualMethods(pointer_size)) {
-    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
-    if (name == np_method->GetName() && np_method->GetSignature() == signature) {
-      return &method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name,
-                                            const Signature& signature,
-                                            PointerSize pointer_size) {
-  for (auto& method : GetVirtualMethods(pointer_size)) {
-    ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size);
-    if (name == np_method->GetName() && signature == np_method->GetSignature()) {
-      return &method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindDeclaredVirtualMethod(ObjPtr<DexCache> dex_cache,
-                                            uint32_t dex_method_idx,
-                                            PointerSize pointer_size) {
-  if (GetDexCache() == dex_cache) {
-    for (auto& method : GetDeclaredVirtualMethods(pointer_size)) {
-      if (method.GetDexMethodIndex() == dex_method_idx) {
-        return &method;
-      }
-    }
-  }
-  return nullptr;
-}
-
 ArtMethod* Class::FindDeclaredVirtualMethodByName(const StringPiece& name,
                                                   PointerSize pointer_size) {
   for (auto& method : GetVirtualMethods(pointer_size)) {
@@ -591,42 +682,6 @@
   return nullptr;
 }
 
-ArtMethod* Class::FindVirtualMethod(const StringPiece& name,
-                                    const StringPiece& signature,
-                                    PointerSize pointer_size) {
-  for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
-    ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
-    if (method != nullptr) {
-      return method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindVirtualMethod(const StringPiece& name,
-                                    const Signature& signature,
-                                    PointerSize pointer_size) {
-  for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
-    ArtMethod* method = klass->FindDeclaredVirtualMethod(name, signature, pointer_size);
-    if (method != nullptr) {
-      return method;
-    }
-  }
-  return nullptr;
-}
-
-ArtMethod* Class::FindVirtualMethod(ObjPtr<DexCache> dex_cache,
-                                    uint32_t dex_method_idx,
-                                    PointerSize pointer_size) {
-  for (ObjPtr<Class> klass = this; klass != nullptr; klass = klass->GetSuperClass()) {
-    ArtMethod* method = klass->FindDeclaredVirtualMethod(dex_cache, dex_method_idx, pointer_size);
-    if (method != nullptr) {
-      return method;
-    }
-  }
-  return nullptr;
-}
-
 ArtMethod* Class::FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size) {
   DCHECK(method->GetDeclaringClass()->IsInterface());
   DCHECK(IsInterface()) << "Should only be called on a interface class";
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index e516a06..c626897 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -915,6 +915,13 @@
   ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Find a method with the given name and signature in an interface class.
+  //
+  // Search for the method declared in the class, then search for a method declared in any
+  // superinterface, then search the superclass java.lang.Object (implicitly declared methods
+  // in an interface without superinterfaces, see JLS 9.2, can be inherited, see JLS 9.4.1).
+  // TODO: Implement search for a unique maximally-specific non-abstract superinterface method.
+
   ArtMethod* FindInterfaceMethod(const StringPiece& name,
                                  const StringPiece& signature,
                                  PointerSize pointer_size)
@@ -930,49 +937,46 @@
                                  PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredDirectMethod(const StringPiece& name,
-                                      const StringPiece& signature,
-                                      PointerSize pointer_size)
+  // Find a method with the given name and signature in a non-interface class.
+  //
+  // Search for the method in the class, following the JLS rules which conflict with the RI
+  // in some cases. The JLS says that inherited methods are searched (JLS 15.12.2.1) and
+  // these can come from a superclass or a superinterface (JLS 8.4.8). We perform the
+  // following search:
+  //   1. Search the methods declared directly in the class. If we find a method with the
+  //      given name and signature, return that method.
+  //   2. Search the methods declared in superclasses until we find a method with the given
+  //      signature or complete the search in java.lang.Object. If we find a method with the
+  //      given name and signature, check if it's been inherited by the class where we're
+  //      performing the lookup (qualifying type). If it's inherited, return it. Otherwise,
+  //      just remember the method and its declaring class and proceed to step 3.
+  //   3. Search "copied" methods (containing methods inherited from interfaces) in the class
+  //      and its superclass chain. If we found a method in step 2 (which was not inherited,
+  //      otherwise we would not be performing step 3), end the search when we reach its
+  //      declaring class, otherwise search the entire superclass chain. If we find a method
+  //      with the given name and signature, return that method.
+  //   4. Return the method found in step 2 if any (not inherited), or null.
+  //
+  // It's the responsibility of the caller to throw exceptions if the returned method (or null)
+  // does not satisfy the request. Special consideration should be given to the case where this
+  // function returns a method that's not inherited (found in step 2, returned in step 4).
+
+  ArtMethod* FindClassMethod(const StringPiece& name,
+                             const StringPiece& signature,
+                             PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredDirectMethod(const StringPiece& name,
-                                      const Signature& signature,
-                                      PointerSize pointer_size)
+  ArtMethod* FindClassMethod(const StringPiece& name,
+                             const Signature& signature,
+                             PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* FindDeclaredDirectMethod(ObjPtr<DexCache> dex_cache,
-                                      uint32_t dex_method_idx,
-                                      PointerSize pointer_size)
+  ArtMethod* FindClassMethod(ObjPtr<DexCache> dex_cache,
+                             uint32_t dex_method_idx,
+                             PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* FindDirectMethod(const StringPiece& name,
-                              const StringPiece& signature,
-                              PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindDirectMethod(const StringPiece& name,
-                              const Signature& signature,
-                              PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindDirectMethod(ObjPtr<DexCache> dex_cache,
-                              uint32_t dex_method_idx,
-                              PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name,
-                                       const StringPiece& signature,
-                                       PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name,
-                                       const Signature& signature,
-                                       PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindDeclaredVirtualMethod(ObjPtr<DexCache> dex_cache,
-                                       uint32_t dex_method_idx,
-                                       PointerSize pointer_size)
+  ArtMethod* FindConstructor(const StringPiece& signature, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name,
@@ -983,21 +987,6 @@
                                             PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ArtMethod* FindVirtualMethod(const StringPiece& name,
-                               const StringPiece& signature,
-                               PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindVirtualMethod(const StringPiece& name,
-                               const Signature& signature,
-                               PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ArtMethod* FindVirtualMethod(ObjPtr<DexCache> dex_cache,
-                               uint32_t dex_method_idx,
-                               PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   ArtMethod* FindClassInitializer(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool HasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 18e22ef..fdb14f1 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -208,24 +208,38 @@
   }
 }
 
+inline uint32_t DexCache::MethodSlotIndex(uint32_t method_idx) {
+  DCHECK_LT(method_idx, GetDexFile()->NumMethodIds());
+  const uint32_t slot_idx = method_idx % kDexCacheMethodCacheSize;
+  DCHECK_LT(slot_idx, NumResolvedMethods());
+  return slot_idx;
+}
+
 inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
-  DCHECK_LT(method_idx, NumResolvedMethods());  // NOTE: Unchecked, i.e. not throwing AIOOB.
-  ArtMethod* method = GetElementPtrSize<ArtMethod*>(GetResolvedMethods(), method_idx, ptr_size);
-  // Hide resolution trampoline methods from the caller
-  if (method != nullptr && method->IsRuntimeMethod()) {
-    DCHECK_EQ(method, Runtime::Current()->GetResolutionMethod());
-    return nullptr;
-  }
-  return method;
+  auto pair = GetNativePairPtrSize(GetResolvedMethods(), MethodSlotIndex(method_idx), ptr_size);
+  return pair.GetObjectForIndex(method_idx);
 }
 
 inline void DexCache::SetResolvedMethod(uint32_t method_idx,
                                         ArtMethod* method,
                                         PointerSize ptr_size) {
   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
-  DCHECK_LT(method_idx, NumResolvedMethods());  // NOTE: Unchecked, i.e. not throwing AIOOB.
-  SetElementPtrSize(GetResolvedMethods(), method_idx, method, ptr_size);
+  DCHECK(method != nullptr);
+  MethodDexCachePair pair(method, method_idx);
+  SetNativePairPtrSize(GetResolvedMethods(), MethodSlotIndex(method_idx), pair, ptr_size);
+}
+
+inline void DexCache::ClearResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
+  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
+  uint32_t slot_idx = MethodSlotIndex(method_idx);
+  auto* resolved_methods = GetResolvedMethods();
+  // This is racy but should only be called from the single-threaded ImageWriter.
+  DCHECK(Runtime::Current()->IsAotCompiler());
+  if (GetNativePairPtrSize(resolved_methods, slot_idx, ptr_size).index == method_idx) {
+    MethodDexCachePair cleared(nullptr, MethodDexCachePair::InvalidIndexForSlot(slot_idx));
+    SetNativePairPtrSize(resolved_methods, slot_idx, cleared, ptr_size);
+  }
 }
 
 template <typename PtrType>
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 96e3475..7b18a4c 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -61,14 +61,14 @@
         : reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
   }
 
-  mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
-      reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
-  mirror::TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
-      reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
-  ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
-      reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
-  mirror::FieldDexCacheType* fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
-      reinterpret_cast<mirror::FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
+  StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
+      reinterpret_cast<StringDexCacheType*>(raw_arrays + layout.StringsOffset());
+  TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
+      reinterpret_cast<TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
+  MethodDexCacheType* methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
+      reinterpret_cast<MethodDexCacheType*>(raw_arrays + layout.MethodsOffset());
+  FieldDexCacheType* fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
+      reinterpret_cast<FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
 
   size_t num_strings = kDexCacheStringCacheSize;
   if (dex_file->NumStringIds() < num_strings) {
@@ -82,6 +82,10 @@
   if (dex_file->NumFieldIds() < num_fields) {
     num_fields = dex_file->NumFieldIds();
   }
+  size_t num_methods = kDexCacheMethodCacheSize;
+  if (dex_file->NumMethodIds() < num_methods) {
+    num_methods = dex_file->NumMethodIds();
+  }
 
   // Note that we allocate the method type dex caches regardless of this flag,
   // and we make sure here that they're not used by the runtime. This is in the
@@ -105,7 +109,7 @@
 
   GcRoot<mirror::CallSite>* call_sites = (dex_file->NumCallSiteIds() == 0)
       ? nullptr
-      : reinterpret_cast<GcRoot<mirror::CallSite>*>(raw_arrays + layout.CallSitesOffset());
+      : reinterpret_cast<GcRoot<CallSite>*>(raw_arrays + layout.CallSitesOffset());
 
   DCHECK_ALIGNED(raw_arrays, alignof(StringDexCacheType)) <<
                  "Expected raw_arrays to align to StringDexCacheType.";
@@ -125,8 +129,9 @@
       CHECK_EQ(types[i].load(std::memory_order_relaxed).index, 0u);
       CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
     }
-    for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
-      CHECK(GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
+    for (size_t i = 0; i < num_methods; ++i) {
+      CHECK_EQ(GetNativePairPtrSize(methods, i, image_pointer_size).index, 0u);
+      CHECK(GetNativePairPtrSize(methods, i, image_pointer_size).object == nullptr);
     }
     for (size_t i = 0; i < num_fields; ++i) {
       CHECK_EQ(GetNativePairPtrSize(fields, i, image_pointer_size).index, 0u);
@@ -149,6 +154,9 @@
   if (fields != nullptr) {
     mirror::FieldDexCachePair::Initialize(fields, image_pointer_size);
   }
+  if (methods != nullptr) {
+    mirror::MethodDexCachePair::Initialize(methods, image_pointer_size);
+  }
   if (method_types != nullptr) {
     mirror::MethodTypeDexCachePair::Initialize(method_types);
   }
@@ -159,14 +167,13 @@
                   types,
                   num_types,
                   methods,
-                  dex_file->NumMethodIds(),
+                  num_methods,
                   fields,
                   num_fields,
                   method_types,
                   num_method_types,
                   call_sites,
-                  dex_file->NumCallSiteIds(),
-                  image_pointer_size);
+                  dex_file->NumCallSiteIds());
 }
 
 void DexCache::Init(const DexFile* dex_file,
@@ -175,15 +182,14 @@
                     uint32_t num_strings,
                     TypeDexCacheType* resolved_types,
                     uint32_t num_resolved_types,
-                    ArtMethod** resolved_methods,
+                    MethodDexCacheType* resolved_methods,
                     uint32_t num_resolved_methods,
                     FieldDexCacheType* resolved_fields,
                     uint32_t num_resolved_fields,
                     MethodTypeDexCacheType* resolved_method_types,
                     uint32_t num_resolved_method_types,
                     GcRoot<CallSite>* resolved_call_sites,
-                    uint32_t num_resolved_call_sites,
-                    PointerSize pointer_size) {
+                    uint32_t num_resolved_call_sites) {
   CHECK(dex_file != nullptr);
   CHECK(location != nullptr);
   CHECK_EQ(num_strings != 0u, strings != nullptr);
@@ -207,24 +213,6 @@
   SetField32<false>(NumResolvedFieldsOffset(), num_resolved_fields);
   SetField32<false>(NumResolvedMethodTypesOffset(), num_resolved_method_types);
   SetField32<false>(NumResolvedCallSitesOffset(), num_resolved_call_sites);
-
-  Runtime* const runtime = Runtime::Current();
-  if (runtime->HasResolutionMethod()) {
-    // Initialize the resolve methods array to contain trampolines for resolution.
-    Fixup(runtime->GetResolutionMethod(), pointer_size);
-  }
-}
-
-void DexCache::Fixup(ArtMethod* trampoline, PointerSize pointer_size) {
-  // Fixup the resolve methods array to contain trampoline for resolution.
-  CHECK(trampoline != nullptr);
-  CHECK(trampoline->IsRuntimeMethod());
-  auto* resolved_methods = GetResolvedMethods();
-  for (size_t i = 0, length = NumResolvedMethods(); i < length; i++) {
-    if (GetElementPtrSize<ArtMethod*>(resolved_methods, i, pointer_size) == nullptr) {
-      SetElementPtrSize(resolved_methods, i, trampoline, pointer_size);
-    }
-  }
 }
 
 void DexCache::SetLocation(ObjPtr<mirror::String> location) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index cf570b8..7fd5dd1 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -129,6 +129,9 @@
 using FieldDexCachePair = NativeDexCachePair<ArtField>;
 using FieldDexCacheType = std::atomic<FieldDexCachePair>;
 
+using MethodDexCachePair = NativeDexCachePair<ArtMethod>;
+using MethodDexCacheType = std::atomic<MethodDexCachePair>;
+
 using MethodTypeDexCachePair = DexCachePair<MethodType>;
 using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
 
@@ -153,6 +156,11 @@
   static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
                 "Field dex cache size is not a power of 2.");
 
+  // Size of method dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+  static constexpr size_t kDexCacheMethodCacheSize = 1024;
+  static_assert(IsPowerOfTwo(kDexCacheMethodCacheSize),
+                "Method dex cache size is not a power of 2.");
+
   // Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
   // to hold.
   static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
@@ -171,6 +179,10 @@
     return kDexCacheFieldCacheSize;
   }
 
+  static constexpr size_t StaticMethodSize() {
+    return kDexCacheMethodCacheSize;
+  }
+
   static constexpr size_t StaticMethodTypeSize() {
     return kDexCacheMethodTypeCacheSize;
   }
@@ -189,9 +201,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(Locks::dex_lock_);
 
-  void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
   void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -284,6 +293,8 @@
                                        ArtMethod* resolved,
                                        PointerSize ptr_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE void ClearResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Pointer sized variant, used for patching.
   ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
@@ -328,11 +339,11 @@
     SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
   }
 
-  ArtMethod** GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetFieldPtr<ArtMethod**>(ResolvedMethodsOffset());
+  MethodDexCacheType* GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetFieldPtr<MethodDexCacheType*>(ResolvedMethodsOffset());
   }
 
-  void SetResolvedMethods(ArtMethod** resolved_methods)
+  void SetResolvedMethods(MethodDexCacheType* resolved_methods)
       ALWAYS_INLINE
       REQUIRES_SHARED(Locks::mutator_lock_) {
     SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
@@ -429,6 +440,7 @@
   uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+  uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
@@ -438,15 +450,14 @@
             uint32_t num_strings,
             TypeDexCacheType* resolved_types,
             uint32_t num_resolved_types,
-            ArtMethod** resolved_methods,
+            MethodDexCacheType* resolved_methods,
             uint32_t num_resolved_methods,
             FieldDexCacheType* resolved_fields,
             uint32_t num_resolved_fields,
             MethodTypeDexCacheType* resolved_method_types,
             uint32_t num_resolved_method_types,
             GcRoot<CallSite>* resolved_call_sites,
-            uint32_t num_resolved_call_sites,
-            PointerSize pointer_size)
+            uint32_t num_resolved_call_sites)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
@@ -471,7 +482,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Due to lack of 16-byte atomics support, we use hand-crafted routines.
-#if  defined(__aarch64__)
+#if defined(__aarch64__)
   // 16-byte atomics are supported on aarch64.
   ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
       std::atomic<ConversionPair64>* target) {
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 5b1ba8d..d2b9240 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -54,7 +54,8 @@
       || java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings());
   EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes()
       || java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes());
-  EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
+  EXPECT_TRUE(dex_cache->StaticMethodSize() == dex_cache->NumResolvedMethods()
+      || java_lang_dex_file_->NumMethodIds() == dex_cache->NumResolvedMethods());
   EXPECT_TRUE(dex_cache->StaticArtFieldSize() == dex_cache->NumResolvedFields()
       || java_lang_dex_file_->NumFieldIds() ==  dex_cache->NumResolvedFields());
   EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
@@ -128,14 +129,18 @@
       hs.NewHandle(class_linker_->FindClass(soa.Self(), "LMethodTypes;", class_loader)));
   class_linker_->EnsureInitialized(soa.Self(), method_types, true, true);
 
-  ArtMethod* method1 = method_types->FindVirtualMethod(
+  ArtMethod* method1 = method_types->FindClassMethod(
       "method1",
       "(Ljava/lang/String;)Ljava/lang/String;",
       kRuntimePointerSize);
-  ArtMethod* method2 = method_types->FindVirtualMethod(
+  ASSERT_TRUE(method1 != nullptr);
+  ASSERT_FALSE(method1->IsDirect());
+  ArtMethod* method2 = method_types->FindClassMethod(
       "method2",
       "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;",
       kRuntimePointerSize);
+  ASSERT_TRUE(method2 != nullptr);
+  ASSERT_FALSE(method2->IsDirect());
 
   const DexFile& dex_file = *(method1->GetDexFile());
   Handle<mirror::DexCache> dex_cache = hs.NewHandle(
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 7027410..aee4b19 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -26,7 +26,6 @@
 #include "object-inl.h"
 #include "object_array.h"
 #include "object_array-inl.h"
-#include "object_callbacks.h"
 #include "stack_trace_element.h"
 #include "string.h"
 #include "utils.h"
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 27ce149..fb12841 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -36,11 +36,8 @@
  protected:
   void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
     // Use a smaller heap
-    for (std::pair<std::string, const void*>& pair : *options) {
-      if (pair.first.find("-Xmx") == 0) {
-        pair.first = "-Xmx4M";  // Smallest we can go.
-      }
-    }
+    SetUpRuntimeOptionsForFillHeap(options);
+
     options->push_back(std::make_pair("-Xint", nullptr));
   }
  public:
@@ -56,52 +53,6 @@
   bool completed_;
 };
 
-// Fill the heap.
-static const size_t kMaxHandles = 1000000;  // Use arbitrary large amount for now.
-static void FillHeap(Thread* self, ClassLinker* class_linker,
-                     std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
-                     std::vector<MutableHandle<mirror::Object>>* handles)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
-
-  hsp->reset(new StackHandleScope<kMaxHandles>(self));
-  // Class java.lang.Object.
-  Handle<mirror::Class> c((*hsp)->NewHandle(class_linker->FindSystemClass(self,
-                                                                       "Ljava/lang/Object;")));
-  // Array helps to fill memory faster.
-  Handle<mirror::Class> ca((*hsp)->NewHandle(class_linker->FindSystemClass(self,
-                                                                        "[Ljava/lang/Object;")));
-
-  // Start allocating with 128K
-  size_t length = 128 * KB / 4;
-  while (length > 10) {
-    MutableHandle<mirror::Object> h((*hsp)->NewHandle<mirror::Object>(
-        mirror::ObjectArray<mirror::Object>::Alloc(self, ca.Get(), length / 4)));
-    if (self->IsExceptionPending() || h == nullptr) {
-      self->ClearException();
-
-      // Try a smaller length
-      length = length / 8;
-      // Use at most half the reported free space.
-      size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
-      if (length * 8 > mem) {
-        length = mem / 8;
-      }
-    } else {
-      handles->push_back(h);
-    }
-  }
-
-  // Allocate simple objects till it fails.
-  while (!self->IsExceptionPending()) {
-    MutableHandle<mirror::Object> h = (*hsp)->NewHandle<mirror::Object>(c->AllocObject(self));
-    if (!self->IsExceptionPending() && h != nullptr) {
-      handles->push_back(h);
-    }
-  }
-  self->ClearException();
-}
-
 // Check that an exception can be thrown correctly.
 // This test is potentially racy, but the timeout is long enough that it should work.
 
@@ -304,16 +255,12 @@
   test->complete_barrier_ = std::unique_ptr<Barrier>(new Barrier(3));
   test->completed_ = false;
 
-  // Fill the heap.
-  std::unique_ptr<StackHandleScope<kMaxHandles>> hsp;
-  std::vector<MutableHandle<mirror::Object>> handles;
-
   // Our job: Fill the heap, then try Wait.
-  FillHeap(soa.Self(), class_linker, &hsp, &handles);
+  {
+    VariableSizedHandleScope vhs(soa.Self());
+    test->FillHeap(soa.Self(), class_linker, &vhs);
 
-  // Now release everything.
-  for (MutableHandle<mirror::Object>& h : handles) {
-    h.Assign(nullptr);
+    // Now release everything.
   }
 
   // Need to drop the mutator lock to allow barriers.
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index ad00966..f6a8360 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -19,7 +19,6 @@
 #include <sstream>
 
 #include "android-base/stringprintf.h"
-#include "nativehelper/jni_macros.h"
 
 #include "base/logging.h"
 #include "base/stl_util.h"
@@ -32,14 +31,15 @@
 #include "mirror/object-inl.h"
 #include "mirror/string.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "oat_file.h"
 #include "oat_file_assistant.h"
 #include "oat_file_manager.h"
 #include "os.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
 #include "utils.h"
 #include "well_known_classes.h"
 #include "zip_archive.h"
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index e1eae21..3357fa7 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -40,8 +40,8 @@
 #include "mirror/class.h"
 #include "mirror/object_array-inl.h"
 #include "native_util.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "scoped_fast_native_object_access-inl.h"
 #include "trace.h"
 #include "well_known_classes.h"
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index fed9c1c..020612b 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -22,15 +22,14 @@
 extern "C" void android_set_application_target_sdk_version(uint32_t version);
 #endif
 #include <limits.h>
-#include <ScopedUtfChars.h>
+#include "nativehelper/ScopedUtfChars.h"
 
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wshadow"
-#include "toStringArray.h"
+#include "nativehelper/toStringArray.h"
 #pragma GCC diagnostic pop
 
 #include "android-base/stringprintf.h"
-#include "nativehelper/jni_macros.h"
 
 #include "art_method-inl.h"
 #include "arch/instruction_set.h"
@@ -53,6 +52,7 @@
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
 #include "runtime.h"
 #include "scoped_fast_native_object_access-inl.h"
 #include "scoped_thread_state_change-inl.h"
@@ -298,15 +298,16 @@
 
 // Based on ClassLinker::ResolveString.
 static void PreloadDexCachesResolveString(
-    Handle<mirror::DexCache> dex_cache, dex::StringIndex string_idx, StringTable& strings)
+    ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx, StringTable& strings)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::String>  string = dex_cache->GetResolvedString(string_idx);
-  if (string != nullptr) {
-    return;
+  uint32_t slot_idx = dex_cache->StringSlotIndex(string_idx);
+  auto pair = dex_cache->GetStrings()[slot_idx].load(std::memory_order_relaxed);
+  if (!pair.object.IsNull()) {
+    return;  // The entry already contains some String.
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
   const char* utf8 = dex_file->StringDataByIdx(string_idx);
-  string = strings[utf8];
+  ObjPtr<mirror::String> string = strings[utf8];
   if (string == nullptr) {
     return;
   }
@@ -319,18 +320,17 @@
                                         ObjPtr<mirror::DexCache> dex_cache,
                                         dex::TypeIndex type_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(type_idx);
-  if (klass != nullptr) {
-    return;
+  uint32_t slot_idx = dex_cache->TypeSlotIndex(type_idx);
+  auto pair = dex_cache->GetResolvedTypes()[slot_idx].load(std::memory_order_relaxed);
+  if (!pair.object.IsNull()) {
+    return;  // The entry already contains some Class.
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
   const char* class_name = dex_file->StringByTypeIdx(type_idx);
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
-  if (class_name[1] == '\0') {
-    klass = linker->FindPrimitiveClass(class_name[0]);
-  } else {
-    klass = linker->LookupClass(self, class_name, nullptr);
-  }
+  ObjPtr<mirror::Class> klass = (class_name[1] == '\0')
+      ? linker->FindPrimitiveClass(class_name[0])
+      : linker->LookupClass(self, class_name, nullptr);
   if (klass == nullptr) {
     return;
   }
@@ -345,26 +345,27 @@
 }
 
 // Based on ClassLinker::ResolveField.
-static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
+static void PreloadDexCachesResolveField(ObjPtr<mirror::DexCache> dex_cache,
+                                         uint32_t field_idx,
                                          bool is_static)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
-  if (field != nullptr) {
-    return;
+  uint32_t slot_idx = dex_cache->FieldSlotIndex(field_idx);
+  auto pair = mirror::DexCache::GetNativePairPtrSize(dex_cache->GetResolvedFields(),
+                                                     slot_idx,
+                                                     kRuntimePointerSize);
+  if (pair.object != nullptr) {
+    return;  // The entry already contains some ArtField.
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
   const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx);
-  Thread* const self = Thread::Current();
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Class> klass(hs.NewHandle(dex_cache->GetResolvedType(field_id.class_idx_)));
+  ObjPtr<mirror::Class> klass =
+      ClassLinker::LookupResolvedType(field_id.class_idx_, dex_cache, nullptr);
   if (klass == nullptr) {
     return;
   }
-  if (is_static) {
-    field = mirror::Class::FindStaticField(self, klass.Get(), dex_cache.Get(), field_idx);
-  } else {
-    field = klass->FindInstanceField(dex_cache.Get(), field_idx);
-  }
+  ArtField* field = is_static
+      ? mirror::Class::FindStaticField(Thread::Current(), klass, dex_cache, field_idx)
+      : klass->FindInstanceField(dex_cache, field_idx);
   if (field == nullptr) {
     return;
   }
@@ -372,35 +373,25 @@
 }
 
 // Based on ClassLinker::ResolveMethod.
-static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
-                                          InvokeType invoke_type)
+static void PreloadDexCachesResolveMethod(ObjPtr<mirror::DexCache> dex_cache, uint32_t method_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
-  if (method != nullptr) {
-    return;
+  uint32_t slot_idx = dex_cache->MethodSlotIndex(method_idx);
+  auto pair = mirror::DexCache::GetNativePairPtrSize(dex_cache->GetResolvedMethods(),
+                                                     slot_idx,
+                                                     kRuntimePointerSize);
+  if (pair.object != nullptr) {
+    return;  // The entry already contains some ArtMethod.
   }
   const DexFile* dex_file = dex_cache->GetDexFile();
   const DexFile::MethodId& method_id = dex_file->GetMethodId(method_idx);
-  ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(method_id.class_idx_);
+  ObjPtr<mirror::Class> klass =
+      ClassLinker::LookupResolvedType(method_id.class_idx_, dex_cache, nullptr);
   if (klass == nullptr) {
     return;
   }
-  switch (invoke_type) {
-    case kDirect:
-    case kStatic:
-      method = klass->FindDirectMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
-      break;
-    case kInterface:
-      method = klass->FindInterfaceMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
-      break;
-    case kSuper:
-    case kVirtual:
-      method = klass->FindVirtualMethod(dex_cache.Get(), method_idx, kRuntimePointerSize);
-      break;
-    default:
-      LOG(FATAL) << "Unreachable - invocation type: " << invoke_type;
-      UNREACHABLE();
-  }
+  ArtMethod* method = klass->IsInterface()
+      ? klass->FindInterfaceMethod(dex_cache, method_idx, kRuntimePointerSize)
+      : klass->FindClassMethod(dex_cache, method_idx, kRuntimePointerSize);
   if (method == nullptr) {
     return;
   }
@@ -462,27 +453,31 @@
     }
     ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file);
     DCHECK(dex_cache != nullptr);  // Boot class path dex caches are never unloaded.
-    for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
-      ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(j));
-      if (string != nullptr) {
+    for (size_t j = 0, num_strings = dex_cache->NumStrings(); j < num_strings; ++j) {
+      auto pair = dex_cache->GetStrings()[j].load(std::memory_order_relaxed);
+      if (!pair.object.IsNull()) {
         filled->num_strings++;
       }
     }
-    for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
-      ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(dex::TypeIndex(j));
-      if (klass != nullptr) {
+    for (size_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; ++j) {
+      auto pair = dex_cache->GetResolvedTypes()[j].load(std::memory_order_relaxed);
+      if (!pair.object.IsNull()) {
         filled->num_types++;
       }
     }
-    for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
-      ArtField* field = dex_cache->GetResolvedField(j, class_linker->GetImagePointerSize());
-      if (field != nullptr) {
+    for (size_t j = 0, num_fields = dex_cache->NumResolvedFields(); j < num_fields; ++j) {
+      auto pair = mirror::DexCache::GetNativePairPtrSize(dex_cache->GetResolvedFields(),
+                                                         j,
+                                                         kRuntimePointerSize);
+      if (pair.object != nullptr) {
         filled->num_fields++;
       }
     }
-    for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) {
-      ArtMethod* method = dex_cache->GetResolvedMethod(j, kRuntimePointerSize);
-      if (method != nullptr) {
+    for (size_t j = 0, num_methods = dex_cache->NumResolvedMethods(); j < num_methods; ++j) {
+      auto pair = mirror::DexCache::GetNativePairPtrSize(dex_cache->GetResolvedMethods(),
+                                                         j,
+                                                         kRuntimePointerSize);
+      if (pair.object != nullptr) {
         filled->num_methods++;
       }
     }
@@ -522,8 +517,7 @@
   for (size_t i = 0; i < boot_class_path.size(); i++) {
     const DexFile* dex_file = boot_class_path[i];
     CHECK(dex_file != nullptr);
-    StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file, nullptr)));
+    ObjPtr<mirror::DexCache> dex_cache = linker->RegisterDexFile(*dex_file, nullptr);
     CHECK(dex_cache != nullptr);  // Boot class path dex caches are never unloaded.
     if (kPreloadDexCachesStrings) {
       for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
@@ -533,7 +527,7 @@
 
     if (kPreloadDexCachesTypes) {
       for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) {
-        PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), dex::TypeIndex(j));
+        PreloadDexCachesResolveType(soa.Self(), dex_cache, dex::TypeIndex(j));
       }
     }
 
@@ -557,13 +551,11 @@
         }
         for (; it.HasNextDirectMethod(); it.Next()) {
           uint32_t method_idx = it.GetMemberIndex();
-          InvokeType invoke_type = it.GetMethodInvokeType(class_def);
-          PreloadDexCachesResolveMethod(dex_cache, method_idx, invoke_type);
+          PreloadDexCachesResolveMethod(dex_cache, method_idx);
         }
         for (; it.HasNextVirtualMethod(); it.Next()) {
           uint32_t method_idx = it.GetMemberIndex();
-          InvokeType invoke_type = it.GetMethodInvokeType(class_def);
-          PreloadDexCachesResolveMethod(dex_cache, method_idx, invoke_type);
+          PreloadDexCachesResolveMethod(dex_cache, method_idx);
         }
       }
     }
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 31aeba0..2e4db7a 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -19,7 +19,6 @@
 #include <stdlib.h>
 
 #include "android-base/stringprintf.h"
-#include "nativehelper/jni_macros.h"
 
 #include "arch/instruction_set.h"
 #include "art_method-inl.h"
@@ -27,11 +26,12 @@
 #include "java_vm_ext.h"
 #include "jit/jit.h"
 #include "jni_internal.h"
-#include "JNIHelp.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/JNIHelp.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "non_debuggable_classes.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedUtfChars.h"
 #include "stack.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index d3377be..1a19940 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -18,8 +18,6 @@
 
 #include <iostream>
 
-#include "nativehelper/jni_macros.h"
-
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
@@ -28,7 +26,6 @@
 #include "dex_file-inl.h"
 #include "dex_file_annotations.h"
 #include "jni_internal.h"
-#include "nth_caller_visitor.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/field-inl.h"
@@ -37,12 +34,14 @@
 #include "mirror/object_array-inl.h"
 #include "mirror/string-inl.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
+#include "nth_caller_visitor.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
-#include "scoped_thread_state_change-inl.h"
 #include "scoped_fast_native_object_access-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
+#include "scoped_thread_state_change-inl.h"
 #include "utf.h"
 #include "well_known_classes.h"
 
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index ac0d633..e2de141 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -22,12 +22,12 @@
 #include "jni_internal.h"
 #include "mirror/array.h"
 #include "mirror/object-inl.h"
-#include "mirror/string.h"
 #include "mirror/string-inl.h"
+#include "mirror/string.h"
 #include "native_util.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "scoped_fast_native_object_access-inl.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "verify_object.h"
 
 namespace art {
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 9c2e918..2db9a5c 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -16,17 +16,16 @@
 
 #include "java_lang_StringFactory.h"
 
-#include "nativehelper/jni_macros.h"
-
 #include "common_throws.h"
 #include "jni_internal.h"
 #include "mirror/object-inl.h"
 #include "mirror/string.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedPrimitiveArray.h"
 #include "scoped_fast_native_object_access-inl.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedPrimitiveArray.h"
 
 namespace art {
 
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 4ce72ed..4fbbb72 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -16,16 +16,15 @@
 
 #include "java_lang_Thread.h"
 
-#include "nativehelper/jni_macros.h"
-
 #include "common_throws.h"
 #include "jni_internal.h"
-#include "monitor.h"
 #include "mirror/object.h"
+#include "monitor.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "scoped_fast_native_object_access-inl.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedUtfChars.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "verify_object.h"
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index fc50d55..4034e8c 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -16,17 +16,16 @@
 
 #include "java_lang_VMClassLoader.h"
 
-#include "nativehelper/jni_macros.h"
-
 #include "class_linker.h"
 #include "jni_internal.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "obj_ptr.h"
 #include "scoped_fast_native_object_access-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
 #include "well_known_classes.h"
 #include "zip_archive.h"
 
@@ -135,7 +134,7 @@
   for (size_t i = 0; i < path.size(); ++i) {
     const DexFile* dex_file = path[i];
 
-    // For multidex locations, e.g., x.jar:classes2.dex, we want to look into x.jar.
+    // For multidex locations, e.g., x.jar!classes2.dex, we want to look into x.jar.
     const std::string& location(dex_file->GetBaseLocation());
 
     ScopedLocalRef<jstring> javaPath(env, env->NewStringUTF(location.c_str()));
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
index 38634e6..c698548 100644
--- a/runtime/native/libcore_util_CharsetUtils.cc
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -18,14 +18,13 @@
 
 #include <string.h>
 
-#include "nativehelper/jni_macros.h"
-
 #include "jni_internal.h"
-#include "mirror/string.h"
 #include "mirror/string-inl.h"
+#include "mirror/string.h"
 #include "native_util.h"
+#include "nativehelper/ScopedPrimitiveArray.h"
+#include "nativehelper/jni_macros.h"
 #include "scoped_fast_native_object_access-inl.h"
-#include "ScopedPrimitiveArray.h"
 #include "unicode/utf16.h"
 
 
diff --git a/runtime/native/native_util.h b/runtime/native/native_util.h
index 98384e0..593b3ca 100644
--- a/runtime/native/native_util.h
+++ b/runtime/native/native_util.h
@@ -21,7 +21,7 @@
 
 #include "android-base/logging.h"
 #include "base/macros.h"
-#include "ScopedLocalRef.h"
+#include "nativehelper/ScopedLocalRef.h"
 
 namespace art {
 
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index 925b909..c3e74bd 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -16,14 +16,13 @@
 
 #include "org_apache_harmony_dalvik_ddmc_DdmServer.h"
 
-#include "nativehelper/jni_macros.h"
-
 #include "base/logging.h"
 #include "debugger.h"
 #include "jni_internal.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedPrimitiveArray.h"
 #include "scoped_fast_native_object_access-inl.h"
-#include "ScopedPrimitiveArray.h"
 
 namespace art {
 
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 125d737..8c42973 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -16,17 +16,16 @@
 
 #include "org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
 
-#include "nativehelper/jni_macros.h"
-
 #include "base/logging.h"
 #include "base/mutex.h"
 #include "debugger.h"
 #include "gc/heap.h"
 #include "jni_internal.h"
 #include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedPrimitiveArray.h"
 #include "scoped_fast_native_object_access-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedPrimitiveArray.h"
 #include "thread_list.h"
 
 namespace art {
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index cbff0bb..7e16357 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -337,7 +337,7 @@
     } else {
       os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR "  "
                                                             : "%08" PRIxPTR "  ",
-                         BacktraceMap::GetRelativePc(it->map, it->pc));
+                         it->rel_pc);
       os << it->map.name;
       os << " (";
       if (!it->func_name.empty()) {
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index 9cc7e60..871ffba 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -19,8 +19,8 @@
 #include "base/logging.h"
 #include "jni_internal.h"
 #include "mirror/class-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "obj_ptr-inl.h"
-#include "ScopedLocalRef.h"
 #include "thread-current-inl.h"
 
 namespace art {
diff --git a/runtime/oat.h b/runtime/oat.h
index f4edb16..c4a983e 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: add new class status to skip superclass validation.
-  static constexpr uint8_t kOatVersion[] = { '1', '2', '9', '\0' };
+  // Last oat version changed reason: MIPS Baker thunks.
+  static constexpr uint8_t kOatVersion[] = { '1', '3', '1', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b112b84..be7d495 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -289,8 +289,8 @@
   // If not null, abs_dex_location is used to resolve the absolute dex
   // location of relative dex locations encoded in the oat file.
   // For example, given absolute location "/data/app/foo/base.apk", encoded
-  // dex locations "base.apk", "base.apk:classes2.dex", etc. would be resolved
-  // to "/data/app/foo/base.apk", "/data/app/foo/base.apk:classes2.dex", etc.
+  // dex locations "base.apk", "base.apk!classes2.dex", etc. would be resolved
+  // to "/data/app/foo/base.apk", "/data/app/foo/base.apk!classes2.dex", etc.
   // Relative encoded dex locations that don't match the given abs_dex_location
   // are left unchanged.
   static std::string ResolveRelativeEncodedDexLocation(
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index dc542d4..e950fca 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -264,212 +264,6 @@
   }
 }
 
-template <typename T>
-static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file,
-                                   ArtField* const cookie_field,
-                                   const T& fn)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (dex_file != nullptr) {
-    mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
-    if (long_array == nullptr) {
-      // This should never happen so log a warning.
-      LOG(WARNING) << "Null DexFile::mCookie";
-      return;
-    }
-    int32_t long_array_size = long_array->GetLength();
-    // Start from 1 to skip the oat file.
-    for (int32_t j = 1; j < long_array_size; ++j) {
-      const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
-          long_array->GetWithoutChecks(j)));
-      if (!fn(cp_dex_file)) {
-        return;
-      }
-    }
-  }
-}
-
-template <typename T>
-static void IterateOverPathClassLoader(
-    Handle<mirror::ClassLoader> class_loader,
-    MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements,
-    const T& fn) REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Handle this step.
-  // Handle as if this is the child PathClassLoader.
-  // The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
-  // We need to get the DexPathList and loop through it.
-  ArtField* const cookie_field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
-  ArtField* const dex_file_field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
-  ObjPtr<mirror::Object> dex_path_list =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
-          GetObject(class_loader.Get());
-  if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
-    // DexPathList has an array dexElements of Elements[] which each contain a dex file.
-    ObjPtr<mirror::Object> dex_elements_obj =
-        jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
-            GetObject(dex_path_list);
-    // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look
-    // at the mCookie which is a DexFile vector.
-    if (dex_elements_obj != nullptr) {
-      dex_elements.Assign(dex_elements_obj->AsObjectArray<mirror::Object>());
-      for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-        mirror::Object* element = dex_elements->GetWithoutChecks(i);
-        if (element == nullptr) {
-          // Should never happen, fall back to java code to throw a NPE.
-          break;
-        }
-        ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
-        IterateOverJavaDexFile(dex_file, cookie_field, fn);
-      }
-    }
-  }
-}
-
-static bool GetDexFilesFromClassLoader(
-    ScopedObjectAccessAlreadyRunnable& soa,
-    mirror::ClassLoader* class_loader,
-    std::vector<const DexFile*>* dex_files)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
-    // The boot class loader. We don't load any of these files, as we know we compiled against
-    // them correctly.
-    return true;
-  }
-
-  // Unsupported class-loader?
-  if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
-      class_loader->GetClass()) {
-    VLOG(class_linker) << "Unsupported class-loader "
-                       << mirror::Class::PrettyClass(class_loader->GetClass());
-    return false;
-  }
-
-  bool recursive_result = GetDexFilesFromClassLoader(soa, class_loader->GetParent(), dex_files);
-  if (!recursive_result) {
-    // Something wrong up the chain.
-    return false;
-  }
-
-  // Collect all the dex files.
-  auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
-            REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (cp_dex_file->NumClassDefs() > 0) {
-      dex_files->push_back(cp_dex_file);
-    }
-    return true;  // Continue looking.
-  };
-
-  // Handle for dex-cache-element.
-  StackHandleScope<3> hs(soa.Self());
-  MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements(
-      hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
-  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
-
-  IterateOverPathClassLoader(h_class_loader, dex_elements, GetDexFilesFn);
-
-  return true;
-}
-
-static void GetDexFilesFromDexElementsArray(
-    ScopedObjectAccessAlreadyRunnable& soa,
-    Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
-    std::vector<const DexFile*>* dex_files)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (dex_elements == nullptr) {
-    // Nothing to do.
-    return;
-  }
-
-  ArtField* const cookie_field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
-  ArtField* const dex_file_field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
-  ObjPtr<mirror::Class> const element_class = soa.Decode<mirror::Class>(
-      WellKnownClasses::dalvik_system_DexPathList__Element);
-  ObjPtr<mirror::Class> const dexfile_class = soa.Decode<mirror::Class>(
-      WellKnownClasses::dalvik_system_DexFile);
-
-  // Collect all the dex files.
-  auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
-      dex_files->push_back(cp_dex_file);
-    }
-    return true;  // Continue looking.
-  };
-
-  for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-    mirror::Object* element = dex_elements->GetWithoutChecks(i);
-    if (element == nullptr) {
-      continue;
-    }
-
-    // We support this being dalvik.system.DexPathList$Element and dalvik.system.DexFile.
-
-    ObjPtr<mirror::Object> dex_file;
-    if (element_class == element->GetClass()) {
-      dex_file = dex_file_field->GetObject(element);
-    } else if (dexfile_class == element->GetClass()) {
-      dex_file = element;
-    } else {
-      LOG(WARNING) << "Unsupported element in dex_elements: "
-                   << mirror::Class::PrettyClass(element->GetClass());
-      continue;
-    }
-
-    IterateOverJavaDexFile(dex_file, cookie_field, GetDexFilesFn);
-  }
-}
-
-static bool AreSharedLibrariesOk(const std::string& context_spec,
-                                 std::vector<const DexFile*>& dex_files,
-                                 std::string* error_msg) {
-  std::vector<std::string> classpath;
-  std::vector<uint32_t> checksums;
-  bool is_special_shared_library;
-  if (!ClassLoaderContext::DecodePathClassLoaderContextFromOatFileKey(
-          context_spec, &classpath, &checksums, &is_special_shared_library)) {
-    *error_msg = "Could not decode the class loader context from the oat file key.";
-    return false;
-  }
-
-  DCHECK_EQ(classpath.size(), checksums.size());
-
-  // The classpath size should match the number of dex files.
-  if (classpath.size() != dex_files.size()) {
-    *error_msg = "The number of loaded dex files does not match the number of files "
-        "specified in the context. Expected=" + std::to_string(classpath.size()) +
-        ", found=" + std::to_string(dex_files.size());
-    return false;
-  }
-
-  // If we find the special shared library, skip the shared libraries check.
-  if (is_special_shared_library) {
-    return true;
-  }
-
-  // Check that the loaded dex files have the same order and checksums as the shared libraries.
-  for (size_t i = 0; i < dex_files.size(); ++i) {
-    const std::string& dex_location = dex_files[i]->GetLocation();
-    uint32_t dex_location_checksum = dex_files[i]->GetLocationChecksum();
-    std::string absolute_library_path =
-        OatFile::ResolveRelativeEncodedDexLocation(dex_location.c_str(), classpath[i]);
-    if (dex_location != absolute_library_path) {
-      *error_msg = "SharedLibraryCheck: expected=" + absolute_library_path + ", found=" +
-          dex_location;
-      return false;
-    }
-    if (dex_location_checksum  != checksums[i]) {
-      *error_msg = "SharedLibraryCheck: checksum mismatch for " + dex_location + ". Expected=" +
-          std::to_string(checksums[i]) + ", found=" + std::to_string(dex_location_checksum);
-      return false;
-    }
-  }
-
-  return true;
-}
-
 static bool CollisionCheck(std::vector<const DexFile*>& dex_files_loaded,
                            std::vector<const DexFile*>& dex_files_unloaded,
                            std::string* error_msg /*out*/) {
@@ -554,52 +348,38 @@
   DCHECK(oat_file != nullptr);
   DCHECK(error_msg != nullptr);
 
-  std::vector<const DexFile*> dex_files_loaded;
-
-  // Try to get dex files from the given class loader. If the class loader is null, or we do
-  // not support one of the class loaders in the chain, we do nothing and assume the collision
-  // check has succeeded.
-  bool class_loader_ok = false;
-  {
-    ScopedObjectAccess soa(Thread::Current());
-    StackHandleScope<2> hs(Thread::Current());
-    Handle<mirror::ClassLoader> h_class_loader =
-        hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader));
-    Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
-        hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
-    if (h_class_loader != nullptr &&
-        GetDexFilesFromClassLoader(soa, h_class_loader.Get(), &dex_files_loaded)) {
-      class_loader_ok = true;
-
-      // In this case, also take into account the dex_elements array, if given. We don't need to
-      // read it otherwise, as we'll compare against all open oat files anyways.
-      GetDexFilesFromDexElementsArray(soa, h_dex_elements, &dex_files_loaded);
-    } else if (h_class_loader != nullptr) {
-      VLOG(class_linker) << "Something unsupported with "
-                         << mirror::Class::PrettyClass(h_class_loader->GetClass());
-
-      // This is a class loader we don't recognize. Our earlier strategy would
-      // be to perform a global duplicate class check (with all loaded oat files)
-      // but that seems overly conservative - we have no way of knowing that
-      // those files are present in the same loader hierarchy. Among other
-      // things, it hurt GMS core and its filtering class loader.
-    }
+  // If the class_loader is null there's not much we can do. This happens if a dex file is loaded
+  // directly with DexFile APIs instead of using class loaders.
+  if (class_loader == nullptr) {
+    LOG(WARNING) << "Opening an oat file without a class loader. "
+        << "Are you using the deprecated DexFile APIs?";
+    return false;
   }
 
-  // Exit if we find a class loader we don't recognize. Proceed to check shared
-  // libraries and do a full class loader check otherwise.
-  if (!class_loader_ok) {
-      LOG(WARNING) << "Skipping duplicate class check due to unrecognized classloader";
+  std::unique_ptr<ClassLoaderContext> context =
+      ClassLoaderContext::CreateContextForClassLoader(class_loader, dex_elements);
+
+  // The context might be null if there are unrecognized class loaders in the chain or they
+  // don't meet sensible sanity conditions. In this case we assume that the app knows what it's
+  // doing and accept the oat file.
+  // Note that this has correctness implications as we cannot guarantee that the class resolution
+  // used during compilation is OK (b/37777332).
+  if (context == nullptr) {
+      LOG(WARNING) << "Skipping duplicate class check due to unsupported classloader";
       return false;
   }
 
-  // Exit if shared libraries are ok. Do a full duplicate classes check otherwise.
-  const std::string
-      shared_libraries(oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
-  if (AreSharedLibrariesOk(shared_libraries, dex_files_loaded, error_msg)) {
+  // If the oat file loading context matches the context used during compilation then we accept
+  // the oat file without additional checks.
+  if (context->VerifyClassLoaderContextMatch(
+      oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey))) {
     return false;
   }
 
+  // The class loader context does not match. Perform a full duplicate classes check.
+
+  std::vector<const DexFile*> dex_files_loaded = context->FlattenOpenedDexFiles();
+
   // Vector that holds the newly opened dex files live, this is done to prevent leaks.
   std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
 
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index d5fe1f3..7bf0f84 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -45,13 +45,13 @@
       OatFile::ResolveRelativeEncodedDexLocation(
         "/data/app/foo/base.apk", "foo/base.apk"));
 
-  EXPECT_EQ(std::string("/data/app/foo/base.apk:classes2.dex"),
+  EXPECT_EQ(std::string("/data/app/foo/base.apk!classes2.dex"),
       OatFile::ResolveRelativeEncodedDexLocation(
-        "/data/app/foo/base.apk", "base.apk:classes2.dex"));
+        "/data/app/foo/base.apk", "base.apk!classes2.dex"));
 
-  EXPECT_EQ(std::string("/data/app/foo/base.apk:classes11.dex"),
+  EXPECT_EQ(std::string("/data/app/foo/base.apk!classes11.dex"),
       OatFile::ResolveRelativeEncodedDexLocation(
-        "/data/app/foo/base.apk", "base.apk:classes11.dex"));
+        "/data/app/foo/base.apk", "base.apk!classes11.dex"));
 
   EXPECT_EQ(std::string("base.apk"),
       OatFile::ResolveRelativeEncodedDexLocation(
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index ea5e698..9eccb5a 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -25,9 +25,6 @@
   template<class MirrorType> class HeapReference;
 }  // namespace mirror
 
-// A callback for visiting an object in the heap.
-typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
-
 class IsMarkedVisitor {
  public:
   virtual ~IsMarkedVisitor() {}
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 6a8f2ce..c1b2636 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -53,12 +53,12 @@
 #include "mirror/string-inl.h"
 #include "monitor.h"
 #include "native/scoped_fast_native_object_access-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "thread_list.h"
-#include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
 #include "verify_object.h"
 
 #undef LOG_TAG
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
index f30d7ce..43177ab 100644
--- a/runtime/openjdkjvmti/events-inl.h
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -21,7 +21,7 @@
 
 #include "events.h"
 #include "jni_internal.h"
-#include "ScopedLocalRef.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "ti_breakpoint.h"
 
 #include "art_jvmti.h"
diff --git a/runtime/openjdkjvmti/events.cc b/runtime/openjdkjvmti/events.cc
index f749daa..7a930d4 100644
--- a/runtime/openjdkjvmti/events.cc
+++ b/runtime/openjdkjvmti/events.cc
@@ -31,9 +31,9 @@
 
 #include "events-inl.h"
 
+#include "art_field-inl.h"
 #include "art_jvmti.h"
 #include "art_method-inl.h"
-#include "art_field-inl.h"
 #include "base/logging.h"
 #include "gc/allocation_listener.h"
 #include "gc/gc_pause_listener.h"
@@ -45,8 +45,8 @@
 #include "jni_internal.h"
 #include "mirror/class.h"
 #include "mirror/object-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "runtime.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
 #include "thread_list.h"
diff --git a/runtime/openjdkjvmti/jvmti_weak_table-inl.h b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
index 64ab3e7..a640acb 100644
--- a/runtime/openjdkjvmti/jvmti_weak_table-inl.h
+++ b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
@@ -44,8 +44,8 @@
 #include "jvmti_allocator.h"
 #include "mirror/class.h"
 #include "mirror/object.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "runtime.h"
-#include "ScopedLocalRef.h"
 
 namespace openjdkjvmti {
 
diff --git a/runtime/openjdkjvmti/ti_breakpoint.cc b/runtime/openjdkjvmti/ti_breakpoint.cc
index 6d0e2c6..f5116a8 100644
--- a/runtime/openjdkjvmti/ti_breakpoint.cc
+++ b/runtime/openjdkjvmti/ti_breakpoint.cc
@@ -42,9 +42,9 @@
 #include "mirror/class-inl.h"
 #include "mirror/object_array-inl.h"
 #include "modifiers.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
 #include "ti_phase.h"
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index b8e7955..954b5d1 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -39,12 +39,13 @@
 #include "art_jvmti.h"
 #include "base/array_ref.h"
 #include "base/macros.h"
-#include "class_table-inl.h"
 #include "class_linker.h"
+#include "class_table-inl.h"
 #include "common_throws.h"
 #include "dex_file_annotations.h"
 #include "events-inl.h"
 #include "fixed_up_dex_file.h"
+#include "gc/heap-visit-objects-inl.h"
 #include "gc/heap.h"
 #include "gc_root.h"
 #include "handle.h"
@@ -53,16 +54,16 @@
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object_reference.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object_reference.h"
 #include "mirror/reference.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "primitive.h"
 #include "reflection.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
@@ -544,21 +545,15 @@
         LOG(FATAL) << "Unreachable";
       }
 
-      static void AllObjectsCallback(art::mirror::Object* obj, void* arg)
-          REQUIRES_SHARED(art::Locks::mutator_lock_) {
-        HeapFixupVisitor* hfv = reinterpret_cast<HeapFixupVisitor*>(arg);
-
-        // Visit references, not native roots.
-        obj->VisitReferences<false>(*hfv, *hfv);
-      }
-
      private:
       const art::mirror::Class* input_;
       art::mirror::Class* output_;
     };
     HeapFixupVisitor hfv(input, output);
-    art::Runtime::Current()->GetHeap()->VisitObjectsPaused(HeapFixupVisitor::AllObjectsCallback,
-                                                           &hfv);
+    auto object_visitor = [&](art::mirror::Object* obj) {
+      obj->VisitReferences<false>(hfv, hfv);  // Visit references, not native roots.
+    };
+    art::Runtime::Current()->GetHeap()->VisitObjectsPaused(object_visitor);
   }
 
   // A set of all the temp classes we have handed out. We have to fix up references to these.
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
index 205046c..e81e4bc 100644
--- a/runtime/openjdkjvmti/ti_class_loader.cc
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -51,9 +51,9 @@
 #include "mirror/class.h"
 #include "mirror/class_ext.h"
 #include "mirror/object.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "object_lock.h"
 #include "runtime.h"
-#include "ScopedLocalRef.h"
 #include "transform.h"
 
 namespace openjdkjvmti {
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 29658d9..91fdaca 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -22,6 +22,7 @@
 #include "base/mutex.h"
 #include "class_linker.h"
 #include "gc/heap.h"
+#include "gc/heap-visit-objects-inl.h"
 #include "gc_root-inl.h"
 #include "java_frame_root_info.h"
 #include "jni_env_ext.h"
@@ -30,7 +31,6 @@
 #include "mirror/class.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
-#include "object_callbacks.h"
 #include "object_tagging.h"
 #include "obj_ptr-inl.h"
 #include "primitive.h"
@@ -653,33 +653,25 @@
   art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
 }
 
-template <typename Callback>
-struct IterateThroughHeapData {
-  IterateThroughHeapData(Callback _cb,
-                         ObjectTagTable* _tag_table,
-                         jvmtiEnv* _env,
-                         art::ObjPtr<art::mirror::Class> klass,
-                         jint _heap_filter,
-                         const jvmtiHeapCallbacks* _callbacks,
-                         const void* _user_data)
-      : cb(_cb),
-        tag_table(_tag_table),
-        heap_filter(_heap_filter),
-        filter_klass(klass),
-        env(_env),
-        callbacks(_callbacks),
-        user_data(_user_data),
-        stop_reports(false) {
+template <typename T>
+static jvmtiError DoIterateThroughHeap(T fn,
+                                       jvmtiEnv* env,
+                                       ObjectTagTable* tag_table,
+                                       jint heap_filter_int,
+                                       jclass klass,
+                                       const jvmtiHeapCallbacks* callbacks,
+                                       const void* user_data) {
+  if (callbacks == nullptr) {
+    return ERR(NULL_POINTER);
   }
 
-  static void ObjectCallback(art::mirror::Object* obj, void* arg)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    IterateThroughHeapData* ithd = reinterpret_cast<IterateThroughHeapData*>(arg);
-    ithd->ObjectCallback(obj);
-  }
+  art::Thread* self = art::Thread::Current();
+  art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
 
-  void ObjectCallback(art::mirror::Object* obj)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  bool stop_reports = false;
+  const HeapFilter heap_filter(heap_filter_int);
+  art::ObjPtr<art::mirror::Class> filter_klass = soa.Decode<art::mirror::Class>(klass);
+  auto visitor = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
     // Early return, as we can't really stop visiting.
     if (stop_reports) {
       return;
@@ -713,7 +705,7 @@
     }
 
     jlong saved_tag = tag;
-    jint ret = cb(obj, callbacks, class_tag, size, &tag, length, const_cast<void*>(user_data));
+    jint ret = fn(obj, callbacks, class_tag, size, &tag, length, const_cast<void*>(user_data));
 
     if (tag != saved_tag) {
       tag_table->Set(obj, tag);
@@ -734,44 +726,8 @@
     if (!stop_reports) {
       stop_reports = ReportPrimitiveField::Report(obj, tag_table, callbacks, user_data);
     }
-  }
-
-  Callback cb;
-  ObjectTagTable* tag_table;
-  const HeapFilter heap_filter;
-  art::ObjPtr<art::mirror::Class> filter_klass;
-  jvmtiEnv* env;
-  const jvmtiHeapCallbacks* callbacks;
-  const void* user_data;
-
-  bool stop_reports;
-};
-
-template <typename T>
-static jvmtiError DoIterateThroughHeap(T fn,
-                                       jvmtiEnv* env,
-                                       ObjectTagTable* tag_table,
-                                       jint heap_filter,
-                                       jclass klass,
-                                       const jvmtiHeapCallbacks* callbacks,
-                                       const void* user_data) {
-  if (callbacks == nullptr) {
-    return ERR(NULL_POINTER);
-  }
-
-  art::Thread* self = art::Thread::Current();
-  art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
-
-  using Iterator = IterateThroughHeapData<T>;
-  Iterator ithd(fn,
-                tag_table,
-                env,
-                soa.Decode<art::mirror::Class>(klass),
-                heap_filter,
-                callbacks,
-                user_data);
-
-  art::Runtime::Current()->GetHeap()->VisitObjects(Iterator::ObjectCallback, &ithd);
+  };
+  art::Runtime::Current()->GetHeap()->VisitObjects(visitor);
 
   return ERR(NONE);
 }
diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc
index 9b5b964..ab434d7 100644
--- a/runtime/openjdkjvmti/ti_method.cc
+++ b/runtime/openjdkjvmti/ti_method.cc
@@ -39,9 +39,9 @@
 #include "jni_internal.h"
 #include "mirror/object_array-inl.h"
 #include "modifiers.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
 #include "ti_phase.h"
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index 3c8bdc6..8893c9b 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -34,9 +34,9 @@
 #include "art_jvmti.h"
 #include "base/macros.h"
 #include "events-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
diff --git a/runtime/openjdkjvmti/ti_properties.cc b/runtime/openjdkjvmti/ti_properties.cc
index e399b48..c412814 100644
--- a/runtime/openjdkjvmti/ti_properties.cc
+++ b/runtime/openjdkjvmti/ti_properties.cc
@@ -35,8 +35,8 @@
 #include <vector>
 
 #include "jni.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 
 #include "art_jvmti.h"
 #include "runtime.h"
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index debee91..c679d73 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -36,10 +36,11 @@
 #include "android-base/stringprintf.h"
 
 #include "art_field-inl.h"
-#include "art_method-inl.h"
 #include "art_jvmti.h"
+#include "art_method-inl.h"
 #include "base/array_ref.h"
 #include "base/logging.h"
+#include "base/stringpiece.h"
 #include "class_linker-inl.h"
 #include "debugger.h"
 #include "dex_file.h"
@@ -60,10 +61,10 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
 #include "mirror/object.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "non_debuggable_classes.h"
 #include "object_lock.h"
 #include "runtime.h"
-#include "ScopedLocalRef.h"
 #include "ti_breakpoint.h"
 #include "ti_class_loader.h"
 #include "transform.h"
@@ -572,13 +573,15 @@
 // Try and get the declared method. First try to get a virtual method then a direct method if that's
 // not found.
 static art::ArtMethod* FindMethod(art::Handle<art::mirror::Class> klass,
-                                  const char* name,
+                                  art::StringPiece name,
                                   art::Signature sig) REQUIRES_SHARED(art::Locks::mutator_lock_) {
-  art::ArtMethod* m = klass->FindDeclaredVirtualMethod(name, sig, art::kRuntimePointerSize);
-  if (m == nullptr) {
-    m = klass->FindDeclaredDirectMethod(name, sig, art::kRuntimePointerSize);
+  DCHECK(!klass->IsProxyClass());
+  for (art::ArtMethod& m : klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize)) {
+    if (m.GetName() == name && m.GetSignature() == sig) {
+      return &m;
+    }
   }
-  return m;
+  return nullptr;
 }
 
 bool Redefiner::ClassRedefinition::CheckSameMethods() {
@@ -1368,7 +1371,7 @@
   const art::DexFile::TypeId& declaring_class_id = dex_file_->GetTypeId(class_def.class_idx_);
   const art::DexFile& old_dex_file = mclass->GetDexFile();
   // Update methods.
-  for (art::ArtMethod& method : mclass->GetMethods(image_pointer_size)) {
+  for (art::ArtMethod& method : mclass->GetDeclaredMethods(image_pointer_size)) {
     const art::DexFile::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
     art::dex::TypeIndex method_return_idx =
         dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(method.GetReturnTypeDescriptor()));
diff --git a/runtime/openjdkjvmti/ti_search.cc b/runtime/openjdkjvmti/ti_search.cc
index 6e0196e..25bc5d6 100644
--- a/runtime/openjdkjvmti/ti_search.cc
+++ b/runtime/openjdkjvmti/ti_search.cc
@@ -43,14 +43,14 @@
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
 #include "mirror/string.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "ti_phase.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
+#include "ti_phase.h"
 #include "well_known_classes.h"
 
 namespace openjdkjvmti {
@@ -105,17 +105,21 @@
   }
 
   art::ArtMethod* get_property =
-      properties_class->FindDeclaredVirtualMethod(
+      properties_class->FindClassMethod(
           "getProperty",
           "(Ljava/lang/String;)Ljava/lang/String;",
           art::kRuntimePointerSize);
   DCHECK(get_property != nullptr);
+  DCHECK(!get_property->IsDirect());
+  DCHECK(get_property->GetDeclaringClass() == properties_class);
   art::ArtMethod* set_property =
-      properties_class->FindDeclaredVirtualMethod(
+      properties_class->FindClassMethod(
           "setProperty",
           "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/Object;",
           art::kRuntimePointerSize);
   DCHECK(set_property != nullptr);
+  DCHECK(!set_property->IsDirect());
+  DCHECK(set_property->GetDeclaringClass() == properties_class);
 
   // This is an allocation. Do this late to avoid the need for handles.
   ScopedLocalRef<jobject> cp_jobj(self->GetJniEnv(), nullptr);
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index edb6ffe..ff2de8d 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -37,8 +37,8 @@
 #include <vector>
 
 #include "art_field-inl.h"
-#include "art_method-inl.h"
 #include "art_jvmti.h"
+#include "art_method-inl.h"
 #include "barrier.h"
 #include "base/bit_utils.h"
 #include "base/enums.h"
@@ -50,8 +50,8 @@
 #include "jni_internal.h"
 #include "mirror/class.h"
 #include "mirror/dex_cache.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "stack.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index fe0e3bb..9acea2a 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -43,14 +43,14 @@
 #include "mirror/class.h"
 #include "mirror/object-inl.h"
 #include "mirror/string.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "obj_ptr.h"
-#include "ti_phase.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 #include "thread_list.h"
+#include "ti_phase.h"
 #include "well_known_classes.h"
 
 namespace openjdkjvmti {
@@ -701,7 +701,7 @@
 
 jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
                                     jthread target_jthread,
-                                    art::Thread* target) {
+                                    const art::Thread* target) {
   // Loop since we need to bail out and try again if we would end up getting suspended while holding
   // the user_code_suspension_lock_ due to a SuspendReason::kForUserCode. In this situation we
   // release the lock, wait to get resumed and try again.
@@ -729,12 +729,12 @@
       if (state == art::ThreadState::kTerminated || state == art::ThreadState::kStarting) {
         return ERR(THREAD_NOT_ALIVE);
       }
-      target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
+      art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
           target_jthread,
           /* request_suspension */ true,
           art::SuspendReason::kForUserCode,
           &timeout);
-      if (target == nullptr && !timeout) {
+      if (ret_target == nullptr && !timeout) {
         // TODO It would be good to get more information about why exactly the thread failed to
         // suspend.
         return ERR(INTERNAL);
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
index d07dc06..0f7e837 100644
--- a/runtime/openjdkjvmti/ti_thread.h
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -98,7 +98,9 @@
   // cause the thread to wake up if the thread is suspended for the debugger or gc or something.
   static jvmtiError SuspendSelf(art::Thread* self)
       REQUIRES(!art::Locks::mutator_lock_, !art::Locks::user_code_suspension_lock_);
-  static jvmtiError SuspendOther(art::Thread* self, jthread target_jthread, art::Thread* target)
+  static jvmtiError SuspendOther(art::Thread* self,
+                                 jthread target_jthread,
+                                 const art::Thread* target)
       REQUIRES(!art::Locks::mutator_lock_, !art::Locks::user_code_suspension_lock_);
 
   static art::ArtField* context_class_loader_;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 4e95b01..b055bf9 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -18,6 +18,7 @@
 #include <vector>
 
 #include "art_field-inl.h"
+#include "art_method-inl.h"
 #include "base/enums.h"
 #include "class_linker-inl.h"
 #include "common_compiler_test.h"
@@ -63,21 +64,27 @@
     jsize array_index = 0;
     // Fill the method array
     DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
-    ArtMethod* method = javaLangObject->FindDeclaredVirtualMethod(
+    ArtMethod* method = javaLangObject->FindClassMethod(
         "equals", "(Ljava/lang/Object;)Z", kRuntimePointerSize);
     CHECK(method != nullptr);
+    CHECK(!method->IsDirect());
+    CHECK(method->GetDeclaringClass() == javaLangObject);
     DCHECK(!Runtime::Current()->IsActiveTransaction());
     soa.Env()->SetObjectArrayElement(
         proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
             mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
-    method = javaLangObject->FindDeclaredVirtualMethod("hashCode", "()I", kRuntimePointerSize);
+    method = javaLangObject->FindClassMethod("hashCode", "()I", kRuntimePointerSize);
     CHECK(method != nullptr);
+    CHECK(!method->IsDirect());
+    CHECK(method->GetDeclaringClass() == javaLangObject);
     soa.Env()->SetObjectArrayElement(
         proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
             mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
-    method = javaLangObject->FindDeclaredVirtualMethod(
+    method = javaLangObject->FindClassMethod(
         "toString", "()Ljava/lang/String;", kRuntimePointerSize);
     CHECK(method != nullptr);
+    CHECK(!method->IsDirect());
+    CHECK(method->GetDeclaringClass() == javaLangObject);
     soa.Env()->SetObjectArrayElement(
         proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
             mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), method)));
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index 260be8f..d830387 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -56,8 +56,8 @@
       h_ref_class->AllocObject(self)));
   CHECK(h_ref_instance != nullptr);
 
-  ArtMethod* constructor = h_ref_class->FindDeclaredDirectMethod(
-      "<init>", "(Ljava/lang/Object;)V", class_linker->GetImagePointerSize());
+  ArtMethod* constructor = h_ref_class->FindConstructor(
+      "(Ljava/lang/Object;)V", class_linker->GetImagePointerSize());
   CHECK(constructor != nullptr);
 
   uint32_t args[2];
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 532da2b..6f1d15c 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -28,10 +28,10 @@
 #include "mirror/class-inl.h"
 #include "mirror/executable.h"
 #include "mirror/object_array-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "nth_caller_visitor.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack_reference.h"
-#include "ScopedLocalRef.h"
 #include "well_known_classes.h"
 
 namespace art {
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 1ba4b7b..fa2f1e5 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -18,13 +18,13 @@
 
 #include <float.h>
 #include <limits.h>
-#include "ScopedLocalRef.h"
 
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "common_compiler_test.h"
 #include "java_vm_ext.h"
 #include "jni_internal.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 
 namespace art {
@@ -108,9 +108,9 @@
                                                        class_loader);
     CHECK(c != nullptr);
 
-    *method = is_static ? c->FindDirectMethod(method_name, method_signature, kRuntimePointerSize)
-                        : c->FindVirtualMethod(method_name, method_signature, kRuntimePointerSize);
-    CHECK(method != nullptr);
+    *method = c->FindClassMethod(method_name, method_signature, kRuntimePointerSize);
+    CHECK(*method != nullptr);
+    CHECK_EQ(is_static, (*method)->IsStatic());
 
     if (is_static) {
       *receiver = nullptr;
@@ -520,10 +520,11 @@
   mirror::Class* klass = class_linker_->FindClass(soa.Self(), "LMain;", class_loader);
   ASSERT_TRUE(klass != nullptr);
 
-  ArtMethod* method = klass->FindDirectMethod("main",
-                                              "([Ljava/lang/String;)V",
-                                              kRuntimePointerSize);
+  ArtMethod* method = klass->FindClassMethod("main",
+                                             "([Ljava/lang/String;)V",
+                                             kRuntimePointerSize);
   ASSERT_TRUE(method != nullptr);
+  ASSERT_TRUE(method->IsStatic());
 
   // Start runtime.
   bool started = runtime_->Start();
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index bf9e405..ebee5ea 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -39,8 +39,6 @@
 
 #include "android-base/strings.h"
 
-#include "JniConstants.h"
-#include "ScopedLocalRef.h"
 #include "arch/arm/quick_method_frame_info_arm.h"
 #include "arch/arm/registers_arm.h"
 #include "arch/arm64/quick_method_frame_info_arm64.h"
@@ -87,6 +85,7 @@
 #include "java_vm_ext.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
+#include "jit/profile_saver.h"
 #include "jni_internal.h"
 #include "linear_alloc.h"
 #include "mirror/array.h"
@@ -133,17 +132,17 @@
 #include "native/sun_misc_Unsafe.h"
 #include "native_bridge_art_interface.h"
 #include "native_stack_dump.h"
+#include "nativehelper/JniConstants.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "oat_file.h"
 #include "oat_file_manager.h"
 #include "object_callbacks.h"
 #include "os.h"
 #include "parsed_options.h"
-#include "jit/profile_saver.h"
 #include "quick/quick_method_frame_info.h"
 #include "reflection.h"
 #include "runtime_callbacks.h"
 #include "runtime_options.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "sigchain.h"
 #include "signal_catcher.h"
@@ -633,9 +632,10 @@
       hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader)));
   CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
 
-  ArtMethod* getSystemClassLoader = class_loader_class->FindDirectMethod(
+  ArtMethod* getSystemClassLoader = class_loader_class->FindClassMethod(
       "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
   CHECK(getSystemClassLoader != nullptr);
+  CHECK(getSystemClassLoader->IsStatic());
 
   JValue result = InvokeWithJValues(soa,
                                     nullptr,
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 640f9ce..0ea3180 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -34,10 +34,10 @@
 #include "mem_map.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "obj_ptr.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "thread-inl.h"
 #include "thread_list.h"
 #include "well_known_classes.h"
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 09a200a..78a60fa 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -70,7 +70,7 @@
 RUNTIME_OPTIONS_KEY (bool,                EnableHSpaceCompactForOOM,      true)
 RUNTIME_OPTIONS_KEY (bool,                UseJitCompilation,              false)
 RUNTIME_OPTIONS_KEY (bool,                DumpNativeStackOnSigQuit,       true)
-RUNTIME_OPTIONS_KEY (unsigned int,        JITCompileThreshold,            jit::Jit::kDefaultCompileThreshold)
+RUNTIME_OPTIONS_KEY (unsigned int,        JITCompileThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITWarmupThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITOsrThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITPriorityThreadWeight)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 004b68e..cdbb908 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -34,16 +34,16 @@
 
 #include "android-base/stringprintf.h"
 
-#include "arch/context.h"
 #include "arch/context-inl.h"
+#include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/bit_utils.h"
 #include "base/memory_tool.h"
 #include "base/mutex.h"
+#include "base/systrace.h"
 #include "base/timing_logger.h"
 #include "base/to_str.h"
-#include "base/systrace.h"
 #include "class_linker-inl.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
@@ -58,38 +58,38 @@
 #include "gc_root.h"
 #include "handle_scope-inl.h"
 #include "indirect_reference_table-inl.h"
+#include "interpreter/interpreter.h"
 #include "interpreter/shadow_frame.h"
 #include "java_frame_root_info.h"
 #include "java_vm_ext.h"
 #include "jni_internal.h"
-#include "mirror/class_loader.h"
 #include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/stack_trace_element.h"
 #include "monitor.h"
 #include "native_stack_dump.h"
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "nth_caller_visitor.h"
 #include "oat_quick_method_header.h"
 #include "obj_ptr-inl.h"
 #include "object_lock.h"
-#include "quick_exception_handler.h"
 #include "quick/quick_method_frame_info.h"
+#include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
-#include "ScopedUtfChars.h"
 #include "stack.h"
 #include "stack_map.h"
-#include "thread_list.h"
 #include "thread-inl.h"
+#include "thread_list.h"
 #include "utils.h"
 #include "verifier/method_verifier.h"
 #include "verify_object.h"
 #include "well_known_classes.h"
-#include "interpreter/interpreter.h"
 
 #if ART_USE_FUTEXES
 #include "linux/futex.h"
@@ -2780,7 +2780,7 @@
     }
   }
   ArtMethod* exception_init_method =
-      exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());
+      exception_class->FindConstructor(signature, cl->GetImagePointerSize());
 
   CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
       << PrettyDescriptor(exception_class_descriptor);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 9c938ff..f1a7b65 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -18,8 +18,8 @@
 
 #include <backtrace/BacktraceMap.h>
 #include <dirent.h>
-#include <ScopedLocalRef.h>
-#include <ScopedUtfChars.h>
+#include "nativehelper/ScopedLocalRef.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include <sys/types.h>
 #include <unistd.h>
 
diff --git a/runtime/trace.cc b/runtime/trace.cc
index cabd162..36532c6 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -32,20 +32,20 @@
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "instrumentation.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
-#include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "os.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedLocalRef.h"
 #include "stack.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "utils.h"
-#include "entrypoints/quick/quick_entrypoints.h"
 
 namespace art {
 
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 72f63c6..9d4e9fb 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -64,7 +64,7 @@
                 "Expecting alignof(StringDexCacheType) == 8");
   static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
                 "Expecting alignof(MethodTypeDexCacheType) == 8");
-  // This is the same as alignof(FieldDexCacheType) for the given pointer size.
+  // This is the same as alignof({Field,Method}DexCacheType) for the given pointer size.
   return 2u * static_cast<size_t>(pointer_size);
 }
 
@@ -84,7 +84,7 @@
   if (num_elements < cache_size) {
     cache_size = num_elements;
   }
-  return ArraySize(PointerSize::k64, cache_size);
+  return PairArraySize(GcRootAsPointerSize<mirror::Class>(), cache_size);
 }
 
 inline size_t DexCacheArraysLayout::TypesAlignment() const {
@@ -96,11 +96,15 @@
 }
 
 inline size_t DexCacheArraysLayout::MethodsSize(size_t num_elements) const {
-  return ArraySize(pointer_size_, num_elements);
+  size_t cache_size = mirror::DexCache::kDexCacheMethodCacheSize;
+  if (num_elements < cache_size) {
+    cache_size = num_elements;
+  }
+  return PairArraySize(pointer_size_, cache_size);
 }
 
 inline size_t DexCacheArraysLayout::MethodsAlignment() const {
-  return static_cast<size_t>(pointer_size_);
+  return 2u * static_cast<size_t>(pointer_size_);
 }
 
 inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
@@ -113,7 +117,7 @@
   if (num_elements < cache_size) {
     cache_size = num_elements;
   }
-  return ArraySize(PointerSize::k64, cache_size);
+  return PairArraySize(GcRootAsPointerSize<mirror::String>(), cache_size);
 }
 
 inline size_t DexCacheArraysLayout::StringsAlignment() const {
@@ -132,7 +136,7 @@
   if (num_elements < cache_size) {
     cache_size = num_elements;
   }
-  return 2u * static_cast<size_t>(pointer_size_) * cache_size;
+  return PairArraySize(pointer_size_, cache_size);
 }
 
 inline size_t DexCacheArraysLayout::FieldsAlignment() const {
@@ -170,6 +174,10 @@
   return static_cast<size_t>(element_size) * num_elements;
 }
 
+inline size_t DexCacheArraysLayout::PairArraySize(PointerSize element_size, uint32_t num_elements) {
+  return 2u * static_cast<size_t>(element_size) * num_elements;
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_INL_H_
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index 377a374..fc04159 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -130,6 +130,7 @@
   static size_t ElementOffset(PointerSize element_size, uint32_t idx);
 
   static size_t ArraySize(PointerSize element_size, uint32_t num_elements);
+  static size_t PairArraySize(PointerSize element_size, uint32_t num_elements);
 };
 
 }  // namespace art
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 634bd47..48b703a 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -192,18 +192,21 @@
   ASSERT_TRUE(c != nullptr);
   ArtMethod* m;
 
-  m = c->FindVirtualMethod("charAt", "(I)C", kRuntimePointerSize);
+  m = c->FindClassMethod("charAt", "(I)C", kRuntimePointerSize);
   ASSERT_TRUE(m != nullptr);
+  ASSERT_FALSE(m->IsDirect());
   EXPECT_EQ("Java_java_lang_String_charAt", m->JniShortName());
   EXPECT_EQ("Java_java_lang_String_charAt__I", m->JniLongName());
 
-  m = c->FindVirtualMethod("indexOf", "(Ljava/lang/String;I)I", kRuntimePointerSize);
+  m = c->FindClassMethod("indexOf", "(Ljava/lang/String;I)I", kRuntimePointerSize);
   ASSERT_TRUE(m != nullptr);
+  ASSERT_FALSE(m->IsDirect());
   EXPECT_EQ("Java_java_lang_String_indexOf", m->JniShortName());
   EXPECT_EQ("Java_java_lang_String_indexOf__Ljava_lang_String_2I", m->JniLongName());
 
-  m = c->FindDirectMethod("copyValueOf", "([CII)Ljava/lang/String;", kRuntimePointerSize);
+  m = c->FindClassMethod("copyValueOf", "([CII)Ljava/lang/String;", kRuntimePointerSize);
   ASSERT_TRUE(m != nullptr);
+  ASSERT_TRUE(m->IsStatic());
   EXPECT_EQ("Java_java_lang_String_copyValueOf", m->JniShortName());
   EXPECT_EQ("Java_java_lang_String_copyValueOf___3CII", m->JniLongName());
 }
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index ea480f4..0351fd3 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -72,8 +72,8 @@
 
    private:
     static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
-    // Last update: Change quickening info format.
-    static constexpr uint8_t kVdexVersion[] = { '0', '0', '8', '\0' };
+    // Last update: Change method lookup.
+    static constexpr uint8_t kVdexVersion[] = { '0', '0', '9', '\0' };
 
     uint8_t magic_[4];
     uint8_t version_[4];
diff --git a/runtime/verifier/method_resolution_kind.h b/runtime/verifier/method_resolution_kind.h
deleted file mode 100644
index f72eb7a..0000000
--- a/runtime/verifier/method_resolution_kind.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_VERIFIER_METHOD_RESOLUTION_KIND_H_
-#define ART_RUNTIME_VERIFIER_METHOD_RESOLUTION_KIND_H_
-
-namespace art {
-namespace verifier {
-
-// Values corresponding to the method resolution algorithms defined in mirror::Class.
-enum MethodResolutionKind {
-  kDirectMethodResolution,
-  kVirtualMethodResolution,
-  kInterfaceMethodResolution,
-};
-
-}  // namespace verifier
-}  // namespace art
-
-#endif  // ART_RUNTIME_VERIFIER_METHOD_RESOLUTION_KIND_H_
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index efb02f6..6dc7953 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -39,7 +39,6 @@
 #include "indenter.h"
 #include "intern_table.h"
 #include "leb128.h"
-#include "method_resolution_kind.h"
 #include "mirror/class.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
@@ -230,7 +229,7 @@
     }
     previous_method_idx = method_idx;
     InvokeType type = it->GetMethodInvokeType(class_def);
-    ArtMethod* method = linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
+    ArtMethod* method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
         *dex_file, method_idx, dex_cache, class_loader, nullptr, type);
     if (method == nullptr) {
       DCHECK(self->IsExceptionPending());
@@ -3821,21 +3820,6 @@
   return *common_super;
 }
 
-inline static MethodResolutionKind GetMethodResolutionKind(
-    MethodType method_type, bool is_interface) {
-  if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) {
-    return kDirectMethodResolution;
-  } else if (method_type == METHOD_INTERFACE) {
-    return kInterfaceMethodResolution;
-  } else if (method_type == METHOD_SUPER && is_interface) {
-    return kInterfaceMethodResolution;
-  } else {
-    DCHECK(method_type == METHOD_VIRTUAL || method_type == METHOD_SUPER
-           || method_type == METHOD_POLYMORPHIC);
-    return kVirtualMethodResolution;
-  }
-}
-
 ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
     uint32_t dex_method_idx, MethodType method_type) {
   const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
@@ -3849,47 +3833,41 @@
   if (klass_type.IsUnresolvedTypes()) {
     return nullptr;  // Can't resolve Class so no more to do here
   }
-  mirror::Class* klass = klass_type.GetClass();
+  ObjPtr<mirror::Class> klass = klass_type.GetClass();
   const RegType& referrer = GetDeclaringClass();
   auto* cl = Runtime::Current()->GetClassLinker();
   auto pointer_size = cl->GetImagePointerSize();
-  MethodResolutionKind res_kind = GetMethodResolutionKind(method_type, klass->IsInterface());
 
   ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx, pointer_size);
-  bool stash_method = false;
   if (res_method == nullptr) {
-    const char* name = dex_file_->GetMethodName(method_id);
-    const Signature signature = dex_file_->GetMethodSignature(method_id);
-
-    if (res_kind == kDirectMethodResolution) {
-      res_method = klass->FindDirectMethod(name, signature, pointer_size);
-    } else if (res_kind == kVirtualMethodResolution) {
-      res_method = klass->FindVirtualMethod(name, signature, pointer_size);
+    // Try to find the method with the appropriate lookup for the klass type (interface or not).
+    // If this lookup does not match `method_type`, errors shall be reported below.
+    if (klass->IsInterface()) {
+      res_method = klass->FindInterfaceMethod(dex_cache_.Get(), dex_method_idx, pointer_size);
     } else {
-      DCHECK_EQ(res_kind, kInterfaceMethodResolution);
-      res_method = klass->FindInterfaceMethod(name, signature, pointer_size);
+      res_method = klass->FindClassMethod(dex_cache_.Get(), dex_method_idx, pointer_size);
     }
-
     if (res_method != nullptr) {
-      stash_method = true;
-    } else {
-      // If a virtual or interface method wasn't found with the expected type, look in
-      // the direct methods. This can happen when the wrong invoke type is used or when
-      // a class has changed, and will be flagged as an error in later checks.
-      // Note that in this case, we do not put the resolved method in the Dex cache
-      // because it was not discovered using the expected type of method resolution.
-      if (res_kind != kDirectMethodResolution) {
-        // Record result of the initial resolution attempt.
-        VerifierDeps::MaybeRecordMethodResolution(*dex_file_, dex_method_idx, res_kind, nullptr);
-        // Change resolution type to 'direct' and try to resolve again.
-        res_kind = kDirectMethodResolution;
-        res_method = klass->FindDirectMethod(name, signature, pointer_size);
-      }
+      dex_cache_->SetResolvedMethod(dex_method_idx, res_method, pointer_size);
     }
   }
 
-  // Record result of method resolution attempt.
-  VerifierDeps::MaybeRecordMethodResolution(*dex_file_, dex_method_idx, res_kind, res_method);
+  // Record result of method resolution attempt. The klass resolution has recorded whether
+  // the class is an interface or not and therefore the type of the lookup performed above.
+  // TODO: Maybe we should not record dependency if the invoke type does not match the lookup type.
+  VerifierDeps::MaybeRecordMethodResolution(*dex_file_, dex_method_idx, res_method);
+
+  if (res_method == nullptr) {
+    // Try to find the method also with the other type for better error reporting below
+    // but do not store such bogus lookup result in the DexCache or VerifierDeps.
+    if (klass->IsInterface()) {
+      res_method = klass->FindClassMethod(dex_cache_.Get(), dex_method_idx, pointer_size);
+    } else {
+      // If there was an interface method with the same signature,
+      // we would have found it also in the "copied" methods.
+      DCHECK(klass->FindInterfaceMethod(dex_cache_.Get(), dex_method_idx, pointer_size) == nullptr);
+    }
+  }
 
   if (res_method == nullptr) {
     Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method "
@@ -3940,11 +3918,6 @@
     }
   }
 
-  // Only stash after the above passed. Otherwise the method wasn't guaranteed to be correct.
-  if (stash_method) {
-    dex_cache_->SetResolvedMethod(dex_method_idx, res_method, pointer_size);
-  }
-
   // Check if access is allowed.
   if (!referrer.CanAccessMember(res_method->GetDeclaringClass(), res_method->GetAccessFlags())) {
     Fail(VERIFY_ERROR_ACCESS_METHOD) << "illegal method access (call "
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 740b7dd..883de38 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -711,6 +711,29 @@
       DCHECK(c1 != nullptr && !c1->IsPrimitive());
       DCHECK(c2 != nullptr && !c2->IsPrimitive());
       mirror::Class* join_class = ClassJoin(c1, c2);
+      if (UNLIKELY(join_class == nullptr)) {
+        // Internal error joining the classes (e.g., OOME). Report an unresolved reference type.
+        // We cannot report an unresolved merge type, as that will attempt to merge the resolved
+        // components, leaving us in an infinite loop.
+        // We do not want to report the originating exception, as that would require a fast path
+        // out all the way to VerifyClass. Instead attempt to continue on without a detailed type.
+        Thread* self = Thread::Current();
+        self->AssertPendingException();
+        self->ClearException();
+
+        // When compiling on the host, we rather want to abort to ensure determinism for preopting.
+        // (In that case, it is likely a misconfiguration of dex2oat.)
+        if (!kIsTargetBuild && Runtime::Current()->IsAotCompiler()) {
+          LOG(FATAL) << "Could not create class join of "
+                     << c1->PrettyClass()
+                     << " & "
+                     << c2->PrettyClass();
+          UNREACHABLE();
+        }
+
+        return reg_types->MakeUnresolvedReference();
+      }
+
       // Record the dependency that both `c1` and `c2` are assignable to `join_class`.
       // The `verifier` is null during unit tests.
       if (verifier != nullptr) {
@@ -753,10 +776,18 @@
       DCHECK(result->IsObjectClass());
       return result;
     }
+    Thread* self = Thread::Current();
     ObjPtr<mirror::Class> common_elem = ClassJoin(s_ct, t_ct);
+    if (UNLIKELY(common_elem == nullptr)) {
+      self->AssertPendingException();
+      return nullptr;
+    }
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-    mirror::Class* array_class = class_linker->FindArrayClass(Thread::Current(), &common_elem);
-    DCHECK(array_class != nullptr);
+    mirror::Class* array_class = class_linker->FindArrayClass(self, &common_elem);
+    if (UNLIKELY(array_class == nullptr)) {
+      self->AssertPendingException();
+      return nullptr;
+    }
     return array_class;
   } else {
     size_t s_depth = s->Depth();
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 6c01a79..c5d8ff5 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -355,6 +355,10 @@
    * the perversion of Object being assignable to an interface type (note, however, that we don't
    * allow assignment of Object or Interface to any concrete class and are therefore type safe).
    *
+   * Note: This may return null in case of internal errors, e.g., OOME when a new class would have
+   *       to be created but there is no heap space. The exception will stay pending, and it is
+   *       the job of the caller to handle it.
+   *
    * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
    */
   static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 93286ea..0c00868 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -222,6 +222,11 @@
   }
 }
 
+const RegType& RegTypeCache::MakeUnresolvedReference() {
+  // The descriptor is intentionally invalid so nothing else will match this type.
+  return AddEntry(new (&arena_) UnresolvedReferenceType(AddString("a"), entries_.size()));
+}
+
 const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
   DCHECK(klass != nullptr);
   if (klass->IsPrimitive()) {
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 37f8a1f..c9bf6a9 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -97,6 +97,10 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   const RegType& FromUnresolvedSuperClass(const RegType& child)
       REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Note: this should not be used outside of RegType::ClassJoin!
+  const RegType& MakeUnresolvedReference() REQUIRES_SHARED(Locks::mutator_lock_);
+
   const ConstantType& Zero() REQUIRES_SHARED(Locks::mutator_lock_) {
     return FromCat1Const(0, true);
   }
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index b0ea6c8..1aa0966 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -22,6 +22,7 @@
 #include "base/casts.h"
 #include "base/scoped_arena_allocator.h"
 #include "common_runtime_test.h"
+#include "compiler_callbacks.h"
 #include "reg_type_cache-inl.h"
 #include "reg_type-inl.h"
 #include "scoped_thread_state_change-inl.h"
@@ -677,5 +678,59 @@
   EXPECT_FALSE(imprecise_const.Equals(precise_const));
 }
 
+class RegTypeOOMTest : public RegTypeTest {
+ protected:
+  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+    SetUpRuntimeOptionsForFillHeap(options);
+
+    // We must not appear to be a compiler, or we'll abort on the host.
+    callbacks_.reset();
+  }
+};
+
+TEST_F(RegTypeOOMTest, ClassJoinOOM) {
+  // TODO: Figure out why FillHeap isn't good enough under CMS.
+  TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS();
+
+  // Tests that we don't abort with OOMs.
+
+  ArenaStack stack(Runtime::Current()->GetArenaPool());
+  ScopedArenaAllocator allocator(&stack);
+  ScopedObjectAccess soa(Thread::Current());
+
+  // We cannot allow moving GC. Otherwise we'd have to ensure the reg types are updated (reference
+  // reg types store a class pointer in a GCRoot, which is normally updated through active verifiers
+  // being registered with their thread), which is unnecessarily complex.
+  Runtime::Current()->GetHeap()->IncrementDisableMovingGC(soa.Self());
+
+  // We merge nested array of primitive wrappers. These have a join type of an array of Number of
+  // the same depth. We start with depth five, as we want at least two newly created classes to
+  // test recursion (it's just more likely that nobody uses such deep arrays in runtime bringup).
+  constexpr const char* kIntArrayFive = "[[[[[Ljava/lang/Integer;";
+  constexpr const char* kFloatArrayFive = "[[[[[Ljava/lang/Float;";
+  constexpr const char* kNumberArrayFour = "[[[[Ljava/lang/Number;";
+  constexpr const char* kNumberArrayFive = "[[[[[Ljava/lang/Number;";
+
+  RegTypeCache cache(true, allocator);
+  const RegType& int_array_array = cache.From(nullptr, kIntArrayFive, false);
+  ASSERT_TRUE(int_array_array.HasClass());
+  const RegType& float_array_array = cache.From(nullptr, kFloatArrayFive, false);
+  ASSERT_TRUE(float_array_array.HasClass());
+
+  // Check assumptions: the joined classes don't exist, yet.
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ASSERT_TRUE(class_linker->LookupClass(soa.Self(), kNumberArrayFour, nullptr) == nullptr);
+  ASSERT_TRUE(class_linker->LookupClass(soa.Self(), kNumberArrayFive, nullptr) == nullptr);
+
+  // Fill the heap.
+  VariableSizedHandleScope hs(soa.Self());
+  FillHeap(soa.Self(), class_linker, &hs);
+
+  const RegType& join_type = int_array_array.Merge(float_array_array, &cache, nullptr);
+  ASSERT_TRUE(join_type.IsUnresolvedReference());
+
+  Runtime::Current()->GetHeap()->DecrementDisableMovingGC(soa.Self());
+}
+
 }  // namespace verifier
 }  // namespace art
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 122e05f..112eec8 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -54,9 +54,7 @@
     MergeSets(my_deps->unassignable_types_, other_deps.unassignable_types_);
     MergeSets(my_deps->classes_, other_deps.classes_);
     MergeSets(my_deps->fields_, other_deps.fields_);
-    MergeSets(my_deps->direct_methods_, other_deps.direct_methods_);
-    MergeSets(my_deps->virtual_methods_, other_deps.virtual_methods_);
-    MergeSets(my_deps->interface_methods_, other_deps.interface_methods_);
+    MergeSets(my_deps->methods_, other_deps.methods_);
     for (dex::TypeIndex entry : other_deps.unverified_classes_) {
       my_deps->unverified_classes_.push_back(entry);
     }
@@ -317,7 +315,6 @@
 
 void VerifierDeps::AddMethodResolution(const DexFile& dex_file,
                                        uint32_t method_idx,
-                                       MethodResolutionKind resolution_kind,
                                        ArtMethod* method) {
   DexFileDeps* dex_deps = GetDexFileDeps(dex_file);
   if (dex_deps == nullptr) {
@@ -334,14 +331,7 @@
   MethodResolution method_tuple(method_idx,
                                 GetAccessFlags(method),
                                 GetMethodDeclaringClassStringId(dex_file, method_idx, method));
-  if (resolution_kind == kDirectMethodResolution) {
-    dex_deps->direct_methods_.emplace(method_tuple);
-  } else if (resolution_kind == kVirtualMethodResolution) {
-    dex_deps->virtual_methods_.emplace(method_tuple);
-  } else {
-    DCHECK_EQ(resolution_kind, kInterfaceMethodResolution);
-    dex_deps->interface_methods_.emplace(method_tuple);
-  }
+  dex_deps->methods_.insert(method_tuple);
 }
 
 mirror::Class* VerifierDeps::FindOneClassPathBoundaryForInterface(mirror::Class* destination,
@@ -537,11 +527,10 @@
 
 void VerifierDeps::MaybeRecordMethodResolution(const DexFile& dex_file,
                                                uint32_t method_idx,
-                                               MethodResolutionKind resolution_kind,
                                                ArtMethod* method) {
   VerifierDeps* thread_deps = GetThreadLocalVerifierDeps();
   if (thread_deps != nullptr) {
-    thread_deps->AddMethodResolution(dex_file, method_idx, resolution_kind, method);
+    thread_deps->AddMethodResolution(dex_file, method_idx, method);
   }
 }
 
@@ -698,9 +687,7 @@
     EncodeSet(buffer, deps.unassignable_types_);
     EncodeSet(buffer, deps.classes_);
     EncodeSet(buffer, deps.fields_);
-    EncodeSet(buffer, deps.direct_methods_);
-    EncodeSet(buffer, deps.virtual_methods_);
-    EncodeSet(buffer, deps.interface_methods_);
+    EncodeSet(buffer, deps.methods_);
     EncodeUint16Vector(buffer, deps.unverified_classes_);
   }
 }
@@ -723,9 +710,7 @@
     DecodeSet(&data_start, data_end, &deps->unassignable_types_);
     DecodeSet(&data_start, data_end, &deps->classes_);
     DecodeSet(&data_start, data_end, &deps->fields_);
-    DecodeSet(&data_start, data_end, &deps->direct_methods_);
-    DecodeSet(&data_start, data_end, &deps->virtual_methods_);
-    DecodeSet(&data_start, data_end, &deps->interface_methods_);
+    DecodeSet(&data_start, data_end, &deps->methods_);
     DecodeUint16Vector(&data_start, data_end, &deps->unverified_classes_);
   }
   CHECK_LE(data_start, data_end);
@@ -763,9 +748,7 @@
          (unassignable_types_ == rhs.unassignable_types_) &&
          (classes_ == rhs.classes_) &&
          (fields_ == rhs.fields_) &&
-         (direct_methods_ == rhs.direct_methods_) &&
-         (virtual_methods_ == rhs.virtual_methods_) &&
-         (interface_methods_ == rhs.interface_methods_) &&
+         (methods_ == rhs.methods_) &&
          (unverified_classes_ == rhs.unverified_classes_);
 }
 
@@ -825,27 +808,21 @@
       }
     }
 
-    for (const auto& entry :
-            { std::make_pair(kDirectMethodResolution, dep.second->direct_methods_),
-              std::make_pair(kVirtualMethodResolution, dep.second->virtual_methods_),
-              std::make_pair(kInterfaceMethodResolution, dep.second->interface_methods_) }) {
-      for (const MethodResolution& method : entry.second) {
-        const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+    for (const MethodResolution& method : dep.second->methods_) {
+      const DexFile::MethodId& method_id = dex_file.GetMethodId(method.GetDexMethodIndex());
+      vios->Stream()
+          << dex_file.GetMethodDeclaringClassDescriptor(method_id) << "->"
+          << dex_file.GetMethodName(method_id)
+          << dex_file.GetMethodSignature(method_id).ToString()
+          << " is expected to be ";
+      if (!method.IsResolved()) {
+        vios->Stream() << "unresolved\n";
+      } else {
         vios->Stream()
-            << dex_file.GetMethodDeclaringClassDescriptor(method_id) << "->"
-            << dex_file.GetMethodName(method_id)
-            << dex_file.GetMethodSignature(method_id).ToString()
-            << " is expected to be ";
-        if (!method.IsResolved()) {
-          vios->Stream() << "unresolved\n";
-        } else {
-          vios->Stream()
-            << "in class "
-            << GetStringFromId(dex_file, method.GetDeclaringClassIndex())
-            << ", have the access flags " << std::hex << method.GetAccessFlags() << std::dec
-            << ", and be of kind " << entry.first
-            << "\n";
-        }
+          << "in class "
+          << GetStringFromId(dex_file, method.GetDeclaringClassIndex())
+          << ", have the access flags " << std::hex << method.GetAccessFlags() << std::dec
+          << "\n";
       }
     }
 
@@ -1030,7 +1007,6 @@
 bool VerifierDeps::VerifyMethods(Handle<mirror::ClassLoader> class_loader,
                                  const DexFile& dex_file,
                                  const std::set<MethodResolution>& methods,
-                                 MethodResolutionKind kind,
                                  Thread* self) const {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   PointerSize pointer_size = class_linker->GetImagePointerSize();
@@ -1054,27 +1030,20 @@
     }
     DCHECK(cls->IsResolved());
     ArtMethod* method = nullptr;
-    if (kind == kDirectMethodResolution) {
-      method = cls->FindDirectMethod(name, signature, pointer_size);
-    } else if (kind == kVirtualMethodResolution) {
-      method = cls->FindVirtualMethod(name, signature, pointer_size);
-    } else {
-      DCHECK_EQ(kind, kInterfaceMethodResolution);
+    if (cls->IsInterface()) {
       method = cls->FindInterfaceMethod(name, signature, pointer_size);
+    } else {
+      method = cls->FindClassMethod(name, signature, pointer_size);
     }
 
     if (entry.IsResolved()) {
       std::string temp;
       if (method == nullptr) {
-        LOG(INFO) << "VerifierDeps: Could not resolve "
-                  << kind
-                  << " method "
+        LOG(INFO) << "VerifierDeps: Could not resolve method "
                   << GetMethodDescription(dex_file, entry.GetDexMethodIndex());
         return false;
       } else if (expected_decl_klass != method->GetDeclaringClass()->GetDescriptor(&temp)) {
-        LOG(INFO) << "VerifierDeps: Unexpected declaring class for "
-                  << kind
-                  << " method resolution "
+        LOG(INFO) << "VerifierDeps: Unexpected declaring class for method resolution "
                   << GetMethodDescription(dex_file, entry.GetDexMethodIndex())
                   << " (expected="
                   << expected_decl_klass
@@ -1083,9 +1052,7 @@
                   << ")";
         return false;
       } else if (entry.GetAccessFlags() != GetAccessFlags(method)) {
-        LOG(INFO) << "VerifierDeps: Unexpected access flags for resolved "
-                  << kind
-                  << " method resolution "
+        LOG(INFO) << "VerifierDeps: Unexpected access flags for resolved method resolution "
                   << GetMethodDescription(dex_file, entry.GetDexMethodIndex())
                   << std::hex
                   << " (expected="
@@ -1096,9 +1063,7 @@
         return false;
       }
     } else if (method != nullptr) {
-      LOG(INFO) << "VerifierDeps: Unexpected successful resolution of "
-                << kind
-                << " method "
+      LOG(INFO) << "VerifierDeps: Unexpected successful resolution of method "
                 << GetMethodDescription(dex_file, entry.GetDexMethodIndex());
       return false;
     }
@@ -1118,12 +1083,7 @@
   result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self);
   result = result && VerifyFields(class_loader, dex_file, deps.fields_, self);
 
-  result = result && VerifyMethods(
-      class_loader, dex_file, deps.direct_methods_, kDirectMethodResolution, self);
-  result = result && VerifyMethods(
-      class_loader, dex_file, deps.virtual_methods_, kVirtualMethodResolution, self);
-  result = result && VerifyMethods(
-      class_loader, dex_file, deps.interface_methods_, kInterfaceMethodResolution, self);
+  result = result && VerifyMethods(class_loader, dex_file, deps.methods_, self);
 
   return result;
 }
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index 43eb948..b883a9e 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -25,7 +25,6 @@
 #include "base/mutex.h"
 #include "dex_file_types.h"
 #include "handle.h"
-#include "method_resolution_kind.h"
 #include "obj_ptr.h"
 #include "thread.h"
 #include "verifier_enums.h"  // For MethodVerifier::FailureKind.
@@ -88,12 +87,10 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::verifier_deps_lock_);
 
-  // Record the outcome `method` of resolving method `method_idx` from `dex_file`
-  // using `res_kind` kind of method resolution algorithm. If `method` is null,
-  // the method is assumed unresolved.
+  // Record the outcome `method` of resolving method `method_idx` from `dex_file`.
+  // If `method` is null, the method is assumed unresolved.
   static void MaybeRecordMethodResolution(const DexFile& dex_file,
                                           uint32_t method_idx,
-                                          MethodResolutionKind res_kind,
                                           ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::verifier_deps_lock_);
@@ -193,9 +190,7 @@
     // Sets of recorded class/field/method resolutions.
     std::set<ClassResolution> classes_;
     std::set<FieldResolution> fields_;
-    std::set<MethodResolution> direct_methods_;
-    std::set<MethodResolution> virtual_methods_;
-    std::set<MethodResolution> interface_methods_;
+    std::set<MethodResolution> methods_;
 
     // List of classes that were not fully verified in that dex file.
     std::vector<dex::TypeIndex> unverified_classes_;
@@ -267,7 +262,6 @@
 
   void AddMethodResolution(const DexFile& dex_file,
                            uint32_t method_idx,
-                           MethodResolutionKind res_kind,
                            ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::verifier_deps_lock_);
@@ -321,7 +315,6 @@
   bool VerifyMethods(Handle<mirror::ClassLoader> class_loader,
                      const DexFile& dex_file,
                      const std::set<MethodResolution>& methods,
-                     MethodResolutionKind kind,
                      Thread* self) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index f72fdb4..1c14cf2 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -27,8 +27,8 @@
 #include "jni_internal.h"
 #include "mirror/class.h"
 #include "mirror/throwable.h"
+#include "nativehelper/ScopedLocalRef.h"
 #include "obj_ptr-inl.h"
-#include "ScopedLocalRef.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 
diff --git a/test/162-method-resolution/expected.txt b/test/162-method-resolution/expected.txt
new file mode 100644
index 0000000..1bf39c9
--- /dev/null
+++ b/test/162-method-resolution/expected.txt
@@ -0,0 +1,43 @@
+Calling Test1Derived.test():
+Test1Derived.foo()
+Calling Test1User.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IllegalAccessError
+Calling Test1User2.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IllegalAccessError
+Calling Test2User.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IncompatibleClassChangeError
+Calling Test2User2.test():
+Test2Base.foo()
+Calling Test3User.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IncompatibleClassChangeError
+Calling Test4User.test():
+Test4Derived@...
+Calling Test5User.test():
+Test5Derived.foo()
+Calling Test5User2.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IncompatibleClassChangeError
+Calling Test6User.test():
+Test6Derived@...
+Calling Test6User2.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IncompatibleClassChangeError
+Calling Test7User.test():
+Test7Interface.foo()
+Calling Test7User2.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IllegalAccessError
+Calling Test8User.test():
+Test8Derived.foo()
+Calling Test8User2.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IncompatibleClassChangeError
+Calling Test9User.test():
+Test9Derived.foo()
+Calling Test9User2.test():
+Caught java.lang.reflect.InvocationTargetException
+  caused by java.lang.IncompatibleClassChangeError
diff --git a/test/162-method-resolution/info.txt b/test/162-method-resolution/info.txt
new file mode 100644
index 0000000..ff57a9a
--- /dev/null
+++ b/test/162-method-resolution/info.txt
@@ -0,0 +1,4 @@
+Tests that method resolution is consistent with the JLS and the RI.
+Where the RI conflicts with JLS, we follow the JLS and suppress the divergence
+when the test is executed with --jvm.
+(See Main.java for per-test details.)
diff --git a/test/162-method-resolution/jasmin-multidex/Test1User.j b/test/162-method-resolution/jasmin-multidex/Test1User.j
new file mode 100644
index 0000000..09ba77b
--- /dev/null
+++ b/test/162-method-resolution/jasmin-multidex/Test1User.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test1User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test1Derived
+    dup
+    invokespecial Test1Derived.<init>()V
+    invokevirtual Test1Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin-multidex/Test3User.j b/test/162-method-resolution/jasmin-multidex/Test3User.j
new file mode 100644
index 0000000..90f3a4e
--- /dev/null
+++ b/test/162-method-resolution/jasmin-multidex/Test3User.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test3User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test3Derived
+    dup
+    invokespecial Test3Derived.<init>()V
+    invokevirtual Test3Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test1Derived.j b/test/162-method-resolution/jasmin/Test1Derived.j
new file mode 100644
index 0000000..d754c64
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test1Derived.j
@@ -0,0 +1,43 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test1Derived
+.super Test1Base
+
+.method public <init>()V
+   .limit stack 1
+   .limit locals 1
+   aload_0
+   invokespecial Test1Base.<init>()V
+   return
+.end method
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test1Derived
+    dup
+    invokespecial Test1Derived.<init>()V
+    invokespecial Test1Derived.foo()V
+    return
+.end method
+
+.method private foo()V
+    .limit stack 2
+    .limit locals 1
+    getstatic java/lang/System/out Ljava/io/PrintStream;
+    ldc "Test1Derived.foo()"
+    invokevirtual java/io/PrintStream.println(Ljava/lang/String;)V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test1User2.j b/test/162-method-resolution/jasmin/Test1User2.j
new file mode 100644
index 0000000..8af9aab
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test1User2.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test1User2
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test1Derived
+    dup
+    invokespecial Test1Derived.<init>()V
+    invokevirtual Test1Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test2Derived.j b/test/162-method-resolution/jasmin/Test2Derived.j
new file mode 100644
index 0000000..bb4525d
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test2Derived.j
@@ -0,0 +1,25 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test2Derived
+.super Test2Base
+.implements Test2Interface
+
+.method public <init>()V
+   .limit stack 1
+   .limit locals 1
+   aload_0
+   invokespecial Test2Base.<init>()V
+   return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test2User.j b/test/162-method-resolution/jasmin/Test2User.j
new file mode 100644
index 0000000..2cce074
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test2User.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test2User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test2Derived
+    dup
+    invokespecial Test2Derived.<init>()V
+    invokevirtual Test2Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test2User2.j b/test/162-method-resolution/jasmin/Test2User2.j
new file mode 100644
index 0000000..eb80f32
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test2User2.j
@@ -0,0 +1,23 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test2User2
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 0
+    .limit locals 0
+    invokestatic Test2Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test3Derived.j b/test/162-method-resolution/jasmin/Test3Derived.j
new file mode 100644
index 0000000..2bf4bf1
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test3Derived.j
@@ -0,0 +1,25 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test3Derived
+.super Test3Base
+.implements Test3Interface
+
+.method public <init>()V
+   .limit stack 1
+   .limit locals 1
+   aload_0
+   invokespecial Test3Base.<init>()V
+   return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test4User.j b/test/162-method-resolution/jasmin/Test4User.j
new file mode 100644
index 0000000..5b65368
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test4User.j
@@ -0,0 +1,29 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test4User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 3
+    .limit locals 0
+    getstatic java/lang/System/out Ljava/io/PrintStream;
+    new Test4Derived
+    dup
+    invokespecial Test4Derived.<init>()V
+    invokeinterface Test4Interface.toString()Ljava/lang/String; 1
+    invokestatic Main.normalizeToString(Ljava/lang/String;)Ljava/lang/String;
+    invokevirtual java/io/PrintStream.println(Ljava/lang/String;)V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test5User.j b/test/162-method-resolution/jasmin/Test5User.j
new file mode 100644
index 0000000..036e366
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test5User.j
@@ -0,0 +1,40 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test5User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 1
+    new Test5Derived
+    dup
+    invokespecial Test5Derived.<init>()V
+    astore_0
+
+    ; Call an unresolved method bar() to force verification at runtime
+    ; to populate the dex cache entry for Test5Base.foo()V.
+    ; try { b.bar(); } catch (IncompatibleClassChangeError icce) { }
+    aload_0
+    dup ; Bogus operand to be swallowed by the pop in the non-exceptional path.
+  catch_begin:
+    invokevirtual Test5Derived.bar()V
+  catch_end:
+    pop ; Pops the exception or the bogus operand from above.
+  .catch java/lang/IncompatibleClassChangeError from catch_begin to catch_end using catch_end
+
+    aload_0
+    invokevirtual Test5Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test5User2.j b/test/162-method-resolution/jasmin/Test5User2.j
new file mode 100644
index 0000000..9484a69
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test5User2.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test5User2
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test5Derived
+    dup
+    invokespecial Test5Derived.<init>()V
+    invokeinterface Test5Derived.foo()V 1
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test6User.j b/test/162-method-resolution/jasmin/Test6User.j
new file mode 100644
index 0000000..55b43f1
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test6User.j
@@ -0,0 +1,29 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test6User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 3
+    .limit locals 0
+    getstatic java/lang/System/out Ljava/io/PrintStream;
+    new Test6Derived
+    dup
+    invokespecial Test6Derived.<init>()V
+    invokeinterface Test6Interface.toString()Ljava/lang/String; 1
+    invokestatic Main.normalizeToString(Ljava/lang/String;)Ljava/lang/String;
+    invokevirtual java/io/PrintStream.println(Ljava/lang/String;)V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test6User2.j b/test/162-method-resolution/jasmin/Test6User2.j
new file mode 100644
index 0000000..ab9ac0e
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test6User2.j
@@ -0,0 +1,29 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test6User2
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 3
+    .limit locals 0
+    getstatic java/lang/System/out Ljava/io/PrintStream;
+    new Test6Derived
+    dup
+    invokespecial Test6Derived.<init>()V
+    invokevirtual Test6Interface.toString()Ljava/lang/String;
+    invokestatic Main.normalizeToString(Ljava/lang/String;)Ljava/lang/String;
+    invokevirtual java/io/PrintStream.println(Ljava/lang/String;)V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test8Derived.j b/test/162-method-resolution/jasmin/Test8Derived.j
new file mode 100644
index 0000000..73f8b28
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test8Derived.j
@@ -0,0 +1,33 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test8Derived
+.super Test8Base
+
+.method public <init>()V
+   .limit stack 1
+   .limit locals 1
+   aload_0
+   invokespecial Test8Base.<init>()V
+   return
+.end method
+
+.method public foo()V
+    .limit stack 2
+    .limit locals 1
+    getstatic java/lang/System/out Ljava/io/PrintStream;
+    ldc "Test8Derived.foo()"
+    invokevirtual java/io/PrintStream.println(Ljava/lang/String;)V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test8User.j b/test/162-method-resolution/jasmin/Test8User.j
new file mode 100644
index 0000000..af60c6e
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test8User.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test8User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test8Derived
+    dup
+    invokespecial Test8Derived.<init>()V
+    invokevirtual Test8Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test8User2.j b/test/162-method-resolution/jasmin/Test8User2.j
new file mode 100644
index 0000000..5cdb95c
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test8User2.j
@@ -0,0 +1,23 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test8User2
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 0
+    .limit locals 0
+    invokestatic Test8Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test9Derived.j b/test/162-method-resolution/jasmin/Test9Derived.j
new file mode 100644
index 0000000..789f0f2
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test9Derived.j
@@ -0,0 +1,33 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test9Derived
+.super Test9Base
+
+.method public <init>()V
+   .limit stack 1
+   .limit locals 1
+   aload_0
+   invokespecial Test9Base.<init>()V
+   return
+.end method
+
+.method public static foo()V
+    .limit stack 2
+    .limit locals 1
+    getstatic java/lang/System/out Ljava/io/PrintStream;
+    ldc "Test9Derived.foo()"
+    invokevirtual java/io/PrintStream.println(Ljava/lang/String;)V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test9User.j b/test/162-method-resolution/jasmin/Test9User.j
new file mode 100644
index 0000000..81f9a7d
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test9User.j
@@ -0,0 +1,23 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test9User
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 0
+    .limit locals 0
+    invokestatic Test9Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/jasmin/Test9User2.j b/test/162-method-resolution/jasmin/Test9User2.j
new file mode 100644
index 0000000..ae53905
--- /dev/null
+++ b/test/162-method-resolution/jasmin/Test9User2.j
@@ -0,0 +1,26 @@
+; Copyright (C) 2017 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Test9User2
+.super java/lang/Object
+
+.method public static test()V
+    .limit stack 2
+    .limit locals 0
+    new Test9Derived
+    dup
+    invokespecial Test9Derived.<init>()V
+    invokevirtual Test9Derived.foo()V
+    return
+.end method
diff --git a/test/162-method-resolution/multidex.jpp b/test/162-method-resolution/multidex.jpp
new file mode 100644
index 0000000..22e3aee
--- /dev/null
+++ b/test/162-method-resolution/multidex.jpp
@@ -0,0 +1,117 @@
+Test1Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test1Base
+Test1Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test1Derived
+Test1User2:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test1User2
+
+Test2Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test2Base
+Test2Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test2Derived
+Test2Interface:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test2Interface
+Test2User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test2User
+Test2User2:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test2User2
+
+Test3Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test3Base
+Test3Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test3Derived
+Test3Interface:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test3Interface
+
+Test4Interface:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test4Interface
+Test4Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test4Derived
+Test4User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test4User
+
+Test5Interface:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test5Interface
+Test5Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test5Base
+Test5Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test5Derived
+Test5User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test5User
+Test5User2:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test5User2
+
+Test6Interface:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test6Interface
+Test6Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test6Derived
+Test6User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test6User
+Test6User2:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test6User2
+
+Test7Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test7Base
+Test7Interface:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test7Interface
+Test7Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test7Derived
+Test7User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test7User
+
+Test8Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test8Base
+Test8Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test8Derived
+Test8User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test8User
+Test8User2:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test8User2
+
+Test9Base:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test9Base
+Test9Derived:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test9Derived
+Test9User:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test9User
+Test9User2:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Test9User2
+
+Main:
+  @@com.android.jack.annotations.ForceInMainDex
+  class Main
diff --git a/test/162-method-resolution/src/Main.java b/test/162-method-resolution/src/Main.java
new file mode 100644
index 0000000..fa95aa7
--- /dev/null
+++ b/test/162-method-resolution/src/Main.java
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+    public static void main(String[] args) {
+        // Check if we're running dalvik or RI.
+        usingRI = false;
+        try {
+            Class.forName("dalvik.system.PathClassLoader");
+        } catch (ClassNotFoundException e) {
+            usingRI = true;
+        }
+
+        try {
+            test1();
+            test2();
+            test3();
+            test4();
+            test5();
+            test6();
+            test7();
+            test8();
+            test9();
+
+            // TODO: How to test that interface method resolution returns the unique
+            // maximally-specific non-abstract superinterface method if there is one?
+            // Maybe reflection? (This is not even implemented yet!)
+        } catch (Throwable t) {
+            t.printStackTrace(System.out);
+        }
+    }
+
+    /*
+     * Test1
+     * -----
+     * Tested functions:
+     *     public class Test1Base {
+     *         public void foo() { ... }
+     *     }
+     *     public class Test1Derived extends Test1Base {
+     *         private void foo() { ... }
+     *         ...
+     *     }
+     * Tested invokes:
+     *     invoke-direct  Test1Derived.foo()V   from Test1Derived in first dex file
+     *         expected: executes Test1Derived.foo()V
+     *     invoke-virtual Test1Derived.foo()V   from Test1User    in second dex file
+     *         expected: throws IllegalAccessError (JLS 15.12.4.3)
+     *     invoke-virtual Test1Derived.foo()V   from Test1User2   in first dex file
+     *         expected: throws IllegalAccessError (JLS 15.12.4.3)
+     *
+     * Previously, the behavior was inconsistent between dex files, throwing ICCE
+     * from one and invoking the method from another. This was because the lookups for
+     * direct and virtual methods were independent but results were stored in a single
+     * slot in the DexCache method array and then retrieved from there without checking
+     * the resolution kind. Thus, the first invoke-direct stored the private
+     * Test1Derived.foo() in the DexCache and the attempt to use invoke-virtual
+     * from the same dex file (by Test1User2) would throw ICCE. However, the same
+     * invoke-virtual from a different dex file (by Test1User) would ignore the
+     * direct method Test1Derived.foo() and find the Test1Base.foo() and call it.
+     *
+     * The method lookup has been changed and we now consistently find the private
+     * Derived.foo() and throw ICCE for both invoke-virtual calls.
+     *
+     * Files:
+     *   src/Test1Base.java          - defines public foo()V.
+     *   jasmin/Test1Derived.j       - defines private foo()V, calls it with invokespecial.
+     *   jasmin-multidex/Test1User.j - calls invokevirtual Test1Derived.foo().
+     *   jasmin/Test1User2.j         - calls invokevirtual Test1Derived.foo().
+     */
+    private static void test1() throws Exception {
+        invokeUserTest("Test1Derived");
+        invokeUserTest("Test1User");
+        invokeUserTest("Test1User2");
+    }
+
+    /*
+     * Test2
+     * -----
+     * Tested functions:
+     *     public class Test2Base {
+     *         public static void foo() { ... }
+     *     }
+     *     public interface Test2Interface {
+     *         default void foo() { ... }  // default: avoid subclassing Test2Derived.
+     *     }
+     *     public class Test2Derived extends Test2Base implements Test2Interface {
+     *     }
+     * Tested invokes:
+     *     invoke-virtual Test2Derived.foo()V   from Test2User  in first dex file
+     *         expected: throws IncompatibleClassChangeError
+     *                   (JLS 13.4.19, the inherited Base.foo() changed from non-static to static)
+     *     invoke-static  Test2Derived.foo()V   from Test2User2 in first dex file
+     *         expected: executes Test2Base.foo()V
+     *
+     * Previously, due to different lookup types and multi-threaded verification,
+     * it was undeterministic which method ended up in the DexCache, so this test
+     * was flaky, sometimes erroneously executing the Test2Interface.foo().
+     *
+     * The method lookup has been changed and we now consistently find the
+     * Test2Base.foo()V over the method from the interface, in line with the RI.
+     *
+     * Files:
+     *   src/Test2Base.java          - defines public static foo()V.
+     *   src/Test2Interface.java     - defines default foo()V.
+     *   jasmin/Test2Derived.j       - extends Test2Base, implements Test2Interface.
+     *   jasmin/Test2User.j          - calls invokevirtual Test2Derived.foo()
+     *   jasmin/Test2User2.j         - calls invokestatic Test2Derived.foo()
+     */
+    private static void test2() throws Exception {
+        invokeUserTest("Test2User");
+        invokeUserTest("Test2User2");
+    }
+
+    /*
+     * Test3
+     * -----
+     * Tested functions:
+     *     public class Test3Base {
+     *         public static void foo() { ... }
+     *     }
+     *     public interface Test3Interface {
+     *         default void foo() { ... }  // default: avoid subclassing Test3Derived.
+     *     }
+     *     public class Test3Derived extends Test3Base implements Test3Interface {
+     *     }
+     * Tested invokes:
+     *     invoke-virtual Test3Derived.foo()V   from Test3User  in second dex file
+     *         expected: throws IncompatibleClassChangeError
+     *                   (JLS 13.4.19, the inherited Base.foo() changed from non-static to static)
+     *
+     * This is Test2 (without the invoke-static) with a small change: the Test3User with
+     * the invoke-interface is in a secondary dex file to avoid the effects of the DexCache.
+     *
+     * Previously the invoke-virtual would resolve to the Test3Interface.foo()V but
+     * it now resolves to Test3Base.foo()V and throws ICCE in line with the RI.
+     *
+     * Files:
+     *   src/Test3Base.java          - defines public static foo()V.
+     *   src/Test3Interface.java     - defines default foo()V.
+     *   src/Test3Derived.java       - extends Test3Base, implements Test3Interface.
+     *   jasmin-multidex/Test3User.j - calls invokevirtual Test3Derived.foo()
+     */
+    private static void test3() throws Exception {
+        invokeUserTest("Test3User");
+    }
+
+    /*
+     * Test4
+     * -----
+     * Tested functions:
+     *     public interface Test4Interface {
+     *         // Not declaring toString().
+     *     }
+     * Tested invokes:
+     *     invoke-interface Test4Interface.toString()Ljava/lang/String; in first dex file
+     *         expected: executes java.lang.Object.toString()Ljava/lang/String
+     *                   (JLS 9.2 specifies implicitly declared methods from Object).
+     *
+     * The RI resolves the call to java.lang.Object.toString() and executes it.
+     * ART used to resolve it in a secondary resolution attempt only to distinguish
+     * between ICCE and NSME and then throw ICCE. We now allow the call to proceed.
+     *
+     * Files:
+     *   src/Test4Interface.java     - does not declare toString().
+     *   src/Test4Derived.java       - extends Test4Interface.
+     *   jasmin/Test4User.j          - calls invokeinterface Test4Interface.toString().
+     */
+    private static void test4() throws Exception {
+        invokeUserTest("Test4User");
+    }
+
+    /*
+     * Test5
+     * -----
+     * Tested functions:
+     *     public interface Test5Interface {
+     *         public void foo();
+     *     }
+     *     public abstract class Test5Base implements Test5Interface {
+     *         // Not declaring foo().
+     *     }
+     *     public class Test5Derived extends Test5Base {
+     *         public void foo() { ... }
+     *     }
+     * Tested invokes:
+     *     invoke-virtual   Test5Base.foo()V from Test5User  in first dex file
+     *         expected: executes Test5Derived.foo()V
+     *     invoke-interface Test5Base.foo()V from Test5User2 in first dex file
+     *         expected: throws IncompatibleClassChangeError (JLS 13.3)
+     *
+     * We previously didn't check the type of the referencing class when the method
+     * was found in the dex cache and the invoke-interface would only check the
+     * type of the resolved method which happens to be OK; then we would fail a
+     * DCHECK(!method->IsCopied()) in Class::FindVirtualMethodForInterface(). This has
+     * been fixed and we consistently check the type of the referencing class as well.
+     *
+     * Since normal virtual method dispatch in compiled or quickened code does not
+     * actually use the DexCache and we want to populate the Test5Base.foo()V entry
+     * anyway, we force verification at runtime by adding a call to an arbitrary
+     * unresolved method to Test5User.test(), catching and ignoring the ICCE. Files:
+     *   src/Test5Interface.java     - interface, declares foo()V.
+     *   src/Test5Base.java          - abstract class, implements Test5Interface.
+     *   src/Test5Derived.java       - extends Test5Base, implements foo()V.
+     *   jasmin/Test5User2.j         - calls invokeinterface Test5Base.foo()V.
+     *   jasmin/Test5User.j          - calls invokevirtual Test5Base.foo()V,
+     *                               - also calls undefined Test5Base.bar()V, suppresses ICCE.
+     */
+    private static void test5() throws Exception {
+        invokeUserTest("Test5User");
+        invokeUserTest("Test5User2");
+    }
+
+    /*
+     * Test6
+     * -----
+     * Tested functions:
+     *     public interface Test6Interface {
+     *         // Not declaring toString().
+     *     }
+     * Tested invokes:
+     *     invoke-interface Test6Interface.toString() from Test6User  in first dex file
+     *         expected: executes java.lang.Object.toString()Ljava/lang/String
+     *                   (JLS 9.2 specifies implicitly declared methods from Object).
+     *     invoke-virtual   Test6Interface.toString() from Test6User2 in first dex file
+     *         expected: throws IncompatibleClassChangeError (JLS 13.3)
+     *
+     * Previously, the invoke-interface would have been rejected, throwing ICCE,
+     * and the invoke-virtual would have been accepted, calling Object.toString().
+     *
+     * The method lookup has been changed and we now accept the invoke-interface,
+     * calling Object.toString(), and reject the invoke-virtual, throwing ICCE,
+     * in line with the RI. However, if the method is already in the DexCache for
+     * the invoke-virtual, we need to check the referenced class in order to throw
+     * the ICCE as the resolved method kind actually matches the invoke-virtual.
+     * This test ensures that we do.
+     *
+     * Files:
+     *   src/Test6Interface.java     - interface, does not declare toString().
+     *   src/Test6Derived.java       - implements Test6Interface.
+     *   jasmin/Test6User.j          - calls invokeinterface Test6Interface.toString().
+     *   jasmin/Test6User2.j         - calls invokevirtual Test6Interface.toString().
+     */
+    private static void test6() throws Exception {
+        invokeUserTest("Test6User");
+        invokeUserTest("Test6User2");
+    }
+
+    /*
+     * Test7
+     * -----
+     * Tested function:
+     *     public class Test7Base {
+     *         private void foo() { ... }
+     *     }
+     *     public interface Test7Interface {
+     *         default void foo() { ... }
+     *     }
+     *     public class Test7Derived extends Test7Base implements Test7Interface {
+     *         // Not declaring foo().
+     *     }
+     * Tested invokes:
+     *     invoke-virtual   Test7Derived.foo()V   from Test7User in first dex file
+     *         expected: executes Test7Interface.foo()V (inherited by Test7Derived, JLS 8.4.8)
+     *     invoke-interface Test7Interface.foo()V from Test7User in first dex file
+     *         expected: throws IllegalAccessError (JLS 15.12.4.4)
+     * on a Test7Derived object.
+     *
+     * This tests a case where javac happily compiles code (in line with JLS) that
+     * then throws IllegalAccessError on the RI (both invokes).
+     *
+     * For the invoke-virtual, the RI throws IAE as the private Test7Base.foo() is
+     * found before the inherited (see JLS 8.4.8) Test7Interface.foo(). This conflicts
+     * with the JLS 15.12.2.1 saying that members inherited (JLS 8.4.8) from superclasses
+     * and superinterfaces are included in the search. ART follows the JLS behavior.
+     *
+     * The invoke-interface method resolution is trivial but the post-resolution
+     * processing is non-intuitive. According to the JLS 15.12.4.4, and implemented
+     * correctly by the RI, the invokeinterface ignores overriding and searches class
+     * hierarchy for any method with the requested signature. Thus it finds the private
+     * Test7Base.foo()V and throws IllegalAccessError. Unfortunately, ART does not comply
+     * and simply calls Test7Interface.foo()V. Bug: 63624936.
+     *
+     * Files:
+     *   src/Test7User.java          - calls invoke-virtual Test7Derived.foo()V.
+     *   src/Test7Base.java          - defines private foo()V.
+     *   src/Test7Interface.java     - defines default foo()V.
+     *   src/Test7Derived.java       - extends Test7Base, implements Test7Interface.
+     */
+    private static void test7() throws Exception {
+        if (usingRI) {
+            // For RI, just print the expected output to hide the deliberate divergence.
+            System.out.println("Calling Test7User.test():\n" +
+                               "Test7Interface.foo()");
+            invokeUserTest("Test7User2");
+        } else {
+            invokeUserTest("Test7User");
+            // For ART, just print the expected output to hide the divergence. Bug: 63624936.
+            // The expected.txt lists the desired behavior, not the current behavior.
+            System.out.println("Calling Test7User2.test():\n" +
+                               "Caught java.lang.reflect.InvocationTargetException\n" +
+                               "  caused by java.lang.IllegalAccessError");
+        }
+    }
+
+    /*
+     * Test8
+     * -----
+     * Tested function:
+     *     public class Test8Base {
+     *         public static void foo() { ... }
+     *     }
+     *     public class Test8Derived extends Test8Base {
+     *         public void foo() { ... }
+     *     }
+     * Tested invokes:
+     *     invoke-virtual   Test8Derived.foo()V from Test8User in first dex file
+     *         expected: executes Test8Derived.foo()V
+     *     invoke-static    Test8Derived.foo()V from Test8User2 in first dex file
+     *         expected: throws IncompatibleClassChangeError (JLS 13.4.19)
+     *
+     * Another test for invoke type mismatch.
+     *
+     * Files:
+     *   src/Test8Base.java          - defines static foo()V.
+     *   jasmin/Test8Derived.j       - defines non-static foo()V.
+     *   jasmin/Test8User.j          - calls invokevirtual Test8Derived.foo()V.
+     *   jasmin/Test8User2.j         - calls invokestatic Test8Derived.foo()V.
+     */
+    private static void test8() throws Exception {
+        invokeUserTest("Test8User");
+        invokeUserTest("Test8User2");
+    }
+
+    /*
+     * Test9
+     * -----
+     * Tested function:
+     *     public class Test9Base {
+     *         public void foo() { ... }
+     *     }
+     *     public class Test9Derived extends Test9Base {
+     *         public static void foo() { ... }
+     *     }
+     * Tested invokes:
+     *     invoke-static    Test9Derived.foo()V from Test9User in first dex file
+     *         expected: executes Test9Derived.foo()V
+     *     invoke-virtual   Test9Derived.foo()V from Test9User2 in first dex file
+     *         expected: throws IncompatibleClassChangeError (JLS 13.4.19)
+     *
+     * Another test for invoke type mismatch.
+     *
+     * Files:
+     *   src/Test9Base.java          - defines non-static foo()V.
+     *   jasmin/Test9Derived.j       - defines static foo()V.
+     *   jasmin/Test9User.j          - calls invokestatic Test9Derived.foo()V.
+     *   jasmin/Test9User2.j         - calls invokevirtual Test9Derived.foo()V.
+     */
+    private static void test9() throws Exception {
+        invokeUserTest("Test9User");
+        invokeUserTest("Test9User2");
+    }
+
+    private static void invokeUserTest(String userName) throws Exception {
+        System.out.println("Calling " + userName + ".test():");
+        try {
+            Class<?> user = Class.forName(userName);
+            Method utest = user.getDeclaredMethod("test");
+            utest.invoke(null);
+        } catch (Throwable t) {
+            System.out.println("Caught " + t.getClass().getName());
+            for (Throwable c = t.getCause(); c != null; c = c.getCause()) {
+                System.out.println("  caused by " + c.getClass().getName());
+            }
+        }
+    }
+
+    // Replace the variable part of the output of the default toString() implementation
+    // so that we have a deterministic output.
+    static String normalizeToString(String s) {
+        int atPos = s.indexOf("@");
+        return s.substring(0, atPos + 1) + "...";
+    }
+
+    static boolean usingRI;
+}
diff --git a/test/162-method-resolution/src/Test1Base.java b/test/162-method-resolution/src/Test1Base.java
new file mode 100644
index 0000000..63a0ce3
--- /dev/null
+++ b/test/162-method-resolution/src/Test1Base.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test1Base {
+    public void foo() {
+        System.out.println("Test1Base.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test2Base.java b/test/162-method-resolution/src/Test2Base.java
new file mode 100644
index 0000000..7d028d4
--- /dev/null
+++ b/test/162-method-resolution/src/Test2Base.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test2Base {
+    public static void foo() {
+        System.out.println("Test2Base.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test2Interface.java b/test/162-method-resolution/src/Test2Interface.java
new file mode 100644
index 0000000..d5f1820
--- /dev/null
+++ b/test/162-method-resolution/src/Test2Interface.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Test2Interface {
+    default void foo() {
+        System.out.println("Test2Interface.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test3Base.java b/test/162-method-resolution/src/Test3Base.java
new file mode 100644
index 0000000..2c63ff3
--- /dev/null
+++ b/test/162-method-resolution/src/Test3Base.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test3Base {
+    public static void foo() {
+        System.out.println("Test3Base.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test3Interface.java b/test/162-method-resolution/src/Test3Interface.java
new file mode 100644
index 0000000..baaf671
--- /dev/null
+++ b/test/162-method-resolution/src/Test3Interface.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Test3Interface {
+    default void foo() {
+        System.out.println("Test3Interface.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test4Derived.java b/test/162-method-resolution/src/Test4Derived.java
new file mode 100644
index 0000000..e253f3b
--- /dev/null
+++ b/test/162-method-resolution/src/Test4Derived.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test4Derived implements Test4Interface {
+}
diff --git a/test/162-method-resolution/src/Test4Interface.java b/test/162-method-resolution/src/Test4Interface.java
new file mode 100644
index 0000000..49b516f
--- /dev/null
+++ b/test/162-method-resolution/src/Test4Interface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Test4Interface {
+    // removed: public String toString();
+}
diff --git a/test/162-method-resolution/src/Test5Base.java b/test/162-method-resolution/src/Test5Base.java
new file mode 100644
index 0000000..25914ee
--- /dev/null
+++ b/test/162-method-resolution/src/Test5Base.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public abstract class Test5Base implements Test5Interface {
+}
diff --git a/test/162-method-resolution/src/Test5Derived.java b/test/162-method-resolution/src/Test5Derived.java
new file mode 100644
index 0000000..5717ed5
--- /dev/null
+++ b/test/162-method-resolution/src/Test5Derived.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test5Derived extends Test5Base {
+    public void foo() {
+        System.out.println("Test5Derived.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test5Interface.java b/test/162-method-resolution/src/Test5Interface.java
new file mode 100644
index 0000000..82c20b2
--- /dev/null
+++ b/test/162-method-resolution/src/Test5Interface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Test5Interface {
+    public void foo();
+}
diff --git a/test/162-method-resolution/src/Test6Derived.java b/test/162-method-resolution/src/Test6Derived.java
new file mode 100644
index 0000000..9213347
--- /dev/null
+++ b/test/162-method-resolution/src/Test6Derived.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test6Derived implements Test6Interface {
+}
diff --git a/test/162-method-resolution/src/Test6Interface.java b/test/162-method-resolution/src/Test6Interface.java
new file mode 100644
index 0000000..86e2e4b
--- /dev/null
+++ b/test/162-method-resolution/src/Test6Interface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Test6Interface {
+    // removed: public String toString();
+}
diff --git a/test/162-method-resolution/src/Test7Base.java b/test/162-method-resolution/src/Test7Base.java
new file mode 100644
index 0000000..4cc3223
--- /dev/null
+++ b/test/162-method-resolution/src/Test7Base.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test7Base {
+    private void foo() {
+        System.out.println("Test7Base.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test7Derived.java b/test/162-method-resolution/src/Test7Derived.java
new file mode 100644
index 0000000..25f0b56
--- /dev/null
+++ b/test/162-method-resolution/src/Test7Derived.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test7Derived extends Test7Base implements Test7Interface {
+}
diff --git a/test/162-method-resolution/src/Test7Interface.java b/test/162-method-resolution/src/Test7Interface.java
new file mode 100644
index 0000000..598b2dd
--- /dev/null
+++ b/test/162-method-resolution/src/Test7Interface.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface Test7Interface {
+    default void foo() {
+        System.out.println("Test7Interface.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test7User.java b/test/162-method-resolution/src/Test7User.java
new file mode 100644
index 0000000..5cb5b0a
--- /dev/null
+++ b/test/162-method-resolution/src/Test7User.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test7User {
+    public static void test() {
+        new Test7Derived().foo();
+    }
+}
diff --git a/test/162-method-resolution/src/Test7User2.java b/test/162-method-resolution/src/Test7User2.java
new file mode 100644
index 0000000..794c5c2
--- /dev/null
+++ b/test/162-method-resolution/src/Test7User2.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test7User2 {
+    public static void test() {
+        Test7Interface iface = new Test7Derived();
+        iface.foo();
+    }
+}
diff --git a/test/162-method-resolution/src/Test8Base.java b/test/162-method-resolution/src/Test8Base.java
new file mode 100644
index 0000000..b4fd3bc
--- /dev/null
+++ b/test/162-method-resolution/src/Test8Base.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test8Base {
+    public static void foo() {
+        System.out.println("Test8Base.foo()");
+    }
+}
diff --git a/test/162-method-resolution/src/Test9Base.java b/test/162-method-resolution/src/Test9Base.java
new file mode 100644
index 0000000..85ec79b
--- /dev/null
+++ b/test/162-method-resolution/src/Test9Base.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Test9Base {
+    public void foo() {
+        System.out.println("Test9Base.foo()");
+    }
+}
diff --git a/test/1910-transform-with-default/expected.txt b/test/1910-transform-with-default/expected.txt
new file mode 100644
index 0000000..f43ef61
--- /dev/null
+++ b/test/1910-transform-with-default/expected.txt
@@ -0,0 +1,4 @@
+hello
+hello
+Goodbye
+Goodbye
diff --git a/test/1910-transform-with-default/info.txt b/test/1910-transform-with-default/info.txt
new file mode 100644
index 0000000..96ebddd
--- /dev/null
+++ b/test/1910-transform-with-default/info.txt
@@ -0,0 +1,4 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that we can redefine classes that have default methods inherited from
+interfaces.
diff --git a/test/1910-transform-with-default/run b/test/1910-transform-with-default/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1910-transform-with-default/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1910-transform-with-default/src/Main.java b/test/1910-transform-with-default/src/Main.java
new file mode 100644
index 0000000..fd8b3c7
--- /dev/null
+++ b/test/1910-transform-with-default/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1910.run();
+  }
+}
diff --git a/test/1910-transform-with-default/src/art/Redefinition.java b/test/1910-transform-with-default/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1910-transform-with-default/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/1910-transform-with-default/src/art/Test1910.java b/test/1910-transform-with-default/src/art/Test1910.java
new file mode 100644
index 0000000..775fe63
--- /dev/null
+++ b/test/1910-transform-with-default/src/art/Test1910.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1910 {
+  static interface TestInterface {
+    public void sayHi();
+    public default void sayHiTwice() {
+      sayHi();
+      sayHi();
+    }
+  }
+
+  static class Transform implements TestInterface {
+    public void sayHi() {
+      System.out.println("hello");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform implements TestInterface {
+   *   public void sayHi() {
+   *    System.out.println("Goodbye");
+   *   }
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAIwoABgAPCQAQABEIABIKABMAFAcAFgcAGQcAGgEABjxpbml0PgEAAygpVgEABENv" +
+    "ZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA1UZXN0MTkxMC5qYXZh" +
+    "DAAIAAkHABwMAB0AHgEAB0dvb2RieWUHAB8MACAAIQcAIgEAFmFydC9UZXN0MTkxMCRUcmFuc2Zv" +
+    "cm0BAAlUcmFuc2Zvcm0BAAxJbm5lckNsYXNzZXMBABBqYXZhL2xhbmcvT2JqZWN0AQAaYXJ0L1Rl" +
+    "c3QxOTEwJFRlc3RJbnRlcmZhY2UBAA1UZXN0SW50ZXJmYWNlAQAQamF2YS9sYW5nL1N5c3RlbQEA" +
+    "A291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmlu" +
+    "dGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWAQAMYXJ0L1Rlc3QxOTEwACAABQAGAAEABwAAAAIA" +
+    "AAAIAAkAAQAKAAAAHQABAAEAAAAFKrcAAbEAAAABAAsAAAAGAAEAAAAdAAEADAAJAAEACgAAACUA" +
+    "AgABAAAACbIAAhIDtgAEsQAAAAEACwAAAAoAAgAAAB8ACAAgAAIADQAAAAIADgAYAAAAEgACAAUA" +
+    "FQAXAAgABwAVABsGCA==");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQCimuj5gqsyBEhWaMcfKWwG9eiBycoK3JfcAwAAcAAAAHhWNBIAAAAAAAAAABgDAAAV" +
+    "AAAAcAAAAAoAAADEAAAAAgAAAOwAAAABAAAABAEAAAQAAAAMAQAAAQAAACwBAACQAgAATAEAAK4B" +
+    "AAC2AQAAvwEAAN0BAAD3AQAABwIAACsCAABLAgAAYgIAAHYCAACKAgAAngIAAK0CAAC4AgAAuwIA" +
+    "AL8CAADMAgAA0gIAANcCAADgAgAA5wIAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+    "CgAAAA0AAAANAAAACQAAAAAAAAAOAAAACQAAAKgBAAAIAAUAEQAAAAEAAAAAAAAAAQAAABMAAAAF" +
+    "AAEAEgAAAAYAAAAAAAAAAQAAAAAAAAAGAAAAoAEAAAsAAACQAQAACAMAAAAAAAACAAAA+QIAAP8C" +
+    "AAABAAEAAQAAAO4CAAAEAAAAcBADAAAADgADAAEAAgAAAPMCAAAIAAAAYgAAABoBAQBuIAIAEAAO" +
+    "AEwBAAAAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAHAAY8aW5pdD4AB0dvb2RieWUAHExhcnQvVGVz" +
+    "dDE5MTAkVGVzdEludGVyZmFjZTsAGExhcnQvVGVzdDE5MTAkVHJhbnNmb3JtOwAOTGFydC9UZXN0" +
+    "MTkxMDsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3Rh" +
+    "dGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVj" +
+    "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AA1UZXN0MTkxMC5qYXZh" +
+    "AAlUcmFuc2Zvcm0AAVYAAlZMAAthY2Nlc3NGbGFncwAEbmFtZQADb3V0AAdwcmludGxuAAVzYXlI" +
+    "aQAFdmFsdWUAHQAHDgAfAAcOeAACAwEUGAICBAIPBAgQFwwAAAEBAICABNgCAQHwAgAAEAAAAAAA" +
+    "AAABAAAAAAAAAAEAAAAVAAAAcAAAAAIAAAAKAAAAxAAAAAMAAAACAAAA7AAAAAQAAAABAAAABAEA" +
+    "AAUAAAAEAAAADAEAAAYAAAABAAAALAEAAAMQAAABAAAATAEAAAEgAAACAAAAWAEAAAYgAAABAAAA" +
+    "kAEAAAEQAAACAAAAoAEAAAIgAAAVAAAArgEAAAMgAAACAAAA7gIAAAQgAAACAAAA+QIAAAAgAAAB" +
+    "AAAACAMAAAAQAAABAAAAGAMAAA==");
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(TestInterface t) {
+    t.sayHiTwice();
+    Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    t.sayHiTwice();
+  }
+}
diff --git a/test/497-inlining-and-class-loader/clear_dex_cache.cc b/test/497-inlining-and-class-loader/clear_dex_cache.cc
index 9ba05bc..c113042 100644
--- a/test/497-inlining-and-class-loader/clear_dex_cache.cc
+++ b/test/497-inlining-and-class-loader/clear_dex_cache.cc
@@ -34,22 +34,32 @@
   ScopedObjectAccess soa(Thread::Current());
   mirror::DexCache* dex_cache = soa.Decode<mirror::Class>(cls)->GetDexCache();
   size_t num_methods = dex_cache->NumResolvedMethods();
-  ArtMethod** methods = dex_cache->GetResolvedMethods();
+  mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods();
   CHECK_EQ(num_methods != 0u, methods != nullptr);
   if (num_methods == 0u) {
     return nullptr;
   }
   jarray array;
   if (sizeof(void*) == 4) {
-    array = env->NewIntArray(num_methods);
+    array = env->NewIntArray(2u * num_methods);
   } else {
-    array = env->NewLongArray(num_methods);
+    array = env->NewLongArray(2u * num_methods);
   }
   CHECK(array != nullptr);
-  mirror::PointerArray* pointer_array = soa.Decode<mirror::PointerArray>(array).Ptr();
+  ObjPtr<mirror::Array> decoded_array = soa.Decode<mirror::Array>(array);
   for (size_t i = 0; i != num_methods; ++i) {
-    ArtMethod* method = mirror::DexCache::GetElementPtrSize(methods, i, kRuntimePointerSize);
-    pointer_array->SetElementPtrSize(i, method, kRuntimePointerSize);
+    auto pair = mirror::DexCache::GetNativePairPtrSize(methods, i, kRuntimePointerSize);
+    uint32_t index = pair.index;
+    ArtMethod* method = pair.object;
+    if (sizeof(void*) == 4) {
+      ObjPtr<mirror::IntArray> int_array = down_cast<mirror::IntArray*>(decoded_array.Ptr());
+      int_array->Set(2u * i, index);
+      int_array->Set(2u * i + 1u, static_cast<jint>(reinterpret_cast<uintptr_t>(method)));
+    } else {
+      ObjPtr<mirror::LongArray> long_array = down_cast<mirror::LongArray*>(decoded_array.Ptr());
+      long_array->Set(2u * i, index);
+      long_array->Set(2u * i + 1u, reinterpret_cast64<jlong>(method));
+    }
   }
   return array;
 }
@@ -59,14 +69,26 @@
   ScopedObjectAccess soa(Thread::Current());
   mirror::DexCache* dex_cache = soa.Decode<mirror::Class>(cls)->GetDexCache();
   size_t num_methods = dex_cache->NumResolvedMethods();
-  ArtMethod** methods = soa.Decode<mirror::Class>(cls)->GetDexCache()->GetResolvedMethods();
+  mirror::MethodDexCacheType* methods =
+      soa.Decode<mirror::Class>(cls)->GetDexCache()->GetResolvedMethods();
   CHECK_EQ(num_methods != 0u, methods != nullptr);
-  ObjPtr<mirror::PointerArray> old = soa.Decode<mirror::PointerArray>(old_cache);
+  ObjPtr<mirror::Array> old = soa.Decode<mirror::Array>(old_cache);
   CHECK_EQ(methods != nullptr, old != nullptr);
   CHECK_EQ(num_methods, static_cast<size_t>(old->GetLength()));
   for (size_t i = 0; i != num_methods; ++i) {
-    ArtMethod* method = old->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
-    mirror::DexCache::SetElementPtrSize(methods, i, method, kRuntimePointerSize);
+    uint32_t index;
+    ArtMethod* method;
+    if (sizeof(void*) == 4) {
+      ObjPtr<mirror::IntArray> int_array = down_cast<mirror::IntArray*>(old.Ptr());
+      index = static_cast<uint32_t>(int_array->Get(2u * i));
+      method = reinterpret_cast<ArtMethod*>(static_cast<uint32_t>(int_array->Get(2u * i + 1u)));
+    } else {
+      ObjPtr<mirror::LongArray> long_array = down_cast<mirror::LongArray*>(old.Ptr());
+      index = dchecked_integral_cast<uint32_t>(long_array->Get(2u * i));
+      method = reinterpret_cast64<ArtMethod*>(long_array->Get(2u * i + 1u));
+    }
+    mirror::MethodDexCachePair pair(method, index);
+    mirror::DexCache::SetNativePairPtrSize(methods, i, pair, kRuntimePointerSize);
   }
 }
 
diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java
index 951889a..3177ec0 100644
--- a/test/551-checker-shifter-operand/src/Main.java
+++ b/test/551-checker-shifter-operand/src/Main.java
@@ -327,6 +327,7 @@
    */
 
   /// CHECK-START-ARM: void Main.$opt$validateExtendByteInt1(int, byte) instruction_simplifier_arm (after)
+  /// CHECK:                            DataProcWithShifterOp
   /// CHECK-NOT:                        DataProcWithShifterOp
 
   /// CHECK-START-ARM64: void Main.$opt$validateExtendByteInt1(int, byte) instruction_simplifier_arm64 (after)
@@ -399,6 +400,8 @@
   }
 
   /// CHECK-START-ARM: void Main.$opt$validateExtendCharInt1(int, char) instruction_simplifier_arm (after)
+  /// CHECK:                            DataProcWithShifterOp
+  /// CHECK:                            DataProcWithShifterOp
   /// CHECK-NOT:                        DataProcWithShifterOp
 
   /// CHECK-START-ARM64: void Main.$opt$validateExtendCharInt1(int, char) instruction_simplifier_arm64 (after)
@@ -469,6 +472,8 @@
   }
 
   /// CHECK-START-ARM: void Main.$opt$validateExtendShortInt1(int, short) instruction_simplifier_arm (after)
+  /// CHECK:                            DataProcWithShifterOp
+  /// CHECK:                            DataProcWithShifterOp
   /// CHECK-NOT:                        DataProcWithShifterOp
 
   /// CHECK-START-ARM64: void Main.$opt$validateExtendShortInt1(int, short) instruction_simplifier_arm64 (after)
diff --git a/test/569-checker-pattern-replacement/run b/test/569-checker-pattern-replacement/run
index f7e9df2..8ab6527 100755
--- a/test/569-checker-pattern-replacement/run
+++ b/test/569-checker-pattern-replacement/run
@@ -15,4 +15,4 @@
 # limitations under the License.
 
 exec ${RUN} "$@" \
-    -Xcompiler-option --no-inline-from=core-oj,569-checker-pattern-replacement.jar:classes2.dex
+    -Xcompiler-option --no-inline-from="core-oj,569-checker-pattern-replacement.jar!classes2.dex"
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 45ead6b..faec3c3 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -18,9 +18,9 @@
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jit/profiling_info.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "oat_quick_method_header.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedUtfChars.h"
 #include "stack.h"
 #include "stack_map.h"
 
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index ae3dad8..06e3fb4 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -23,10 +23,10 @@
 #include "method_reference.h"
 #include "mirror/class-inl.h"
 #include "mirror/executable.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "oat_file_assistant.h"
 #include "oat_file_manager.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedUtfChars.h"
 #include "thread.h"
 
 namespace art {
diff --git a/test/647-jni-get-field-id/get_field_id.cc b/test/647-jni-get-field-id/get_field_id.cc
index 2056cfb..139e4b6 100644
--- a/test/647-jni-get-field-id/get_field_id.cc
+++ b/test/647-jni-get-field-id/get_field_id.cc
@@ -16,7 +16,7 @@
 
 #include "jni.h"
 
-#include "ScopedUtfChars.h"
+#include "nativehelper/ScopedUtfChars.h"
 
 namespace art {
 
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index fdbfbe2..f36d1a3 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -53,3 +53,6 @@
 hello - Unmodifiable
 Transformation error : java.lang.Exception(Failed to redefine class <[LTransform;> due to JVMTI_ERROR_UNMODIFIABLE_CLASS)
 hello - Unmodifiable
+hello - Undefault
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform5;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+hello - Undefault
diff --git a/test/921-hello-failure/src/Iface4.java b/test/921-hello-failure/src/Iface4.java
new file mode 100644
index 0000000..66804c2
--- /dev/null
+++ b/test/921-hello-failure/src/Iface4.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+interface Iface4 {
+  default void sayHiTwice(String s) {
+    sayHi(s);
+    sayHi(s);
+  }
+  void sayHi(String s);
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index cfdcdc2..fb481bd 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -35,6 +35,7 @@
     MissingField.doTest(new Transform4("there"));
     FieldChange.doTest(new Transform4("there again"));
     Unmodifiable.doTest(new Transform[] { new Transform(), });
+    Undefault.doTest(new Transform5());
   }
 
   // TODO Replace this shim with a better re-write of this test.
diff --git a/test/921-hello-failure/src/Transform5.java b/test/921-hello-failure/src/Transform5.java
new file mode 100644
index 0000000..cf7b20a
--- /dev/null
+++ b/test/921-hello-failure/src/Transform5.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform5 implements Iface4 {
+  public void sayHi(String name) {
+    System.out.println("hello - " + name);
+  }
+}
diff --git a/test/921-hello-failure/src/Undefault.java b/test/921-hello-failure/src/Undefault.java
new file mode 100644
index 0000000..8303a84
--- /dev/null
+++ b/test/921-hello-failure/src/Undefault.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class Undefault {
+  // The following is a base64 encoding of the following class.
+  // class Transform5 implements Iface4 {
+  //   public void sayHiTwice(String s) {
+  //     throw new Error("Should not be called");
+  //   }
+  //   public void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAGgoABwASBwATCAAUCgACABUIABYHABcHABgHABkBAAY8aW5pdD4BAAMoKVYBAARD" +
+    "b2RlAQAPTGluZU51bWJlclRhYmxlAQAKc2F5SGlUd2ljZQEAFShMamF2YS9sYW5nL1N0cmluZzsp" +
+    "VgEABXNheUhpAQAKU291cmNlRmlsZQEAD1RyYW5zZm9ybTUuamF2YQwACQAKAQAPamF2YS9sYW5n" +
+    "L0Vycm9yAQAUU2hvdWxkIG5vdCBiZSBjYWxsZWQMAAkADgEAFVNob3VsZCBub3QgYmUgY2FsbGVk" +
+    "IQEAClRyYW5zZm9ybTUBABBqYXZhL2xhbmcvT2JqZWN0AQAGSWZhY2U0ACAABgAHAAEACAAAAAMA" +
+    "AAAJAAoAAQALAAAAHQABAAEAAAAFKrcAAbEAAAABAAwAAAAGAAEAAAABAAEADQAOAAEACwAAACIA" +
+    "AwACAAAACrsAAlkSA7cABL8AAAABAAwAAAAGAAEAAAADAAEADwAOAAEACwAAACIAAwACAAAACrsA" +
+    "AlkSBbcABL8AAAABAAwAAAAGAAEAAAAGAAEAEAAAAAIAEQ==");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQD5XbJiwMAcY0cucJ5gcVhFu7tMG0dZX8PsAgAAcAAAAHhWNBIAAAAAAAAAAFgCAAAN" +
+    "AAAAcAAAAAYAAACkAAAAAgAAALwAAAAAAAAAAAAAAAUAAADUAAAAAQAAAPwAAADQAQAAHAEAAIIB" +
+    "AACKAQAAlAEAAKIBAAC1AQAAyQEAAN0BAADzAQAACgIAABsCAAAeAgAAIgIAACkCAAABAAAAAgAA" +
+    "AAMAAAAEAAAABQAAAAkAAAAJAAAABQAAAAAAAAAKAAAABQAAAHwBAAABAAAAAAAAAAEAAQALAAAA" +
+    "AQABAAwAAAACAAEAAAAAAAMAAAAAAAAAAQAAAAAAAAADAAAAdAEAAAgAAAAAAAAARgIAAAAAAAAB" +
+    "AAEAAQAAADUCAAAEAAAAcBAEAAAADgAEAAIAAgAAADoCAAAIAAAAIgACABoBBwBwIAMAEAAnAAQA" +
+    "AgACAAAAQAIAAAgAAAAiAAIAGgEGAHAgAwAQACcAAQAAAAAAAAABAAAABAAGPGluaXQ+AAhMSWZh" +
+    "Y2U0OwAMTFRyYW5zZm9ybTU7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7" +
+    "ABJMamF2YS9sYW5nL1N0cmluZzsAFFNob3VsZCBub3QgYmUgY2FsbGVkABVTaG91bGQgbm90IGJl" +
+    "IGNhbGxlZCEAD1RyYW5zZm9ybTUuamF2YQABVgACVkwABXNheUhpAApzYXlIaVR3aWNlAAEABw4A" +
+    "BgEABw4AAwEABw4AAAABAgCAgAScAgEBtAIBAdQCDAAAAAAAAAABAAAAAAAAAAEAAAANAAAAcAAA" +
+    "AAIAAAAGAAAApAAAAAMAAAACAAAAvAAAAAUAAAAFAAAA1AAAAAYAAAABAAAA/AAAAAEgAAADAAAA" +
+    "HAEAAAEQAAACAAAAdAEAAAIgAAANAAAAggEAAAMgAAADAAAANQIAAAAgAAABAAAARgIAAAAQAAAB" +
+    "AAAAWAIAAA==");
+
+  public static void doTest(Transform5 t) {
+    t.sayHi("Undefault");
+    try {
+      Main.doCommonClassRedefinition(Transform5.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("Undefault");
+  }
+}
diff --git a/test/924-threads/src/art/Test924.java b/test/924-threads/src/art/Test924.java
index 84b7c62..b73eb30 100644
--- a/test/924-threads/src/art/Test924.java
+++ b/test/924-threads/src/art/Test924.java
@@ -164,8 +164,10 @@
       do {
         Thread.yield();
       } while (t.getState() != Thread.State.BLOCKED);
-      Thread.sleep(10);
-      printThreadState(t);
+      // Since internal thread suspension (For GC or other cases) can happen at any time and changes
+      // the thread state we just have it print the majority thread state across 11 calls over 55
+      // milliseconds.
+      printMajorityThreadState(t, 11, 5);
     }
 
     // Sleeping.
@@ -357,10 +359,32 @@
     STATE_KEYS.addAll(STATE_NAMES.keySet());
     Collections.sort(STATE_KEYS);
   }
-  
-  private static void printThreadState(Thread t) {
-    int state = getThreadState(t);
 
+  // Call getThreadState 'votes' times waiting 'wait' millis between calls and print the most common
+  // result.
+  private static void printMajorityThreadState(Thread t, int votes, int wait) throws Exception {
+    Map<Integer, Integer> states = new HashMap<>();
+    for (int i = 0; i < votes; i++) {
+      int cur_state = getThreadState(t);
+      states.put(cur_state, states.getOrDefault(cur_state, 0) + 1);
+      Thread.sleep(wait);  // Wait a little bit.
+    }
+    int best_state = -1;
+    int highest_count = 0;
+    for (Map.Entry<Integer, Integer> e : states.entrySet()) {
+      if (e.getValue() > highest_count) {
+        highest_count = e.getValue();
+        best_state = e.getKey();
+      }
+    }
+    printThreadState(best_state);
+  }
+
+  private static void printThreadState(Thread t) {
+    printThreadState(getThreadState(t));
+  }
+
+  private static void printThreadState(int state) {
     StringBuilder sb = new StringBuilder();
 
     for (Integer i : STATE_KEYS) {
diff --git a/test/VerifierDeps/Main.smali b/test/VerifierDeps/Main.smali
index 74c0d03..824f0dc 100644
--- a/test/VerifierDeps/Main.smali
+++ b/test/VerifierDeps/Main.smali
@@ -405,12 +405,6 @@
   return-void
 .end method
 
-.method public static InvokeVirtual_ActuallyDirect(LMyThread;)V
-  .registers 1
-  invoke-virtual {p0}, LMyThread;->activeCount()I
-  return-void
-.end method
-
 .method public static InvokeInterface_Resolved_DeclaredInReferenced(LMyThread;)V
   .registers 1
   invoke-interface {p0}, Ljava/lang/Runnable;->run()V
@@ -420,7 +414,9 @@
 .method public static InvokeInterface_Resolved_DeclaredInSuperclass(LMyThread;)V
   .registers 1
   # Method join() is declared in the superclass of MyThread. As such, it should
-  # be called with invoke-virtual and will not be resolved here.
+  # be called with invoke-virtual. However, the lookup type does not depend
+  # on the invoke type, so it shall be resolved here anyway.
+  # TODO: Maybe we should not record dependency if the invoke type does not match the lookup type.
   invoke-interface {p0}, LMyThread;->join()V
   return-void
 .end method
@@ -428,6 +424,8 @@
 .method public static InvokeInterface_Resolved_DeclaredInSuperinterface1(LMyThreadSet;)V
   .registers 1
   # Verification will fail because the referring class is not an interface.
+  # However, the lookup type does not depend on the invoke type, so it shall be resolved here anyway.
+  # TODO: Maybe we should not record dependency if the invoke type does not match the lookup type.
   invoke-interface {p0}, LMyThreadSet;->run()V
   return-void
 .end method
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index d8e5b57..7c0ed69 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -25,10 +25,10 @@
 #include "jit/jit_code_cache.h"
 #include "jit/profiling_info.h"
 #include "mirror/class-inl.h"
+#include "nativehelper/ScopedUtfChars.h"
 #include "oat_quick_method_header.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
-#include "ScopedUtfChars.h"
 #include "thread-current-inl.h"
 
 namespace art {
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index a0c4ea8..6596ff4 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -233,8 +233,8 @@
 
 HOST_OUT_EXECUTABLES = os.path.join(ANDROID_BUILD_TOP,
                                     _get_build_var("HOST_OUT_EXECUTABLES"))
-os.environ['JACK'] = HOST_OUT_EXECUTABLES + '/jack'
-os.environ['DX'] = HOST_OUT_EXECUTABLES + '/dx'
-os.environ['SMALI'] = HOST_OUT_EXECUTABLES + '/smali'
-os.environ['JASMIN'] = HOST_OUT_EXECUTABLES + '/jasmin'
-os.environ['DXMERGER'] = HOST_OUT_EXECUTABLES + '/dexmerger'
+
+# Set up default values for $JACK, $DX, $SMALI, etc to the $HOST_OUT_EXECUTABLES/$name path.
+for tool in ['jack', 'dx', 'smali', 'jasmin', 'dxmerger']:
+  binary = tool if tool != 'dxmerger' else 'dexmerger'
+  os.environ.setdefault(tool.upper(), HOST_OUT_EXECUTABLES + '/' + binary)
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index baf7600..e8b6f1c 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -303,7 +303,8 @@
         }
     },
     'art-gtest-valgrind32': {
-        'make' : 'valgrind-test-art-host32',
+      # Disabled: x86 valgrind does not understand SSE4.x
+      # 'make' : 'valgrind-test-art-host32',
         'env': {
             'ART_USE_READ_BARRIER' : 'false'
         }
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/ObjectHandler.java
index cc55b7a..8262910 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/ObjectHandler.java
@@ -110,7 +110,7 @@
   private static void printClassInstanceFields(Doc doc, Query query, AhatClassInstance inst) {
     doc.section("Fields");
     AhatInstance base = inst.getBaseline();
-    printFields(doc, query, INSTANCE_FIELDS_ID, !base.isPlaceHolder(),
+    printFields(doc, query, INSTANCE_FIELDS_ID, inst != base && !base.isPlaceHolder(),
         inst.asClassInstance().getInstanceFields(),
         base.isPlaceHolder() ? null : base.asClassInstance().getInstanceFields());
   }
@@ -211,7 +211,7 @@
 
     doc.section("Static Fields");
     AhatInstance base = clsobj.getBaseline();
-    printFields(doc, query, STATIC_FIELDS_ID, !base.isPlaceHolder(),
+    printFields(doc, query, STATIC_FIELDS_ID, clsobj != base && !base.isPlaceHolder(),
         clsobj.getStaticFieldValues(),
         base.isPlaceHolder() ? null : base.asClassObj().getStaticFieldValues());
   }
diff --git a/tools/ahat/src/ObjectsHandler.java b/tools/ahat/src/ObjectsHandler.java
index 86d48f1..fd226c2 100644
--- a/tools/ahat/src/ObjectsHandler.java
+++ b/tools/ahat/src/ObjectsHandler.java
@@ -43,13 +43,7 @@
     Site site = mSnapshot.getSite(id, depth);
 
     List<AhatInstance> insts = new ArrayList<AhatInstance>();
-    for (AhatInstance inst : site.getObjects()) {
-      if ((heapName == null || inst.getHeap().getName().equals(heapName))
-          && (className == null || inst.getClassName().equals(className))) {
-        insts.add(inst);
-      }
-    }
-
+    site.getObjects(heapName, className, insts);
     Collections.sort(insts, Sort.defaultInstanceCompare(mSnapshot));
 
     doc.title("Objects");
diff --git a/tools/ahat/src/Summarizer.java b/tools/ahat/src/Summarizer.java
index 016eab4..3e9da31 100644
--- a/tools/ahat/src/Summarizer.java
+++ b/tools/ahat/src/Summarizer.java
@@ -60,14 +60,7 @@
       formatted.append("root ");
     }
 
-    // Annotate classes as classes.
-    DocString linkText = new DocString();
-    if (inst.isClassObj()) {
-      linkText.append("class ");
-    }
-
-    linkText.append(inst.toString());
-
+    DocString linkText = DocString.text(inst.toString());
     if (inst.isPlaceHolder()) {
       // Don't make links to placeholder objects.
       formatted.append(linkText);
diff --git a/tools/ahat/src/dominators/DominatorsComputation.java b/tools/ahat/src/dominators/DominatorsComputation.java
new file mode 100644
index 0000000..9a2a272
--- /dev/null
+++ b/tools/ahat/src/dominators/DominatorsComputation.java
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.dominators;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.List;
+import java.util.Queue;
+
+/**
+ * Generic DominatorsComputation.
+ *
+ * To use the dominators computation, have your graph nodes implement the
+ * DominatorsComputation.Node interface, then call
+ * DominatorsComputation.computeDominators on the single root node.
+ */
+public class DominatorsComputation {
+  /**
+   * Interface for a directed graph to perform the dominators computation on.
+   */
+  public interface Node {
+    /**
+     * Associate the given dominator state with this node.
+     */
+    void setDominatorsComputationState(Object state);
+
+    /**
+     * Get the most recent dominator state associated with this node using
+     * setDominatorsComputationState. If setDominatorsComputationState has not
+     * yet been called, this should return null.
+     */
+    Object getDominatorsComputationState();
+
+    /**
+     * Return a collection of nodes referenced from this node, for the
+     * purposes of computing dominators.
+     */
+    Iterable<? extends Node> getReferencesForDominators();
+
+    /**
+     * Update this node's dominator based on the results of the dominators
+     * computation.
+     */
+    void setDominator(Node dominator);
+  }
+
+  // NodeS is information associated with a particular node for the
+  // purposes of computing dominators.
+  // By convention we use the suffix 'S' to name instances of NodeS.
+  private static class NodeS {
+    // The node that this NodeS is associated with.
+    public Node node;
+
+    // Unique identifier for this node, in increasing order based on the order
+    // this node was visited in a depth first search from the root. In
+    // particular, given nodes A and B, if A.id > B.id, then A cannot be a
+    // dominator of B.
+    public long id;
+
+    // Upper bound on the id of this node's dominator.
+    // The true immediate dominator of this node must have id <= domid.
+    // This upper bound is slowly tightened as part of the dominators
+    // computation.
+    public long domid;
+
+    // The current candidate dominator for this node.
+    // Invariant: (domid < domS.id) implies this node is on the queue of
+    // nodes to be revisited.
+    public NodeS domS;
+
+    // A node with a reference to this node that is one step closer to the
+    // root than this node.
+    // Invariant: srcS.id < this.id
+    public NodeS srcS;
+
+    // The set of nodes X reachable by 'this' on a path of nodes from the
+    // root with increasing ids (possibly excluding X) that this node does not
+    // dominate (this.id > X.domid).
+    // We can use a List instead of a Set for this because we guarantee that
+    // we don't add the same node more than once to the list (see below).
+    public List<NodeS> undom = new ArrayList<NodeS>();
+
+    // The largest id of the node X for which we did X.undom.add(this).
+    // This is an optimization to avoid adding duplicate node entries to the
+    // undom set.
+    //
+    // The first time we see this node, we reach it through a path of nodes
+    // with IDs 0,...,a,this. These form our src chain to the root.
+    //
+    // The next time we see this node, we reach it through a path of nodes
+    // with IDS 0,...,b,c,...,d,this. Where all 0,...,b < a and all c,...,d > a.
+    //
+    // The next time we see this node, we reach it through a path of nodes
+    // with IDS 0,...,e,f,...,g,this. With all 0,...,e < d and all f,...,g > d.
+    // And so on.
+    //
+    // The first time we see this node, we set undomid to a.id. Nodes 0,...,a
+    // will be added as undom in the 'revisit' phase of the node.
+    //
+    // The next times we see this node, we mark a+,...,d as undom and
+    // change undomid to d. And so on.
+    public long undomid;
+  }
+
+  private static class Link {
+    public NodeS srcS;
+    public Node dst;
+
+    public Link(NodeS srcS, Node dst) {
+      this.srcS = srcS;
+      this.dst = dst;
+    }
+  }
+
+  /**
+   * Compute the dominator tree rooted at the given node.
+   * There must not be any incoming references to the root node.
+   */
+  public static void computeDominators(Node root) {
+    long id = 0;
+
+    // List of all nodes seen. We keep track of this here to update all the
+    // dominators once we are done.
+    List<NodeS> nodes = new ArrayList<NodeS>();
+
+    // The set of nodes N such that N.domid < N.domS.id. These nodes need
+    // to be revisited because their dominator is clearly wrong.
+    // Use a Queue instead of a Set because performance will be better. We
+    // avoid adding nodes already on the queue by checking whether it was
+    // already true that N.domid < N.domS.id, in which case the node is
+    // already on the queue.
+    Queue<NodeS> revisit = new ArrayDeque<NodeS>();
+
+    // Set up the root node specially.
+    NodeS rootS = new NodeS();
+    rootS.node = root;
+    rootS.id = id++;
+    root.setDominatorsComputationState(rootS);
+
+    // 1. Do a depth first search of the nodes, label them with ids and come
+    // up with initial candidate dominators for them.
+    Deque<Link> dfs = new ArrayDeque<Link>();
+    for (Node child : root.getReferencesForDominators()) {
+      dfs.push(new Link(rootS, child));
+    }
+
+    while (!dfs.isEmpty()) {
+      Link link = dfs.pop();
+      NodeS dstS = (NodeS)link.dst.getDominatorsComputationState();
+      if (dstS == null) {
+        // This is the first time we have seen the node. The candidate
+        // dominator is link src.
+        dstS = new NodeS();
+        dstS.node = link.dst;
+        dstS.id = id++;
+        dstS.domid = link.srcS.id;
+        dstS.domS = link.srcS;
+        dstS.srcS = link.srcS;
+        dstS.undomid = dstS.domid;
+        nodes.add(dstS);
+        link.dst.setDominatorsComputationState(dstS);
+
+        for (Node child : link.dst.getReferencesForDominators()) {
+          dfs.push(new Link(dstS, child));
+        }
+      } else {
+        // We have seen the node already. Update the state based on the new
+        // potential dominator.
+        NodeS srcS = link.srcS;
+        boolean revisiting = dstS.domid < dstS.domS.id;
+
+        while (srcS.id > dstS.domid) {
+          if (srcS.id > dstS.undomid) {
+            srcS.undom.add(dstS);
+          }
+          srcS = srcS.srcS;
+        }
+        dstS.undomid = link.srcS.id;
+
+        if (srcS.id < dstS.domid) {
+          // In this case, dstS.domid must be wrong, because we just found a
+          // path to dstS that does not go through dstS.domid:
+          // All nodes from root to srcS have id < domid, and all nodes from
+          // srcS to dstS had id > domid, so dstS.domid cannot be on this path
+          // from root to dstS.
+          dstS.domid = srcS.id;
+          if (!revisiting) {
+            revisit.add(dstS);
+          }
+        }
+      }
+    }
+
+    // 2. Continue revisiting nodes until they all satisfy the requirement
+    // that domS.id <= domid.
+    while (!revisit.isEmpty()) {
+      NodeS nodeS = revisit.poll();
+      NodeS domS = nodeS.domS;
+      assert nodeS.domid < domS.id;
+      while (domS.id > nodeS.domid) {
+        if (domS.domS.id < nodeS.domid) {
+          // In this case, nodeS.domid must be wrong, because there is a path
+          // from root to nodeS that does not go through nodeS.domid:
+          //  * We can go from root to domS without going through nodeS.domid,
+          //    because otherwise nodeS.domid would dominate domS, not
+          //    domS.domS.
+          //  * We can go from domS to nodeS without going through nodeS.domid
+          //    because we know nodeS is reachable from domS on a path of nodes
+          //    with increasing ids, which cannot include nodeS.domid, which
+          //    has a smaller id than domS.
+          nodeS.domid = domS.domS.id;
+        }
+        domS.undom.add(nodeS);
+        domS = domS.srcS;
+      }
+      nodeS.domS = domS;
+      nodeS.domid = domS.id;
+
+      for (NodeS xS : nodeS.undom) {
+        if (domS.id < xS.domid) {
+          // In this case, xS.domid must be wrong, because there is a path
+          // from the root to xS that does not go through xS.domid:
+          //  * We can go from root to nodeS without going through xS.domid,
+          //    because otherwise xS.domid would dominate nodeS, not domS.
+          //  * We can go from nodeS to xS without going through xS.domid
+          //    because we know xS is reachable from nodeS on a path of nodes
+          //    with increasing ids, which cannot include xS.domid, which has
+          //    a smaller id than nodeS.
+          boolean revisiting = xS.domid < xS.domS.id;
+          xS.domid = domS.id;
+          if (!revisiting) {
+            revisit.add(xS);
+          }
+        }
+      }
+    }
+
+    // 3. Update the dominators of the nodes.
+    root.setDominatorsComputationState(null);
+    for (NodeS nodeS : nodes) {
+      nodeS.node.setDominator(nodeS.domS.node);
+      nodeS.node.setDominatorsComputationState(null);
+    }
+  }
+}
diff --git a/tools/ahat/src/heapdump/AhatArrayInstance.java b/tools/ahat/src/heapdump/AhatArrayInstance.java
index d88cf94..6d4485d 100644
--- a/tools/ahat/src/heapdump/AhatArrayInstance.java
+++ b/tools/ahat/src/heapdump/AhatArrayInstance.java
@@ -20,6 +20,7 @@
 import com.android.tools.perflib.heap.Instance;
 import java.nio.charset.StandardCharsets;
 import java.util.AbstractList;
+import java.util.Collections;
 import java.util.List;
 
 public class AhatArrayInstance extends AhatInstance {
@@ -37,8 +38,8 @@
     super(id);
   }
 
-  @Override void initialize(AhatSnapshot snapshot, Instance inst) {
-    super.initialize(snapshot, inst);
+  @Override void initialize(AhatSnapshot snapshot, Instance inst, Site site) {
+    super.initialize(snapshot, inst, site);
 
     ArrayInstance array = (ArrayInstance)inst;
     switch (array.getArrayType()) {
@@ -49,10 +50,6 @@
           if (objects[i] != null) {
             Instance ref = (Instance)objects[i];
             insts[i] = snapshot.findInstance(ref.getId());
-            if (ref.getNextInstanceToGcRoot() == inst) {
-              String field = "[" + Integer.toString(i) + "]";
-              insts[i].setNextInstanceToGcRoot(this, field);
-            }
           }
         }
         mValues = new AbstractList<Value>() {
@@ -132,6 +129,35 @@
     return mValues.get(index);
   }
 
+  @Override
+  ReferenceIterator getReferences() {
+    // The list of references will be empty if this is a primitive array.
+    List<Reference> refs = Collections.emptyList();
+    if (!mValues.isEmpty()) {
+      Value first = mValues.get(0);
+      if (first == null || first.isAhatInstance()) {
+        refs = new AbstractList<Reference>() {
+          @Override
+          public int size() {
+            return mValues.size();
+          }
+
+          @Override
+          public Reference get(int index) {
+            Value value = mValues.get(index);
+            if (value != null) {
+              assert value.isAhatInstance();
+              String field = "[" + Integer.toString(index) + "]";
+              return new Reference(AhatArrayInstance.this, field, value.asAhatInstance(), true);
+            }
+            return null;
+          }
+        };
+      }
+    }
+    return new ReferenceIterator(refs);
+  }
+
   @Override public boolean isArrayInstance() {
     return true;
   }
diff --git a/tools/ahat/src/heapdump/AhatClassInstance.java b/tools/ahat/src/heapdump/AhatClassInstance.java
index 158de52..2115923 100644
--- a/tools/ahat/src/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/heapdump/AhatClassInstance.java
@@ -19,6 +19,7 @@
 import com.android.tools.perflib.heap.ClassInstance;
 import com.android.tools.perflib.heap.Instance;
 import java.awt.image.BufferedImage;
+import java.util.AbstractList;
 import java.util.Arrays;
 import java.util.List;
 
@@ -29,8 +30,8 @@
     super(id);
   }
 
-  @Override void initialize(AhatSnapshot snapshot, Instance inst) {
-    super.initialize(snapshot, inst);
+  @Override void initialize(AhatSnapshot snapshot, Instance inst, Site site) {
+    super.initialize(snapshot, inst, site);
 
     ClassInstance classInst = (ClassInstance)inst;
     List<ClassInstance.FieldValue> fieldValues = classInst.getValues();
@@ -40,15 +41,7 @@
       String name = field.getField().getName();
       String type = field.getField().getType().toString();
       Value value = snapshot.getValue(field.getValue());
-
       mFieldValues[i] = new FieldValue(name, type, value);
-
-      if (field.getValue() instanceof Instance) {
-        Instance ref = (Instance)field.getValue();
-        if (ref.getNextInstanceToGcRoot() == inst) {
-          value.asAhatInstance().setNextInstanceToGcRoot(this, "." + name);
-        }
-      }
     }
   }
 
@@ -101,6 +94,30 @@
     return Arrays.asList(mFieldValues);
   }
 
+  @Override
+  ReferenceIterator getReferences() {
+    List<Reference> refs = new AbstractList<Reference>() {
+      @Override
+      public int size() {
+        return mFieldValues.length;
+      }
+
+      @Override
+      public Reference get(int index) {
+        FieldValue field = mFieldValues[index];
+        Value value = field.value;
+        if (value != null && value.isAhatInstance()) {
+          boolean strong = !field.name.equals("referent")
+                        || !isInstanceOfClass("java.lang.ref.Reference");
+          AhatInstance ref = value.asAhatInstance();
+          return new Reference(AhatClassInstance.this, "." + field.name, ref, strong);
+        }
+        return null;
+      }
+    };
+    return new ReferenceIterator(refs);
+  }
+
   /**
    * Returns true if this is an instance of a class with the given name.
    */
diff --git a/tools/ahat/src/heapdump/AhatClassObj.java b/tools/ahat/src/heapdump/AhatClassObj.java
index c5ade1d..052d7a8 100644
--- a/tools/ahat/src/heapdump/AhatClassObj.java
+++ b/tools/ahat/src/heapdump/AhatClassObj.java
@@ -19,6 +19,7 @@
 import com.android.tools.perflib.heap.ClassObj;
 import com.android.tools.perflib.heap.Field;
 import com.android.tools.perflib.heap.Instance;
+import java.util.AbstractList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
@@ -34,8 +35,8 @@
     super(id);
   }
 
-  @Override void initialize(AhatSnapshot snapshot, Instance inst) {
-    super.initialize(snapshot, inst);
+  @Override void initialize(AhatSnapshot snapshot, Instance inst, Site site) {
+    super.initialize(snapshot, inst, site);
 
     ClassObj classObj = (ClassObj)inst;
     mClassName = classObj.getClassName();
@@ -58,13 +59,6 @@
       String type = field.getKey().getType().toString();
       Value value = snapshot.getValue(field.getValue());
       mStaticFieldValues[index++] = new FieldValue(name, type, value);
-
-      if (field.getValue() instanceof Instance) {
-        Instance ref = (Instance)field.getValue();
-        if (ref.getNextInstanceToGcRoot() == inst) {
-          value.asAhatInstance().setNextInstanceToGcRoot(this, "." + name);
-        }
-      }
     }
   }
 
@@ -96,6 +90,27 @@
     return Arrays.asList(mStaticFieldValues);
   }
 
+  @Override
+  ReferenceIterator getReferences() {
+    List<Reference> refs = new AbstractList<Reference>() {
+      @Override
+      public int size() {
+        return mStaticFieldValues.length;
+      }
+
+      @Override
+      public Reference get(int index) {
+        FieldValue field = mStaticFieldValues[index];
+        Value value = field.value;
+        if (value != null && value.isAhatInstance()) {
+          return new Reference(AhatClassObj.this, "." + field.name, value.asAhatInstance(), true);
+        }
+        return null;
+      }
+    };
+    return new ReferenceIterator(refs);
+  }
+
   @Override public boolean isClassObj() {
     return true;
   }
@@ -105,11 +120,10 @@
   }
 
   @Override public String toString() {
-    return mClassName;
+    return "class " + mClassName;
   }
 
   @Override AhatInstance newPlaceHolderInstance() {
     return new AhatPlaceHolderClassObj(this);
   }
 }
-
diff --git a/tools/ahat/src/heapdump/AhatInstance.java b/tools/ahat/src/heapdump/AhatInstance.java
index af369d9..8905b76 100644
--- a/tools/ahat/src/heapdump/AhatInstance.java
+++ b/tools/ahat/src/heapdump/AhatInstance.java
@@ -16,39 +16,48 @@
 
 package com.android.ahat.heapdump;
 
+import com.android.ahat.dominators.DominatorsComputation;
 import com.android.tools.perflib.heap.ClassObj;
 import com.android.tools.perflib.heap.Instance;
-import com.android.tools.perflib.heap.RootObj;
 import java.awt.image.BufferedImage;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Deque;
 import java.util.List;
+import java.util.Queue;
 
-public abstract class AhatInstance implements Diffable<AhatInstance> {
-  private long mId;
+public abstract class AhatInstance implements Diffable<AhatInstance>,
+                                              DominatorsComputation.Node {
+  // The id of this instance from the heap dump.
+  private final long mId;
+
+  // Fields initialized in initialize().
   private Size mSize;
-  private Size[] mRetainedSizes;      // Retained size indexed by heap index
-  private boolean mIsReachable;
   private AhatHeap mHeap;
-  private AhatInstance mImmediateDominator;
-  private AhatInstance mNextInstanceToGcRoot;
-  private String mNextInstanceToGcRootField = "???";
   private AhatClassObj mClassObj;
-  private AhatInstance[] mHardReverseReferences;
-  private AhatInstance[] mSoftReverseReferences;
   private Site mSite;
 
   // If this instance is a root, mRootTypes contains a set of the root types.
   // If this instance is not a root, mRootTypes is null.
   private List<String> mRootTypes;
 
-  // List of instances this instance immediately dominates.
-  private List<AhatInstance> mDominated = new ArrayList<AhatInstance>();
+  // Fields initialized in computeReverseReferences().
+  private AhatInstance mNextInstanceToGcRoot;
+  private String mNextInstanceToGcRootField;
+  private ArrayList<AhatInstance> mHardReverseReferences;
+  private ArrayList<AhatInstance> mSoftReverseReferences;
 
+  // Fields initialized in DominatorsComputation.computeDominators().
+  // mDominated - the list of instances immediately dominated by this instance.
+  // mRetainedSizes - retained size indexed by heap index.
+  private AhatInstance mImmediateDominator;
+  private List<AhatInstance> mDominated = new ArrayList<AhatInstance>();
+  private Size[] mRetainedSizes;
+  private Object mDominatorsComputationState;
+
+  // The baseline instance for purposes of diff.
   private AhatInstance mBaseline;
 
   public AhatInstance(long id) {
@@ -62,58 +71,16 @@
    * There is no guarantee that the AhatInstances returned by
    * snapshot.findInstance have been initialized yet.
    */
-  void initialize(AhatSnapshot snapshot, Instance inst) {
-    mId = inst.getId();
+  void initialize(AhatSnapshot snapshot, Instance inst, Site site) {
     mSize = new Size(inst.getSize(), 0);
-    mIsReachable = inst.isReachable();
-
-    List<AhatHeap> heaps = snapshot.getHeaps();
-
     mHeap = snapshot.getHeap(inst.getHeap().getName());
 
-    Instance dom = inst.getImmediateDominator();
-    if (dom == null || dom instanceof RootObj) {
-      mImmediateDominator = null;
-    } else {
-      mImmediateDominator = snapshot.findInstance(dom.getId());
-      mImmediateDominator.mDominated.add(this);
-    }
-
     ClassObj clsObj = inst.getClassObj();
     if (clsObj != null) {
       mClassObj = snapshot.findClassObj(clsObj.getId());
     }
 
-    // A couple notes about reverse references:
-    // * perflib sometimes returns unreachable reverse references. If
-    //   snapshot.findInstance returns null, it means the reverse reference is
-    //   not reachable, so we filter it out.
-    // * We store the references as AhatInstance[] instead of
-    //   ArrayList<AhatInstance> because it saves a lot of space and helps
-    //   with performance when there are a lot of AhatInstances.
-    ArrayList<AhatInstance> ahatRefs = new ArrayList<AhatInstance>();
-    ahatRefs = new ArrayList<AhatInstance>();
-    for (Instance ref : inst.getHardReverseReferences()) {
-      AhatInstance ahat = snapshot.findInstance(ref.getId());
-      if (ahat != null) {
-        ahatRefs.add(ahat);
-      }
-    }
-    mHardReverseReferences = new AhatInstance[ahatRefs.size()];
-    ahatRefs.toArray(mHardReverseReferences);
-
-    List<Instance> refs = inst.getSoftReverseReferences();
-    ahatRefs.clear();
-    if (refs != null) {
-      for (Instance ref : refs) {
-        AhatInstance ahat = snapshot.findInstance(ref.getId());
-        if (ahat != null) {
-          ahatRefs.add(ahat);
-        }
-      }
-    }
-    mSoftReverseReferences = new AhatInstance[ahatRefs.size()];
-    ahatRefs.toArray(mSoftReverseReferences);
+    mSite = site;
   }
 
   /**
@@ -166,7 +133,7 @@
    * Returns whether this object is strongly-reachable.
    */
   public boolean isReachable() {
-    return mIsReachable;
+    return mImmediateDominator != null;
   }
 
   /**
@@ -177,6 +144,12 @@
   }
 
   /**
+   * Returns an iterator over the references this AhatInstance has to other
+   * AhatInstances.
+   */
+  abstract ReferenceIterator getReferences();
+
+  /**
    * Returns true if this instance is marked as a root instance.
    */
   public boolean isRoot() {
@@ -227,13 +200,6 @@
   }
 
   /**
-   * Sets the allocation site of this instance.
-   */
-  void setSite(Site site) {
-    mSite = site;
-  }
-
-  /**
    * Returns true if the given instance is a class object
    */
   public boolean isClassObj() {
@@ -311,14 +277,20 @@
    * Returns a list of objects with hard references to this object.
    */
   public List<AhatInstance> getHardReverseReferences() {
-    return Arrays.asList(mHardReverseReferences);
+    if (mHardReverseReferences != null) {
+      return mHardReverseReferences;
+    }
+    return Collections.emptyList();
   }
 
   /**
    * Returns a list of objects with soft references to this object.
    */
   public List<AhatInstance> getSoftReverseReferences() {
-    return Arrays.asList(mSoftReverseReferences);
+    if (mSoftReverseReferences != null) {
+      return mSoftReverseReferences;
+    }
+    return Collections.emptyList();
   }
 
   /**
@@ -425,8 +397,10 @@
   }
 
   void setNextInstanceToGcRoot(AhatInstance inst, String field) {
-    mNextInstanceToGcRoot = inst;
-    mNextInstanceToGcRootField = field;
+    if (mNextInstanceToGcRoot == null && !isRoot()) {
+      mNextInstanceToGcRoot = inst;
+      mNextInstanceToGcRootField = field;
+    }
   }
 
   /** Returns a human-readable identifier for this object.
@@ -466,6 +440,47 @@
   }
 
   /**
+   * Initialize the reverse reference fields of this instance and all other
+   * instances reachable from it. Initializes the following fields:
+   *   mNextInstanceToGcRoot
+   *   mNextInstanceToGcRootField
+   *   mHardReverseReferences
+   *   mSoftReverseReferences
+   */
+  static void computeReverseReferences(AhatInstance root) {
+    // Do a breadth first search to visit the nodes.
+    Queue<Reference> bfs = new ArrayDeque<Reference>();
+    for (Reference ref : root.getReferences()) {
+      bfs.add(ref);
+    }
+    while (!bfs.isEmpty()) {
+      Reference ref = bfs.poll();
+
+      if (ref.ref.mHardReverseReferences == null) {
+        // This is the first time we are seeing ref.ref.
+        ref.ref.mNextInstanceToGcRoot = ref.src;
+        ref.ref.mNextInstanceToGcRootField = ref.field;
+        ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
+        for (Reference childRef : ref.ref.getReferences()) {
+          bfs.add(childRef);
+        }
+      }
+
+      // Note: ref.src is null when the src is the SuperRoot.
+      if (ref.src != null) {
+        if (ref.strong) {
+          ref.ref.mHardReverseReferences.add(ref.src);
+        } else {
+          if (ref.ref.mSoftReverseReferences == null) {
+            ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
+          }
+          ref.ref.mSoftReverseReferences.add(ref.src);
+        }
+      }
+    }
+  }
+
+  /**
    * Recursively compute the retained size of the given instance and all
    * other instances it dominates.
    */
@@ -486,8 +501,10 @@
         for (int i = 0; i < numHeaps; i++) {
           inst.mRetainedSizes[i] = Size.ZERO;
         }
-        inst.mRetainedSizes[inst.mHeap.getIndex()] = 
-          inst.mRetainedSizes[inst.mHeap.getIndex()].plus(inst.mSize);
+        if (!(inst instanceof SuperRoot)) {
+          inst.mRetainedSizes[inst.mHeap.getIndex()] =
+            inst.mRetainedSizes[inst.mHeap.getIndex()].plus(inst.mSize);
+        }
         deque.push(inst);
         for (AhatInstance dominated : inst.mDominated) {
           deque.push(dominated);
@@ -501,4 +518,25 @@
       }
     }
   }
+
+  @Override
+  public void setDominatorsComputationState(Object state) {
+    mDominatorsComputationState = state;
+  }
+
+  @Override
+  public Object getDominatorsComputationState() {
+    return mDominatorsComputationState;
+  }
+
+  @Override
+  public Iterable<? extends DominatorsComputation.Node> getReferencesForDominators() {
+    return new DominatorReferenceIterator(getReferences());
+  }
+
+  @Override
+  public void setDominator(DominatorsComputation.Node dominator) {
+    mImmediateDominator = (AhatInstance)dominator;
+    mImmediateDominator.mDominated.add(this);
+  }
 }
diff --git a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java b/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
index 4aac804..d797b11 100644
--- a/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
+++ b/tools/ahat/src/heapdump/AhatPlaceHolderInstance.java
@@ -16,6 +16,9 @@
 
 package com.android.ahat.heapdump;
 
+import java.util.Collections;
+import java.util.List;
+
 /**
  * Generic PlaceHolder instance to take the place of a real AhatInstance for
  * the purposes of displaying diffs.
@@ -60,4 +63,10 @@
   @Override public boolean isPlaceHolder() {
     return true;
   }
+
+  @Override
+  ReferenceIterator getReferences() {
+    List<Reference> refs = Collections.emptyList();
+    return new ReferenceIterator(refs);
+  }
 }
diff --git a/tools/ahat/src/heapdump/AhatSnapshot.java b/tools/ahat/src/heapdump/AhatSnapshot.java
index 35d6c8a..7df78c5 100644
--- a/tools/ahat/src/heapdump/AhatSnapshot.java
+++ b/tools/ahat/src/heapdump/AhatSnapshot.java
@@ -16,6 +16,7 @@
 
 package com.android.ahat.heapdump;
 
+import com.android.ahat.dominators.DominatorsComputation;
 import com.android.tools.perflib.captures.DataBuffer;
 import com.android.tools.perflib.captures.MemoryMappedFileBuffer;
 import com.android.tools.perflib.heap.ArrayInstance;
@@ -42,7 +43,7 @@
   private final Site mRootSite = new Site("ROOT");
 
   // Collection of objects whose immediate dominator is the SENTINEL_ROOT.
-  private final List<AhatInstance> mRooted = new ArrayList<AhatInstance>();
+  private final List<AhatInstance> mRooted;
 
   // List of all ahat instances stored in increasing order by id.
   private final List<AhatInstance> mInstances = new ArrayList<AhatInstance>();
@@ -80,7 +81,6 @@
    */
   private AhatSnapshot(DataBuffer buffer, ProguardMap map) throws IOException {
     Snapshot snapshot = Snapshot.createSnapshot(buffer, map);
-    snapshot.computeDominators();
 
     // Properly label the class of class objects in the perflib snapshot.
     final ClassObj javaLangClass = snapshot.findClass("java.lang.Class");
@@ -139,46 +139,45 @@
     // and instances.
     for (AhatInstance ahat : mInstances) {
       Instance inst = snapshot.findInstance(ahat.getId());
-      ahat.initialize(this, inst);
 
-      Long registeredNativeSize = registeredNative.get(inst);
-      if (registeredNativeSize != null) {
-        ahat.addRegisteredNativeSize(registeredNativeSize);
-      }
-
-      if (inst.getImmediateDominator() == Snapshot.SENTINEL_ROOT) {
-        mRooted.add(ahat);
-      }
-
-      if (inst.isReachable()) {
-        ahat.getHeap().addToSize(ahat.getSize());
-      }
-
-      // Update sites.
       StackFrame[] frames = null;
       StackTrace stack = inst.getStack();
       if (stack != null) {
         frames = stack.getFrames();
       }
       Site site = mRootSite.add(frames, frames == null ? 0 : frames.length, ahat);
-      ahat.setSite(site);
+      ahat.initialize(this, inst, site);
+
+      Long registeredNativeSize = registeredNative.get(inst);
+      if (registeredNativeSize != null) {
+        ahat.addRegisteredNativeSize(registeredNativeSize);
+      }
     }
 
     // Record the roots and their types.
+    SuperRoot superRoot = new SuperRoot();
     for (RootObj root : snapshot.getGCRoots()) {
       Instance inst = root.getReferredInstance();
       if (inst != null) {
-        findInstance(inst.getId()).addRootType(root.getRootType().toString());
+        AhatInstance ahat = findInstance(inst.getId());
+        if (!ahat.isRoot()) {
+          superRoot.addRoot(ahat);
+        }
+        ahat.addRootType(root.getRootType().toString());
       }
     }
     snapshot.dispose();
 
-    // Compute the retained sizes of objects. We do this explicitly now rather
-    // than relying on the retained sizes computed by perflib so that
-    // registered native sizes are included.
-    for (AhatInstance inst : mRooted) {
-      AhatInstance.computeRetainedSize(inst, mHeaps.size());
+    AhatInstance.computeReverseReferences(superRoot);
+    DominatorsComputation.computeDominators(superRoot);
+    AhatInstance.computeRetainedSize(superRoot, mHeaps.size());
+
+    mRooted = superRoot.getDominated();
+    for (AhatHeap heap : mHeaps) {
+      heap.addToSize(superRoot.getRetainedSize(heap));
     }
+
+    mRootSite.computeObjectsInfos(mHeaps.size());
   }
 
   /**
diff --git a/tools/ahat/src/heapdump/DominatorReferenceIterator.java b/tools/ahat/src/heapdump/DominatorReferenceIterator.java
new file mode 100644
index 0000000..ce2e6ef
--- /dev/null
+++ b/tools/ahat/src/heapdump/DominatorReferenceIterator.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Reference iterator used for the dominators computation.
+ * This visits only strong references.
+ */
+class DominatorReferenceIterator implements Iterator<AhatInstance>,
+                                            Iterable<AhatInstance> {
+  private ReferenceIterator mIter;
+  private AhatInstance mNext;
+
+  public DominatorReferenceIterator(ReferenceIterator iter) {
+    mIter = iter;
+    mNext = null;
+  }
+
+  @Override
+  public boolean hasNext() {
+    while (mNext == null && mIter.hasNext()) {
+      Reference ref = mIter.next();
+      if (ref.strong) {
+        mNext = ref.ref;
+      }
+    }
+    return mNext != null;
+  }
+
+  @Override
+  public AhatInstance next() {
+    if (hasNext()) {
+      AhatInstance next = mNext;
+      mNext = null;
+      return next;
+    }
+    throw new NoSuchElementException();
+  }
+
+  @Override
+  public Iterator<AhatInstance> iterator() {
+    return this;
+  }
+}
diff --git a/tools/ahat/src/heapdump/Reference.java b/tools/ahat/src/heapdump/Reference.java
new file mode 100644
index 0000000..980f278
--- /dev/null
+++ b/tools/ahat/src/heapdump/Reference.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+/**
+ * Reference represents a reference from 'src' to 'ref' through 'field'.
+ * Field is a string description for human consumption. This is typically
+ * either "." followed by the field name or an array subscript such as "[4]".
+ * 'strong' is true if this is a strong reference, false if it is a
+ * weak/soft/other reference.
+ */
+public class Reference {
+  public final AhatInstance src;
+  public final String field;
+  public final AhatInstance ref;
+  public final boolean strong;
+
+  public Reference(AhatInstance src, String field, AhatInstance ref, boolean strong) {
+    this.src = src;
+    this.field = field;
+    this.ref = ref;
+    this.strong = strong;
+  }
+}
diff --git a/tools/ahat/src/heapdump/ReferenceIterator.java b/tools/ahat/src/heapdump/ReferenceIterator.java
new file mode 100644
index 0000000..a707fb2
--- /dev/null
+++ b/tools/ahat/src/heapdump/ReferenceIterator.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+class ReferenceIterator implements Iterator<Reference>,
+                                   Iterable<Reference> {
+  private List<Reference> mRefs;
+  private int mLength;
+  private int mNextIndex;
+  private Reference mNext;
+
+  /**
+   * Construct a ReferenceIterator that iterates over the given list of
+   * references. Elements of the given list of references may be null, in
+   * which case the ReferenceIterator will skip over them.
+   */
+  public ReferenceIterator(List<Reference> refs) {
+    mRefs = refs;
+    mLength = refs.size();
+    mNextIndex = 0;
+    mNext = null;
+  }
+
+  @Override
+  public boolean hasNext() {
+    while (mNext == null && mNextIndex < mLength) {
+      mNext = mRefs.get(mNextIndex);
+      mNextIndex++;
+    }
+    return mNext != null;
+  }
+
+  @Override
+  public Reference next() {
+    if (!hasNext()) {
+      throw new NoSuchElementException();
+    }
+    Reference next = mNext;
+    mNext = null;
+    return next;
+  }
+
+  @Override
+  public Iterator<Reference> iterator() {
+    return this;
+  }
+}
diff --git a/tools/ahat/src/heapdump/Site.java b/tools/ahat/src/heapdump/Site.java
index fdd4eea..f0fc5d2 100644
--- a/tools/ahat/src/heapdump/Site.java
+++ b/tools/ahat/src/heapdump/Site.java
@@ -42,15 +42,15 @@
   private int mDepth;
 
   // The total size of objects allocated in this site (including child sites),
-  // organized by heap index. Heap indices outside the range of mSizesByHeap
-  // implicitly have size 0.
+  // organized by heap index. Computed as part of computeObjectsInfos.
   private Size[] mSizesByHeap;
 
   // List of child sites.
   private List<Site> mChildren;
 
-  // List of all objects allocated in this site (including child sites).
+  // List of objects allocated at this site (not including child sites).
   private List<AhatInstance> mObjects;
+
   private List<ObjectsInfo> mObjectsInfos;
   private Map<AhatHeap, Map<AhatClassObj, ObjectsInfo>> mObjectsInfoMap;
 
@@ -111,7 +111,6 @@
     mLineNumber = line;
     mId = id;
     mDepth = depth;
-    mSizesByHeap = new Size[0];
     mChildren = new ArrayList<Site>();
     mObjects = new ArrayList<AhatInstance>();
     mObjectsInfos = new ArrayList<ObjectsInfo>();
@@ -130,67 +129,102 @@
   }
 
   private static Site add(Site site, StackFrame[] frames, int depth, AhatInstance inst) {
-    while (true) {
-      site.mObjects.add(inst);
+    while (depth > 0) {
+      StackFrame next = frames[depth - 1];
+      Site child = null;
+      for (int i = 0; i < site.mChildren.size(); i++) {
+        Site curr = site.mChildren.get(i);
+        if (curr.mLineNumber == next.getLineNumber()
+            && curr.mMethodName.equals(next.getMethodName())
+            && curr.mSignature.equals(next.getSignature())
+            && curr.mFilename.equals(next.getFilename())) {
+          child = curr;
+          break;
+        }
+      }
+      if (child == null) {
+        child = new Site(site, next.getMethodName(), next.getSignature(),
+            next.getFilename(), next.getLineNumber(), inst.getId(), depth - 1);
+        site.mChildren.add(child);
+      }
+      depth = depth - 1;
+      site = child;
+    }
+    site.mObjects.add(inst);
+    return site;
+  }
 
-      ObjectsInfo info = site.getObjectsInfo(inst.getHeap(), inst.getClassObj());
+  /**
+   * Recompute the ObjectsInfos for this and all child sites.
+   * This should be done after the sites tree has been formed. It should also
+   * be done after dominators computation has been performed to ensure only
+   * reachable objects are included in the ObjectsInfos.
+   *
+   * @param numHeaps - The number of heaps in the heap dump.
+   */
+  void computeObjectsInfos(int numHeaps) {
+    // Count up the total sizes by heap.
+    mSizesByHeap = new Size[numHeaps];
+    for (int i = 0; i < numHeaps; ++i) {
+      mSizesByHeap[i] = Size.ZERO;
+    }
+
+    // Add all reachable objects allocated at this site.
+    for (AhatInstance inst : mObjects) {
       if (inst.isReachable()) {
         AhatHeap heap = inst.getHeap();
-        if (heap.getIndex() >= site.mSizesByHeap.length) {
-          Size[] newSizes = new Size[heap.getIndex() + 1];
-          for (int i = 0; i < site.mSizesByHeap.length; i++) {
-            newSizes[i] = site.mSizesByHeap[i];
-          }
-          for (int i = site.mSizesByHeap.length; i < heap.getIndex() + 1; i++) {
-            newSizes[i] = Size.ZERO;
-          }
-          site.mSizesByHeap = newSizes;
-        }
-        site.mSizesByHeap[heap.getIndex()]
-          = site.mSizesByHeap[heap.getIndex()].plus(inst.getSize());
-
+        Size size = inst.getSize();
+        ObjectsInfo info = getObjectsInfo(heap, inst.getClassObj());
         info.numInstances++;
-        info.numBytes = info.numBytes.plus(inst.getSize());
+        info.numBytes = info.numBytes.plus(size);
+        mSizesByHeap[heap.getIndex()] = mSizesByHeap[heap.getIndex()].plus(size);
       }
+    }
 
-      if (depth > 0) {
-        StackFrame next = frames[depth - 1];
-        Site child = null;
-        for (int i = 0; i < site.mChildren.size(); i++) {
-          Site curr = site.mChildren.get(i);
-          if (curr.mLineNumber == next.getLineNumber()
-              && curr.mMethodName.equals(next.getMethodName())
-              && curr.mSignature.equals(next.getSignature())
-              && curr.mFilename.equals(next.getFilename())) {
-            child = curr;
-            break;
-          }
-        }
-        if (child == null) {
-          child = new Site(site, next.getMethodName(), next.getSignature(),
-              next.getFilename(), next.getLineNumber(), inst.getId(), depth - 1);
-          site.mChildren.add(child);
-        }
-        depth = depth - 1;
-        site = child;
-      } else {
-        return site;
+    // Add objects allocated in child sites.
+    for (Site child : mChildren) {
+      child.computeObjectsInfos(numHeaps);
+      for (ObjectsInfo childInfo : child.mObjectsInfos) {
+        ObjectsInfo info = getObjectsInfo(childInfo.heap, childInfo.classObj);
+        info.numInstances += childInfo.numInstances;
+        info.numBytes = info.numBytes.plus(childInfo.numBytes);
+      }
+      for (int i = 0; i < numHeaps; ++i) {
+        mSizesByHeap[i] = mSizesByHeap[i].plus(child.mSizesByHeap[i]);
       }
     }
   }
 
   // Get the size of a site for a specific heap.
   public Size getSize(AhatHeap heap) {
-    int index = heap.getIndex();
-    return index >= 0 && index < mSizesByHeap.length ? mSizesByHeap[index] : Size.ZERO;
+    return mSizesByHeap[heap.getIndex()];
   }
 
   /**
-   * Get the list of objects allocated under this site. Includes objects
-   * allocated in children sites.
+   * Collect the objects allocated under this site, optionally filtered by
+   * heap name or class name. Includes objects allocated in child sites.
+   * @param heapName - The name of the heap the collected objects should
+   *                   belong to. This may be null to indicate objects of
+   *                   every heap should be collected.
+   * @param className - The name of the class the collected objects should
+   *                    belong to. This may be null to indicate objects of
+   *                    every class should be collected.
+   * @param objects - Out parameter. A collection of objects that all
+   *                  collected objects should be added to.
    */
-  public Collection<AhatInstance> getObjects() {
-    return mObjects;
+  public void getObjects(String heapName, String className, Collection<AhatInstance> objects) {
+    for (AhatInstance inst : mObjects) {
+      if ((heapName == null || inst.getHeap().getName().equals(heapName))
+          && (className == null || inst.getClassName().equals(className))) {
+        objects.add(inst);
+      }
+    }
+
+    // Recursively visit children. Recursion should be okay here because the
+    // stack depth is limited by a reasonable amount (128 frames or so).
+    for (Site child : mChildren) {
+      child.getObjects(heapName, className, objects);
+    }
   }
 
   /**
@@ -220,8 +254,8 @@
   // Get the combined size of the site for all heaps.
   public Size getTotalSize() {
     Size total = Size.ZERO;
-    for (int i = 0; i < mSizesByHeap.length; i++) {
-      total = total.plus(mSizesByHeap[i]);
+    for (Size size : mSizesByHeap) {
+      total = total.plus(size);
     }
     return total;
   }
diff --git a/tools/ahat/src/heapdump/SuperRoot.java b/tools/ahat/src/heapdump/SuperRoot.java
new file mode 100644
index 0000000..54410cf
--- /dev/null
+++ b/tools/ahat/src/heapdump/SuperRoot.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+import com.android.ahat.dominators.DominatorsComputation;
+import java.util.AbstractList;
+import java.util.ArrayList;
+import java.util.List;
+
+public class SuperRoot extends AhatInstance implements DominatorsComputation.Node {
+  private List<AhatInstance> mRoots = new ArrayList<AhatInstance>();
+  private Object mDominatorsComputationState;
+
+  public SuperRoot() {
+    super(0);
+  }
+
+  public void addRoot(AhatInstance root) {
+    mRoots.add(root);
+  }
+
+  @Override
+  public String toString() {
+    return "SUPER_ROOT";
+  }
+
+  @Override
+  ReferenceIterator getReferences() {
+    List<Reference> refs = new AbstractList<Reference>() {
+      @Override
+      public int size() {
+        return mRoots.size();
+      }
+
+      @Override
+      public Reference get(int index) {
+        String field = ".roots[" + Integer.toString(index) + "]";
+        return new Reference(null, field, mRoots.get(index), true);
+      }
+    };
+    return new ReferenceIterator(refs);
+  }
+}
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index 3d3de78..13fd102 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -60,6 +60,14 @@
     public StackSmasher child;
   }
 
+  public static class Reference {
+    public Object referent;
+
+    public Reference(Object referent) {
+      this.referent = referent;
+    }
+  }
+
   // We will take a heap dump that includes a single instance of this
   // DumpedStuff class. Objects stored as fields in this class can be easily
   // found in the hprof dump by searching for the instance of the DumpedStuff
@@ -71,6 +79,7 @@
     public char[] charArray = "char thing".toCharArray();
     public String nullString = null;
     public Object anObject = new Object();
+    public Reference aReference = new Reference(anObject);
     public ReferenceQueue<Object> referenceQueue = new ReferenceQueue<Object>();
     public PhantomReference aPhantomReference = new PhantomReference(anObject, referenceQueue);
     public WeakReference aWeakReference = new WeakReference(anObject, referenceQueue);
diff --git a/tools/ahat/test/DominatorsTest.java b/tools/ahat/test/DominatorsTest.java
new file mode 100644
index 0000000..0424e10
--- /dev/null
+++ b/tools/ahat/test/DominatorsTest.java
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.dominators.DominatorsComputation;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+
+public class DominatorsTest {
+  private static class Node implements DominatorsComputation.Node {
+    public String name;
+    public List<Node> depends = new ArrayList<Node>();
+    public Node dominator;
+    private Object dominatorsComputationState;
+
+    public Node(String name) {
+      this.name = name;
+    }
+
+    public void computeDominators() {
+      DominatorsComputation.computeDominators(this);
+    }
+
+    public String toString() {
+      return name;
+    }
+
+    @Override
+    public void setDominatorsComputationState(Object state) {
+      dominatorsComputationState = state;
+    }
+
+    @Override
+    public Object getDominatorsComputationState() {
+      return dominatorsComputationState;
+    }
+
+    @Override
+    public Collection<Node> getReferencesForDominators() {
+      return depends;
+    }
+
+    @Override
+    public void setDominator(DominatorsComputation.Node dominator) {
+      this.dominator = (Node)dominator;
+    }
+  }
+
+  @Test
+  public void singleNode() {
+    // --> n
+    // Trivial case.
+    Node n = new Node("n");
+    n.computeDominators();
+  }
+
+  @Test
+  public void parentWithChild() {
+    // --> parent --> child
+    // The child node is dominated by the parent.
+    Node parent = new Node("parent");
+    Node child = new Node("child");
+    parent.depends = Arrays.asList(child);
+
+    parent.computeDominators();
+    assertEquals(parent, child.dominator);
+  }
+
+  @Test
+  public void reachableTwoWays() {
+    //            /-> right -->\
+    // --> parent               child
+    //            \-> left --->/
+    // The child node can be reached either by right or by left.
+    Node parent = new Node("parent");
+    Node right = new Node("right");
+    Node left = new Node("left");
+    Node child = new Node("child");
+    parent.depends = Arrays.asList(left, right);
+    right.depends = Arrays.asList(child);
+    left.depends = Arrays.asList(child);
+
+    parent.computeDominators();
+    assertEquals(parent, left.dominator);
+    assertEquals(parent, right.dominator);
+    assertEquals(parent, child.dominator);
+  }
+
+  @Test
+  public void reachableDirectAndIndirect() {
+    //            /-> right -->\
+    // --> parent  -----------> child
+    // The child node can be reached either by right or parent.
+    Node parent = new Node("parent");
+    Node right = new Node("right");
+    Node child = new Node("child");
+    parent.depends = Arrays.asList(right, child);
+    right.depends = Arrays.asList(child);
+
+    parent.computeDominators();
+    assertEquals(parent, child.dominator);
+    assertEquals(parent, right.dominator);
+  }
+
+  @Test
+  public void subDominator() {
+    // --> parent --> middle --> child
+    // The child is dominated by an internal node.
+    Node parent = new Node("parent");
+    Node middle = new Node("middle");
+    Node child = new Node("child");
+    parent.depends = Arrays.asList(middle);
+    middle.depends = Arrays.asList(child);
+
+    parent.computeDominators();
+    assertEquals(parent, middle.dominator);
+    assertEquals(middle, child.dominator);
+  }
+
+  @Test
+  public void childSelfLoop() {
+    // --> parent --> child -\
+    //                  \<---/
+    // The child points back to itself.
+    Node parent = new Node("parent");
+    Node child = new Node("child");
+    parent.depends = Arrays.asList(child);
+    child.depends = Arrays.asList(child);
+
+    parent.computeDominators();
+    assertEquals(parent, child.dominator);
+  }
+
+  @Test
+  public void singleEntryLoop() {
+    // --> parent --> a --> b --> c -\
+    //                 \<------------/
+    // There is a loop in the graph, with only one way into the loop.
+    Node parent = new Node("parent");
+    Node a = new Node("a");
+    Node b = new Node("b");
+    Node c = new Node("c");
+    parent.depends = Arrays.asList(a);
+    a.depends = Arrays.asList(b);
+    b.depends = Arrays.asList(c);
+    c.depends = Arrays.asList(a);
+
+    parent.computeDominators();
+    assertEquals(parent, a.dominator);
+    assertEquals(a, b.dominator);
+    assertEquals(b, c.dominator);
+  }
+
+  @Test
+  public void multiEntryLoop() {
+    // --> parent --> right --> a --> b ----\
+    //        \                  \<-- c <---/
+    //         \--> left --->--------/
+    // There is a loop in the graph, with two different ways to enter the
+    // loop.
+    Node parent = new Node("parent");
+    Node left = new Node("left");
+    Node right = new Node("right");
+    Node a = new Node("a");
+    Node b = new Node("b");
+    Node c = new Node("c");
+    parent.depends = Arrays.asList(left, right);
+    right.depends = Arrays.asList(a);
+    left.depends = Arrays.asList(c);
+    a.depends = Arrays.asList(b);
+    b.depends = Arrays.asList(c);
+    c.depends = Arrays.asList(a);
+
+    parent.computeDominators();
+    assertEquals(parent, right.dominator);
+    assertEquals(parent, left.dominator);
+    assertEquals(parent, a.dominator);
+    assertEquals(parent, c.dominator);
+    assertEquals(a, b.dominator);
+  }
+
+  @Test
+  public void dominatorOverwrite() {
+    //            /---------> right <--\
+    // --> parent  --> child <--/      /
+    //            \---> left ---------/
+    // Test a strange case where we have had trouble in the past with a
+    // dominator getting improperly overwritten. The relevant features of this
+    // case are: 'child' is visited after 'right', 'child' is dominated by
+    // 'parent', and 'parent' revisits 'right' after visiting 'child'.
+    Node parent = new Node("parent");
+    Node right = new Node("right");
+    Node left = new Node("left");
+    Node child = new Node("child");
+    parent.depends = Arrays.asList(left, child, right);
+    left.depends = Arrays.asList(right);
+    right.depends = Arrays.asList(child);
+
+    parent.computeDominators();
+    assertEquals(parent, left.dominator);
+    assertEquals(parent, child.dominator);
+    assertEquals(parent, right.dominator);
+  }
+
+  @Test
+  public void stackOverflow() {
+    // --> a --> b --> ... --> N
+    // Verify we don't smash the stack for deep chains.
+    Node root = new Node("root");
+    Node curr = root;
+    for (int i = 0; i < 10000; ++i) {
+      Node node = new Node("n" + i);
+      curr.depends.add(node);
+      curr = node;
+    }
+
+    root.computeDominators();
+  }
+
+  @Test
+  public void hiddenRevisit() {
+    //           /-> left ---->---------\
+    // --> parent      \---> a --> b --> c
+    //           \-> right -/
+    // Test a case we have had trouble with in the past.
+    // When a's dominator is updated from left to parent, that should trigger
+    // all reachable children's dominators to be updated too. In particular,
+    // c's dominator should be updated, even though b's dominator is
+    // unchanged.
+    Node parent = new Node("parent");
+    Node right = new Node("right");
+    Node left = new Node("left");
+    Node a = new Node("a");
+    Node b = new Node("b");
+    Node c = new Node("c");
+    parent.depends = Arrays.asList(right, left);
+    left.depends = Arrays.asList(a, c);
+    right.depends = Arrays.asList(a);
+    a.depends = Arrays.asList(b);
+    b.depends = Arrays.asList(c);
+
+    parent.computeDominators();
+    assertEquals(parent, left.dominator);
+    assertEquals(parent, right.dominator);
+    assertEquals(parent, a.dominator);
+    assertEquals(parent, c.dominator);
+    assertEquals(a, b.dominator);
+  }
+
+  @Test
+  public void preUndominatedUpdate() {
+    //       /--------->--------\
+    //      /          /---->----\
+    // --> p -> a --> b --> c --> d --> e
+    //           \---------->----------/
+    // Test a case we have had trouble with in the past.
+    // The candidate dominator for e is revised from d to a, then d is shown
+    // to be reachable from p. Make sure that causes e's dominator to be
+    // refined again from a to p. The extra nodes are there to ensure the
+    // necessary scheduling to expose the bug we had.
+    Node p = new Node("p");
+    Node a = new Node("a");
+    Node b = new Node("b");
+    Node c = new Node("c");
+    Node d = new Node("d");
+    Node e = new Node("e");
+    p.depends = Arrays.asList(d, a);
+    a.depends = Arrays.asList(e, b);
+    b.depends = Arrays.asList(d, c);
+    c.depends = Arrays.asList(d);
+    d.depends = Arrays.asList(e);
+
+    p.computeDominators();
+    assertEquals(p, a.dominator);
+    assertEquals(a, b.dominator);
+    assertEquals(b, c.dominator);
+    assertEquals(p, d.dominator);
+    assertEquals(p, e.dominator);
+  }
+}
diff --git a/tools/ahat/test/InstanceTest.java b/tools/ahat/test/InstanceTest.java
index 71b081c..f0e7f44 100644
--- a/tools/ahat/test/InstanceTest.java
+++ b/tools/ahat/test/InstanceTest.java
@@ -337,7 +337,7 @@
   public void classObjToString() throws IOException {
     TestDump dump = TestDump.getTestDump();
     AhatInstance obj = dump.getAhatSnapshot().findClass("Main");
-    assertEquals("Main", obj.toString());
+    assertEquals("class Main", obj.toString());
   }
 
   @Test
@@ -370,6 +370,18 @@
   }
 
   @Test
+  public void reverseReferences() throws IOException {
+    TestDump dump = TestDump.getTestDump();
+    AhatInstance obj = dump.getDumpedAhatInstance("anObject");
+    AhatInstance ref = dump.getDumpedAhatInstance("aReference");
+    AhatInstance weak = dump.getDumpedAhatInstance("aWeakReference");
+    assertTrue(obj.getHardReverseReferences().contains(ref));
+    assertFalse(obj.getHardReverseReferences().contains(weak));
+    assertFalse(obj.getSoftReverseReferences().contains(ref));
+    assertTrue(obj.getSoftReverseReferences().contains(weak));
+  }
+
+  @Test
   public void asStringEmbedded() throws IOException {
     // Set up a heap dump with an instance of java.lang.String of
     // "hello" with instance id 0x42 that is backed by a char array that is
diff --git a/tools/ahat/test/Tests.java b/tools/ahat/test/Tests.java
index a95788e..a1e3246 100644
--- a/tools/ahat/test/Tests.java
+++ b/tools/ahat/test/Tests.java
@@ -24,6 +24,7 @@
       args = new String[]{
         "com.android.ahat.DiffFieldsTest",
         "com.android.ahat.DiffTest",
+        "com.android.ahat.DominatorsTest",
         "com.android.ahat.InstanceTest",
         "com.android.ahat.NativeAllocationTest",
         "com.android.ahat.ObjectHandlerTest",
diff --git a/tools/art b/tools/art
index 077dc4a..bc0c85e 100644
--- a/tools/art
+++ b/tools/art
@@ -278,7 +278,7 @@
           -Xps-profile-path:$PROFILE_PATH      \
           -Xusejit:true                        \
           "${ARGS_WITH_QUICKEN[@]}"            \
-          "&>" "$ANDROID_DATA/profile_gen.log"
+          &> "$ANDROID_DATA/profile_gen.log"
   EXIT_STATUS=$?
 
   if [ $EXIT_STATUS != 0 ]; then
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
index ede16d2..743ebb7 100644
--- a/tools/cpp-define-generator/constant_dexcache.def
+++ b/tools/cpp-define-generator/constant_dexcache.def
@@ -25,4 +25,8 @@
 DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS,                int32_t,
     art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
 DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE,             int32_t,
-    sizeof(art::mirror::StringDexCachePair))
\ No newline at end of file
+    sizeof(art::mirror::StringDexCachePair))
+DEFINE_EXPR(METHOD_DEX_CACHE_SIZE_MINUS_ONE,           int32_t,
+    art::mirror::DexCache::kDexCacheMethodCacheSize - 1)
+DEFINE_EXPR(METHOD_DEX_CACHE_HASH_BITS,                int32_t,
+    art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index 58f6226..a635fe9 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -138,12 +138,14 @@
 InstructionDuplicator 80
 InstructionSwapper 80
 InvokeChanger 30
+NewArrayLengthChanger 50
 NewMethodCaller 10
 NonsenseStringPrinter 10
 OppositeBranchChanger 40
 PoolIndexChanger 30
 RandomBranchChanger 30
 RandomInstructionGenerator 30
+RegisterClobber 40
 SwitchBranchShifter 30
 TryBlockShifter 40
 ValuePrinter 40
diff --git a/tools/dexfuzz/src/dexfuzz/DexFuzz.java b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
index 97cdfee..d37bd34 100644
--- a/tools/dexfuzz/src/dexfuzz/DexFuzz.java
+++ b/tools/dexfuzz/src/dexfuzz/DexFuzz.java
@@ -33,8 +33,9 @@
  * Entrypoint class for dexfuzz.
  */
 public class DexFuzz {
+  // Last version update 1.5: added register clobber mutator.
   private static int majorVersion = 1;
-  private static int minorVersion = 3;
+  private static int minorVersion = 5;
   private static int seedChangeVersion = 0;
 
   /**
diff --git a/tools/dexfuzz/src/dexfuzz/program/Program.java b/tools/dexfuzz/src/dexfuzz/program/Program.java
index 1d0c678..bb2f4c0 100644
--- a/tools/dexfuzz/src/dexfuzz/program/Program.java
+++ b/tools/dexfuzz/src/dexfuzz/program/Program.java
@@ -31,12 +31,14 @@
 import dexfuzz.program.mutators.InstructionDuplicator;
 import dexfuzz.program.mutators.InstructionSwapper;
 import dexfuzz.program.mutators.InvokeChanger;
+import dexfuzz.program.mutators.NewArrayLengthChanger;
 import dexfuzz.program.mutators.NewMethodCaller;
 import dexfuzz.program.mutators.NonsenseStringPrinter;
 import dexfuzz.program.mutators.OppositeBranchChanger;
 import dexfuzz.program.mutators.PoolIndexChanger;
 import dexfuzz.program.mutators.RandomBranchChanger;
 import dexfuzz.program.mutators.RandomInstructionGenerator;
+import dexfuzz.program.mutators.RegisterClobber;
 import dexfuzz.program.mutators.SwitchBranchShifter;
 import dexfuzz.program.mutators.TryBlockShifter;
 import dexfuzz.program.mutators.ValuePrinter;
@@ -201,12 +203,14 @@
     registerMutator(new InstructionDuplicator(rng, mutationStats, mutations));
     registerMutator(new InstructionSwapper(rng, mutationStats, mutations));
     registerMutator(new InvokeChanger(rng, mutationStats, mutations));
+    registerMutator(new NewArrayLengthChanger(rng, mutationStats, mutations));
     registerMutator(new NewMethodCaller(rng, mutationStats, mutations));
     registerMutator(new NonsenseStringPrinter(rng, mutationStats, mutations));
     registerMutator(new OppositeBranchChanger(rng, mutationStats, mutations));
     registerMutator(new PoolIndexChanger(rng, mutationStats, mutations));
     registerMutator(new RandomBranchChanger(rng, mutationStats, mutations));
     registerMutator(new RandomInstructionGenerator(rng, mutationStats, mutations));
+    registerMutator(new RegisterClobber(rng, mutationStats, mutations));
     registerMutator(new SwitchBranchShifter(rng, mutationStats, mutations));
     registerMutator(new TryBlockShifter(rng, mutationStats, mutations));
     registerMutator(new ValuePrinter(rng, mutationStats, mutations));
diff --git a/tools/dexfuzz/src/dexfuzz/program/mutators/InvokeChanger.java b/tools/dexfuzz/src/dexfuzz/program/mutators/InvokeChanger.java
index 3488503..8750fc6 100644
--- a/tools/dexfuzz/src/dexfuzz/program/mutators/InvokeChanger.java
+++ b/tools/dexfuzz/src/dexfuzz/program/mutators/InvokeChanger.java
@@ -136,7 +136,7 @@
 
     String oldInsnString = invokeInsn.toString();
 
-    Opcode newOpcode = isInvokeCalIInst(invokeInsn);
+    Opcode newOpcode = getDifferentInvokeCallOpcode(invokeInsn);
 
     invokeInsn.insn.info = Instruction.getOpcodeInfo(newOpcode);
 
@@ -148,7 +148,7 @@
     invokeCallInsns = null;
   }
 
-  private Opcode isInvokeCalIInst(MInsn mInsn) {
+  private Opcode getDifferentInvokeCallOpcode(MInsn mInsn) {
     Opcode opcode = mInsn.insn.info.opcode;
     if (isSimpleInvokeInst(opcode)) {
       int index = opcode.ordinal() - Opcode.INVOKE_VIRTUAL.ordinal();
@@ -159,7 +159,7 @@
       int length = INVOKE_RANGE_LIST.length;
       return INVOKE_RANGE_LIST[(index + 1 + rng.nextInt(length - 1)) % length];
     }
-      return opcode;
+    return opcode;
   }
 
   private boolean isSimpleInvokeInst(Opcode opcode){
diff --git a/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java b/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java
new file mode 100644
index 0000000..aba7971
--- /dev/null
+++ b/tools/dexfuzz/src/dexfuzz/program/mutators/NewArrayLengthChanger.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dexfuzz.program.mutators;
+
+import dexfuzz.Log;
+import dexfuzz.MutationStats;
+import dexfuzz.program.MInsn;
+import dexfuzz.program.MutatableCode;
+import dexfuzz.program.Mutation;
+import dexfuzz.rawdex.Instruction;
+import dexfuzz.rawdex.Opcode;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+// This mutation might change the length of an array but can also change the
+// value of the register in every place it is used.
+public class NewArrayLengthChanger extends CodeMutator {
+  /**
+   * Every CodeMutator has an AssociatedMutation, representing the
+   * mutation that this CodeMutator can perform, to allow separate
+   * generateMutation() and applyMutation() phases, allowing serialization.
+   */
+  public static class AssociatedMutation extends Mutation {
+    public int newArrayToChangeIdx;
+
+    @Override
+    public String getString() {
+      return Integer.toString(newArrayToChangeIdx);
+    }
+
+    @Override
+    public void parseString(String[] elements) {
+      newArrayToChangeIdx = Integer.parseInt(elements[2]);
+    }
+  }
+
+  // The following two methods are here for the benefit of MutationSerializer,
+  // so it can create a CodeMutator and get the correct associated Mutation, as it
+  // reads in mutations from a dump of mutations.
+  @Override
+  public Mutation getNewMutation() {
+    return new AssociatedMutation();
+  }
+
+  public NewArrayLengthChanger() { }
+
+  public NewArrayLengthChanger(Random rng, MutationStats stats, List<Mutation> mutations) {
+    super(rng, stats, mutations);
+    likelihood = 50;
+  }
+
+  // A cache that should only exist between generateMutation() and applyMutation(),
+  // or be created at the start of applyMutation(), if we're reading in mutations from
+  // a file.
+  private List<MInsn> newArrayLengthInsns = null;
+
+  private void generateCachedArrayLengthInsns(MutatableCode mutatableCode) {
+    if (newArrayLengthInsns != null) {
+      return;
+    }
+
+    newArrayLengthInsns = new ArrayList<MInsn>();
+
+    for (MInsn mInsn : mutatableCode.getInstructions()) {
+      if (isNewArray(mInsn)) {
+        newArrayLengthInsns.add(mInsn);
+      }
+    }
+  }
+
+  @Override
+  protected boolean canMutate(MutatableCode mutatableCode) {
+    for (MInsn mInsn : mutatableCode.getInstructions()) {
+      // TODO: Add filled-new-array and filled-new-array/range with their respective
+      // positions of registers and also proper encoding.
+      if (isNewArray(mInsn)) {
+        return true;
+      }
+    }
+    Log.debug("No New Array instruction in method, skipping...");
+    return false;
+  }
+
+  @Override
+  protected Mutation generateMutation(MutatableCode mutatableCode) {
+    generateCachedArrayLengthInsns(mutatableCode);
+
+    int newArrayIdx = rng.nextInt(newArrayLengthInsns.size());
+
+    AssociatedMutation mutation = new AssociatedMutation();
+    mutation.setup(this.getClass(), mutatableCode);
+    mutation.newArrayToChangeIdx = newArrayIdx;
+    return mutation;
+  }
+
+  @Override
+  protected void applyMutation(Mutation uncastMutation) {
+    // Cast the Mutation to our AssociatedMutation, so we can access its fields.
+    AssociatedMutation mutation = (AssociatedMutation) uncastMutation;
+    MutatableCode mutatableCode = mutation.mutatableCode;
+    MInsn newArrayInsn = newArrayLengthInsns.get(mutation.newArrayToChangeIdx);
+    int newArrayInsnIdx = mutatableCode.getInstructionIndex(newArrayInsn);
+
+    MInsn newInsn = new MInsn();
+    newInsn.insn = new Instruction();
+    newInsn.insn.info = Instruction.getOpcodeInfo(Opcode.CONST_16);
+    newInsn.insn.vregA = (int) newArrayInsn.insn.vregB;
+    // New length chosen randomly between 0 and 99 (inclusive).
+    newInsn.insn.vregB = rng.nextInt(100);
+    mutatableCode.insertInstructionAt(newInsn, newArrayInsnIdx);
+    Log.info("Changed the length of the array to " + newInsn.insn.vregB);
+    stats.incrementStat("Changed length of new array");
+  }
+
+  private boolean isNewArray(MInsn mInsn) {
+    Opcode opcode = mInsn.insn.info.opcode;
+    return opcode == Opcode.NEW_ARRAY;
+  }
+}
\ No newline at end of file
diff --git a/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java b/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java
new file mode 100644
index 0000000..11da1d4
--- /dev/null
+++ b/tools/dexfuzz/src/dexfuzz/program/mutators/RegisterClobber.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package dexfuzz.program.mutators;
+
+import dexfuzz.Log;
+import dexfuzz.MutationStats;
+import dexfuzz.program.MInsn;
+import dexfuzz.program.MutatableCode;
+import dexfuzz.program.Mutation;
+import dexfuzz.rawdex.Instruction;
+import dexfuzz.rawdex.Opcode;
+
+import java.util.List;
+import java.util.Random;
+
+public class RegisterClobber extends CodeMutator{
+
+  /**
+   * Every CodeMutator has an AssociatedMutation, representing the
+   * mutation that this CodeMutator can perform, to allow separate
+   * generateMutation() and applyMutation() phases, allowing serialization.
+   */
+  public static class AssociatedMutation extends Mutation{
+
+    int regClobberIdx;
+
+    @Override
+    public String getString() {
+      return Integer.toString(regClobberIdx);
+    }
+
+    @Override
+    public void parseString(String[] elements) {
+      Integer.parseInt(elements[2]);
+    }
+  }
+
+  // The following two methods are here for the benefit of MutationSerializer,
+  // so it can create a CodeMutator and get the correct associated Mutation, as it
+  // reads in mutations from a dump of mutations.
+  @Override
+  public Mutation getNewMutation() {
+    return new AssociatedMutation();
+  }
+
+  public RegisterClobber() {}
+
+  public RegisterClobber(Random rng, MutationStats stats, List<Mutation> mutations) {
+    super(rng, stats, mutations);
+    likelihood = 40;
+  }
+
+  @Override
+  protected boolean canMutate(MutatableCode mutatableCode) {
+    return mutatableCode.registersSize > 0;
+  }
+
+  @Override
+  protected Mutation generateMutation(MutatableCode mutatableCode) {
+    int insertionIdx = rng.nextInt(mutatableCode.getInstructionCount());
+
+    AssociatedMutation mutation = new AssociatedMutation();
+    mutation.setup(this.getClass(), mutatableCode);
+    mutation.regClobberIdx = insertionIdx;
+    return mutation;
+  }
+
+  @Override
+  protected void applyMutation(Mutation uncastMutation) {
+    AssociatedMutation mutation = (AssociatedMutation) uncastMutation;
+    MutatableCode mutatableCode = mutation.mutatableCode;
+
+    int totalRegUsed = mutatableCode.registersSize;
+    for (int i = 0; i < totalRegUsed; i++) {
+      MInsn newInsn = new MInsn();
+      newInsn.insn = new Instruction();
+      newInsn.insn.info = Instruction.getOpcodeInfo(Opcode.CONST_16);
+      newInsn.insn.vregA = i;
+      newInsn.insn.vregB = 0;
+      mutatableCode.insertInstructionAt(newInsn, mutation.regClobberIdx + i);
+    }
+
+    Log.info("Assigned zero to the registers from 0 to " + (totalRegUsed - 1) +
+        " at index " + mutation.regClobberIdx);
+    stats.incrementStat("Clobbered the registers");
+  }
+}
\ No newline at end of file
diff --git a/tools/runtime_memusage/README b/tools/runtime_memusage/README
index 2543df1..2af1de5 100644
--- a/tools/runtime_memusage/README
+++ b/tools/runtime_memusage/README
@@ -40,6 +40,17 @@
 
 ===========================================================================
 Usage: sanitizer_logcat_analysis.sh [options] [LOGCAT_FILE] [CATEGORIES...]
+    -a
+        Forces all pids associated with registered dex
+        files in the logcat to be processed.
+        default: only the last pid is processed
+
+    -b  [DEX_FILE_NUMBER]
+        Outputs data for the specified baksmali
+        dump if -p is provided.
+        default: first baksmali dump in order of dex
+          file registration
+
     -d  OUT_DIRECTORY
         Puts all output in specified directory.
         If not given, output will be put in a local
@@ -52,14 +63,31 @@
         the -m argument or by prune_sanitizer_output.py
 
     -f
-        forces redo of all commands even if output
-        files exist.
+        Forces redo of all commands even if output
+        files exist. Steps are skipped if their output
+        exist already and this is not enabled.
 
     -m  [MINIMUM_CALLS_PER_TRACE]
         Filters out all traces that do not have
         at least MINIMUM_CALLS_PER_TRACE lines.
         default: specified by prune_sanitizer_output.py
 
+    -o  [OFFSET],[OFFSET]
+        Filters out all Dex File offsets outside the
+        range between provided offsets. 'inf' can be
+        provided for infinity.
+        default: 0,inf
+
+    -p  [PACKAGE_NAME]
+        Using the package name, uses baksmali to get
+        a dump of the Dex File format for the package.
+
+    -t  [TIME_OFFSET],[TIME_OFFSET]
+        Filters out all time offsets outside the
+        range between provided offsets. 'inf' can be
+        provided for infinity.
+        default: 0,inf
+
     CATEGORIES are words that are expected to show in
        a large subset of symbolized traces. Splits
        output based on each word.
diff --git a/tools/runtime_memusage/prune_sanitizer_output.py b/tools/runtime_memusage/prune_sanitizer_output.py
index d95b2ce..3cc51cf 100755
--- a/tools/runtime_memusage/prune_sanitizer_output.py
+++ b/tools/runtime_memusage/prune_sanitizer_output.py
@@ -33,7 +33,7 @@
     """
     # Hard coded string are necessary since each trace must have the address
     # accessed, which is printed before trace lines.
-    if match == "use-after-poison":
+    if match == "use-after-poison" or match == "unknown-crash":
         return -2
     elif match == "READ":
         return -1
@@ -43,6 +43,9 @@
 
 def clean_trace_if_valid(trace, stack_min_size, prune_exact):
     """Cleans trace if it meets a certain standard. Returns None otherwise."""
+    # Note: Sample input may contain "unknown-crash" instead of
+    # "use-after-poison"
+    #
     # Sample input:
     #   trace:
     # "...ERROR: AddressSanitizer: use-after-poison on address 0x0071126a870a...
@@ -68,6 +71,7 @@
     trace_line_matches = [(match_to_int(match.group()), match.start())
                           for match in re.finditer("#[0-9]+ "
                                                    "|use-after-poison"
+                                                   "|unknown-crash"
                                                    "|READ", trace)
                           ]
     # Finds the first index where the line number ordering isn't in sequence or
@@ -135,16 +139,17 @@
                          ]
     trace_clean_split = [trace for trace in trace_clean_split
                          if trace is not None]
-
-    outfile = os.path.join(out_dir_name, trace_file.name + "_filtered")
+    filename = os.path.basename(trace_file.name + "_filtered")
+    outfile = os.path.join(out_dir_name, filename)
     with open(outfile, "w") as output_file:
         output_file.write(STACK_DIVIDER.join(trace_clean_split))
 
     filter_percent = 100.0 - (float(len(trace_clean_split)) /
                               len(trace_split) * 100)
     filter_amount = len(trace_split) - len(trace_clean_split)
-    print("Filtered out %d (%f%%) of %d."
-          % (filter_amount, filter_percent, len(trace_split)))
+    print("Filtered out %d (%f%%) of %d. %d (%f%%) remain."
+          % (filter_amount, filter_percent, len(trace_split),
+             len(trace_split) - filter_amount, 1 - filter_percent))
 
 
 if __name__ == "__main__":
diff --git a/tools/runtime_memusage/sanitizer_logcat_analysis.sh b/tools/runtime_memusage/sanitizer_logcat_analysis.sh
index 66b48fa..e1a8161 100755
--- a/tools/runtime_memusage/sanitizer_logcat_analysis.sh
+++ b/tools/runtime_memusage/sanitizer_logcat_analysis.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Copyright (C) 2017 The Android Open Source Project
 #
@@ -22,11 +22,12 @@
 USE_TEMP=true
 DO_REDO=false
 PACKAGE_NAME=""
+BAKSMALI_NUM=0
 # EXACT_ARG and MIN_ARG are passed to prune_sanitizer_output.py
 EXACT_ARG=""
-MIN_ARG=""
-OFFSET_ARGS=""
-TIME_ARGS=""
+MIN_ARG=()
+OFFSET_ARGS=()
+TIME_ARGS=()
 usage() {
   echo "Usage: $0 [options] [LOGCAT_FILE] [CATEGORIES...]"
   echo "    -a"
@@ -34,6 +35,12 @@
   echo "        files in the logcat to be processed."
   echo "        default: only the last pid is processed"
   echo
+  echo "    -b  [DEX_FILE_NUMBER]"
+  echo "        Outputs data for the specified baksmali"
+  echo "        dump if -p is provided."
+  echo "        default: first baksmali dump in order of dex"
+  echo "          file registration"
+  echo
   echo "    -d  OUT_DIRECTORY"
   echo "        Puts all output in specified directory."
   echo "        If not given, output will be put in a local"
@@ -80,11 +87,18 @@
 }
 
 
-while getopts ":ad:efm:o:p:t:" opt ; do
+while getopts ":ab:d:efm:o:p:t:" opt ; do
 case ${opt} in
   a)
     ALL_PIDS=true
     ;;
+  b)
+    if ! [[ "$OPTARG" =~ ^[0-9]+$ ]]; then
+      usage
+      exit
+    fi
+    BAKSMALI_NUM=$OPTARG
+    ;;
   d)
     USE_TEMP=false
     OUT_DIR=$OPTARG
@@ -96,35 +110,37 @@
     DO_REDO=true
     ;;
   m)
-    if ! [ "$OPTARG" -eq "$OPTARG" ]; then
+    if ! [[ "$OPTARG" =~ ^[0-9]+$ ]]; then
       usage
       exit
     fi
-    MIN_ARG='-m '"$OPTARG"
+    MIN_ARG=( "-m" "$OPTARG" )
     ;;
   o)
     set -f
-    OLD_IFS=$IFS
+    old_ifs=$IFS
     IFS=","
     OFFSET_ARGS=( $OPTARG )
-    if [ "${#OFFSET_ARGS[@]}" -ne 2 ]; then
+    if [[ "${#OFFSET_ARGS[@]}" -ne 2 ]]; then
       usage
       exit
     fi
     OFFSET_ARGS=( "--offsets" "${OFFSET_ARGS[@]}" )
-    IFS=$OLD_IFS
+    IFS=$old_ifs
+    set +f
     ;;
   t)
     set -f
-    OLD_IFS=$IFS
+    old_ifs=$IFS
     IFS=","
     TIME_ARGS=( $OPTARG )
-    if [ "${#TIME_ARGS[@]}" -ne 2 ]; then
+    if [[ "${#TIME_ARGS[@]}" -ne 2 ]]; then
       usage
       exit
     fi
     TIME_ARGS=( "--times" "${TIME_ARGS[@]}" )
-    IFS=$OLD_IFS
+    IFS=$old_ifs
+    set +f
     ;;
   p)
     PACKAGE_NAME=$OPTARG
@@ -136,7 +152,7 @@
 done
 shift $((OPTIND -1))
 
-if [ $# -lt 1 ]; then
+if [[ $# -lt 1 ]]; then
   usage
   exit
 fi
@@ -145,21 +161,24 @@
 NUM_CAT=$(($# - 1))
 
 # Use a temp directory that will be deleted
-if [ $USE_TEMP = true ]; then
-  OUT_DIR=$(mktemp -d --tmpdir=$PWD)
+if [[ $USE_TEMP = true ]]; then
+  OUT_DIR=$(mktemp -d --tmpdir="$PWD")
   DO_REDO=true
 fi
 
-if [ ! -d "$OUT_DIR" ]; then
-  mkdir $OUT_DIR
+if [[ ! -d "$OUT_DIR" ]]; then
+  mkdir "$OUT_DIR"
   DO_REDO=true
 fi
 
 # Note: Steps are skipped if their output exists until -f flag is enabled
 echo "Output folder: $OUT_DIR"
-unique_pids=( $(grep "RegisterDexFile" "$LOGCAT_FILE" | grep -v "zygote64" | tr -s ' ' | cut -f3 -d' ' | awk '!a[$0]++') )
+# Finds the lines matching pattern criteria and prints out unique instances of
+# the 3rd word (PID)
+unique_pids=( $(awk '/RegisterDexFile:/ && !/zygote/ {if(!a[$3]++) print $3}' \
+  "$LOGCAT_FILE") )
 echo "List of pids: ${unique_pids[@]}"
-if [ $ALL_PIDS = false ]; then
+if [[ $ALL_PIDS = false ]]; then
   unique_pids=( ${unique_pids[-1]} )
 fi
 
@@ -168,99 +187,142 @@
   echo
   echo "Current pid: $pid"
   echo
-  PID_DIR=$OUT_DIR/$pid
-  if [ ! -d "$PID_DIR" ]; then
-    mkdir $PID_DIR
+  pid_dir=$OUT_DIR/$pid
+  if [[ ! -d "$pid_dir" ]]; then
+    mkdir "$pid_dir"
     DO_REDO[$pid]=true
   fi
 
-  INTERMEDIATES_DIR=$PID_DIR/intermediates
-  RESULTS_DIR=$PID_DIR/results
-  LOGCAT_PID_FILE=$PID_DIR/logcat
+  intermediates_dir=$pid_dir/intermediates
+  results_dir=$pid_dir/results
+  logcat_pid_file=$pid_dir/logcat
 
-  if [ ! -f "$PID_DIR/logcat" ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
+  if [[ ! -f "$logcat_pid_file" ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
     DO_REDO[$pid]=true
-    awk '{if($3 == '$pid') print $0}' $LOGCAT_FILE > $LOGCAT_PID_FILE
+    awk "{if(\$3 == $pid) print \$0}" "$LOGCAT_FILE" > "$logcat_pid_file"
   fi
 
-  if [ ! -d "$INTERMEDIATES_DIR" ]; then
-    mkdir $INTERMEDIATES_DIR
+  if [[ ! -d "$intermediates_dir" ]]; then
+    mkdir "$intermediates_dir"
     DO_REDO[$pid]=true
   fi
 
   # Step 1 - Only output lines related to Sanitizer
   # Folder that holds all file output
-  ASAN_OUT=$INTERMEDIATES_DIR/asan_output
-  if [ ! -f $ASAN_OUT ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
+  asan_out=$intermediates_dir/asan_output
+  if [[ ! -f "$asan_out" ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
     DO_REDO[$pid]=true
     echo "Extracting ASAN output"
-    grep "app_process64" $LOGCAT_PID_FILE > $ASAN_OUT
+    grep "app_process64" "$logcat_pid_file" > "$asan_out"
   else
     echo "Skipped: Extracting ASAN output"
   fi
 
   # Step 2 - Only output lines containing Dex File Start Addresses
-  DEX_START=$INTERMEDIATES_DIR/dex_start
-  if [ ! -f $DEX_START ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
+  dex_start=$intermediates_dir/dex_start
+  if [[ ! -f "$dex_start" ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
     DO_REDO[$pid]=true
     echo "Extracting Start of Dex File(s)"
-    grep "RegisterDexFile" $LOGCAT_PID_FILE > $DEX_START
+    if [[ ! -z "$PACKAGE_NAME" ]]; then
+      awk '/RegisterDexFile:/ && /'"$PACKAGE_NAME"'/ && /\/data\/app/' \
+        "$logcat_pid_file" > "$dex_start"
+    else
+      grep "RegisterDexFile:" "$logcat_pid_file" > "$dex_start"
+    fi
   else
     echo "Skipped: Extracting Start of Dex File(s)"
   fi
 
   # Step 3 - Clean Sanitizer output from Step 2 since logcat cannot
   # handle large amounts of output.
-  ASAN_OUT_FILTERED=$INTERMEDIATES_DIR/asan_output_filtered
-  if [ ! -f $ASAN_OUT_FILTERED ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
+  asan_out_filtered=$intermediates_dir/asan_output_filtered
+  if [[ ! -f "$asan_out_filtered" ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
     DO_REDO[$pid]=true
     echo "Filtering/Cleaning ASAN output"
-    python $ANDROID_BUILD_TOP/art/tools/runtime_memusage/prune_sanitizer_output.py \
-    $EXACT_ARG $MIN_ARG -d $INTERMEDIATES_DIR $ASAN_OUT
+    python "$ANDROID_BUILD_TOP"/art/tools/runtime_memusage/prune_sanitizer_output.py \
+      "$EXACT_ARG" "${MIN_ARG[@]}" -d "$intermediates_dir" "$asan_out"
   else
     echo "Skipped: Filtering/Cleaning ASAN output"
   fi
 
   # Step 4 - Retrieve symbolized stack traces from Step 3 output
-  SYM_FILTERED=$INTERMEDIATES_DIR/sym_filtered
-  if [ ! -f $SYM_FILTERED ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
+  sym_filtered=$intermediates_dir/sym_filtered
+  if [[ ! -f "$sym_filtered" ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
     DO_REDO[$pid]=true
     echo "Retrieving symbolized traces"
-    $ANDROID_BUILD_TOP/development/scripts/stack $ASAN_OUT_FILTERED > $SYM_FILTERED
+    "$ANDROID_BUILD_TOP"/development/scripts/stack "$asan_out_filtered" \
+      > "$sym_filtered"
   else
     echo "Skipped: Retrieving symbolized traces"
   fi
 
   # Step 4.5 - Obtain Dex File Format of dex file related to package
-  BAKSMALI_DMP_OUT="$INTERMEDIATES_DIR""/baksmali_dex_file"
-  BAKSMALI_DMP_ARG="--dex-file="$BAKSMALI_DMP_OUT
-  if [ ! -f $BAKSMALI_DMP_OUT ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
-    if [ $PACKAGE_NAME != "" ]; then
+  filtered_dex_start=$intermediates_dir/filtered_dex_start
+  baksmali_dmp_ctr=0
+  baksmali_dmp_prefix=$intermediates_dir"/baksmali_dex_file_"
+  baksmali_dmp_files=( $baksmali_dmp_prefix* )
+  baksmali_dmp_arg="--dex-file "${baksmali_dmp_files[$BAKSMALI_NUM]}
+  apk_dex_files=( )
+  if [[ ! -f "$baksmali_dmp_prefix""$BAKSMALI_NUM" ]] || \
+     [[ ! -f "$filtered_dex_start" ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
+    if [[ ! -z "$PACKAGE_NAME" ]]; then
+      DO_REDO[$pid]=true
       # Extracting Dex File path on device from Dex File related to package
-      apk_directory=$(dirname $(grep $PACKAGE_NAME $DEX_START | tail -n1 | awk '{print $8}'))
-      apk_dex_files=$(adb shell find $apk_directory -name "*.?dex" -type f 2> /dev/null)
-      for apk_file in $apk_dex_files; do
-        base_name=$(basename $apk_file)
-        adb pull $apk_file $INTERMEDIATES_DIR/base."${base_name#*.}"
+      apk_directory=$(dirname "$(tail -n1 "$dex_start" | awk "{print \$8}")")
+      for dex_file in $(awk "{print \$8}" "$dex_start"); do
+        apk_dex_files+=( $(basename "$dex_file") )
       done
-      oatdump --oat-file=$INTERMEDIATES_DIR/base.odex --export-dex-to=$INTERMEDIATES_DIR --output=/dev/null
-      export_dex=( $INTERMEDIATES_DIR/*apk_export* )
-      baksmali -JXmx1024M dump $export_dex > $BAKSMALI_DMP_OUT 2> /dev/null
-      if ! [ -s $BAKSMALI_DMP_OUT ]; then
-        rm $BAKSMALI_DMP_OUT
-        BAKSMALI_DMP_ARG=""
-        echo "Failed to retrieve Dex File format"
-      fi
+      apk_oat_files=$(adb shell find "$apk_directory" -name "*.?dex" -type f \
+        2> /dev/null)
+      # Pulls the .odex and .vdex files associated with the package
+      for apk_file in $apk_oat_files; do
+        base_name=$(basename "$apk_file")
+        adb pull "$apk_file" "$intermediates_dir/base.${base_name#*.}"
+      done
+      oatdump --oat-file="$intermediates_dir"/base.odex \
+        --export-dex-to="$intermediates_dir" --output=/dev/null
+      for dex_file in "${apk_dex_files[@]}"; do
+        exported_dex_file=$intermediates_dir/$dex_file"_export.dex"
+        baksmali_dmp_out="$baksmali_dmp_prefix""$((baksmali_dmp_ctr++))"
+        baksmali -JXmx1024M dump "$exported_dex_file" \
+          > "$baksmali_dmp_out" 2> "$intermediates_dir"/error
+        if ! [[ -s "$baksmali_dmp_out" ]]; then
+          rm "$baksmali_dmp_prefix"*
+          baksmali_dmp_arg=""
+          echo "Failed to retrieve Dex File format"
+          break
+        fi
+      done
+      baksmali_dmp_files=( "$baksmali_dmp_prefix"* )
+      baksmali_dmp_arg="--dex-file "${baksmali_dmp_files[$BAKSMALI_NUM]}
+      # Gets the baksmali dump associated with BAKSMALI_NUM
+      awk "NR == $((BAKSMALI_NUM + 1))" "$dex_start" > "$filtered_dex_start"
+      results_dir=$results_dir"_"$BAKSMALI_NUM
     else
-      BAKSMALI_DMP_ARG=""
-      echo "Failed to retrieve Dex File format"
+      cp "$dex_start" "$filtered_dex_start"
+      baksmali_dmp_arg=""
+      echo "Skipped: Retrieving Dex File format from baksmali; no package given"
     fi
   else
+    awk "NR == $((BAKSMALI_NUM + 1))" "$dex_start" > "$filtered_dex_start"
+    results_dir=$results_dir"_"$BAKSMALI_NUM
     echo "Skipped: Retrieving Dex File format from baksmali"
   fi
 
-  if [ ! -d "$RESULTS_DIR" ]; then
-    mkdir $RESULTS_DIR
+  if [[ ! -d "$results_dir" ]]; then
+    mkdir "$results_dir"
     DO_REDO[$pid]=true
   fi
 
@@ -268,35 +330,45 @@
   # and trace data
   # Only the category names are needed for the commands giving final output
   shift
-  TIME_OUTPUT=($RESULTS_DIR/time_output_*.dat)
-  if [ ! -e ${TIME_OUTPUT[0]} ] || [ "${DO_REDO[$pid]}" = true ] || [ $DO_REDO = true ]; then
+  time_output=($results_dir/time_output_*.dat)
+  if [[ ! -e ${time_output[0]} ]] || \
+     [[ "${DO_REDO[$pid]}" = true ]] || \
+     [[ $DO_REDO = true ]]; then
     DO_REDO[$pid]=true
     echo "Creating Categorized Time Table"
-    python $ANDROID_BUILD_TOP/art/tools/runtime_memusage/symbol_trace_info.py \
-      -d $RESULTS_DIR ${OFFSET_ARGS[@]} ${TIME_ARGS[@]} $BAKSMALI_DMP_ARG $ASAN_OUT_FILTERED $SYM_FILTERED $DEX_START $@
+    baksmali_dmp_args=( $baksmali_dmp_arg )
+    python "$ANDROID_BUILD_TOP"/art/tools/runtime_memusage/symbol_trace_info.py \
+      -d "$results_dir" "${OFFSET_ARGS[@]}" "${baksmali_dmp_args[@]}" \
+      "${TIME_ARGS[@]}" "$asan_out_filtered" "$sym_filtered" \
+      "$filtered_dex_start" "$@"
   else
     echo "Skipped: Creating Categorized Time Table"
   fi
 
   # Step 6 - Use graph data from Step 5 to plot graph
   # Contains the category names used for legend of gnuplot
-  PLOT_CATS=`echo \"Uncategorized $@\"`
-  PACKAGE_STRING=""
-  if [ $PACKAGE_NAME != "" ]; then
-    PACKAGE_STRING="Package name: "$PACKAGE_NAME" "
+  plot_cats="\"Uncategorized $*\""
+  package_string=""
+  dex_name=""
+  if [[ ! -z "$PACKAGE_NAME" ]]; then
+    package_string="Package name: $PACKAGE_NAME "
+  fi
+  if [[ ! -z "$baksmali_dmp_arg" ]]; then
+    dex_file_path="$(awk "{print \$8}" "$filtered_dex_start" | tail -n1)"
+    dex_name="Dex File name: $(basename "$dex_file_path") "
   fi
   echo "Plotting Categorized Time Table"
   # Plots the information from logcat
   gnuplot --persist -e \
-    'filename(n) = sprintf("'"$RESULTS_DIR"'/time_output_%d.dat", n);
-     catnames = '"$PLOT_CATS"';
-     set title "'"$PACKAGE_STRING"'PID: '"$pid"'";
+    'filename(n) = sprintf("'"$results_dir"'/time_output_%d.dat", n);
+     catnames = '"$plot_cats"';
+     set title "'"$package_string""$dex_name"'PID: '"$pid"'";
      set xlabel "Time (milliseconds)";
      set ylabel "Dex File Offset (bytes)";
      plot for [i=0:'"$NUM_CAT"'] filename(i) using 1:2 title word(catnames, i + 1);'
 
-  if [ $USE_TEMP = true ]; then
+  if [[ $USE_TEMP = true ]]; then
     echo "Removing temp directory and files"
-    rm -rf $OUT_DIR
+    rm -rf "$OUT_DIR"
   fi
 done
diff --git a/tools/runtime_memusage/symbol_trace_info.py b/tools/runtime_memusage/symbol_trace_info.py
index a5ced38..22f8ee9 100755
--- a/tools/runtime_memusage/symbol_trace_info.py
+++ b/tools/runtime_memusage/symbol_trace_info.py
@@ -38,15 +38,15 @@
 
 def absolute_to_relative(data_lists, symbol_traces):
     """Address changed to Dex File offset and shifting time to 0 min in ms."""
-    plot_list = data_lists["plot_list"]
-    dex_start_list = data_lists["dex_start_list"]
-    cat_list = data_lists["cat_list"]
+
     offsets = data_lists["offsets"]
-    time_offsets = data_lists["time_offsets"]
+    time_offsets = data_lists["times"]
+
+    # Format of time provided by logcat
     time_format_str = "%H:%M:%S.%f"
-    first_access_time = datetime.strptime(plot_list[0][0],
+    first_access_time = datetime.strptime(data_lists["plot_list"][0][0],
                                           time_format_str)
-    for ind, elem in enumerate(plot_list):
+    for ind, elem in enumerate(data_lists["plot_list"]):
         elem_date_time = datetime.strptime(elem[0], time_format_str)
         # Shift time values so that first access is at time 0 milliseconds
         elem[0] = int((elem_date_time - first_access_time).total_seconds() *
@@ -54,25 +54,23 @@
         address_access = int(elem[1], 16)
         # For each poisoned address, find highest Dex File starting address less
         # than address_access
-        dex_file_start = dex_start_list[bisect.bisect(dex_start_list,
-                                                      address_access) - 1
-                                        ]
-        dex_offset = address_access - dex_file_start
+        dex_start_list, dex_size_list = zip(*data_lists["dex_ends_list"])
+        dex_file_ind = bisect.bisect(dex_start_list, address_access) - 1
+        dex_offset = address_access - dex_start_list[dex_file_ind]
+        # Assumes that offsets is already sorted and constrains offset to be
+        # within range of the dex_file
+        max_offset = min(offsets[1], dex_size_list[dex_file_ind])
         # Meant to nullify data that does not meet offset criteria if specified
-        # Assumes that offsets is already sorted
-        if (dex_offset >= offsets[0] and dex_offset < offsets[1] and
-            elem[0] >= time_offsets[0] and elem[0] < time_offsets[1]):
+        if (dex_offset >= offsets[0] and dex_offset < max_offset and
+                elem[0] >= time_offsets[0] and elem[0] < time_offsets[1]):
 
             elem.insert(1, dex_offset)
             # Category that a data point belongs to
-            elem.insert(2, cat_list[ind])
+            elem.insert(2, data_lists["cat_list"][ind])
         else:
-            elem[0] = None
-            elem[1] = None
-            elem.append(None)
-            elem.append(None)
+            elem[:] = 4 * [None]
             symbol_traces[ind] = None
-            cat_list[ind] = None
+            data_lists["cat_list"][ind] = None
 
 
 def print_category_info(cat_split, outname, out_dir_name, title):
@@ -98,7 +96,7 @@
 def print_categories(categories, symbol_file_split, out_dir_name):
     """Prints details of all categories."""
     symbol_file_split = [trace for trace in symbol_file_split
-                          if trace is not None]
+                         if trace is not None]
     # Info of traces containing a call to current category
     for cat_num, cat_name in enumerate(categories[1:]):
         print("\nCategory #%d" % (cat_num + 1))
@@ -184,8 +182,8 @@
 def get_dex_offset_data(line, dex_file_item):
     """ Returns a tuple of dex file offset, item name, and data of a line."""
     return (int(line[:line.find(":")], 16),
-                (dex_file_item,
-                 line.split("|")[1].strip())
+            (dex_file_item,
+             line.split("|")[1].strip())
             )
 
 
@@ -206,27 +204,28 @@
     logcat_file_data = parsed_argv.sanitizer_trace.readlines()
     parsed_argv.sanitizer_trace.close()
 
-    symbol_file_split = parsed_argv.symbol_trace.read().split("Stack Trace")[
-        1:]
+    symbol_file_split = parsed_argv.symbol_trace.read().split("Stack Trace")
+    # Removes text before first trace
+    symbol_file_split = symbol_file_split[1:]
     parsed_argv.symbol_trace.close()
 
     dex_start_file_data = parsed_argv.dex_starts.readlines()
     parsed_argv.dex_starts.close()
 
-    if parsed_argv.dex_file != None:
+    if parsed_argv.dex_file is not None:
         dex_file_data = parsed_argv.dex_file.read()
         parsed_argv.dex_file.close()
         # Splits baksmali dump by each item
         item_split = [s.splitlines() for s in re.split(r"\|\[[0-9]+\] ",
-                                                          dex_file_data)]
+                                                       dex_file_data)]
         # Splits each item by line and creates a list of offsets and a
         # corresponding list of the data associated with that line
         offset_list, offset_data = zip(*[get_dex_offset_data(line, item[0])
-                                      for item in item_split
+                                         for item in item_split
                                          for line in item[1:]
-                                            if re.search("[0-9a-f]{6}:", line)
-                                                is not None
-                                            and line.find("|") != -1])
+                                         if re.search("[0-9a-f]{6}:", line)
+                                         is not None and
+                                         line.find("|") != -1])
         data_lists["offset_list"] = offset_list
         data_lists["offset_data"] = offset_data
     else:
@@ -237,7 +236,8 @@
                                 if elem[0] in (1, 11)
                                 ]
                                for line in logcat_file_data
-                               if "use-after-poison" in line
+                               if "use-after-poison" in line or
+                               "unknown-crash" in line
                                ]
     # Contains a mapping between traces and the category they belong to
     # based on arguments
@@ -246,27 +246,25 @@
 
     # Contains a list of starting address of all dex files to calculate dex
     # offsets
-    data_lists["dex_start_list"] = [int(line.split("@")[1], 16)
-                                    for line in dex_start_file_data
-                                    if "RegisterDexFile" in line
-                                    ]
+    data_lists["dex_ends_list"] = [(int(line.split()[9], 16),
+                                    int(line.split()[12])
+                                    )
+                                   for line in dex_start_file_data
+                                   if "RegisterDexFile" in line
+                                   ]
     # Dex File Starting addresses must be sorted because bisect requires sorted
     # lists.
-    data_lists["dex_start_list"].sort()
+    data_lists["dex_ends_list"].sort()
 
     return data_lists, categories, symbol_file_split
 
 
 def main():
     """Takes in trace information and outputs details about them."""
-
     parsed_argv = parse_args(None)
     data_lists, categories, symbol_file_split = read_data(parsed_argv)
 
     # Formats plot_list such that each element is a data point
-    #absolute_to_relative(data_lists["plot_list"], data_lists["dex_start_list"],
-    #                        data_lists["cat_list"], data_lists["offsets"],
-    #                            data_lists["times"], symbol_file_split)
     absolute_to_relative(data_lists, symbol_file_split)
     for file_ext, cat_name in enumerate(categories):
         out_file_name = os.path.join(parsed_argv.out_dir_name, "time_output_" +
@@ -287,10 +285,10 @@
                         hex(dex_offset) +
                         " " +
                         str(address))
-                    if data_lists.has_key("offset_list"):
+                    if "offset_list" in data_lists:
                         dex_offset_index = bisect.bisect(
-                                              data_lists["offset_list"],
-                                                            dex_offset) - 1
+                            data_lists["offset_list"],
+                            dex_offset) - 1
                         aligned_dex_offset = (data_lists["offset_list"]
                                                         [dex_offset_index])
                         dex_offset_data = (data_lists["offset_data"]