Merge "Let jvmti allow JDWP connections"
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
new file mode 100644
index 0000000..cf1832b
--- /dev/null
+++ b/PREUPLOAD.cfg
@@ -0,0 +1,2 @@
+[Hook Scripts]
+check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
diff --git a/build/Android.bp b/build/Android.bp
index cd9d74a..b1553c7 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -70,6 +70,8 @@
         "-DART_STACK_OVERFLOW_GAP_mips64=16384",
         "-DART_STACK_OVERFLOW_GAP_x86=8192",
         "-DART_STACK_OVERFLOW_GAP_x86_64=8192",
+        // Enable thread annotations for std::mutex, etc.
+        "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
     ],
 
     target: {
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index bc08384..e525808 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -113,6 +113,7 @@
 ART_GTEST_stub_test_DEX_DEPS := AllFields
 ART_GTEST_transaction_test_DEX_DEPS := Transaction
 ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup
+ART_GTEST_unstarted_runtime_test_DEX_DEPS := Nested
 ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps MultiDex
 ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler
 
diff --git a/build/art.go b/build/art.go
index baa6e59..e7f7e21 100644
--- a/build/art.go
+++ b/build/art.go
@@ -68,10 +68,6 @@
 		asflags = append(asflags,
 			"-DART_USE_READ_BARRIER=1",
 			"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
-
-		// Temporarily override -fstack-protector-strong with -fstack-protector to avoid a major
-		// slowdown with the read barrier config. b/26744236.
-		cflags = append(cflags, "-fstack-protector")
 	}
 
 	if envTrue(ctx, "ART_USE_VIXL_ARM_BACKEND") {
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 18a9165..cdd1e53 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -104,10 +104,10 @@
         for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
           StackMap stack_map = code_info.GetStackMapAt(s, encoding);
           DCHECK(stack_map.IsValid());
-          const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding, isa);
-          const int32_t dex = stack_map.GetDexPc(encoding.stack_map_encoding);
+          const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
+          const int32_t dex = stack_map.GetDexPc(encoding.stack_map.encoding);
           pc2dex_map.push_back({pc, dex});
-          if (stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+          if (stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
             // Guess that the first map with local variables is the end of prologue.
             prologue_end = std::min(prologue_end, pc);
           }
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index bce5387..cbfdbdd 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -104,7 +104,7 @@
   for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
     StackMap stack_map = code_info.GetStackMapAt(s, encoding);
     DCHECK(stack_map.IsValid());
-    if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+    if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
       // The compiler creates stackmaps without register maps at the start of
       // basic blocks in order to keep instruction-accurate line number mapping.
       // However, we never stop at those (breakpoint locations always have a map).
@@ -112,7 +112,7 @@
       // The main reason for this is to save space by avoiding undefined gaps.
       continue;
     }
-    const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map_encoding, isa);
+    const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
     DCHECK_LE(pc_offset, method_info->code_size);
     DCHECK_LE(compilation_unit_code_address, method_info->code_address);
     const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
@@ -136,7 +136,7 @@
     }
 
     // Check that the stack map is in the requested range.
-    uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map_encoding);
+    uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
     if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
       // The variable is not in scope at this PC. Therefore omit the entry.
       // Note that this is different from a None() entry, which means in scope but at an unknown location.
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index f056dd3..f296851 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -135,65 +135,6 @@
   return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx);
 }
 
-template <typename ArtMember>
-inline std::pair<bool, bool> CompilerDriver::IsClassOfStaticMemberAvailableToReferrer(
-    mirror::DexCache* dex_cache,
-    mirror::Class* referrer_class,
-    ArtMember* resolved_member,
-    uint16_t member_idx,
-    dex::TypeIndex* storage_index) {
-  DCHECK(resolved_member->IsStatic());
-  if (LIKELY(referrer_class != nullptr)) {
-    ObjPtr<mirror::Class> members_class = resolved_member->GetDeclaringClass();
-    if (members_class == referrer_class) {
-      *storage_index = members_class->GetDexTypeIndex();
-      return std::make_pair(true, true);
-    }
-    if (CanAccessResolvedMember<ArtMember>(
-        referrer_class, members_class.Ptr(), resolved_member, dex_cache, member_idx)) {
-      // We have the resolved member, we must make it into a index for the referrer
-      // in its static storage (which may fail if it doesn't have a slot for it)
-      // TODO: for images we can elide the static storage base null check
-      // if we know there's a non-null entry in the image
-      const DexFile* dex_file = dex_cache->GetDexFile();
-      dex::TypeIndex storage_idx(DexFile::kDexNoIndex16);
-      if (LIKELY(members_class->GetDexCache() == dex_cache)) {
-        // common case where the dex cache of both the referrer and the member are the same,
-        // no need to search the dex file
-        storage_idx = members_class->GetDexTypeIndex();
-      } else {
-        // Search dex file for localized ssb index, may fail if member's class is a parent
-        // of the class mentioned in the dex file and there is no dex cache entry.
-        storage_idx = resolved_member->GetDeclaringClass()->FindTypeIndexInOtherDexFile(*dex_file);
-      }
-      if (storage_idx.IsValid()) {
-        *storage_index = storage_idx;
-        return std::make_pair(true, !resolved_member->IsFinal());
-      }
-    }
-  }
-  // Conservative defaults.
-  *storage_index = dex::TypeIndex(DexFile::kDexNoIndex16);
-  return std::make_pair(false, false);
-}
-
-inline std::pair<bool, bool> CompilerDriver::IsFastStaticField(
-    mirror::DexCache* dex_cache, mirror::Class* referrer_class,
-    ArtField* resolved_field, uint16_t field_idx, dex::TypeIndex* storage_index) {
-  return IsClassOfStaticMemberAvailableToReferrer(
-      dex_cache, referrer_class, resolved_field, field_idx, storage_index);
-}
-
-inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer(
-    mirror::DexCache* dex_cache, mirror::Class* referrer_class,
-    ArtMethod* resolved_method, uint16_t method_idx, dex::TypeIndex* storage_index) {
-  std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer(
-      dex_cache, referrer_class, resolved_method, method_idx, storage_index);
-  // Only the first member of `result` is meaningful, as there is no
-  // "write access" to a method.
-  return result.first;
-}
-
 inline ArtMethod* CompilerDriver::ResolveMethod(
     ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
     Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 1d4eaf8..7af850a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -878,7 +878,7 @@
   MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
 
   for (const DexFile* dex_file : dex_files) {
-    dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file, false));
+    dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
     TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
 
     size_t class_def_count = dex_file->NumClassDefs();
@@ -1182,10 +1182,12 @@
       Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file,
                                                                                      nullptr)));
       Handle<mirror::Class> klass(hs2.NewHandle(
-          class_linker->ResolveType(*dex_file,
-                                    exception_type_idx,
-                                    dex_cache,
-                                    ScopedNullHandle<mirror::ClassLoader>())));
+          (dex_cache.Get() != nullptr)
+              ? class_linker->ResolveType(*dex_file,
+                                          exception_type_idx,
+                                          dex_cache,
+                                          ScopedNullHandle<mirror::ClassLoader>())
+              : nullptr));
       if (klass.Get() == nullptr) {
         const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
         const char* descriptor = dex_file->GetTypeDescriptor(type_id);
@@ -1776,7 +1778,7 @@
     Handle<mirror::ClassLoader> class_loader(
         hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
-        soa.Self(), dex_file, false)));
+        soa.Self(), dex_file)));
     // Resolve the class.
     mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache,
                                                      class_loader);
@@ -1875,10 +1877,9 @@
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
         dex_file,
         class_loader.Get())));
-    mirror::Class* klass = class_linker->ResolveType(dex_file,
-                                                     dex::TypeIndex(type_idx),
-                                                     dex_cache,
-                                                     class_loader);
+    ObjPtr<mirror::Class> klass = (dex_cache.Get() != nullptr)
+        ? class_linker->ResolveType(dex_file, dex::TypeIndex(type_idx), dex_cache, class_loader)
+        : nullptr;
 
     if (klass == nullptr) {
       soa.Self()->AssertPendingException();
@@ -2135,7 +2136,7 @@
        * will be rejected by the verifier and later skipped during compilation in the compiler.
        */
       Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
-          soa.Self(), dex_file, false)));
+          soa.Self(), dex_file)));
       std::string error_msg;
       failure_kind =
           verifier::MethodVerifier::VerifyClass(soa.Self(),
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 503fe3a..5b4c751 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -233,27 +233,6 @@
       ArtField* resolved_field, uint16_t field_idx)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
-  // of the declaring class in the referrer's dex file.
-  std::pair<bool, bool> IsFastStaticField(mirror::DexCache* dex_cache,
-                                          mirror::Class* referrer_class,
-                                          ArtField* resolved_field,
-                                          uint16_t field_idx,
-                                          dex::TypeIndex* storage_index)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Return whether the declaring class of `resolved_method` is
-  // available to `referrer_class`. If this is true, compute the type
-  // index of the declaring class in the referrer's dex file and
-  // return it through the out argument `storage_index`; otherwise
-  // return DexFile::kDexNoIndex through `storage_index`.
-  bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
-                                                mirror::Class* referrer_class,
-                                                ArtMethod* resolved_method,
-                                                uint16_t method_idx,
-                                                dex::TypeIndex* storage_index)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Resolve a method. Returns null on failure, including incompatible class change.
   ArtMethod* ResolveMethod(
       ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -379,21 +358,6 @@
   }
 
  private:
-  // Return whether the declaring class of `resolved_member` is
-  // available to `referrer_class` for read or write access using two
-  // Boolean values returned as a pair. If is true at least for read
-  // access, compute the type index of the declaring class in the
-  // referrer's dex file and return it through the out argument
-  // `storage_index`; otherwise return DexFile::kDexNoIndex through
-  // `storage_index`.
-  template <typename ArtMember>
-  std::pair<bool, bool> IsClassOfStaticMemberAvailableToReferrer(mirror::DexCache* dex_cache,
-                                                                 mirror::Class* referrer_class,
-                                                                 ArtMember* resolved_member,
-                                                                 uint16_t member_idx,
-                                                                 dex::TypeIndex* storage_index)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Can `referrer_class` access the resolved `member`?
   // Dispatch call to mirror::Class::CanAccessResolvedField or
   // mirror::Class::CanAccessResolvedMember depending on the value of
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index d5842a8..66111f6 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -149,11 +149,10 @@
                 File* oat_file,
                 const std::vector<const char*>& dex_filenames,
                 SafeMap<std::string, std::string>& key_value_store,
-                bool verify) {
+                bool verify,
+                ProfileCompilationInfo* profile_compilation_info) {
     TimingLogger timings("WriteElf", false, false);
-    OatWriter oat_writer(/*compiling_boot_image*/false,
-                         &timings,
-                         /*profile_compilation_info*/nullptr);
+    OatWriter oat_writer(/*compiling_boot_image*/false, &timings, profile_compilation_info);
     for (const char* dex_filename : dex_filenames) {
       if (!oat_writer.AddDexFileSource(dex_filename, dex_filename)) {
         return false;
@@ -264,7 +263,7 @@
     return true;
   }
 
-  void TestDexFileInput(bool verify, bool low_4gb);
+  void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
   void TestZipFileInput(bool verify);
 
   std::unique_ptr<const InstructionSetFeatures> insn_features_;
@@ -568,7 +567,7 @@
   }
 }
 
-void OatTest::TestDexFileInput(bool verify, bool low_4gb) {
+void OatTest::TestDexFileInput(bool verify, bool low_4gb, bool use_profile) {
   TimingLogger timings("OatTest::DexFileInput", false, false);
 
   std::vector<const char*> input_filenames;
@@ -606,11 +605,14 @@
   ScratchFile oat_file, vdex_file(oat_file, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
   key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
+  std::unique_ptr<ProfileCompilationInfo>
+      profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
   success = WriteElf(vdex_file.GetFile(),
                      oat_file.GetFile(),
                      input_filenames,
                      key_value_store,
-                     verify);
+                     verify,
+                     profile_compilation_info.get());
 
   // In verify mode, we expect failure.
   if (verify) {
@@ -654,15 +656,19 @@
 }
 
 TEST_F(OatTest, DexFileInputCheckOutput) {
-  TestDexFileInput(false, /*low_4gb*/false);
+  TestDexFileInput(/*verify*/false, /*low_4gb*/false, /*use_profile*/false);
 }
 
 TEST_F(OatTest, DexFileInputCheckOutputLow4GB) {
-  TestDexFileInput(false, /*low_4gb*/true);
+  TestDexFileInput(/*verify*/false, /*low_4gb*/true, /*use_profile*/false);
 }
 
 TEST_F(OatTest, DexFileInputCheckVerifier) {
-  TestDexFileInput(true, /*low_4gb*/false);
+  TestDexFileInput(/*verify*/true, /*low_4gb*/false, /*use_profile*/false);
+}
+
+TEST_F(OatTest, DexFileFailsVerifierWithLayout) {
+  TestDexFileInput(/*verify*/true, /*low_4gb*/false, /*use_profile*/true);
 }
 
 void OatTest::TestZipFileInput(bool verify) {
@@ -717,8 +723,8 @@
     std::vector<const char*> input_filenames { zip_file.GetFilename().c_str() };  // NOLINT [readability/braces] [4]
 
     ScratchFile oat_file, vdex_file(oat_file, ".vdex");
-    success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(),
-                       input_filenames, key_value_store, verify);
+    success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
+                       key_value_store, verify, /*profile_compilation_info*/nullptr);
 
     if (verify) {
       ASSERT_FALSE(success);
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bd2c5e3..7c0cdbf 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -1250,7 +1250,7 @@
   const ScopedObjectAccess soa_;
   const ScopedAssertNoThreadSuspension no_thread_suspension_;
   ClassLinker* const class_linker_;
-  mirror::DexCache* dex_cache_;
+  ObjPtr<mirror::DexCache> dex_cache_;
   std::vector<uint8_t> patched_code_;
 
   void ReportWriteFailure(const char* what, const ClassDataItemIterator& it) {
@@ -1261,7 +1261,7 @@
   ArtMethod* GetTargetMethod(const LinkerPatch& patch)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     MethodReference ref = patch.TargetMethod();
-    mirror::DexCache* dex_cache =
+    ObjPtr<mirror::DexCache> dex_cache =
         (dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
             Thread::Current(), *ref.dex_file);
     ArtMethod* method = dex_cache->GetResolvedMethod(
@@ -1295,7 +1295,7 @@
     return target_offset;
   }
 
-  mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
+  ObjPtr<mirror::DexCache> GetDexCache(const DexFile* target_dex_file)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return (target_dex_file == dex_file_)
         ? dex_cache_
@@ -1303,7 +1303,7 @@
   }
 
   mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
-    mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
+    ObjPtr<mirror::DexCache> dex_cache = GetDexCache(patch.TargetTypeDexFile());
     mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
     CHECK(type != nullptr);
     return type;
@@ -2266,6 +2266,10 @@
     File* raw_file = oat_dex_file->source_.GetRawFile();
     dex_file = DexFile::OpenDex(raw_file->Fd(), location, /* verify_checksum */ true, &error_msg);
   }
+  if (dex_file == nullptr) {
+    LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
+    return false;
+  }
   Options options;
   options.output_to_memmap_ = true;
   DexLayout dex_layout(options, profile_compilation_info_, nullptr);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 8cf4089..e4ad422 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -32,6 +32,8 @@
 
 namespace art {
 
+class CodeGenerator;
+
 class HGraphBuilder : public ValueObject {
  public:
   HGraphBuilder(HGraph* graph,
@@ -40,6 +42,7 @@
                 const DexFile* dex_file,
                 const DexFile::CodeItem& code_item,
                 CompilerDriver* driver,
+                CodeGenerator* code_generator,
                 OptimizingCompilerStats* compiler_stats,
                 const uint8_t* interpreter_metadata,
                 Handle<mirror::DexCache> dex_cache,
@@ -61,6 +64,7 @@
                              dex_compilation_unit,
                              outer_compilation_unit,
                              driver,
+                             code_generator,
                              interpreter_metadata,
                              compiler_stats,
                              dex_cache,
@@ -89,6 +93,7 @@
                              /* dex_compilation_unit */ nullptr,
                              /* outer_compilation_unit */ nullptr,
                              /* compiler_driver */ nullptr,
+                             /* code_generator */ nullptr,
                              /* interpreter_metadata */ nullptr,
                              /* compiler_stats */ nullptr,
                              null_dex_cache_,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f5b6ebe..20cdae3 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3993,8 +3993,11 @@
 void InstructionCodeGeneratorARM::VisitNewArray(HNewArray* instruction) {
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
-  codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+  QuickEntrypointEnum entrypoint =
+      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+  DCHECK(!codegen_->IsLeafMethod());
 }
 
 void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
@@ -5719,6 +5722,9 @@
 HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
     HLoadClass::LoadKind desired_class_load_kind) {
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5849,6 +5855,7 @@
       break;
     }
     case HLoadClass::LoadKind::kDexCacheViaMethod:
+    case HLoadClass::LoadKind::kInvalid:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
   }
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 26c8254..598be47 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4360,6 +4360,9 @@
 HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
     HLoadClass::LoadKind desired_class_load_kind) {
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -4498,6 +4501,7 @@
       break;
     }
     case HLoadClass::LoadKind::kDexCacheViaMethod:
+    case HLoadClass::LoadKind::kInvalid:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
   }
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index f4d3ec5..e189608 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4005,8 +4005,11 @@
 void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
-  codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+  QuickEntrypointEnum entrypoint =
+      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
+  DCHECK(!codegen_->IsLeafMethod());
 }
 
 void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5796,6 +5799,9 @@
 HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
     HLoadClass::LoadKind desired_class_load_kind) {
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -5916,6 +5922,7 @@
       break;
     }
     case HLoadClass::LoadKind::kDexCacheViaMethod:
+    case HLoadClass::LoadKind::kInvalid:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
   }
@@ -7253,8 +7260,7 @@
   // save one load. However, since this is just an intrinsic slow path we prefer this
   // simple and more robust approach rather than trying to determine if that's the case.
   SlowPathCode* slow_path = GetCurrentSlowPath();
-  DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
-  if (slow_path->IsCoreRegisterSaved(RegisterFrom(location).GetCode())) {
+  if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(RegisterFrom(location).GetCode())) {
     int stack_offset = slow_path->GetStackOffsetOfCoreRegister(RegisterFrom(location).GetCode());
     GetAssembler()->LoadFromOffset(kLoadWord, temp, sp, stack_offset);
     return temp;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index a095970..0677dad 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -484,6 +484,8 @@
       type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_address_patches_(std::less<uint32_t>(),
                                   graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       clobbered_ra_(false) {
   // Save RA (containing the return address) to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(RA));
@@ -704,9 +706,6 @@
   // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
   // into the path that creates a stack frame so that RA can be explicitly saved and restored.
   // RA can't otherwise be saved/restored when it's the only spilled register.
-  // TODO: Can this be improved? It causes creation of a stack frame (while RA might be
-  // saved in an unused temporary register) and saving of RA and the current method pointer
-  // in the frame.
   return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
 }
 
@@ -1160,6 +1159,67 @@
   // offset to `out` (e.g. lw, jialc, addiu).
 }
 
+CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
+    const DexFile& dex_file,
+    dex::StringIndex dex_index,
+    Handle<mirror::String> handle) {
+  jit_string_roots_.Overwrite(StringReference(&dex_file, dex_index),
+                              reinterpret_cast64<uint64_t>(handle.GetReference()));
+  jit_string_patches_.emplace_back(dex_file, dex_index.index_);
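+  // The returned patch records high_label, which the caller binds at the lui;
+  // EmitJitRootPatches() later rewrites the lui/lw immediates in place.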
+  return &jit_string_patches_.back();
+}
+
+CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
+    const DexFile& dex_file,
+    dex::TypeIndex dex_index,
+    Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index),
+                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
+  return &jit_class_patches_.back();
+}
+
+void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
+                                        const uint8_t* roots_data,
+                                        const CodeGeneratorMIPS::JitPatchInfo& info,
+                                        uint64_t index_in_table) const {
+  uint32_t literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
+  uintptr_t address =
+      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+  uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
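+  // Verify the little-endian placeholder bytes (lui 0x1234 / lw 0x5678) below,
+  // then splice the real address into the two immediates.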
+  // lui reg, addr32_high
+  DCHECK_EQ(code[literal_offset + 0], 0x34);
+  DCHECK_EQ(code[literal_offset + 1], 0x12);
+  DCHECK_EQ((code[literal_offset + 2] & 0xE0), 0x00);
+  DCHECK_EQ(code[literal_offset + 3], 0x3C);
+  // lw reg, reg, addr32_low
+  DCHECK_EQ(code[literal_offset + 4], 0x78);
+  DCHECK_EQ(code[literal_offset + 5], 0x56);
+  DCHECK_EQ((code[literal_offset + 7] & 0xFC), 0x8C);
+  addr32 += (addr32 & 0x8000) << 1;  // Account for sign extension in "lw reg, reg, addr32_low".
+  // lui reg, addr32_high
+  code[literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
+  code[literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
+  // lw reg, reg, addr32_low
+  code[literal_offset + 4] = static_cast<uint8_t>(addr32 >> 0);
+  code[literal_offset + 5] = static_cast<uint8_t>(addr32 >> 8);
+}
+
+void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
+  for (const JitPatchInfo& info : jit_string_patches_) {
+    const auto& it = jit_string_roots_.find(StringReference(&info.target_dex_file,
+                                                            dex::StringIndex(info.index)));
+    DCHECK(it != jit_string_roots_.end());
+    PatchJitRootUse(code, roots_data, info, it->second);
+  }
+  for (const JitPatchInfo& info : jit_class_patches_) {
+    const auto& it = jit_class_roots_.find(TypeReference(&info.target_dex_file,
+                                                         dex::TypeIndex(info.index)));
+    DCHECK(it != jit_class_roots_.end());
+    PatchJitRootUse(code, roots_data, info, it->second);
+  }
+}
+
 void CodeGeneratorMIPS::MarkGCCard(Register object,
                                    Register value,
                                    bool value_can_be_null) {
@@ -5225,8 +5285,7 @@
       break;
     case HLoadString::LoadKind::kJitTableAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
-      // TODO: implement.
-      fallback_load = true;
+      fallback_load = false;
       break;
     case HLoadString::LoadKind::kDexCacheViaMethod:
       fallback_load = false;
@@ -5249,6 +5308,9 @@
   bool is_r6 = GetInstructionSetFeatures().IsR6();
   bool fallback_load = has_irreducible_loops && !is_r6;
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       fallback_load = false;
       break;
@@ -5265,8 +5327,7 @@
       break;
     case HLoadClass::LoadKind::kJitTableAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
-      // TODO: implement.
-      fallback_load = true;
+      fallback_load = false;
       break;
     case HLoadClass::LoadKind::kDexCacheViaMethod:
       fallback_load = false;
@@ -5591,10 +5652,18 @@
       break;
     }
     case HLoadClass::LoadKind::kJitTableAddress: {
-      LOG(FATAL) << "Unimplemented";
+      CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
+                                                                             cls->GetTypeIndex(),
+                                                                             cls->GetClass());
+      bool reordering = __ SetReorder(false);
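+      // Keep the lui/lw pair contiguous (no assembler reordering) so that
+      // PatchJitRootUse() can later rewrite both immediates via high_label.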
+      __ Bind(&info->high_label);
+      __ Lui(out, /* placeholder */ 0x1234);
+      GenerateGcRootFieldLoad(cls, out_loc, out, /* placeholder */ 0x5678);
+      __ SetReorder(reordering);
       break;
     }
     case HLoadClass::LoadKind::kDexCacheViaMethod:
+    case HLoadClass::LoadKind::kInvalid:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
   }
@@ -5730,6 +5799,18 @@
       __ Bind(slow_path->GetExitLabel());
       return;
     }
+    case HLoadString::LoadKind::kJitTableAddress: {
+      CodeGeneratorMIPS::JitPatchInfo* info =
+          codegen_->NewJitRootStringPatch(load->GetDexFile(),
+                                          load->GetStringIndex(),
+                                          load->GetString());
+      bool reordering = __ SetReorder(false);
+      __ Bind(&info->high_label);
+      __ Lui(out, /* placeholder */ 0x1234);
+      GenerateGcRootFieldLoad(load, out_loc, out, /* placeholder */ 0x5678);
+      __ SetReorder(reordering);
+      return;
+    }
     default:
       break;
   }
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index e92eeef..47eba50 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -352,6 +352,7 @@
 
   // Emit linker patches.
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
 
   void MarkGCCard(Register object, Register value, bool value_can_be_null);
 
@@ -465,6 +466,31 @@
 
   void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, Register out, Register base);
 
+  // The JitPatchInfo is used for JIT string and class loads.
+  struct JitPatchInfo {
+    JitPatchInfo(const DexFile& dex_file, uint64_t idx)
+        : target_dex_file(dex_file), index(idx) { }
+    JitPatchInfo(JitPatchInfo&& other) = default;
+
+    const DexFile& target_dex_file;
+    // String/type index.
+    uint64_t index;
+    // Label for the instruction loading the most significant half of the address.
+    // The least significant half is loaded with the instruction that follows immediately.
+    MipsLabel high_label;
+  };
+
+  void PatchJitRootUse(uint8_t* code,
+                       const uint8_t* roots_data,
+                       const JitPatchInfo& info,
+                       uint64_t index_in_table) const;
+  JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
+                                      dex::StringIndex dex_index,
+                                      Handle<mirror::String> handle);
+  JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
+                                     dex::TypeIndex dex_index,
+                                     Handle<mirror::Class> handle);
+
  private:
   Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
 
@@ -512,6 +538,10 @@
   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
   // Deduplication map for patchable boot image addresses.
   Uint32ToLiteralMap boot_image_address_patches_;
+  // Patches for string root accesses in JIT compiled code.
+  ArenaDeque<JitPatchInfo> jit_string_patches_;
+  // Patches for class root accesses in JIT compiled code.
+  ArenaDeque<JitPatchInfo> jit_class_patches_;
 
   // PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods.
   // This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e96e3d7..4c8dabf 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -91,9 +91,6 @@
   // Space on the stack is reserved for all arguments.
   stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
 
-  // TODO: shouldn't we use a whole machine word per argument on the stack?
-  // Implicit 4-byte method pointer (and such) will cause misalignment.
-
   return next_location;
 }
 
@@ -434,7 +431,11 @@
       pc_relative_type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       type_bss_entry_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_address_patches_(std::less<uint32_t>(),
-                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+                                  graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      jit_string_patches_(StringReferenceValueComparator(),
+                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+      jit_class_patches_(TypeReferenceValueComparator(),
+                         graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Save RA (containing the return address) to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(RA));
 }
@@ -1055,6 +1056,49 @@
   // offset to `out` (e.g. ld, jialc, daddiu).
 }
 
+Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
+                                                          dex::StringIndex string_index,
+                                                          Handle<mirror::String> handle) {
+  jit_string_roots_.Overwrite(StringReference(&dex_file, string_index),
+                              reinterpret_cast64<uint64_t>(handle.GetReference()));
+  return jit_string_patches_.GetOrCreate(
+      StringReference(&dex_file, string_index),
+      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
+                                                         dex::TypeIndex type_index,
+                                                         Handle<mirror::Class> handle) {
+  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index),
+                             reinterpret_cast64<uint64_t>(handle.GetReference()));
+  return jit_class_patches_.GetOrCreate(
+      TypeReference(&dex_file, type_index),
+      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+}
+
+void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
+                                          const uint8_t* roots_data,
+                                          const Literal* literal,
+                                          uint64_t index_in_table) const {
+  uint32_t literal_offset = GetAssembler().GetLabelLocation(literal->GetLabel());
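+  // Overwrite the 32-bit placeholder literal with the absolute address of the
+  // root's slot in the roots table; the checked cast verifies that it fits.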
+  uintptr_t address =
+      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+  reinterpret_cast<uint32_t*>(code + literal_offset)[0] = dchecked_integral_cast<uint32_t>(address);
+}
+
+void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
+  for (const auto& entry : jit_string_patches_) {
+    const auto& it = jit_string_roots_.find(entry.first);
+    DCHECK(it != jit_string_roots_.end());
+    PatchJitRootUse(code, roots_data, entry.second, it->second);
+  }
+  for (const auto& entry : jit_class_patches_) {
+    const auto& it = jit_class_roots_.find(entry.first);
+    DCHECK(it != jit_class_roots_.end());
+    PatchJitRootUse(code, roots_data, entry.second, it->second);
+  }
+}
+
 void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
   // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
   blocked_core_registers_[ZERO] = true;
@@ -3309,8 +3353,6 @@
       break;
     case HLoadString::LoadKind::kJitTableAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
-      // TODO: implement.
-      fallback_load = true;
       break;
   }
   if (fallback_load) {
@@ -3326,6 +3368,9 @@
   }
   bool fallback_load = false;
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -3341,8 +3386,6 @@
       break;
     case HLoadClass::LoadKind::kJitTableAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
-      // TODO: implement.
-      fallback_load = true;
       break;
     case HLoadClass::LoadKind::kDexCacheViaMethod:
       break;
@@ -3580,11 +3623,16 @@
       generate_null_check = true;
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      LOG(FATAL) << "Unimplemented";
+    case HLoadClass::LoadKind::kJitTableAddress:
+      __ LoadLiteral(out,
+                     kLoadUnsignedWord,
+                     codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
+                                                          cls->GetTypeIndex(),
+                                                          cls->GetClass()));
+      GenerateGcRootFieldLoad(cls, out_loc, out, 0);
       break;
-    }
     case HLoadClass::LoadKind::kDexCacheViaMethod:
+    case HLoadClass::LoadKind::kInvalid:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
   }
@@ -3685,6 +3733,14 @@
       __ Bind(slow_path->GetExitLabel());
       return;
     }
+    case HLoadString::LoadKind::kJitTableAddress:
+      __ LoadLiteral(out,
+                     kLoadUnsignedWord,
+                     codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
+                                                           load->GetStringIndex(),
+                                                           load->GetString()));
+      GenerateGcRootFieldLoad(load, out_loc, out, 0);
+      return;
     default:
       break;
   }
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 5ba8912..26cc7dc 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -52,7 +52,7 @@
 
 
 static constexpr GpuRegister kCoreCalleeSaves[] =
-    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };  // TODO: review
+    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
 static constexpr FpuRegister kFpuCalleeSaves[] =
     { F24, F25, F26, F27, F28, F29, F30, F31 };
 
@@ -312,6 +312,7 @@
 
   // Emit linker patches.
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
 
   void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
 
@@ -425,10 +426,27 @@
 
   void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info, GpuRegister out);
 
+  void PatchJitRootUse(uint8_t* code,
+                       const uint8_t* roots_data,
+                       const Literal* literal,
+                       uint64_t index_in_table) const;
+  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
+                                       dex::StringIndex string_index,
+                                       Handle<mirror::String> handle);
+  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
+                                      dex::TypeIndex type_index,
+                                      Handle<mirror::Class> handle);
+
  private:
   using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
   using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
   using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+  using StringToLiteralMap = ArenaSafeMap<StringReference,
+                                          Literal*,
+                                          StringReferenceValueComparator>;
+  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
+                                        Literal*,
+                                        TypeReferenceValueComparator>;
   using BootStringToLiteralMap = ArenaSafeMap<StringReference,
                                               Literal*,
                                               StringReferenceValueComparator>;
@@ -476,6 +494,10 @@
   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
   // Deduplication map for patchable boot image addresses.
   Uint32ToLiteralMap boot_image_address_patches_;
+  // Patches for string root accesses in JIT compiled code.
+  StringToLiteralMap jit_string_patches_;
+  // Patches for class root accesses in JIT compiled code.
+  TypeToLiteralMap jit_class_patches_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
 };
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1b74316..137b554 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4214,7 +4214,9 @@
 void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
-  codegen_->InvokeRuntime(kQuickAllocArrayResolved, instruction, instruction->GetDexPc());
+  QuickEntrypointEnum entrypoint =
+      CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
   CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
   DCHECK(!codegen_->IsLeafMethod());
 }
@@ -6022,6 +6024,9 @@
 HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
     HLoadClass::LoadKind desired_class_load_kind) {
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
@@ -6157,6 +6162,7 @@
       break;
     }
     case HLoadClass::LoadKind::kDexCacheViaMethod:
+    case HLoadClass::LoadKind::kInvalid:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
   }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index abd8246..c5367ce 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5427,6 +5427,9 @@
 HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
     HLoadClass::LoadKind desired_class_load_kind) {
   switch (desired_class_load_kind) {
+    case HLoadClass::LoadKind::kInvalid:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 21c3ae6..ecb8687 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -146,6 +146,12 @@
   return InputRegisterAt(instr, 0);
 }
 
+inline vixl::aarch32::DRegister DRegisterFromS(vixl::aarch32::SRegister s) {
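+  // Each aarch32 D register aliases two consecutive S registers (d<n> = s<2n>:s<2n+1>).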
+  vixl::aarch32::DRegister d = vixl::aarch32::DRegister(s.GetCode() / 2);
+  DCHECK(s.Is(d.GetLane(0)) || s.Is(d.GetLane(1)));
+  return d;
+}
+
 inline int32_t Int32ConstantFrom(HInstruction* instr) {
   if (instr->IsIntConstant()) {
     return instr->AsIntConstant()->GetValue();
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 7772e8f..f0afccb 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -558,9 +558,13 @@
                                                                is_referrer,
                                                                invoke_instruction->GetDexPc(),
                                                                /* needs_access_check */ false);
+  HLoadClass::LoadKind kind = HSharpening::SharpenClass(
+      load_class, codegen_, compiler_driver_, caller_compilation_unit_);
+  DCHECK(kind != HLoadClass::LoadKind::kInvalid)
+      << "We should always be able to reference a class for inline caches";
+  // Insert before setting the kind, as setting the kind affects the inputs.
   bb_cursor->InsertInstructionAfter(load_class, receiver_class);
-  // Sharpen after adding the instruction, as the sharpening may remove inputs.
-  HSharpening::SharpenClass(load_class, codegen_, compiler_driver_);
+  load_class->SetLoadKind(kind);
 
   // TODO: Extend reference type propagation to understand the guard.
   HNotEqual* compare = new (graph_->GetArena()) HNotEqual(load_class, receiver_class);
@@ -1286,6 +1290,7 @@
                         resolved_method->GetDexFile(),
                         *code_item,
                         compiler_driver_,
+                        codegen_,
                         inline_stats.get(),
                         resolved_method->GetQuickenedInfo(class_linker->GetImagePointerSize()),
                         dex_cache,
@@ -1416,10 +1421,13 @@
         return false;
       }
 
-      if (!same_dex_file && current->NeedsEnvironment()) {
+      if (current->NeedsEnvironment() &&
+          !CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
+                                            resolved_method)) {
         VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
                        << " could not be inlined because " << current->DebugName()
-                       << " needs an environment and is in a different dex file";
+                       << " needs an environment, is in a different dex file"
+                       << ", and cannot be encoded in the stack maps.";
         return false;
       }
 
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index cac385c..a1c391f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -22,6 +22,7 @@
 #include "dex_instruction-inl.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
+#include "sharpening.h"
 #include "scoped_thread_state_change-inl.h"
 
 namespace art {
@@ -847,7 +848,7 @@
     ScopedObjectAccess soa(Thread::Current());
     if (invoke_type == kStatic) {
       clinit_check = ProcessClinitCheckForInvoke(
-          dex_pc, resolved_method, method_idx, &clinit_check_requirement);
+          dex_pc, resolved_method, &clinit_check_requirement);
     } else if (invoke_type == kSuper) {
       if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
         // Update the method index to the one resolved. Note that this may be a no-op if
@@ -933,15 +934,8 @@
 
 bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
   ScopedObjectAccess soa(Thread::Current());
-  Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-  Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
 
-  if (outer_dex_cache.Get() != dex_cache.Get()) {
-    // We currently do not support inlining allocations across dex files.
-    return false;
-  }
-
-  HLoadClass* load_class = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+  HLoadClass* load_class = BuildLoadClass(type_index, dex_pc);
 
   HInstruction* cls = load_class;
   Handle<mirror::Class> klass = load_class->GetClass();
@@ -1005,39 +999,23 @@
 HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
       uint32_t dex_pc,
       ArtMethod* resolved_method,
-      uint32_t method_idx,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
-  Thread* self = Thread::Current();
-  StackHandleScope<2> hs(self);
-  Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-  Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
-  Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
-  Handle<mirror::Class> resolved_method_class(hs.NewHandle(resolved_method->GetDeclaringClass()));
-
-  // The index at which the method's class is stored in the DexCache's type array.
-  dex::TypeIndex storage_index;
-  bool is_outer_class = (resolved_method->GetDeclaringClass() == outer_class.Get());
-  if (is_outer_class) {
-    storage_index = outer_class->GetDexTypeIndex();
-  } else if (outer_dex_cache.Get() == dex_cache.Get()) {
-    // Get `storage_index` from IsClassOfStaticMethodAvailableToReferrer.
-    compiler_driver_->IsClassOfStaticMethodAvailableToReferrer(outer_dex_cache.Get(),
-                                                               GetCompilingClass(),
-                                                               resolved_method,
-                                                               method_idx,
-                                                               &storage_index);
-  }
+  Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
 
   HClinitCheck* clinit_check = nullptr;
-
-  if (IsInitialized(resolved_method_class)) {
+  if (IsInitialized(klass)) {
     *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
-  } else if (storage_index.IsValid()) {
-    *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
-    HLoadClass* cls = BuildLoadClass(
-        storage_index, dex_pc, /* check_access */ false, /* outer */ true);
-    clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
-    AppendInstruction(clinit_check);
+  } else {
+    HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
+                                     klass->GetDexFile(),
+                                     klass,
+                                     dex_pc,
+                                     /* needs_access_check */ false);
+    if (cls != nullptr) {
+      *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
+      clinit_check = new (arena_) HClinitCheck(cls, dex_pc);
+      AppendInstruction(clinit_check);
+    }
   }
   return clinit_check;
 }
@@ -1216,9 +1194,7 @@
   }
 
   ScopedObjectAccess soa(Thread::Current());
-  ArtField* resolved_field =
-      compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
-
+  ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
 
   // Generate an explicit null check on the reference, unless the field access
   // is unresolved. In that case, we rely on the runtime to perform various
@@ -1336,6 +1312,56 @@
   }
 }
 
+ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<2> hs(soa.Self());
+
+  ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
+  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+      soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
+  Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
+
+  ArtField* resolved_field = class_linker->ResolveField(*dex_compilation_unit_->GetDexFile(),
+                                                        field_idx,
+                                                        dex_compilation_unit_->GetDexCache(),
+                                                        class_loader,
+                                                        is_static);
+
+  if (UNLIKELY(resolved_field == nullptr)) {
+    // Clean up any exception left by type resolution.
+    soa.Self()->ClearException();
+    return nullptr;
+  }
+
+  // Check static/instance. The class linker has a fast path for looking into the dex cache
+  // and does not check static/instance if it hits it.
+  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+    return nullptr;
+  }
+
+  // Check access.
+  if (compiling_class.Get() == nullptr) {
+    if (!resolved_field->IsPublic()) {
+      return nullptr;
+    }
+  } else if (!compiling_class->CanAccessResolvedField(resolved_field->GetDeclaringClass(),
+                                                      resolved_field,
+                                                      dex_compilation_unit_->GetDexCache().Get(),
+                                                      field_idx)) {
+    return nullptr;
+  }
+
+  if (is_put &&
+      resolved_field->IsFinal() &&
+      (compiling_class.Get() != resolved_field->GetDeclaringClass())) {
+    // Final fields can only be updated within their own class.
+    // TODO: Only allow it in constructors. b/34966607.
+    return nullptr;
+  }
+
+  return resolved_field;
+}
+
 bool HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
                                                  uint32_t dex_pc,
                                                  bool is_put) {
@@ -1343,12 +1369,7 @@
   uint16_t field_index = instruction.VRegB_21c();
 
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
-  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
-      soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
-  ArtField* resolved_field = compiler_driver_->ResolveField(
-      soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
+  ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
 
   if (resolved_field == nullptr) {
     MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
@@ -1358,38 +1379,23 @@
   }
 
   Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
-  Handle<mirror::DexCache> outer_dex_cache = outer_compilation_unit_->GetDexCache();
-  Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
 
-  // The index at which the field's class is stored in the DexCache's type array.
-  dex::TypeIndex storage_index;
-  bool is_outer_class = (outer_class.Get() == resolved_field->GetDeclaringClass());
-  if (is_outer_class) {
-    storage_index = outer_class->GetDexTypeIndex();
-  } else if (outer_dex_cache.Get() != dex_cache.Get()) {
-    // The compiler driver cannot currently understand multiple dex caches involved. Just bailout.
-    return false;
-  } else {
-    // TODO: This is rather expensive. Perf it and cache the results if needed.
-    std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
-        outer_dex_cache.Get(),
-        GetCompilingClass(),
-        resolved_field,
-        field_index,
-        &storage_index);
-    bool can_easily_access = is_put ? pair.second : pair.first;
-    if (!can_easily_access) {
-      MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
-      BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
-      return true;
-    }
+  Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
+  HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
+                                        klass->GetDexFile(),
+                                        klass,
+                                        dex_pc,
+                                        /* needs_access_check */ false);
+
+  if (constant == nullptr) {
+    // The class cannot be referenced from this compiled code. Generate
+    // an unresolved access.
+    MaybeRecordStat(MethodCompilationStat::kUnresolvedFieldNotAFastAccess);
+    BuildUnresolvedStaticFieldAccess(instruction, dex_pc, is_put, field_type);
+    return true;
   }
 
-  HLoadClass* constant = BuildLoadClass(
-      storage_index, dex_pc, /* check_access */ false, /* outer */ true);
-
   HInstruction* cls = constant;
-  Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
   if (!IsInitialized(klass)) {
     cls = new (arena_) HClinitCheck(constant, dex_pc);
     AppendInstruction(cls);
@@ -1497,7 +1503,7 @@
                                               uint32_t* args,
                                               uint32_t register_index) {
   HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
-  HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+  HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
   HInstruction* object = new (arena_) HNewArray(cls, length, dex_pc);
   AppendInstruction(object);
 
@@ -1627,44 +1633,68 @@
   }
 }
 
-HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
-                                                uint32_t dex_pc,
-                                                bool check_access,
-                                                bool outer) {
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc) {
   ScopedObjectAccess soa(Thread::Current());
-  const DexCompilationUnit* compilation_unit =
-      outer ? outer_compilation_unit_ : dex_compilation_unit_;
-  const DexFile& dex_file = *compilation_unit->GetDexFile();
-  StackHandleScope<1> hs(soa.Self());
+  StackHandleScope<2> hs(soa.Self());
+  const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       soa.Decode<mirror::ClassLoader>(dex_compilation_unit_->GetClassLoader())));
   Handle<mirror::Class> klass = handles_->NewHandle(compiler_driver_->ResolveClass(
-      soa, compilation_unit->GetDexCache(), class_loader, type_index, compilation_unit));
+      soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_));
 
-  bool is_accessible = false;
-  if (!check_access) {
-    is_accessible = true;
-  } else if (klass.Get() != nullptr) {
+  bool needs_access_check = true;
+  if (klass.Get() != nullptr) {
     if (klass->IsPublic()) {
-      is_accessible = true;
+      needs_access_check = false;
     } else {
       mirror::Class* compiling_class = GetCompilingClass();
       if (compiling_class != nullptr && compiling_class->CanAccess(klass.Get())) {
-        is_accessible = true;
+        needs_access_check = false;
       }
     }
   }
 
+  return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
+}
+
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+                                                const DexFile& dex_file,
+                                                Handle<mirror::Class> klass,
+                                                uint32_t dex_pc,
+                                                bool needs_access_check) {
+  // Try to find a reference in the compiling dex file.
+  const DexFile* actual_dex_file = &dex_file;
+  if (!IsSameDexFile(dex_file, *dex_compilation_unit_->GetDexFile())) {
+    dex::TypeIndex local_type_index =
+        klass->FindTypeIndexInOtherDexFile(*dex_compilation_unit_->GetDexFile());
+    if (local_type_index.IsValid()) {
+      type_index = local_type_index;
+      actual_dex_file = dex_compilation_unit_->GetDexFile();
+    }
+  }
+
+  // Note: `klass` must be from `handles_`.
   HLoadClass* load_class = new (arena_) HLoadClass(
       graph_->GetCurrentMethod(),
       type_index,
-      dex_file,
+      *actual_dex_file,
       klass,
       klass.Get() != nullptr && (klass.Get() == GetOutermostCompilingClass()),
       dex_pc,
-      !is_accessible);
+      needs_access_check);
 
+  HLoadClass::LoadKind load_kind = HSharpening::SharpenClass(load_class,
+                                                             code_generator_,
+                                                             compiler_driver_,
+                                                             *dex_compilation_unit_);
+
+  if (load_kind == HLoadClass::LoadKind::kInvalid) {
+    // We actually cannot reference this class; we're forced to bail.
+    return nullptr;
+  }
+  // Append the instruction first, as setting the load kind affects the inputs.
   AppendInstruction(load_class);
+  load_class->SetLoadKind(load_kind);
   return load_class;
 }
 
@@ -1674,7 +1704,7 @@
                                          dex::TypeIndex type_index,
                                          uint32_t dex_pc) {
   HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
-  HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+  HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
 
   ScopedObjectAccess soa(Thread::Current());
   TypeCheckKind check_kind = ComputeTypeCheckKind(cls->GetClass());
@@ -2498,7 +2528,7 @@
     case Instruction::NEW_ARRAY: {
       dex::TypeIndex type_index(instruction.VRegC_22c());
       HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
-      HLoadClass* cls = BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+      HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
       AppendInstruction(new (arena_) HNewArray(cls, length, dex_pc));
       UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
       break;
@@ -2673,7 +2703,7 @@
 
     case Instruction::CONST_CLASS: {
       dex::TypeIndex type_index(instruction.VRegB_21c());
-      BuildLoadClass(type_index, dex_pc, /* check_access */ true);
+      BuildLoadClass(type_index, dex_pc);
       UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
       break;
     }
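
The cross-dex-file redirection in the new BuildLoadClass overload relies on
FindTypeIndexInOtherDexFile. A rough sketch of that lookup, with DexFileView as an
illustrative stand-in (the real implementation can binary-search the sorted type_ids table):

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Illustrative stand-in: a dex file's type_ids section, as descriptor strings.
    struct DexFileView {
      std::vector<std::string> type_descriptors;
    };

    // Returns the local type index of `descriptor`, or -1 when this dex file never
    // references the type (the caller then keeps the class's defining dex file).
    int32_t FindTypeIndexIn(const DexFileView& dex_file, const std::string& descriptor) {
      for (std::size_t i = 0; i < dex_file.type_descriptors.size(); ++i) {
        if (dex_file.type_descriptors[i] == descriptor) {
          return static_cast<int32_t>(i);
        }
      }
      return -1;
    }
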
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 5efe950..3bb680c 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -31,6 +31,7 @@
 
 namespace art {
 
+class CodeGenerator;
 class Instruction;
 
 class HInstructionBuilder : public ValueObject {
@@ -44,6 +45,7 @@
                       DexCompilationUnit* dex_compilation_unit,
                       const DexCompilationUnit* const outer_compilation_unit,
                       CompilerDriver* driver,
+                      CodeGenerator* code_generator,
                       const uint8_t* interpreter_metadata,
                       OptimizingCompilerStats* compiler_stats,
                       Handle<mirror::DexCache> dex_cache,
@@ -61,6 +63,7 @@
         current_locals_(nullptr),
         latest_result_(nullptr),
         compiler_driver_(driver),
+        code_generator_(code_generator),
         dex_compilation_unit_(dex_compilation_unit),
         outer_compilation_unit_(outer_compilation_unit),
         interpreter_metadata_(interpreter_metadata),
@@ -228,10 +231,14 @@
-  // Builds a `HLoadClass` loading the given `type_index`. If `outer` is true,
-  // this method will use the outer class's dex file to lookup the type at
-  // `type_index`.
+  // Builds a `HLoadClass` loading the given `type_index`.
+  HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
+
   HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+                             const DexFile& dex_file,
+                             Handle<mirror::Class> klass,
                              uint32_t dex_pc,
-                             bool check_access,
-                             bool outer = false);
+                             bool needs_access_check)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns the outer-most compiling method's class.
   mirror::Class* GetOutermostCompilingClass() const;
@@ -275,7 +282,6 @@
   HClinitCheck* ProcessClinitCheckForInvoke(
       uint32_t dex_pc,
       ArtMethod* method,
-      uint32_t method_idx,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -290,6 +296,10 @@
   // not be resolved.
   ArtMethod* ResolveMethod(uint16_t method_idx, InvokeType invoke_type);
 
+  // Try to resolve a field using the class linker. Return null if it could not
+  // be found, or if the static/instance kind or access checks fail.
+  ArtField* ResolveField(uint16_t field_idx, bool is_static, bool is_put);
+
   ArenaAllocator* const arena_;
   HGraph* const graph_;
   VariableSizedHandleScope* handles_;
@@ -311,6 +321,8 @@
 
   CompilerDriver* const compiler_driver_;
 
+  CodeGenerator* const code_generator_;
+
   // The compilation unit of the current method being compiled. Note that
   // it can be an inlined method.
   DexCompilationUnit* const dex_compilation_unit_;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 1e73cf6..6425e13 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -31,6 +31,9 @@
 static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U;
 static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000);
 
+static constexpr uint32_t kNanFloat = 0x7fc00000U;
+static constexpr uint64_t kNanDouble = UINT64_C(0x7ff8000000000000);
+
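
The two added constants are the canonical quiet-NaN bit patterns that the min/max intrinsics
below return for unordered inputs. A quick host-side check of that claim (illustrative, not
part of the patch):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint32_t nan_float_bits = 0x7fc00000U;
      const uint64_t nan_double_bits = UINT64_C(0x7ff8000000000000);
      float f;
      double d;
      std::memcpy(&f, &nan_float_bits, sizeof(f));
      std::memcpy(&d, &nan_double_bits, sizeof(d));
      // Exponent all ones plus the top mantissa bit set: the canonical quiet NaN.
      return (std::isnan(f) && std::isnan(d)) ? 0 : 1;
    }
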
 // Recognize intrinsics from HInvoke nodes.
 class IntrinsicsRecognizer : public HOptimization {
  public:
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 1a10173..70a3d38 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -40,10 +40,12 @@
 using helpers::LowRegisterFrom;
 using helpers::LowSRegisterFrom;
 using helpers::OutputDRegister;
+using helpers::OutputSRegister;
 using helpers::OutputRegister;
 using helpers::OutputVRegister;
 using helpers::RegisterFrom;
 using helpers::SRegisterFrom;
+using helpers::DRegisterFromS;
 
 using namespace vixl::aarch32;  // NOLINT(build/namespaces)
 
@@ -462,6 +464,214 @@
   GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
 }
 
+static void GenMinMaxFloat(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+  Location op1_loc = invoke->GetLocations()->InAt(0);
+  Location op2_loc = invoke->GetLocations()->InAt(1);
+  Location out_loc = invoke->GetLocations()->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::SRegister op1 = SRegisterFrom(op1_loc);
+  vixl32::SRegister op2 = SRegisterFrom(op2_loc);
+  vixl32::SRegister out = OutputSRegister(invoke);
+  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+  const vixl32::Register temp1 = temps.Acquire();
+  vixl32::Register temp2 = RegisterFrom(invoke->GetLocations()->GetTemp(0));
+  vixl32::Label nan, done;
+
+  DCHECK(op1.Is(out));
+
+  __ Vcmp(op1, op2);
+  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+  __ B(vs, &nan, /* far_target */ false);  // if unordered, go to NaN handling.
+
+  // op1 <> op2
+  vixl32::ConditionType cond = is_min ? gt : lt;
+  {
+    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+                                2 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ it(cond);
+    __ vmov(cond, F32, out, op2);
+  }
+  __ B(ne, &done, /* far_target */ false);  // for <> (not equal), the min/max is already computed.
+
+  // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
+  __ Vmov(temp1, op1);
+  __ Vmov(temp2, op2);
+  if (is_min) {
+    __ Orr(temp1, temp1, temp2);
+  } else {
+    __ And(temp1, temp1, temp2);
+  }
+  __ Vmov(out, temp1);
+  __ B(&done);
+
+  // handle NaN input.
+  __ Bind(&nan);
+  __ Movt(temp1, High16Bits(kNanFloat));  // 0x7FC0xxxx is a NaN.
+  __ Vmov(out, temp1);
+
+  __ Bind(&done);
+}
+
+static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
+  GenMinMaxFloat(invoke, /* is_min */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
+  GenMinMaxFloat(invoke, /* is_min */ false, GetAssembler());
+}
+
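
The Orr/And pair above is the signed-zero trick: min(+0.0, -0.0) must return -0.0 and
max(+0.0, -0.0) must return +0.0, yet the two zeros compare equal, so plain compares cannot
pick the right one. The code only reaches this path when the operands compare equal, which
makes operating on the raw bits safe. A host-side C++ sketch of the same bit manipulation:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static float BitwiseMinMaxZero(float a, float b, bool is_min) {
      uint32_t ua, ub;
      std::memcpy(&ua, &a, sizeof(ua));
      std::memcpy(&ub, &b, sizeof(ub));
      // ORR keeps the sign bit if either input is -0.0 (min case);
      // AND clears it unless both inputs are -0.0 (max case).
      const uint32_t ur = is_min ? (ua | ub) : (ua & ub);
      float result;
      std::memcpy(&result, &ur, sizeof(result));
      return result;
    }

    int main() {
      std::printf("min(+0,-0) = %+f\n", BitwiseMinMaxZero(+0.0f, -0.0f, true));   // -0.000000
      std::printf("max(+0,-0) = %+f\n", BitwiseMinMaxZero(+0.0f, -0.0f, false));  // +0.000000
    }
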
+static void GenMinMaxDouble(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+  Location op1_loc = invoke->GetLocations()->InAt(0);
+  Location op2_loc = invoke->GetLocations()->InAt(1);
+  Location out_loc = invoke->GetLocations()->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::DRegister op1 = DRegisterFrom(op1_loc);
+  vixl32::DRegister op2 = DRegisterFrom(op2_loc);
+  vixl32::DRegister out = OutputDRegister(invoke);
+  vixl32::Label handle_nan_eq, done;
+
+  DCHECK(op1.Is(out));
+
+  __ Vcmp(op1, op2);
+  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+  __ B(vs, &handle_nan_eq, /* far_target */ false);  // if unordered, go to NaN handling.
+
+  // op1 <> op2
+  vixl32::ConditionType cond = is_min ? gt : lt;
+  {
+    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+                                2 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ it(cond);
+    __ vmov(cond, F64, out, op2);
+  }
+  __ B(ne, &done, /* far_target */ false);  // for <> (not equal), the min/max is already computed.
+
+  // handle op1 == op2, max(+0.0,-0.0).
+  if (!is_min) {
+    __ Vand(F64, out, op1, op2);
+    __ B(&done);
+  }
+
+  // handle op1 == op2, min(+0.0,-0.0), NaN input.
+  __ Bind(&handle_nan_eq);
+  __ Vorr(F64, out, op1, op2);  // assemble op1/-0.0/NaN.
+
+  __ Bind(&done);
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
+  GenMinMaxDouble(invoke, /* is_min */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+  CreateFPFPToFPLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
+  GenMinMaxDouble(invoke, /* is_min */ false, GetAssembler());
+}
+
+static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
+  Location op1_loc = invoke->GetLocations()->InAt(0);
+  Location op2_loc = invoke->GetLocations()->InAt(1);
+  Location out_loc = invoke->GetLocations()->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::Register op1_lo = LowRegisterFrom(op1_loc);
+  vixl32::Register op1_hi = HighRegisterFrom(op1_loc);
+  vixl32::Register op2_lo = LowRegisterFrom(op2_loc);
+  vixl32::Register op2_hi = HighRegisterFrom(op2_loc);
+  vixl32::Register out_lo = LowRegisterFrom(out_loc);
+  vixl32::Register out_hi = HighRegisterFrom(out_loc);
+  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+  const vixl32::Register temp = temps.Acquire();
+
+  DCHECK(op1_lo.Is(out_lo));
+  DCHECK(op1_hi.Is(out_hi));
+
+  // Compare op1 >= op2, or op1 < op2.
+  __ Cmp(out_lo, op2_lo);
+  __ Sbcs(temp, out_hi, op2_hi);
+
+  // Now GE/LT condition code is correct for the long comparison.
+  {
+    vixl32::ConditionType cond = is_min ? ge : lt;
+    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
+                                3 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ itt(cond);
+    __ mov(cond, out_lo, op2_lo);
+    __ mov(cond, out_hi, op2_hi);
+  }
+}
+
+static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
+  LocationSummary* locations = new (arena) LocationSummary(invoke,
+                                                           LocationSummary::kNoCall,
+                                                           kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
+  CreateLongLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
+  GenMinMaxLong(invoke, /* is_min */ true, GetAssembler());
+}
+
+void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
+  CreateLongLongToLongLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
+  GenMinMaxLong(invoke, /* is_min */ false, GetAssembler());
+}
+
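
The Cmp/Sbcs pair in GenMinMaxLong above is the standard two-word signed comparison: the
low-word CMP establishes the borrow, and SBCS on the high words leaves the N and V flags so
that the LT/GE conditions reflect the full 64-bit ordering. A portable simulation of that
flag computation (a sketch of the flag logic, not the generated code):

    #include <cstdint>

    // Returns true iff a < b as signed 64-bit values, computed the way the
    // 32-bit Cmp/Sbcs sequence does.
    bool Int64LessThanViaFlags(int64_t a, int64_t b) {
      const uint32_t a_lo = static_cast<uint32_t>(a);
      const uint32_t b_lo = static_cast<uint32_t>(b);
      const int32_t a_hi = static_cast<int32_t>(a >> 32);
      const int32_t b_hi = static_cast<int32_t>(b >> 32);
      // CMP a_lo, b_lo: on ARM, carry set means "no borrow", i.e. a_lo >= b_lo.
      const bool carry = a_lo >= b_lo;
      // SBCS temp, a_hi, b_hi: subtract with borrow, setting N (negative) and
      // V (signed overflow) from the 32-bit result.
      const int64_t wide = static_cast<int64_t>(a_hi) - b_hi - (carry ? 0 : 1);
      const int32_t temp = static_cast<int32_t>(wide);
      const bool n = temp < 0;
      const bool v = wide != temp;
      // LT is N != V and GE is N == V, matching `is_min ? ge : lt` above.
      return n != v;
    }
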
 static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
   vixl32::Register op1 = InputRegisterAt(invoke, 0);
   vixl32::Register op2 = InputRegisterAt(invoke, 1);
@@ -2778,12 +2988,6 @@
   __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
 }
 
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinFloatFloat)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxDoubleDouble)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxFloatFloat)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMinLongLong)
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathMaxLongLong)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble)   // Could be done by changing rounding mode, maybe?
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundFloat)    // Could be done by changing rounding mode, maybe?
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong)     // High register pressure.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 76900f2..abbb91a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2464,16 +2464,15 @@
   }
 }
 
-void HLoadClass::SetLoadKindInternal(LoadKind load_kind) {
-  // Once sharpened, the load kind should not be changed again.
-  // Also, kReferrersClass should never be overwritten.
-  DCHECK_EQ(GetLoadKind(), LoadKind::kDexCacheViaMethod);
+void HLoadClass::SetLoadKind(LoadKind load_kind) {
   SetPackedField<LoadKindField>(load_kind);
 
-  if (load_kind != LoadKind::kDexCacheViaMethod) {
+  if (load_kind != LoadKind::kDexCacheViaMethod &&
+      load_kind != LoadKind::kReferrersClass) {
     RemoveAsUserOfInput(0u);
     SetRawInputAt(0u, nullptr);
   }
+
   if (!NeedsEnvironment()) {
     RemoveEnvironment();
     SetSideEffects(SideEffects::None());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index acf14aa..96f9aba 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4322,6 +4322,11 @@
     return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
   }
 
+  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
+    // The assembly stub currently needs it.
+    return true;
+  }
+
   uint32_t GetImtIndex() const { return imt_index_; }
   uint32_t GetDexMethodIndex() const { return dex_method_index_; }
 
@@ -5508,6 +5513,9 @@
  public:
   // Determines how to load the Class.
   enum class LoadKind {
+    // We cannot load this class. See HSharpening::SharpenClass.
+    kInvalid = -1,
+
     // Use the Class* from the method's own ArtMethod*.
     kReferrersClass,
 
@@ -5564,18 +5572,7 @@
     SetPackedFlag<kFlagGenerateClInitCheck>(false);
   }
 
-  void SetLoadKind(LoadKind load_kind) {
-    SetLoadKindInternal(load_kind);
-  }
-
-  void SetLoadKindWithTypeReference(LoadKind load_kind,
-                                    const DexFile& dex_file,
-                                    dex::TypeIndex type_index) {
-    DCHECK(HasTypeReference(load_kind));
-    DCHECK(IsSameDexFile(dex_file_, dex_file));
-    DCHECK_EQ(type_index_, type_index);
-    SetLoadKindInternal(load_kind);
-  }
+  void SetLoadKind(LoadKind load_kind);
 
   LoadKind GetLoadKind() const {
     return GetPackedField<LoadKindField>();
@@ -5694,6 +5691,11 @@
   // for PC-relative loads, i.e. kBssEntry or kBootImageLinkTimePcRelative.
   HUserRecord<HInstruction*> special_input_;
 
+  // A type index and dex file where the class can be accessed. The dex file can be:
+  // - The compiling method's dex file if the class is defined there too.
+  // - The compiling method's dex file if the class is referenced there.
+  // - The dex file where the class is defined. When the load kind can only be
+  //   kBssEntry or kDexCacheViaMethod, we cannot emit code for this `HLoadClass`.
   const dex::TypeIndex type_index_;
   const DexFile& dex_file_;
 
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1ab6710..727ca7d 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -999,6 +999,7 @@
                           &dex_file,
                           *code_item,
                           compiler_driver,
+                          codegen.get(),
                           compilation_stats_.get(),
                           interpreter_metadata,
                           dex_cache,
@@ -1133,6 +1134,25 @@
   return false;
 }
 
+bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
+  // Note: the runtime is null only for unit testing.
+  return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
+}
+
+bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
+  if (!Runtime::Current()->IsAotCompiler()) {
+    // JIT can always encode methods in stack maps.
+    return true;
+  }
+  if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+    return true;
+  }
+  // TODO(ngeoffray): Support more AOT cases for inlining:
+  // - methods in multidex
+  // - methods in boot image for on-device non-PIC compilation.
+  return false;
+}
+
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
                                     ArtMethod* method,
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 0c89da1..d8cea30 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -17,10 +17,15 @@
 #ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
 #define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
 
+#include "base/mutex.h"
+#include "globals.h"
+
 namespace art {
 
+class ArtMethod;
 class Compiler;
 class CompilerDriver;
+class DexFile;
 
 Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
 
@@ -29,6 +34,10 @@
 // information for checking invariants.
 bool IsCompilingWithCoreImage();
 
+bool EncodeArtMethodInInlineInfo(ArtMethod* method);
+bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index b02f250..c55fccc 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -24,9 +24,8 @@
 
 namespace art {
 
-static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
-                                                     const DexFile& dex_file,
-                                                     Handle<mirror::DexCache> hint_dex_cache)
+static inline ObjPtr<mirror::DexCache> FindDexCacheWithHint(
+    Thread* self, const DexFile& dex_file, Handle<mirror::DexCache> hint_dex_cache)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
     return hint_dex_cache.Get();
@@ -542,7 +541,7 @@
   DCHECK_EQ(instr->GetType(), Primitive::kPrimNot);
 
   ScopedObjectAccess soa(Thread::Current());
-  mirror::DexCache* dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
+  ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_);
   // Get type from dex cache assuming it was populated by the verifier.
   SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
 }
@@ -562,7 +561,7 @@
                                            dex::TypeIndex type_idx,
                                            Handle<mirror::DexCache> hint_dex_cache)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
+  ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
   // Get type from dex cache assuming it was populated by the verifier.
   return dex_cache->GetResolvedType(type_idx);
 }
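
Background on the ObjPtr<> migration above: unlike a raw mirror pointer, ObjPtr can verify in
debug builds that a reference was not held across a point where the GC may have moved the
object. The real art::ObjPtr uses a thread-local cookie scheme; the sketch below only
illustrates the idea and none of its names are the real API.

    #include <cassert>
    #include <cstdint>

    static uintptr_t g_gc_generation = 0;  // Stand-in for runtime GC state.
    static uintptr_t CurrentGcGeneration() { return g_gc_generation; }

    template <typename MirrorType>
    class ObjPtrSketch {
     public:
      explicit ObjPtrSketch(MirrorType* ptr)
          : ptr_(ptr), generation_(CurrentGcGeneration()) {}

      MirrorType* operator->() const {
        // A stale generation means the object may have moved; fail loudly
        // instead of dereferencing a potentially dangling pointer.
        assert(generation_ == CurrentGcGeneration());
        return ptr_;
      }

     private:
      MirrorType* ptr_;
      uintptr_t generation_;
    };
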
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index e745c73..f07f02a 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -42,8 +42,6 @@
       HInstruction* instruction = it.Current();
       if (instruction->IsInvokeStaticOrDirect()) {
         ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
-      } else if (instruction->IsLoadClass()) {
-        ProcessLoadClass(instruction->AsLoadClass());
       } else if (instruction->IsLoadString()) {
         ProcessLoadString(instruction->AsLoadString());
       }
@@ -133,104 +131,93 @@
   invoke->SetDispatchInfo(dispatch_info);
 }
 
-void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
-  ScopedObjectAccess soa(Thread::Current());
-  SharpenClass(load_class, codegen_, compiler_driver_);
-}
-
-void HSharpening::SharpenClass(HLoadClass* load_class,
-                               CodeGenerator* codegen,
-                               CompilerDriver* compiler_driver) {
+HLoadClass::LoadKind HSharpening::SharpenClass(HLoadClass* load_class,
+                                               CodeGenerator* codegen,
+                                               CompilerDriver* compiler_driver,
+                                               const DexCompilationUnit& dex_compilation_unit) {
   Handle<mirror::Class> klass = load_class->GetClass();
   DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
          load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
       << load_class->GetLoadKind();
   DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
 
+  HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
+
   if (load_class->NeedsAccessCheck()) {
     // We need to call the runtime anyway, so we simply get the class as that call's return value.
-    return;
-  }
-
-  if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+  } else if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
     // Loading from the ArtMethod* is the most efficient retrieval in code size.
     // TODO: This may not actually be true for all architectures and
     // locations of target classes. The additional register pressure
     // for using the ArtMethod* should be considered.
-    return;
-  }
-
-  const DexFile& dex_file = load_class->GetDexFile();
-  dex::TypeIndex type_index = load_class->GetTypeIndex();
-
-  bool is_in_boot_image = false;
-  HLoadClass::LoadKind desired_load_kind = static_cast<HLoadClass::LoadKind>(-1);
-  Runtime* runtime = Runtime::Current();
-  if (codegen->GetCompilerOptions().IsBootImage()) {
-    // Compiling boot image. Check if the class is a boot image class.
-    DCHECK(!runtime->UseJitCompilation());
-    if (!compiler_driver->GetSupportBootImageFixup()) {
-      // compiler_driver_test. Do not sharpen.
-      desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
-    } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
-        dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
-      is_in_boot_image = true;
-      desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
-          ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
-          : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
-    } else {
-      // Not a boot image class.
-      DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
-      desired_load_kind = HLoadClass::LoadKind::kBssEntry;
-    }
   } else {
-    is_in_boot_image = (klass.Get() != nullptr) &&
-        runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
-    if (runtime->UseJitCompilation()) {
-      // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
-      // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
-      if (is_in_boot_image) {
-        // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
-        desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
-      } else if (klass.Get() != nullptr) {
-        desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
-      } else {
-        // Class not loaded yet. This happens when the dex code requesting
-        // this `HLoadClass` hasn't been executed in the interpreter.
-        // Fallback to the dex cache.
-        // TODO(ngeoffray): Generate HDeoptimize instead.
+    const DexFile& dex_file = load_class->GetDexFile();
+    dex::TypeIndex type_index = load_class->GetTypeIndex();
+
+    bool is_in_boot_image = false;
+    HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
+    Runtime* runtime = Runtime::Current();
+    if (codegen->GetCompilerOptions().IsBootImage()) {
+      // Compiling boot image. Check if the class is a boot image class.
+      DCHECK(!runtime->UseJitCompilation());
+      if (!compiler_driver->GetSupportBootImageFixup()) {
+        // compiler_driver_test. Do not sharpen.
         desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+      } else if ((klass.Get() != nullptr) && compiler_driver->IsImageClass(
+          dex_file.StringDataByIdx(dex_file.GetTypeId(type_index).descriptor_idx_))) {
+        is_in_boot_image = true;
+        desired_load_kind = codegen->GetCompilerOptions().GetCompilePic()
+            ? HLoadClass::LoadKind::kBootImageLinkTimePcRelative
+            : HLoadClass::LoadKind::kBootImageLinkTimeAddress;
+      } else {
+        // Not a boot image class.
+        DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+        desired_load_kind = HLoadClass::LoadKind::kBssEntry;
       }
-    } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
-      // AOT app compilation. Check if the class is in the boot image.
-      desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
     } else {
-      // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
-      desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+      is_in_boot_image = (klass.Get() != nullptr) &&
+          runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
+      if (runtime->UseJitCompilation()) {
+        // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
+        // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
+        if (is_in_boot_image) {
+          // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
+          desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+        } else if (klass.Get() != nullptr) {
+          desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+        } else {
+          // Class not loaded yet. This happens when the dex code requesting
+          // this `HLoadClass` hasn't been executed in the interpreter.
+          // Fall back to the dex cache.
+          // TODO(ngeoffray): Generate HDeoptimize instead.
+          desired_load_kind = HLoadClass::LoadKind::kDexCacheViaMethod;
+        }
+      } else if (is_in_boot_image && !codegen->GetCompilerOptions().GetCompilePic()) {
+        // AOT app compilation. Check if the class is in the boot image.
+        desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+      } else {
+        // Not JIT and either the klass is not in boot image or we are compiling in PIC mode.
+        desired_load_kind = HLoadClass::LoadKind::kBssEntry;
+      }
+    }
+    DCHECK_NE(desired_load_kind, HLoadClass::LoadKind::kInvalid);
+
+    if (is_in_boot_image) {
+      load_class->MarkInBootImage();
+    }
+    load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
+  }
+
+  if (!IsSameDexFile(load_class->GetDexFile(), *dex_compilation_unit.GetDexFile())) {
+    if ((load_kind == HLoadClass::LoadKind::kDexCacheViaMethod) ||
+        (load_kind == HLoadClass::LoadKind::kBssEntry)) {
+      // We actually cannot reference this class; we're forced to bail.
+      // With kBssEntry, the entrypoint would look the class up in the
+      // caller's dex file, but that dex file does not reference the class.
+      return HLoadClass::LoadKind::kInvalid;
     }
   }
-  DCHECK_NE(desired_load_kind, static_cast<HLoadClass::LoadKind>(-1));
-
-  if (is_in_boot_image) {
-    load_class->MarkInBootImage();
-  }
-
-  HLoadClass::LoadKind load_kind = codegen->GetSupportedLoadClassKind(desired_load_kind);
-  switch (load_kind) {
-    case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBssEntry:
-    case HLoadClass::LoadKind::kDexCacheViaMethod:
-      load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
-      break;
-    case HLoadClass::LoadKind::kBootImageAddress:
-    case HLoadClass::LoadKind::kJitTableAddress:
-      load_class->SetLoadKind(load_kind);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected load kind: " << load_kind;
-      UNREACHABLE();
-  }
+  return load_kind;
 }
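
For reference, the load-kind decision tree above condenses to the following sketch; the enum
and boolean flags are stand-ins, and the real code still filters the result through
codegen->GetSupportedLoadClassKind():

    enum class LoadKindSketch {
      kDexCacheViaMethod,
      kBootImageLinkTimeAddress,
      kBootImageLinkTimePcRelative,
      kBssEntry,
      kBootImageAddress,
      kJitTableAddress,
    };

    LoadKindSketch ChooseLoadKind(bool compiling_boot_image, bool supports_fixup,
                                  bool is_image_class, bool pic, bool use_jit,
                                  bool class_resolved, bool in_boot_image) {
      if (compiling_boot_image) {
        if (!supports_fixup) return LoadKindSketch::kDexCacheViaMethod;
        if (is_image_class) {
          return pic ? LoadKindSketch::kBootImageLinkTimePcRelative
                     : LoadKindSketch::kBootImageLinkTimeAddress;
        }
        return LoadKindSketch::kBssEntry;
      }
      if (use_jit) {
        if (in_boot_image) return LoadKindSketch::kBootImageAddress;
        return class_resolved ? LoadKindSketch::kJitTableAddress
                              : LoadKindSketch::kDexCacheViaMethod;
      }
      if (in_boot_image && !pic) return LoadKindSketch::kBootImageAddress;
      return LoadKindSketch::kBssEntry;
    }
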
 
 void HSharpening::ProcessLoadString(HLoadString* load_string) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index ae3d83e..4240b2f 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
 #define ART_COMPILER_OPTIMIZING_SHARPENING_H_
 
+#include "nodes.h"
 #include "optimization.h"
 
 namespace art {
@@ -24,7 +25,6 @@
 class CodeGenerator;
 class CompilerDriver;
 class DexCompilationUnit;
-class HInvokeStaticOrDirect;
 
 // Optimization that tries to improve the way we dispatch methods and access types,
 // fields, etc. Besides actual method sharpening based on receiver type (for example
@@ -47,15 +47,15 @@
 
   static constexpr const char* kSharpeningPassName = "sharpening";
 
-  // Used internally but also by the inliner.
-  static void SharpenClass(HLoadClass* load_class,
-                           CodeGenerator* codegen,
-                           CompilerDriver* compiler_driver)
+  // Used by the builder and the inliner.
+  static HLoadClass::LoadKind SharpenClass(HLoadClass* load_class,
+                                           CodeGenerator* codegen,
+                                           CompilerDriver* compiler_driver,
+                                           const DexCompilationUnit& dex_compilation_unit)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
-  void ProcessLoadClass(HLoadClass* load_class);
   void ProcessLoadString(HLoadString* load_string);
 
   CodeGenerator* codegen_;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ae1e369..487e4dd 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -17,8 +17,10 @@
 #include "ssa_builder.h"
 
 #include "bytecode_utils.h"
+#include "mirror/class-inl.h"
 #include "nodes.h"
 #include "reference_type_propagation.h"
+#include "scoped_thread_state_change-inl.h"
 #include "ssa_phi_elimination.h"
 
 namespace art {
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 668108d..f8e01b7 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -18,8 +18,9 @@
 
 #include <unordered_map>
 
+#include "art_method-inl.h"
 #include "base/stl_util.h"
-#include "art_method.h"
+#include "optimizing/optimizing_compiler.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 
@@ -107,11 +108,6 @@
   current_dex_register_++;
 }
 
-static bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
-  // Note: the runtime is null only for unit testing.
-  return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
-}
-
 void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
                                           uint32_t dex_pc,
                                           uint32_t num_dex_registers,
@@ -157,56 +153,35 @@
 }
 
 size_t StackMapStream::PrepareForFillIn() {
-  const size_t stack_mask_size_in_bits = stack_mask_max_ + 1;  // Need room for max element too.
-  const size_t number_of_stack_masks = PrepareStackMasks(stack_mask_size_in_bits);
-  const size_t register_mask_size_in_bits = MinimumBitsToStore(register_mask_max_);
-  const size_t number_of_register_masks = PrepareRegisterMasks();
-  dex_register_maps_size_ = ComputeDexRegisterMapsSize();
-  ComputeInlineInfoEncoding();  // needs dex_register_maps_size_.
-  inline_info_size_ = inline_infos_.size() * inline_info_encoding_.GetEntrySize();
+  CodeInfoEncoding encoding;
+  encoding.dex_register_map.num_entries = 0;  // TODO: Remove this field.
+  encoding.dex_register_map.num_bytes = ComputeDexRegisterMapsSize();
+  encoding.location_catalog.num_entries = location_catalog_entries_.size();
+  encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize();
+  encoding.inline_info.num_entries = inline_infos_.size();
+  ComputeInlineInfoEncoding(&encoding.inline_info.encoding,
+                            encoding.dex_register_map.num_bytes);
   CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
-  // The stack map contains compressed native PC offsets.
-  const size_t stack_map_size = stack_map_encoding_.SetFromSizes(
+  // Prepare the CodeInfo variable-sized encoding.
+  encoding.stack_mask.encoding.num_bits = stack_mask_max_ + 1;  // Need room for max element too.
+  encoding.stack_mask.num_entries = PrepareStackMasks(encoding.stack_mask.encoding.num_bits);
+  encoding.register_mask.encoding.num_bits = MinimumBitsToStore(register_mask_max_);
+  encoding.register_mask.num_entries = PrepareRegisterMasks();
+  encoding.stack_map.num_entries = stack_maps_.size();
+  encoding.stack_map.encoding.SetFromSizes(
+      // The stack map contains compressed native PC offsets.
       max_native_pc_offset.CompressedValue(),
       dex_pc_max_,
-      dex_register_maps_size_,
-      inline_info_size_,
-      number_of_register_masks,
-      number_of_stack_masks);
-  stack_maps_size_ = RoundUp(stack_maps_.size() * stack_map_size, kBitsPerByte) / kBitsPerByte;
-  dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
-  const size_t stack_masks_bits = number_of_stack_masks * stack_mask_size_in_bits;
-  const size_t register_masks_bits = number_of_register_masks * register_mask_size_in_bits;
-  // Register masks are last, stack masks are right before that last.
-  // They are both bit packed / aligned.
-  const size_t non_header_size =
-      stack_maps_size_ +
-      dex_register_location_catalog_size_ +
-      dex_register_maps_size_ +
-      inline_info_size_ +
-      RoundUp(stack_masks_bits + register_masks_bits, kBitsPerByte) / kBitsPerByte;
-
-  // Prepare the CodeInfo variable-sized encoding.
-  CodeInfoEncoding code_info_encoding;
-  code_info_encoding.non_header_size = non_header_size;
-  code_info_encoding.number_of_stack_maps = stack_maps_.size();
-  code_info_encoding.number_of_stack_masks = number_of_stack_masks;
-  code_info_encoding.number_of_register_masks = number_of_register_masks;
-  code_info_encoding.stack_mask_size_in_bits = stack_mask_size_in_bits;
-  code_info_encoding.register_mask_size_in_bits = register_mask_size_in_bits;
-  code_info_encoding.stack_map_encoding = stack_map_encoding_;
-  code_info_encoding.inline_info_encoding = inline_info_encoding_;
-  code_info_encoding.number_of_location_catalog_entries = location_catalog_entries_.size();
-  code_info_encoding.Compress(&code_info_encoding_);
-
-  // TODO: Move the catalog at the end. It is currently too expensive at runtime
-  // to compute its size (note that we do not encode that size in the CodeInfo).
-  dex_register_location_catalog_start_ = code_info_encoding_.size() + stack_maps_size_;
-  dex_register_maps_start_ =
-      dex_register_location_catalog_start_ + dex_register_location_catalog_size_;
-  inline_infos_start_ = dex_register_maps_start_ + dex_register_maps_size_;
-
-  needed_size_ = code_info_encoding_.size() + non_header_size;
+      encoding.dex_register_map.num_bytes,
+      encoding.inline_info.num_entries,
+      encoding.register_mask.num_entries,
+      encoding.stack_mask.num_entries);
+  DCHECK_EQ(code_info_encoding_.size(), 0u);
+  encoding.Compress(&code_info_encoding_);
+  // Compute table offsets so we can get the non-header size.
+  encoding.ComputeTableOffsets();
+  DCHECK_EQ(encoding.HeaderSize(), code_info_encoding_.size());
+  needed_size_ = code_info_encoding_.size() + encoding.NonHeaderSize();
   return needed_size_;
 }
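
The rewrite of PrepareForFillIn above replaces the hand-maintained size and offset members
with per-table bookkeeping on CodeInfoEncoding. A condensed sketch of the layout that the
ComputeTableOffsets()/NonHeaderSize() pair provides; the struct and the table order are
simplifications, not the real encoding:

    #include <cstddef>
    #include <initializer_list>

    struct TableEncodingSketch {
      std::size_t num_entries = 0;
      std::size_t num_bytes = 0;
      std::size_t byte_offset = 0;
    };

    struct CodeInfoEncodingSketch {
      TableEncodingSketch stack_map, location_catalog, dex_register_map, inline_info;

      // Lay the tables out back to back after the compressed header.
      void ComputeTableOffsets(std::size_t header_size) {
        std::size_t offset = header_size;
        for (TableEncodingSketch* table :
             {&stack_map, &location_catalog, &dex_register_map, &inline_info}) {
          table->byte_offset = offset;
          offset += table->num_bytes;
        }
      }

      std::size_t NonHeaderSize() const {
        return stack_map.num_bytes + location_catalog.num_bytes +
               dex_register_map.num_bytes + inline_info.num_bytes;
      }
    };
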
 
@@ -259,7 +234,8 @@
   return size;
 }
 
-void StackMapStream::ComputeInlineInfoEncoding() {
+void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
+                                               size_t dex_register_maps_bytes) {
   uint32_t method_index_max = 0;
   uint32_t dex_pc_max = DexFile::kDexNoIndex;
   uint32_t extra_data_max = 0;
@@ -285,10 +261,7 @@
   }
   DCHECK_EQ(inline_info_index, inline_infos_.size());
 
-  inline_info_encoding_.SetFromSizes(method_index_max,
-                                     dex_pc_max,
-                                     extra_data_max,
-                                     dex_register_maps_size_);
+  encoding->SetFromSizes(method_index_max, dex_pc_max, extra_data_max, dex_register_maps_bytes);
 }
 
 void StackMapStream::FillIn(MemoryRegion region) {
@@ -303,19 +276,18 @@
   // Write the CodeInfo header.
   region.CopyFrom(0, MemoryRegion(code_info_encoding_.data(), code_info_encoding_.size()));
 
-  MemoryRegion dex_register_locations_region = region.Subregion(
-      dex_register_maps_start_, dex_register_maps_size_);
-
-  MemoryRegion inline_infos_region = region.Subregion(
-      inline_infos_start_, inline_info_size_);
-
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  DCHECK_EQ(code_info.GetStackMapsSize(encoding), stack_maps_size_);
+  DCHECK_EQ(encoding.stack_map.num_entries, stack_maps_.size());
+
+  MemoryRegion dex_register_locations_region = region.Subregion(
+      encoding.dex_register_map.byte_offset,
+      encoding.dex_register_map.num_bytes);
 
   // Set the Dex register location catalog.
   MemoryRegion dex_register_location_catalog_region = region.Subregion(
-      dex_register_location_catalog_start_, dex_register_location_catalog_size_);
+      encoding.location_catalog.byte_offset,
+      encoding.location_catalog.num_bytes);
   DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
   // Offset in `dex_register_location_catalog` where to store the next
   // register location.
@@ -329,27 +301,27 @@
 
   ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false, kArenaAllocStackMapStream);
   uintptr_t next_dex_register_map_offset = 0;
-  uintptr_t next_inline_info_offset = 0;
+  uintptr_t next_inline_info_index = 0;
   for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
     StackMap stack_map = code_info.GetStackMapAt(i, encoding);
     StackMapEntry entry = stack_maps_[i];
 
-    stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
-    stack_map.SetNativePcCodeOffset(stack_map_encoding_, entry.native_pc_code_offset);
-    stack_map.SetRegisterMaskIndex(stack_map_encoding_, entry.register_mask_index);
-    stack_map.SetStackMaskIndex(stack_map_encoding_, entry.stack_mask_index);
+    stack_map.SetDexPc(encoding.stack_map.encoding, entry.dex_pc);
+    stack_map.SetNativePcCodeOffset(encoding.stack_map.encoding, entry.native_pc_code_offset);
+    stack_map.SetRegisterMaskIndex(encoding.stack_map.encoding, entry.register_mask_index);
+    stack_map.SetStackMaskIndex(encoding.stack_map.encoding, entry.stack_mask_index);
 
     if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
       // No dex map available.
-      stack_map.SetDexRegisterMapOffset(stack_map_encoding_, StackMap::kNoDexRegisterMap);
+      stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, StackMap::kNoDexRegisterMap);
     } else {
       // Search for an entry with the same dex map.
       if (entry.same_dex_register_map_as_ != kNoSameDexMapFound) {
         // If we have a hit reuse the offset.
         stack_map.SetDexRegisterMapOffset(
-            stack_map_encoding_,
+            encoding.stack_map.encoding,
             code_info.GetStackMapAt(entry.same_dex_register_map_as_, encoding)
-                .GetDexRegisterMapOffset(stack_map_encoding_));
+                .GetDexRegisterMapOffset(encoding.stack_map.encoding));
       } else {
         // New dex registers maps should be added to the stack map.
         MemoryRegion register_region = dex_register_locations_region.Subregion(
@@ -358,7 +330,8 @@
         next_dex_register_map_offset += register_region.size();
         DexRegisterMap dex_register_map(register_region);
         stack_map.SetDexRegisterMapOffset(
-            stack_map_encoding_, register_region.begin() - dex_register_locations_region.begin());
+            encoding.stack_map.encoding,
+            register_region.begin() - dex_register_locations_region.begin());
 
         // Set the dex register location.
         FillInDexRegisterMap(dex_register_map,
@@ -370,37 +343,37 @@
 
     // Set the inlining info.
     if (entry.inlining_depth != 0) {
-      MemoryRegion inline_region = inline_infos_region.Subregion(
-          next_inline_info_offset,
-          entry.inlining_depth * inline_info_encoding_.GetEntrySize());
-      next_inline_info_offset += inline_region.size();
-      InlineInfo inline_info(inline_region);
+      InlineInfo inline_info = code_info.GetInlineInfo(next_inline_info_index, encoding);
 
-      // Currently relative to the dex register map.
-      stack_map.SetInlineDescriptorOffset(
-          stack_map_encoding_, inline_region.begin() - dex_register_locations_region.begin());
+      // Fill in the index.
+      stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, next_inline_info_index);
+      DCHECK_EQ(next_inline_info_index, entry.inline_infos_start_index);
+      next_inline_info_index += entry.inlining_depth;
 
-      inline_info.SetDepth(inline_info_encoding_, entry.inlining_depth);
+      inline_info.SetDepth(encoding.inline_info.encoding, entry.inlining_depth);
       DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
+
       for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
         InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
         if (inline_entry.method != nullptr) {
           inline_info.SetMethodIndexAtDepth(
-              inline_info_encoding_,
+              encoding.inline_info.encoding,
               depth,
               High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
           inline_info.SetExtraDataAtDepth(
-              inline_info_encoding_,
+              encoding.inline_info.encoding,
               depth,
               Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
         } else {
-          inline_info.SetMethodIndexAtDepth(inline_info_encoding_, depth, inline_entry.method_index);
-          inline_info.SetExtraDataAtDepth(inline_info_encoding_, depth, 1);
+          inline_info.SetMethodIndexAtDepth(encoding.inline_info.encoding,
+                                            depth,
+                                            inline_entry.method_index);
+          inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
         }
-        inline_info.SetDexPcAtDepth(inline_info_encoding_, depth, inline_entry.dex_pc);
+        inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
         if (inline_entry.num_dex_registers == 0) {
           // No dex map available.
-          inline_info.SetDexRegisterMapOffsetAtDepth(inline_info_encoding_,
+          inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
                                                      depth,
                                                      StackMap::kNoDexRegisterMap);
           DCHECK(inline_entry.live_dex_registers_mask == nullptr);
@@ -412,8 +385,9 @@
           next_dex_register_map_offset += register_region.size();
           DexRegisterMap dex_register_map(register_region);
           inline_info.SetDexRegisterMapOffsetAtDepth(
-              inline_info_encoding_,
-              depth, register_region.begin() - dex_register_locations_region.begin());
+              encoding.inline_info.encoding,
+              depth,
+              register_region.begin() - dex_register_locations_region.begin());
 
           FillInDexRegisterMap(dex_register_map,
                                inline_entry.num_dex_registers,
@@ -421,30 +395,28 @@
                                inline_entry.dex_register_locations_start_index);
         }
       }
-    } else {
-      if (inline_info_size_ != 0) {
-        stack_map.SetInlineDescriptorOffset(stack_map_encoding_, StackMap::kNoInlineInfo);
-      }
+    } else if (encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
+      stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, StackMap::kNoInlineInfo);
     }
   }
 
   // Write stack masks table.
-  size_t stack_mask_bits = encoding.stack_mask_size_in_bits;
+  const size_t stack_mask_bits = encoding.stack_mask.encoding.BitSize();
   if (stack_mask_bits > 0) {
     size_t stack_mask_bytes = RoundUp(stack_mask_bits, kBitsPerByte) / kBitsPerByte;
-    for (size_t i = 0; i < encoding.number_of_stack_masks; ++i) {
+    for (size_t i = 0; i < encoding.stack_mask.num_entries; ++i) {
       MemoryRegion source(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes);
-      BitMemoryRegion stack_mask = code_info.GetStackMask(encoding, i);
-      for (size_t bit_index = 0; bit_index < encoding.stack_mask_size_in_bits; ++bit_index) {
+      BitMemoryRegion stack_mask = code_info.GetStackMask(i, encoding);
+      for (size_t bit_index = 0; bit_index < stack_mask_bits; ++bit_index) {
         stack_mask.StoreBit(bit_index, source.LoadBit(bit_index));
       }
     }
   }
 
   // Write register masks table.
-  for (size_t i = 0; i < encoding.number_of_register_masks; ++i) {
-    BitMemoryRegion register_mask = code_info.GetRegisterMask(encoding, i);
-    register_mask.StoreBits(0, register_masks_[i], encoding.register_mask_size_in_bits);
+  for (size_t i = 0; i < encoding.register_mask.num_entries; ++i) {
+    BitMemoryRegion register_mask = code_info.GetRegisterMask(i, encoding);
+    register_mask.StoreBits(0, register_masks_[i], encoding.register_mask.encoding.BitSize());
   }
 
   // Verify all written data in debug build.
@@ -546,7 +518,8 @@
     }
     // Compare to the seen location.
     if (expected.GetKind() == DexRegisterLocation::Kind::kNone) {
-      DCHECK(!dex_register_map.IsValid() || !dex_register_map.IsDexRegisterLive(reg));
+      DCHECK(!dex_register_map.IsValid() || !dex_register_map.IsDexRegisterLive(reg))
+          << dex_register_map.IsValid() << " " << dex_register_map.IsDexRegisterLive(reg);
     } else {
       DCHECK(dex_register_map.IsDexRegisterLive(reg));
       DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation(
@@ -599,7 +572,7 @@
   DCHECK_EQ(code_info.GetNumberOfStackMaps(encoding), stack_maps_.size());
   for (size_t s = 0; s < stack_maps_.size(); ++s) {
     const StackMap stack_map = code_info.GetStackMapAt(s, encoding);
-    const StackMapEncoding& stack_map_encoding = encoding.stack_map_encoding;
+    const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
     StackMapEntry entry = stack_maps_[s];
 
     // Check main stack map fields.
@@ -633,18 +606,18 @@
     DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
     if (entry.inlining_depth != 0) {
       InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-      DCHECK_EQ(inline_info.GetDepth(encoding.inline_info_encoding), entry.inlining_depth);
+      DCHECK_EQ(inline_info.GetDepth(encoding.inline_info.encoding), entry.inlining_depth);
       for (size_t d = 0; d < entry.inlining_depth; ++d) {
         size_t inline_info_index = entry.inline_infos_start_index + d;
         DCHECK_LT(inline_info_index, inline_infos_.size());
         InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
-        DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, d),
+        DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, d),
                   inline_entry.dex_pc);
-        if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, d)) {
-          DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info_encoding, d),
+        if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, d)) {
+          DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d),
                     inline_entry.method);
         } else {
-          DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info_encoding, d),
+          DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info.encoding, d),
                     inline_entry.method_index);
         }
 
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index b1069a1..08c1d3e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -79,13 +79,6 @@
         current_entry_(),
         current_inline_info_(),
         code_info_encoding_(allocator->Adapter(kArenaAllocStackMapStream)),
-        inline_info_size_(0),
-        dex_register_maps_size_(0),
-        stack_maps_size_(0),
-        dex_register_location_catalog_size_(0),
-        dex_register_location_catalog_start_(0),
-        dex_register_maps_start_(0),
-        inline_infos_start_(0),
         needed_size_(0),
         current_dex_register_(0),
         in_inline_frame_(false) {
@@ -160,7 +153,8 @@
   size_t ComputeDexRegisterMapSize(uint32_t num_dex_registers,
                                    const BitVector* live_dex_registers_mask) const;
   size_t ComputeDexRegisterMapsSize() const;
-  void ComputeInlineInfoEncoding();
+  void ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
+                                 size_t dex_register_maps_bytes);
 
   CodeOffset ComputeMaxNativePcCodeOffset() const;
 
@@ -214,16 +208,7 @@
 
   StackMapEntry current_entry_;
   InlineInfoEntry current_inline_info_;
-  StackMapEncoding stack_map_encoding_;
-  InlineInfoEncoding inline_info_encoding_;
   ArenaVector<uint8_t> code_info_encoding_;
-  size_t inline_info_size_;
-  size_t dex_register_maps_size_;
-  size_t stack_maps_size_;
-  size_t dex_register_location_catalog_size_;
-  size_t dex_register_location_catalog_start_;
-  size_t dex_register_maps_start_;
-  size_t inline_infos_start_;
   size_t needed_size_;
   uint32_t current_dex_register_;
   bool in_inline_frame_;
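The mechanical rename running through this change, from flat members such as encoding.stack_map_encoding and encoding.number_of_stack_masks to encoding.stack_map.encoding and encoding.stack_mask.num_entries, reflects a regrouping of CodeInfoEncoding: each bit table now carries its field encoding and its entry count side by side, which is also why the per-table size bookkeeping fields above could be deleted. A sketch of that shape, with illustrative stand-ins rather than the actual ART declarations:

    #include <cstddef>
    #include <cstdint>

    struct BitFieldEncodingSketch {
      size_t bit_size = 0;                         // 0 means the field is omitted entirely
      size_t BitSize() const { return bit_size; }
    };

    template <typename Encoding>
    struct TableEncodingSketch {
      Encoding encoding;                           // how wide each row is
      uint32_t num_entries = 0;                    // how many rows the table has
    };

    struct CodeInfoEncodingSketch {
      TableEncodingSketch<BitFieldEncodingSketch> stack_map;
      TableEncodingSketch<BitFieldEncodingSketch> register_mask;
      TableEncodingSketch<BitFieldEncodingSketch> stack_mask;
      TableEncodingSketch<BitFieldEncodingSketch> inline_info;
    };

    // Total size of one table in bits, as the writers above and the oatdump
    // statistics below compute it.
    inline size_t TableBits(const TableEncodingSketch<BitFieldEncodingSketch>& t) {
      return t.encoding.BitSize() * t.num_entries;
    }

A zero-width field stores nothing, which is why the writer earlier guards with GetInlineInfoEncoding().BitSize() > 0 before calling SetInlineInfoIndex.
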
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index ce6d5c2..bd0aa6d 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -32,10 +32,10 @@
     const StackMap& stack_map,
     const BitVector& bit_vector) {
   BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
-  if (bit_vector.GetNumberOfBits() > encoding.stack_mask_size_in_bits) {
+  if (bit_vector.GetNumberOfBits() > encoding.stack_mask.encoding.BitSize()) {
     return false;
   }
-  for (size_t i = 0; i < encoding.stack_mask_size_in_bits; ++i) {
+  for (size_t i = 0; i < encoding.stack_mask.encoding.BitSize(); ++i) {
     if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
       return false;
     }
@@ -78,13 +78,13 @@
   StackMap stack_map = code_info.GetStackMapAt(0, encoding);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
   ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
 
   ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask));
 
-  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
   DexRegisterMap dex_register_map =
       code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -123,7 +123,7 @@
   ASSERT_EQ(0, location0.GetValue());
   ASSERT_EQ(-2, location1.GetValue());
 
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
 }
 
 TEST(StackMapTest, Test2) {
@@ -193,13 +193,13 @@
     StackMap stack_map = code_info.GetStackMapAt(0, encoding);
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-    ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
-    ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+    ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+    ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
     ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
 
     ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
     DexRegisterMap dex_register_map =
         code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -238,13 +238,13 @@
     ASSERT_EQ(0, location0.GetValue());
     ASSERT_EQ(-2, location1.GetValue());
 
-    ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+    ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
     InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info_encoding));
-    ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
-    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
+    ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info.encoding));
+    ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
   }
 
   // Second stack map.
@@ -252,13 +252,13 @@
     StackMap stack_map = code_info.GetStackMapAt(1, encoding);
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
-    ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
-    ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+    ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
+    ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
     ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(encoding, stack_map));
 
     ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask2));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
     DexRegisterMap dex_register_map =
         code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -298,7 +298,7 @@
     ASSERT_EQ(18, location0.GetValue());
     ASSERT_EQ(3, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
   }
 
   // Third stack map.
@@ -306,13 +306,13 @@
     StackMap stack_map = code_info.GetStackMapAt(2, encoding);
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
-    ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding));
-    ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+    ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map.encoding));
+    ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
     ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(encoding, stack_map));
 
     ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask3));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
     DexRegisterMap dex_register_map =
         code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -352,7 +352,7 @@
     ASSERT_EQ(6, location0.GetValue());
     ASSERT_EQ(8, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
   }
 
   // Fourth stack map.
@@ -360,13 +360,13 @@
     StackMap stack_map = code_info.GetStackMapAt(3, encoding);
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
     ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
-    ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding));
-    ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+    ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map.encoding));
+    ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
     ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(encoding, stack_map));
 
     ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask4));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
     DexRegisterMap dex_register_map =
         code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -406,7 +406,7 @@
     ASSERT_EQ(3, location0.GetValue());
     ASSERT_EQ(1, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
   }
 }
 
@@ -442,11 +442,11 @@
   StackMap stack_map = code_info.GetStackMapAt(0, encoding);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
   ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
 
-  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
   DexRegisterMap dex_register_map =
       code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
   ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
@@ -483,7 +483,7 @@
   ASSERT_EQ(0, location0.GetValue());
   ASSERT_EQ(-2, location1.GetValue());
 
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
 }
 
 // Generate a stack map whose dex register offset is
@@ -543,13 +543,13 @@
   ASSERT_EQ(255u, dex_register_map0.Size());
 
   StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
-  ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map_encoding));
+  ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map.encoding));
   // ...the offset of the second Dex register map (relative to the
   // beginning of the Dex register maps region) is 255 (i.e.,
   // kNoDexRegisterMapSmallEncoding).
-  ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map_encoding),
+  ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
             StackMap::kNoDexRegisterMap);
-  ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map_encoding), 0xFFu);
+  ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding), 0xFFu);
 }
 
 TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -602,12 +602,12 @@
   ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci, encoding));
 
   // Verify dex register map offsets.
-  ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map_encoding),
-            sm1.GetDexRegisterMapOffset(encoding.stack_map_encoding));
-  ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map_encoding),
-            sm2.GetDexRegisterMapOffset(encoding.stack_map_encoding));
-  ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map_encoding),
-            sm2.GetDexRegisterMapOffset(encoding.stack_map_encoding));
+  ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
+            sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+  ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
+            sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+  ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
+            sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
 }
 
 TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -641,22 +641,22 @@
   StackMap stack_map = code_info.GetStackMapAt(0, encoding);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
   ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
 
-  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
 
   stack_map = code_info.GetStackMapAt(1, encoding);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
-  ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
-  ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA));
+  ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
+  ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
   ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(encoding, stack_map));
 
-  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
 }
 
 TEST(StackMapTest, InlineTest) {
@@ -743,11 +743,11 @@
     ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
 
     InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
-    ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info_encoding));
-    ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
-    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
+    ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info.encoding));
+    ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
 
     DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
     ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -767,13 +767,13 @@
     ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
 
     InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
-    ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info_encoding));
-    ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
-    ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
+    ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info.encoding));
+    ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
 
     DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
     ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
@@ -783,7 +783,7 @@
     ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci, encoding));
     ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
 
-    ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 2));
+    ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 2));
   }
 
   {
@@ -793,7 +793,7 @@
     DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, encoding, 2);
     ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
     ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
-    ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map_encoding));
+    ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map.encoding));
   }
 
   {
@@ -805,15 +805,15 @@
     ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
 
     InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
-    ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info_encoding));
-    ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 0));
-    ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 1));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 1));
-    ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info_encoding, 2));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info_encoding, 2));
+    ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info.encoding));
+    ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
 
-    ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, 0));
+    ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 0));
 
     DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, encoding, 1);
     ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci, encoding));
@@ -865,8 +865,8 @@
 
   StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4, encoding);
   StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8, encoding);
-  EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map_encoding),
-            stack_map2.GetStackMaskIndex(encoding.stack_map_encoding));
+  EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map.encoding),
+            stack_map2.GetStackMaskIndex(encoding.stack_map.encoding));
 }
 
 }  // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 196d8d4..192fc27 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1644,6 +1644,12 @@
       dex_caches_.push_back(soa.AddLocalReference<jobject>(
           class_linker->RegisterDexFile(*dex_file,
                                         soa.Decode<mirror::ClassLoader>(class_loader_).Ptr())));
+      if (dex_caches_.back() == nullptr) {
+        soa.Self()->AssertPendingException();
+        soa.Self()->ClearException();
+        LOG(ERROR) << "Failed to register dex file.";
+        return false;
+      }
       // Pre-register dex files so that we can access verification results without locks during
       // compilation and verification.
       verification_results_->AddDexFile(dex_file);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 0f02da7..ce63e18 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -586,20 +586,26 @@
       kByteKindQuickMethodHeader,
       kByteKindCodeInfoLocationCatalog,
       kByteKindCodeInfoDexRegisterMap,
-      kByteKindCodeInfoInlineInfo,
       kByteKindCodeInfoEncoding,
-      kByteKindCodeInfoOther,
       kByteKindCodeInfoStackMasks,
       kByteKindCodeInfoRegisterMasks,
       kByteKindStackMapNativePc,
       kByteKindStackMapDexPc,
       kByteKindStackMapDexRegisterMap,
-      kByteKindStackMapInlineInfo,
+      kByteKindStackMapInlineInfoIndex,
       kByteKindStackMapRegisterMaskIndex,
       kByteKindStackMapStackMaskIndex,
+      kByteKindInlineInfoMethodIndex,
+      kByteKindInlineInfoDexPc,
+      kByteKindInlineInfoExtraData,
+      kByteKindInlineInfoDexRegisterMap,
+      kByteKindInlineInfoIsLast,
       kByteKindCount,
-      kByteKindStackMapFirst = kByteKindCodeInfoOther,
+      // Special ranges for std::accumulate convenience.
+      kByteKindStackMapFirst = kByteKindStackMapNativePc,
       kByteKindStackMapLast = kByteKindStackMapStackMaskIndex,
+      kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndex,
+      kByteKindInlineInfoLast = kByteKindInlineInfoIsLast,
     };
     int64_t bits[kByteKindCount] = {};
     // Since code has deduplication, seen tracks already seen pointers to avoid double counting
@@ -624,17 +630,17 @@
       const int64_t sum = std::accumulate(bits, bits + kByteKindCount, 0u);
       os.Stream() << "Dumping cumulative use of " << sum / kBitsPerByte << " accounted bytes\n";
       if (sum > 0) {
-        const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
-                                                       bits + kByteKindStackMapLast + 1,
-                                                       0u);
         Dump(os, "Code                            ", bits[kByteKindCode], sum);
         Dump(os, "QuickMethodHeader               ", bits[kByteKindQuickMethodHeader], sum);
         Dump(os, "CodeInfoEncoding                ", bits[kByteKindCodeInfoEncoding], sum);
         Dump(os, "CodeInfoLocationCatalog         ", bits[kByteKindCodeInfoLocationCatalog], sum);
         Dump(os, "CodeInfoDexRegisterMap          ", bits[kByteKindCodeInfoDexRegisterMap], sum);
-        Dump(os, "CodeInfoInlineInfo              ", bits[kByteKindCodeInfoInlineInfo], sum);
         Dump(os, "CodeInfoStackMasks              ", bits[kByteKindCodeInfoStackMasks], sum);
         Dump(os, "CodeInfoRegisterMasks           ", bits[kByteKindCodeInfoRegisterMasks], sum);
+        // Stack map section.
+        const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
+                                                       bits + kByteKindStackMapLast + 1,
+                                                       0u);
         Dump(os, "CodeInfoStackMap                ", stack_map_bits, sum);
         {
           ScopedIndentation indent1(&os);
@@ -654,8 +660,8 @@
                stack_map_bits,
                "stack map");
           Dump(os,
-               "StackMapInlineInfo            ",
-               bits[kByteKindStackMapInlineInfo],
+               "StackMapInlineInfoIndex       ",
+               bits[kByteKindStackMapInlineInfoIndex],
                stack_map_bits,
                "stack map");
           Dump(os,
@@ -669,6 +675,39 @@
                stack_map_bits,
                "stack map");
         }
+        // Inline info section.
+        const int64_t inline_info_bits = std::accumulate(bits + kByteKindInlineInfoFirst,
+                                                         bits + kByteKindInlineInfoLast + 1,
+                                                         0u);
+        Dump(os, "CodeInfoInlineInfo              ", inline_info_bits, sum);
+        {
+          ScopedIndentation indent1(&os);
+          Dump(os,
+               "InlineInfoMethodIndex         ",
+               bits[kByteKindInlineInfoMethodIndex],
+               inline_info_bits,
+               "inline info");
+          Dump(os,
+               "InlineInfoDexPc               ",
+               bits[kByteKindInlineInfoDexPc],
+               inline_info_bits,
+               "inline info");
+          Dump(os,
+               "InlineInfoExtraData           ",
+               bits[kByteKindInlineInfoExtraData],
+               inline_info_bits,
+               "inline info");
+          Dump(os,
+               "InlineInfoDexRegisterMap      ",
+               bits[kByteKindInlineInfoDexRegisterMap],
+               inline_info_bits,
+               "inline info");
+          Dump(os,
+               "InlineInfoIsLast              ",
+               bits[kByteKindInlineInfoIsLast],
+               inline_info_bits,
+               "inline info");
+        }
       }
       os.Stream() << "\n" << std::flush;
     }
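The reordered enum keeps each family of counters contiguous, so a section subtotal is a single std::accumulate over a [First, Last] alias range instead of a hand-maintained sum. A small sketch of the pattern; note that std::accumulate sums in the type of its initial value, so a 64-bit initializer (rather than 0u) keeps large bit counts from truncating:

    #include <cstdint>
    #include <numeric>

    enum ByteKind : int {
      kKindCode,         // illustrative members, not the real kByteKind list
      kKindStackMapA,
      kKindStackMapB,
      kKindInlineInfoA,
      kKindCount,
      // Alias ranges: no new slots, just named bounds for accumulation.
      kKindStackMapFirst = kKindStackMapA,
      kKindStackMapLast = kKindStackMapB,
    };

    inline int64_t StackMapSubtotal(const int64_t (&bits)[kKindCount]) {
      return std::accumulate(bits + kKindStackMapFirst,
                             bits + kKindStackMapLast + 1,
                             int64_t{0});  // 64-bit accumulator
    }
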
@@ -1434,6 +1473,7 @@
       Runtime* const runtime = Runtime::Current();
       Handle<mirror::DexCache> dex_cache(
           hs->NewHandle(runtime->GetClassLinker()->RegisterDexFile(*dex_file, nullptr)));
+      CHECK(dex_cache.Get() != nullptr);
       DCHECK(options_.class_loader_ != nullptr);
       return verifier::MethodVerifier::VerifyMethodAndDump(
           soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -1461,8 +1501,8 @@
         StackMap last = code_info_.GetStackMapAt(0u, encoding_);
         for (size_t i = 1; i != number_of_stack_maps_; ++i) {
           StackMap current = code_info_.GetStackMapAt(i, encoding_);
-          if (last.GetNativePcOffset(encoding_.stack_map_encoding, instruction_set) >
-              current.GetNativePcOffset(encoding_.stack_map_encoding, instruction_set)) {
+          if (last.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set) >
+              current.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set)) {
             ordered = false;
             break;
           }
@@ -1478,16 +1518,16 @@
                     indexes_.end(),
                     [this](size_t lhs, size_t rhs) {
                       StackMap left = code_info_.GetStackMapAt(lhs, encoding_);
-                      uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map_encoding,
+                      uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map.encoding,
                                                                 instruction_set_);
                       StackMap right = code_info_.GetStackMapAt(rhs, encoding_);
-                      uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map_encoding,
+                      uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map.encoding,
                                                                   instruction_set_);
                       // If the PCs are the same, compare indexes to preserve the original order.
                       return (left_pc < right_pc) || (left_pc == right_pc && lhs < rhs);
                     });
         }
-        offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map_encoding,
+        offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map.encoding,
                                                      instruction_set_);
       }
     }
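The comparator above sorts a permutation of stack-map indexes rather than the maps themselves and breaks native-PC ties by original index, which makes the unstable std::sort behave like a stable one. The same trick in isolation:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Return the indexes of `pc` in ascending PC order; equal PCs keep their
    // original relative order because the index itself is the tie-breaker.
    std::vector<size_t> SortedIndexesByPc(const std::vector<uint32_t>& pc) {
      std::vector<size_t> indexes(pc.size());
      std::iota(indexes.begin(), indexes.end(), size_t{0});
      std::sort(indexes.begin(), indexes.end(), [&pc](size_t lhs, size_t rhs) {
        return (pc[lhs] < pc[rhs]) || (pc[lhs] == pc[rhs] && lhs < rhs);
      });
      return indexes;
    }
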
@@ -1512,7 +1552,7 @@
       ++stack_map_index_;
       offset_ = (stack_map_index_ == number_of_stack_maps_)
           ? static_cast<uint32_t>(-1)
-          : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map_encoding,
+          : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map.encoding,
                                                               instruction_set_);
     }
 
@@ -1550,14 +1590,14 @@
       StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
       {
         CodeInfoEncoding encoding(helper.GetEncoding());
-        StackMapEncoding stack_map_encoding(encoding.stack_map_encoding);
-        // helper.GetCodeInfo().GetStackMapAt(0, encoding).;
-        const size_t num_stack_maps = encoding.number_of_stack_maps;
+        StackMapEncoding stack_map_encoding(encoding.stack_map.encoding);
+        const size_t num_stack_maps = encoding.stack_map.num_entries;
         std::vector<uint8_t> size_vector;
         encoding.Compress(&size_vector);
         if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfoEncoding,
                                    size_vector.size() * kBitsPerByte,
                                    oat_method.GetVmapTable())) {
+          // Stack maps.
           stats_.AddBits(
               Stats::kByteKindStackMapNativePc,
               stack_map_encoding.GetNativePcEncoding().BitSize() * num_stack_maps);
@@ -1568,7 +1608,7 @@
               Stats::kByteKindStackMapDexRegisterMap,
               stack_map_encoding.GetDexRegisterMapEncoding().BitSize() * num_stack_maps);
           stats_.AddBits(
-              Stats::kByteKindStackMapInlineInfo,
+              Stats::kByteKindStackMapInlineInfoIndex,
               stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapRegisterMaskIndex,
@@ -1576,30 +1616,47 @@
           stats_.AddBits(
               Stats::kByteKindStackMapStackMaskIndex,
               stack_map_encoding.GetStackMaskIndexEncoding().BitSize() * num_stack_maps);
+
+          // Stack masks.
           stats_.AddBits(
               Stats::kByteKindCodeInfoStackMasks,
-              helper.GetCodeInfo().GetNumberOfStackMaskBits(encoding) *
-                  encoding.number_of_stack_masks);
+              encoding.stack_mask.encoding.BitSize() * encoding.stack_mask.num_entries);
+
+          // Register masks.
           stats_.AddBits(
               Stats::kByteKindCodeInfoRegisterMasks,
-              encoding.register_mask_size_in_bits * encoding.number_of_stack_masks);
-          const size_t stack_map_bytes = helper.GetCodeInfo().GetStackMapsSize(encoding);
+              encoding.register_mask.encoding.BitSize() * encoding.register_mask.num_entries);
+
+          // Location catalog.
           const size_t location_catalog_bytes =
               helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding);
           stats_.AddBits(Stats::kByteKindCodeInfoLocationCatalog,
                          kBitsPerByte * location_catalog_bytes);
+          // Dex register bytes.
           const size_t dex_register_bytes =
               helper.GetCodeInfo().GetDexRegisterMapsSize(encoding, code_item->registers_size_);
           stats_.AddBits(
               Stats::kByteKindCodeInfoDexRegisterMap,
               kBitsPerByte * dex_register_bytes);
-          const size_t inline_info_bytes =
-              encoding.non_header_size -
-              stack_map_bytes -
-              location_catalog_bytes -
-              dex_register_bytes;
-          stats_.AddBits(Stats::kByteKindCodeInfoInlineInfo,
-                         inline_info_bytes * kBitsPerByte);
+
+          // Inline infos.
+          const size_t num_inline_infos = encoding.inline_info.num_entries;
+          if (num_inline_infos > 0u) {
+            stats_.AddBits(
+                Stats::kByteKindInlineInfoMethodIndex,
+                encoding.inline_info.encoding.GetMethodIndexEncoding().BitSize() * num_inline_infos);
+            stats_.AddBits(
+                Stats::kByteKindInlineInfoDexPc,
+                encoding.inline_info.encoding.GetDexPcEncoding().BitSize() * num_inline_infos);
+            stats_.AddBits(
+                Stats::kByteKindInlineInfoExtraData,
+                encoding.inline_info.encoding.GetExtraDataEncoding().BitSize() * num_inline_infos);
+            stats_.AddBits(
+                Stats::kByteKindInlineInfoDexRegisterMap,
+                encoding.inline_info.encoding.GetDexRegisterMapEncoding().BitSize() *
+                    num_inline_infos);
+            stats_.AddBits(Stats::kByteKindInlineInfoIsLast, num_inline_infos);
+          }
         }
       }
       const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
@@ -2690,7 +2747,9 @@
     std::string error_msg;
     const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
     CHECK(dex_file != nullptr) << error_msg;
-    class_linker->RegisterDexFile(*dex_file, nullptr);
+    ObjPtr<mirror::DexCache> dex_cache =
+        class_linker->RegisterDexFile(*dex_file, nullptr);
+    CHECK(dex_cache != nullptr);
     class_path->push_back(dex_file);
   }
 
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 503cd4d..c7c3ddd 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -104,6 +104,7 @@
         expected_prefixes.push_back("DEX CODE:");
         expected_prefixes.push_back("CODE:");
         expected_prefixes.push_back("CodeInfoEncoding");
+        expected_prefixes.push_back("CodeInfoInlineInfo");
       }
       if (mode == kModeArt) {
         exec_argv.push_back("--image=" + core_art_location_);
diff --git a/profman/profman.cc b/profman/profman.cc
index ffebb6a..b0cbed1 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -248,8 +248,11 @@
     return result;
   }
 
-  int DumpOneProfile(const std::string& banner, const std::string& filename, int fd,
-                     const std::vector<const DexFile*>* dex_files, std::string* dump) {
+  int DumpOneProfile(const std::string& banner,
+                     const std::string& filename,
+                     int fd,
+                     const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+                     std::string* dump) {
     if (!filename.empty()) {
       fd = open(filename.c_str(), O_RDWR);
       if (fd < 0) {
@@ -277,7 +280,7 @@
 
     // Open apk/zip files and read dex files.
     MemMap::Init();  // for ZipArchive::OpenFromFd
-    std::vector<const DexFile*> dex_files;
+    std::vector<std::unique_ptr<const DexFile>> dex_files;
     assert(dex_locations_.size() == apks_fd_.size());
     static constexpr bool kVerifyChecksum = true;
     for (size_t i = 0; i < dex_locations_.size(); ++i) {
@@ -293,7 +296,7 @@
         continue;
       }
       for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
-        dex_files.push_back(dex_file.release());
+        dex_files.emplace_back(std::move(dex_file));
       }
     }
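The container change above is an ownership fix: the old code filled a vector of raw pointers via release(), and nothing ever deleted them. Holding std::unique_ptr elements and moving them in keeps a single owner whose destructor still runs when the vector goes out of scope. A minimal sketch with a stand-in type:

    #include <memory>
    #include <utility>
    #include <vector>

    struct DexFileSketch {};  // stand-in for const DexFile

    // Transfer ownership into `owned`; every file is freed when `owned` is
    // destroyed, where push_back(dex_file.release()) used to leak.
    void CollectDexFiles(std::vector<std::unique_ptr<const DexFileSketch>>* owned,
                         std::vector<std::unique_ptr<const DexFileSketch>>* produced) {
      for (std::unique_ptr<const DexFileSketch>& dex_file : *produced) {
        owned->emplace_back(std::move(dex_file));
      }
    }
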
 
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 276f304..9585ba2 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -208,6 +208,7 @@
         "verifier/reg_type_cache.cc",
         "verifier/register_line.cc",
         "verifier/verifier_deps.cc",
+        "verify_object.cc",
         "well_known_classes.cc",
         "zip_archive.cc",
 
@@ -379,6 +380,10 @@
     },
     cflags: ["-DBUILDING_LIBART=1"],
     generated_sources: ["art_operator_srcs"],
+    // asm_support_gen.h (used by asm_support.h) is generated with cpp-define-generator
+    generated_headers: ["cpp-define-generator-asm-support"],
+    // export our headers so the libart-gtest targets can use them as well.
+    export_generated_headers: ["cpp-define-generator-asm-support"],
     clang: true,
     include_dirs: [
         "art/cmdline",
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index ed36436..a443a40 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1086,11 +1086,37 @@
     DELIVER_PENDING_EXCEPTION_FRAME_READY
 END art_quick_resolve_string
 
+
 // Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+// Comment out allocators that have arm-specific asm.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
 
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_resolved_rosalloc
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
     // Fast path rosalloc allocation.
     // r0: type/return value, r9: Thread::Current
     // r1, r2, r3, r12: free.
@@ -1099,13 +1125,13 @@
                                                               // TODO: consider using ldrd.
     ldr    r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
     cmp    r3, r12
-    bhs    .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    bhs    .Lslow_path\c_name
 
     ldr    r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (r3)
     cmp    r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a thread
                                                               // local allocation. Also does the
                                                               // initialized and finalizable checks.
-    bhs    .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    bhs    .Lslow_path\c_name
                                                               // Compute the rosalloc bracket index
                                                               // from the size. Since the size is
                                                               // already aligned we can combine the
@@ -1119,7 +1145,7 @@
                                                               // Load the free list head (r3). This
                                                               // will be the return val.
     ldr    r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
-    cbz    r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    cbz    r3, .Lslow_path\c_name
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
     ldr    r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET]               // Load the next pointer of the head
                                                               // and update the list head with the
@@ -1164,16 +1190,20 @@
     mov    r0, r3                                             // Set the return value and return.
     bx     lr
 
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME r2     @ save callee saves in case of GC
     mov    r1, r9                     @ pass Thread::Current
-    bl     artAllocObjectFromCodeResolvedRosAlloc     @ (mirror::Class* cls, Thread*)
+    bl     \cxx_name                  @ (mirror::Class* cls, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_rosalloc
+END \c_name
+.endm
 
-// The common fast path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
+// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
 //
 // r0: type r9: Thread::Current, r1, r2, r3, r12: free.
 // Need to preserve r0 to the slow path.
@@ -1212,41 +1242,173 @@
     bx     lr
 .endm
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_resolved_tlab
+// The common code for art_quick_alloc_object_*_tlab and *_region_tlab.
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+ENTRY \name
     // Fast path tlab allocation.
     // r0: type, r9: Thread::Current
     // r1, r2, r3, r12: free.
-#if defined(USE_READ_BARRIER)
-    mvn    r0, #0                                             // Read barrier not supported here.
-    bx     lr                                                 // Return -1.
-#endif
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
+    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name
+.Lslow_path\name:
     SETUP_SAVE_REFS_ONLY_FRAME r2                             // Save callee saves in case of GC.
     mov    r1, r9                                             // Pass Thread::Current.
-    bl     artAllocObjectFromCodeResolvedTLAB                 // (mirror::Class* klass, Thread*)
+    bl     \entrypoint                                        // (mirror::Class* klass, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_tlab
+END \name
+.endm
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_resolved_region_tlab
-    // Fast path tlab allocation.
-    // r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
-#if !defined(USE_READ_BARRIER)
-    eor    r0, r0, r0                                         // Read barrier must be enabled here.
-    sub    r0, r0, #1                                         // Return -1.
-    bx     lr
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+
+
+// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
+// and art_quick_alloc_array_resolved/initialized_region_tlab.
+//
+// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// Need to preserve r0 and r1 to the slow path.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
+    and    r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED             // Apply alignment mask
+                                                              // (addr + 7) & ~7.
+
+                                                              // Load thread_local_pos (r3) and
+                                                              // thread_local_end (r12) with ldrd.
+                                                              // Check constraints for ldrd.
+#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
+#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
 #endif
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
-    SETUP_SAVE_REFS_ONLY_FRAME r2                             // Save callee saves in case of GC.
-    mov    r1, r9                                             // Pass Thread::Current.
-    bl     artAllocObjectFromCodeResolvedRegionTLAB           // (mirror::Class* klass, Thread*)
+    ldrd   r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+    sub    r12, r12, r3                                       // Compute the remaining buf size.
+    cmp    r2, r12                                            // Check if the total_size fits.
+    bhi    \slowPathLabel
+    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+    add    r2, r2, r3
+    str    r2, [r9, #THREAD_LOCAL_POS_OFFSET]                 // Store new thread_local_pos.
+    ldr    r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]             // Increment thread_local_objects.
+    add    r2, r2, #1
+    str    r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+    POISON_HEAP_REF r0
+    str    r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
+    str    r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET]              // Store the array length.
+                                                              // Fence. This is "ish" not "ishst" so
+                                                              // that the code after this allocation
+                                                              // site will see the right values in
+                                                              // the fields of the class.
+                                                              // Alternatively we could use "ishst"
+                                                              // if we use load-acquire for the
+                                                              // object size load.
+    mov    r0, r3
+    dmb    ish
+    bx     lr
+.endm
+
+.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
+ENTRY \name
+    // Fast path array allocation for TLAB / region TLAB allocation.
+    // r0: mirror::Class* type
+    // r1: int32_t component_count
+    // r9: thread
+    // r2, r3, r12: free.
+    \size_setup .Lslow_path\name
+    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
+.Lslow_path\name:
+    // r0: mirror::Class* klass
+    // r1: int32_t component_count
+    // r2: Thread* self
+    SETUP_SAVE_REFS_ONLY_FRAME r2  // save callee saves in case of GC
+    mov    r2, r9                  // pass Thread::Current
+    bl     \entrypoint
     RESTORE_SAVE_REFS_ONLY_FRAME
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_region_tlab
+END \name
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
+    bkpt                                                    // We should never enter here.
+                                                            // Code below is for reference.
+                                                            // Possibly a large object, go slow.
+                                                            // Also does negative array size check.
+    movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8)
+    cmp r1, r2
+    bhi \slow_path
+                                                            // Array classes are never finalizable
+                                                            // or uninitialized, no need to check.
+    ldr    r3, [r0, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]    // Load component type
+    UNPOISON_HEAP_REF r3
+    ldr    r3, [r3, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+    lsr    r3, r3, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT         // Component size shift is in high 16
+                                                            // bits.
+    lsl    r2, r1, r3                                       // Calculate data size
+                                                            // Add array data offset and alignment.
+    add    r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+    add    r3, r3, #1                                       // Add 4 to the size only if the
+                                                            // component size shift is 3
+                                                            // (for 64 bit alignment).
+    and    r3, r3, #4
+    add    r2, r2, r3
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_8 slow_path
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    movw r2, #(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET)
+    cmp r1, r2
+    bhi \slow_path
+    // Add array data offset and alignment.
+    add    r2, r1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_16 slow_path
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2)
+    cmp r1, r2
+    bhi \slow_path
+    lsl    r2, r1, #1
+    // Add array data offset and alignment.
+    add    r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_32 slow_path
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4)
+    cmp r1, r2
+    bhi \slow_path
+    lsl    r2, r1, #2
+    // Add array data offset and alignment.
+    add    r2, r2, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+.macro COMPUTE_ARRAY_SIZE_64 slow_path
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    movw r2, #((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8)
+    cmp r1, r2
+    bhi \slow_path
+    lsl    r2, r1, #3
+    // Add array data offset and alignment.
+    add    r2, r2, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+.endm
+
+// TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm, remove
+// the entrypoint once all backends have been updated to use the size variants.
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
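Taken together, the COMPUTE_ARRAY_SIZE_<n> macros and ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE implement a bump-pointer fast path: round the requested size up to the object alignment, bail to the slow path if it does not fit in the thread-local buffer, otherwise bump the position, stamp the object header, and fence. A C++ rendering of the same logic, with hypothetical offsets and a plain struct standing in for the Thread fields (not the ART runtime API):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kClassOffset = 0;   // stand-in for MIRROR_OBJECT_CLASS_OFFSET
    constexpr uintptr_t kLengthOffset = 8;  // stand-in for MIRROR_ARRAY_LENGTH_OFFSET
    constexpr size_t kAlignmentMask = 7;    // OBJECT_ALIGNMENT_MASK (8-byte alignment)

    struct TlabSketch {      // stand-in for Thread's TLAB fields
      uintptr_t pos;         // thread_local_pos
      uintptr_t end;         // thread_local_end
      size_t objects;        // thread_local_objects
    };

    // COMPUTE_ARRAY_SIZE_<n>: data size plus header offset, rounded up to the
    // object alignment (the AND with the toggled mask in the fast path).
    inline size_t ComputeArraySize(size_t component_count, size_t shift,
                                   size_t data_offset) {
      size_t size = (component_count << shift) + data_offset + kAlignmentMask;
      return size & ~kAlignmentMask;
    }

    // ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE: bump or bail.
    inline void* AllocArrayFastPath(TlabSketch* tlab, uint32_t klass,
                                    int32_t length, size_t total_size) {
      if (total_size > tlab->end - tlab->pos) {
        return nullptr;                       // caller takes the slow path
      }
      uintptr_t obj = tlab->pos;
      tlab->pos += total_size;                // store new thread_local_pos
      tlab->objects += 1;                     // increment thread_local_objects
      *reinterpret_cast<uint32_t*>(obj + kClassOffset) = klass;   // class pointer
      *reinterpret_cast<int32_t*>(obj + kLengthOffset) = length;  // array length
      // The assembly's "dmb ish": a full fence so other threads that receive
      // the reference observe the header stores.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return reinterpret_cast<void*>(obj);
    }
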
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6a2034f..219d8b4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1626,7 +1626,7 @@
 END art_quick_resolve_string
 
 // Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
 // Comment out allocators that have arm64-specific asm.
 // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
 // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
@@ -1640,8 +1640,20 @@
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_resolved_rosalloc
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
+
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
     // Fast path rosalloc allocation.
     // x0: type, xSELF(x19): Thread::Current
     // x1-x7: free.
@@ -1650,13 +1662,13 @@
                                                               // ldp won't work due to large offset.
     ldr    x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
     cmp    x3, x4
-    bhs    .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    bhs    .Lslow_path\c_name
     ldr    w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (x3)
     cmp    x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a thread
                                                               // local allocation. Also does the
                                                               // finalizable and initialization
                                                               // checks.
-    bhs    .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    bhs    .Lslow_path\c_name
                                                               // Compute the rosalloc bracket index
                                                               // from the size. Since the size is
                                                               // already aligned we can combine the
@@ -1669,7 +1681,7 @@
                                                               // Load the free list head (x3). This
                                                               // will be the return val.
     ldr    x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
-    cbz    x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    cbz    x3, .Lslow_path\c_name
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
     ldr    x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET]               // Load the next pointer of the head
                                                               // and update the list head with the
@@ -1713,13 +1725,65 @@
 
     mov    x0, x3                                             // Set the return value and return.
     ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME                      // save callee saves in case of GC
     mov    x1, xSELF                                // pass Thread::Current
-    bl     artAllocObjectFromCodeResolvedRosAlloc   // (mirror::Class* klass, Thread*)
+    bl     \cxx_name
     RESTORE_SAVE_REFS_ONLY_FRAME
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_rosalloc
+END \c_name
+.endm
+
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
+    ldr    x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
+    ldr    x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
+    ldr    w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (x7).
+    add    x6, x4, x7                                         // Add object size to tlab pos.
+    cmp    x6, x5                                             // Check if it fits, overflow works
+                                                              // since the tlab pos and end are 32
+                                                              // bit values.
+    bhi    \slowPathLabel
+    str    x6, [xSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
+    ldr    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
+    add    x5, x5, #1
+    str    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
+    POISON_HEAP_REF w0
+    str    w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
+                                                              // Fence. This is "ish" not "ishst" so
+                                                              // that the code after this allocation
+                                                              // site will see the right values in
+                                                              // the fields of the class.
+                                                              // (Alternatively we could use "ishst"
+                                                              // if we use load-acquire for the
+                                                              // object size load.)
+    mov    x0, x4
+    dmb    ish
+    ret
+.endm
+
+// The common code for art_quick_alloc_object_*tlab and art_quick_alloc_object_*region_tlab.
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint
+ENTRY \name
+    // Fast path tlab allocation (also used for the region tlab variants).
+    // x0: type, xSELF(x19): Thread::Current
+    // x1-x7: free.
+    ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lslow_path\name
+.Lslow_path\name:
+    SETUP_SAVE_REFS_ONLY_FRAME                 // Save callee saves in case of GC.
+    mov    x1, xSELF                           // Pass Thread::Current.
+    bl     \entrypoint                         // (mirror::Class*, Thread*)
+    RESTORE_SAVE_REFS_ONLY_FRAME
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \name
+.endm
+
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
 
 .macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
     and    \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64 // Apply alignment mask
@@ -1759,93 +1823,12 @@
     ret
 .endm
 
-// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
-// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
-    ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
-.endm
-
-.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
-    ldr    x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
-    ldr    x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
-    ldr    w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (x7).
-    add    x6, x4, x7                                         // Add object size to tlab pos.
-    cmp    x6, x5                                             // Check if it fits, overflow works
-                                                              // since the tlab pos and end are 32
-                                                              // bit values.
-    bhi    \slowPathLabel
-    str    x6, [xSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
-    ldr    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
-    add    x5, x5, #1
-    str    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
-    POISON_HEAP_REF w0
-    str    w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
-                                                              // Fence. This is "ish" not "ishst" so
-                                                              // that the code after this allocation
-                                                              // site will see the right values in
-                                                              // the fields of the class.
-                                                              // Alternatively we could use "ishst"
-                                                              // if we use load-acquire for the
-                                                              // object size load.)
-    mov    x0, x4
-    dmb    ish
-    ret
-.endm
-
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
-ENTRY art_quick_alloc_object_resolved_tlab
-    // Fast path tlab allocation.
-    // x0: type, xSELF(x19): Thread::Current
-    // x1-x7: free.
-#if defined(USE_READ_BARRIER)
-    mvn    x0, xzr                                            // Read barrier not supported here.
-    ret                                                       // Return -1.
-#endif
-    ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
-    SETUP_SAVE_REFS_ONLY_FRAME                    // Save callee saves in case of GC.
-    mov    x1, xSELF                              // Pass Thread::Current.
-    bl     artAllocObjectFromCodeResolvedTLAB     // (mirror::Class*, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_resolved_tlab
-
-// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path
-ENTRY \name
-    // Fast path region tlab allocation.
-    // x0: type, xSELF(x19): Thread::Current
-    // x1-x7: free.
-#if !defined(USE_READ_BARRIER)
-    mvn    x0, xzr                                            // Read barrier must be enabled here.
-    ret                                                       // Return -1.
-#endif
-.Ldo_allocation\name:
-    \fast_path .Lslow_path\name
-.Lslow_path\name:
-    SETUP_SAVE_REFS_ONLY_FRAME                 // Save callee saves in case of GC.
-    mov    x1, xSELF                           // Pass Thread::Current.
-    bl     \entrypoint                         // (mirror::Class*, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED
-GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED
-
-// TODO: We could use this macro for the normal tlab allocator too.
-
-.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, size_setup
+.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
 ENTRY \name
     // Fast path array allocation for tlab and region tlab allocation.
     // x0: mirror::Class* type
     // x1: int32_t component_count
     // x2-x7: free.
-#if !defined(USE_READ_BARRIER)
-    mvn    x0, xzr                                            // Read barrier must be enabled here.
-    ret                                                       // Return -1.
-#endif
     mov    x3, x0
     \size_setup x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
     ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
@@ -1904,17 +1887,21 @@
 .macro COMPUTE_ARRAY_SIZE_64 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
     lsl    \xTemp1, \xCount, #3
     // Add array data offset and alignment.
-    // Add 4 to the size for 64 bit alignment.
-    add    \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK + 4)
+    add    \xTemp1, \xTemp1, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
 .endm
 
 # TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm64, remove
 # the entrypoint once all backends have been updated to use the size variants.
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
 
     /*
      * Called by managed code when the thread has been asked to suspend.
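
The TLAB fast paths above (arm64 here, x86 further down) are plain bump-pointer allocation against the thread-local buffer bounds. As a rough C++ sketch of the logic in ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED — illustrative names, not the ART runtime API:

    #include <cstddef>
    #include <cstdint>

    // Fields mirror the THREAD_LOCAL_{POS,END,OBJECTS}_OFFSET slots that the
    // assembly reads off Thread::Current().
    struct ThreadLocalBuffer {
      uint8_t* pos;
      uint8_t* end;
      size_t objects;
    };

    // Returns storage for the new object, or nullptr to signal "take the
    // slow path" (i.e. call the artAllocObjectFromCode* C++ entrypoint).
    inline void* TlabAllocFastPath(ThreadLocalBuffer* tlab, size_t object_size) {
      uint8_t* new_pos = tlab->pos + object_size;
      if (new_pos > tlab->end) {
        return nullptr;       // Does not fit in the remaining buffer.
      }
      void* result = tlab->pos;
      tlab->pos = new_pos;    // Bump thread_local_pos.
      ++tlab->objects;        // Bump thread_local_objects.
      // The assembly then stores the (possibly poisoned) class pointer into
      // the object header and issues "dmb ish" so code after the allocation
      // sees initialized class fields.
      return result;
    }
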
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 2d5eca0..663cb6c 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1578,6 +1578,7 @@
 
 
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
 
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index f3629d9..5fee575 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1534,6 +1534,7 @@
 GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
 
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
 
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 9204d85..2b3525b 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -145,7 +145,7 @@
 
 // This is to be separately defined for each architecture to allow a hand-written assembly fast path.
 // GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_rosalloc, RosAlloc)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9e75cba..0bf08a6 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1051,7 +1051,7 @@
     // resolved/initialized cases)
     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                             reinterpret_cast<size_t>(nullptr),
-                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
+                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                             self);
     EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1071,7 +1071,7 @@
     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
                             GB,  // that should fail...
                             reinterpret_cast<size_t>(nullptr),
-                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
+                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                             self);
 
     EXPECT_TRUE(self->IsExceptionPending());
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 47dc34a..76615e8 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -947,10 +947,37 @@
 END_MACRO
 
 // Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
+
+// Comment out allocators that have x86 specific asm.
+// Region TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+// Normal TLAB:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
 
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
+MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
+    DEFINE_FUNCTION VAR(c_name)
     // Fast path rosalloc allocation.
     // eax: type/return value
     // ecx, ebx, edx: free
@@ -959,14 +986,14 @@
                                                         // stack has room
     movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx
     cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx
-    jae  .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    jae  .Lslow_path\c_name
 
     movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx  // Load the object size (ecx)
                                                         // Check if the size is for a thread
                                                         // local allocation. Also does the
                                                         // finalizable and initialization check.
     cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx
-    ja   .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    ja   .Lslow_path\c_name
     shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index
                                                             // from object size.
                                                         // Load thread local rosalloc run (ebx)
@@ -977,7 +1004,7 @@
                                                         // Load free_list head (edi),
                                                         // this will be the return value.
     movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx
-    jecxz   .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    jecxz   .Lslow_path\c_name
                                                         // Point of no slow path. Won't go to
                                                         // the slow path from here on.
                                                         // Load the next pointer of the head
@@ -1008,7 +1035,7 @@
                                                         // No fence needed for x86.
     movl %ecx, %eax                                     // Move object to return register
     ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx          // save ref containing registers for GC
     // Outgoing argument set up
     subl LITERAL(8), %esp                       // alignment padding
@@ -1020,10 +1047,14 @@
     CFI_ADJUST_CFA_OFFSET(-16)
     RESTORE_SAVE_REFS_ONLY_FRAME                 // restore frame up to return address
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER      // return or deliver exception
-END_FUNCTION art_quick_alloc_object_resolved_rosalloc
+    END_FUNCTION VAR(c_name)
+END_MACRO
 
-// The common fast path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
+// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
 //
 // EAX: type/return_value
 MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
@@ -1047,8 +1078,8 @@
     ret                                                 // Fast path succeeded.
 END_MACRO
 
-// The common slow path code for art_quick_alloc_object_resolved_tlab
-// and art_quick_alloc_object_resolved_region_tlab.
+// The common slow path code for art_quick_alloc_object_resolved/initialized_tlab
+// and art_quick_alloc_object_resolved/initialized_region_tlab.
 MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
     POP edi
     SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx                 // save ref containing registers for GC
@@ -1065,33 +1096,154 @@
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER             // return or deliver exception
 END_MACRO
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called
-// for CC if the GC is not marking.
-DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
+MACRO2(ART_QUICK_ALLOC_OBJECT_TLAB, c_name, cxx_name)
+    DEFINE_FUNCTION VAR(c_name)
     // Fast path tlab allocation.
     // EAX: type
     // EBX, ECX, EDX: free.
     PUSH edi
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
-.Lart_quick_alloc_object_resolved_tlab_slow_path:
-    ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
-END_FUNCTION art_quick_alloc_object_resolved_tlab
+    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\c_name
+.Lslow_path\c_name:
+    ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH RAW_VAR(cxx_name)
+    END_FUNCTION VAR(c_name)
+END_MACRO
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
-    // Fast path region tlab allocation.
-    // EAX: type/return value
-    // EBX, ECX, EDX: free.
-#if !defined(USE_READ_BARRIER)
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB
+ART_QUICK_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB
+
+// The fast path code for art_quick_alloc_array_*tlab and art_quick_alloc_array_*region_tlab.
+// Inputs: EAX: the class, ECX: int32_t component_count, EDX: total_size
+// Free temps: EBX and EDI (EDI is pushed by the caller and popped before returning).
+// Output: EAX: return value.
+MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE, slowPathLabel)
+    mov %fs:THREAD_SELF_OFFSET, %ebx                          // ebx = thread
+    // Mask out the unaligned part to make sure we are 8-byte aligned.
+    andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %edx
+    movl THREAD_LOCAL_END_OFFSET(%ebx), %edi
+    subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi
+    cmpl %edi, %edx                                           // Check if it fits.
+    ja   RAW_VAR(slowPathLabel)
+    movl THREAD_LOCAL_POS_OFFSET(%ebx), %edi
+    addl %edi, %edx                                            // Add the object size.
+    movl %edx, THREAD_LOCAL_POS_OFFSET(%ebx)                   // Update thread_local_pos_
+    addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx)         // Increase thread_local_objects.
+                                                               // Store the class pointer in the
+                                                               // header.
+                                                               // No fence needed for x86.
+    POISON_HEAP_REF eax
+    movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edi)
+    movl %ecx, MIRROR_ARRAY_LENGTH_OFFSET(%edi)
+    movl %edi, %eax
+    POP edi
+    ret                                                        // Fast path succeeded.
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_UNKNOWN, slow_path)
+    // We should never enter here. Code is provided for reference.
     int3
-    int3
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8), %ecx
+    ja RAW_VAR(slow_path)
+    PUSH ecx
+    movl %ecx, %edx
+    movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%eax), %ecx        // Load component type.
+    UNPOISON_HEAP_REF ecx
+    movl MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET(%ecx), %ecx // Load primitive type.
+    shr MACRO_LITERAL(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT), %ecx        // Get component size shift.
+    sall %cl, %edx                                              // Scale the component count by the component size.
+    // Add array header + alignment rounding.
+    add MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %edx
+    // Add 4 extra bytes if we are doing a long array.
+    add MACRO_LITERAL(1), %ecx
+    and MACRO_LITERAL(4), %ecx
+#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
 #endif
-    PUSH edi
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
-    ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
-END_FUNCTION art_quick_alloc_object_resolved_region_tlab
+    addl %ecx, %edx
+    POP ecx
+END_MACRO
 
+MACRO1(COMPUTE_ARRAY_SIZE_8, slow_path)
+    // EAX: mirror::Class* klass, ECX: int32_t component_count
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    cmpl LITERAL(MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET), %ecx
+    ja RAW_VAR(slow_path)
+    // Add array header + alignment rounding.
+    leal (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)(%ecx), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_16, slow_path)
+    // EAX: mirror::Class* klass, ECX: int32_t component_count
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2), %ecx
+    ja RAW_VAR(slow_path)
+    // Add array header + alignment rounding.
+    leal ((MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 2)(%ecx), %edx
+    sall MACRO_LITERAL(1), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_32, slow_path)
+    // EAX: mirror::Class* klass, ECX: int32_t component_count
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4), %ecx
+    ja RAW_VAR(slow_path)
+    // Add array header + alignment rounding.
+    leal ((MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 4)(%ecx), %edx
+    sall MACRO_LITERAL(2), %edx
+END_MACRO
+
+MACRO1(COMPUTE_ARRAY_SIZE_64, slow_path)
+    // EAX: mirror::Class* klass, ECX: int32_t component_count
+    // Possibly a large object, go slow.
+    // Also does negative array size check.
+    cmpl LITERAL((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8), %ecx
+    ja RAW_VAR(slow_path)
+    // Add array header + alignment rounding.
+    leal ((MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK) / 8)(%ecx), %edx
+    sall MACRO_LITERAL(3), %edx
+END_MACRO
+
+MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
+    DEFINE_FUNCTION VAR(c_entrypoint)
+    // EAX: mirror::Class* klass, ECX: int32_t component_count
+    PUSH edi
+    CALL_MACRO(size_setup) .Lslow_path\c_entrypoint
+    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\c_entrypoint
+.Lslow_path\c_entrypoint:
+    POP edi
+    SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx                        // save ref containing registers for GC
+    // Outgoing argument set up
+    PUSH eax                                                   // alignment padding
+    pushl %fs:THREAD_SELF_OFFSET                               // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    PUSH ecx
+    PUSH eax
+    call CALLVAR(cxx_name)                                     // cxx_name(arg0, arg1, Thread*)
+    addl LITERAL(16), %esp                                     // pop arguments
+    CFI_ADJUST_CFA_OFFSET(-16)
+    RESTORE_SAVE_REFS_ONLY_FRAME                               // restore frame up to return address
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER                    // return or deliver exception
+    END_FUNCTION VAR(c_entrypoint)
+END_MACRO
+
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
+
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
+GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
 
 DEFINE_FUNCTION art_quick_resolve_string
     SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
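
The COMPUTE_ARRAY_SIZE_{8,16,32,64} helpers here and in the arm64 file all perform the same arithmetic: scale the component count by the element-size shift, add the array data offset plus the alignment mask, and let the fast path mask the sum down. The 64-bit variants now use MIRROR_WIDE_ARRAY_DATA_OFFSET, which the #error above pins at MIRROR_INT_ARRAY_DATA_OFFSET + 4, instead of hand-adding 4 for alignment. A C++ sketch with illustrative constants (the generated asm_support defines are authoritative):

    #include <cstddef>
    #include <cstdint>

    // Illustrative values only; the real ones come from generated defines.
    constexpr size_t kObjectAlignmentMask = 7;   // 8-byte object alignment.
    constexpr size_t kIntArrayDataOffset = 12;   // Elements of <= 4 bytes.
    constexpr size_t kWideArrayDataOffset = kIntArrayDataOffset + 4;  // 8-byte elements.

    inline size_t ComputeArraySize(uint32_t component_count, unsigned component_shift) {
      size_t data_offset =
          (component_shift == 3) ? kWideArrayDataOffset : kIntArrayDataOffset;
      // Scaled element bytes + header + rounding...
      size_t size = (size_t{component_count} << component_shift)
                    + data_offset + kObjectAlignmentMask;
      // ...then mask down to object alignment (the fast path's and/andl step).
      return size & ~kObjectAlignmentMask;
    }
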
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 10f9047..a1ae858 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1006,7 +1006,8 @@
 
 
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
+MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
+    DEFINE_FUNCTION VAR(c_name)
     // Fast path rosalloc allocation.
     // RDI: mirror::Class*, RAX: return value
     // RSI, RDX, RCX, R8, R9: free.
@@ -1015,14 +1016,14 @@
     movq   %gs:THREAD_SELF_OFFSET, %r8                     // r8 = thread
     movq   THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx  // rcx = alloc stack top.
     cmpq   THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
-    jae    .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    jae    .Lslow_path\c_name
                                                            // Load the object size
     movl   MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax
                                                            // Check if the size is for a thread
                                                            // local allocation. Also does the
                                                            // initialized and finalizable checks.
     cmpl   LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
-    ja     .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    ja     .Lslow_path\c_name
                                                            // Compute the rosalloc bracket index
                                                            // from the size.
     shrq   LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
@@ -1036,7 +1037,7 @@
                                                            // will be the return val.
     movq   (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
     testq  %rax, %rax
-    jz     .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+    jz     .Lslow_path\c_name
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
                                                            // Push the new object onto the thread
                                                            // local allocation stack and
@@ -1063,25 +1064,19 @@
     decl   (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
                                                            // No fence necessary for x86.
     ret
-.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+.Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME                             // save ref containing registers for GC
     // Outgoing argument set up
     movq %gs:THREAD_SELF_OFFSET, %rsi                      // pass Thread::Current()
-    call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc)    // cxx_name(arg0, Thread*)
+    call CALLVAR(cxx_name)                                 // cxx_name(arg0, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME                           // restore frame up to return address
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER                // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
-
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
-MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
-    testl %edx, %edx                                       // Check null class
-    jz   RAW_VAR(slowPathLabel)
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
+    END_FUNCTION VAR(c_name)
 END_MACRO
 
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
 // The common fast path code for art_quick_alloc_object_resolved_region_tlab.
 // TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
 // ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
@@ -1220,12 +1215,7 @@
     movq %rsi, %r9
     salq MACRO_LITERAL(3), %r9
     // Add array header + alignment rounding.
-    // Add 4 extra bytes for array data alignment
-    addq MACRO_LITERAL(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK + 4), %r9
-END_MACRO
-
-// The slow path code for art_quick_alloc_array_*tlab.
-MACRO1(ALLOC_ARRAY_TLAB_SLOW_PATH, cxx_name)
+    addq MACRO_LITERAL(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK), %r9
 END_MACRO
 
 MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
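
The hand-written RosAlloc fast path, now shared between the resolved and initialized entrypoints via ART_QUICK_ALLOC_OBJECT_ROSALLOC on arm64, x86 and x86-64, is essentially a pop from a thread-local run's free list. Schematically, in C++ (placeholder types, not ART's RosAlloc classes):

    // Sketch of the shared RosAlloc fast path: pop the head of a
    // thread-local run's free list.
    struct Slot { Slot* next; };
    struct FreeList {
      Slot* head;
      int size;
    };

    // Returns a slot, or nullptr to signal "take the slow path".
    inline void* RosAllocFastPath(FreeList* free_list) {
      Slot* head = free_list->head;
      if (head == nullptr) {
        return nullptr;              // Empty run: call artAllocObjectFromCode*RosAlloc.
      }
      free_list->head = head->next;  // Pop the head slot; it becomes the object.
      --free_list->size;
      // The assembly additionally checks that the thread-local alloc stack has
      // room, pushes the new object onto it, zeroes the slot's next pointer,
      // and stores the class pointer into the object header.
      return head;
    }
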
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 61ff417..6cb8544 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -446,6 +446,8 @@
                                                  PointerSize pointer_size,
                                                  bool* found)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  // We shouldn't be calling this with obsolete methods.
+  DCHECK(!method->IsObsolete());
   // Although we overwrite the trampoline of non-static methods, we may get here via the resolution
   // method for direct methods (or virtual methods made direct).
   mirror::Class* declaring_class = method->GetDeclaringClass();
diff --git a/runtime/art_method.h b/runtime/art_method.h
index d4a65c8..3836303 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -201,6 +201,10 @@
     return (GetAccessFlags() & kAccCompileDontBother) == 0;
   }
 
+  void SetDontCompile() {
+    AddAccessFlags(kAccCompileDontBother);
+  }
+
   // A default conflict method is a special sentinel method that stands for a conflict between
   // multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError if one
   // attempts to do so.
@@ -226,7 +230,7 @@
   void SetIsObsolete() {
     // TODO We should really support redefining intrinsic if possible.
     DCHECK(!IsIntrinsic());
-    SetAccessFlags(GetAccessFlags() | kAccObsoleteMethod);
+    AddAccessFlags(kAccObsoleteMethod);
   }
 
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
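
Both the new SetDontCompile and the reworked SetIsObsolete funnel through AddAccessFlags(flag) instead of open-coding SetAccessFlags(GetAccessFlags() | flag), keeping the read-modify-write in one helper. A minimal sketch of that shape — treating the flag word as atomic is an assumption for illustration, not something this diff establishes:

    #include <atomic>
    #include <cstdint>

    class MethodFlags {
     public:
      // One helper owns the read-modify-write; callers just name the flag.
      void AddAccessFlags(uint32_t flag) {
        // Assumed-atomic update for the sketch; ART's actual field and
        // memory ordering may differ.
        access_flags_.fetch_or(flag, std::memory_order_relaxed);
      }
      uint32_t GetAccessFlags() const {
        return access_flags_.load(std::memory_order_relaxed);
      }
     private:
      std::atomic<uint32_t> access_flags_{0};
    };
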
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 46f2c08..c7a94a9 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -72,7 +72,7 @@
 // Import platform-independent constant defines from our autogenerated list.
 // Export new defines (for assembly use) by editing cpp-define-generator def files.
 #define DEFINE_CHECK_EQ ADD_TEST_EQ
-#include "generated/asm_support_gen.h"
+#include "asm_support_gen.h"
 
 // Offset of field Thread::tlsPtr_.exception.
 #define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index e2a7259..45c3165 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -235,6 +235,11 @@
     this->store(desired, std::memory_order_seq_cst);
   }
 
+  // Atomically replace the value with the desired value; no memory ordering is imposed.
+  T ExchangeRelaxed(T desired_value) {
+    return this->exchange(desired_value, std::memory_order_relaxed);
+  }
+
   // Atomically replace the value with desired value if it matches the expected value.
   // Participates in total ordering of atomic operations.
   bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
@@ -283,6 +288,10 @@
     return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
   }
 
+  T FetchAndSubRelaxed(const T value) {
+    return this->fetch_sub(value, std::memory_order_relaxed);  // Return old value.
+  }
+
   T FetchAndOrSequentiallyConsistent(const T value) {
     return this->fetch_or(value, std::memory_order_seq_cst);  // Return old_value.
   }
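
ExchangeRelaxed and FetchAndSubRelaxed are thin wrappers over the std::atomic operations with std::memory_order_relaxed: the update is still atomic, but imposes no ordering on surrounding memory accesses. Equivalent standalone usage:

    #include <atomic>
    #include <cstdint>

    int main() {
      std::atomic<uint64_t> counter{10};
      // FetchAndSubRelaxed: returns the old value (10), leaves 7.
      uint64_t old_sub = counter.fetch_sub(3, std::memory_order_relaxed);
      // ExchangeRelaxed: returns the old value (7), leaves 42.
      uint64_t old_xchg = counter.exchange(42, std::memory_order_relaxed);
      return (old_sub == 10 && old_xchg == 7) ? 0 : 1;
    }
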
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 9fdb0cc..db43319 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -146,7 +146,9 @@
 }
 
 #pragma GCC diagnostic push
+#if __clang_major__ >= 4
 #pragma GCC diagnostic ignored "-Winstantiation-after-specialization"
+#endif
 // Explicitly instantiate the used implementation.
 template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
 #pragma GCC diagnostic pop
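
The version guard matters because a compiler that does not recognize -Winstantiation-after-specialization will itself complain about the unknown warning name (an error under -Werror); only clang 4 and later know it. The pattern in isolation:

    // Only name a warning in a diagnostic pragma when the compiler is known
    // to recognize it; older compilers complain about unknown -W names.
    #pragma GCC diagnostic push
    #if defined(__clang__) && __clang_major__ >= 4
    #pragma GCC diagnostic ignored "-Winstantiation-after-specialization"
    #endif
    template <typename T> struct Stats { void Add(T) {} };
    template struct Stats<int>;  // The instantiation the pragma would cover.
    #pragma GCC diagnostic pop
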
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index e05a85a..6e102be 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -49,7 +49,6 @@
 Mutex* Locks::jni_function_table_lock_ = nullptr;
 Mutex* Locks::jni_libraries_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
-Mutex* Locks::mem_maps_lock_ = nullptr;
 Mutex* Locks::modify_ldt_lock_ = nullptr;
 MutatorMutex* Locks::mutator_lock_ = nullptr;
 Mutex* Locks::profiler_lock_ = nullptr;
@@ -1116,10 +1115,6 @@
     DCHECK(unexpected_signal_lock_ == nullptr);
     unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
 
-    UPDATE_CURRENT_LOCK_LEVEL(kMemMapsLock);
-    DCHECK(mem_maps_lock_ == nullptr);
-    mem_maps_lock_ = new Mutex("mem maps lock", current_lock_level);
-
     UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
     DCHECK(logging_lock_ == nullptr);
     logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 21dd437..ffe18c6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -55,7 +55,6 @@
 // [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
 enum LockLevel {
   kLoggingLock = 0,
-  kMemMapsLock,
   kSwapMutexesLock,
   kUnexpectedSignalLock,
   kThreadSuspendCountLock,
@@ -712,9 +711,6 @@
   // One unexpected signal at a time lock.
   static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
 
-  // Guards the maps in mem_map.
-  static Mutex* mem_maps_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
-
   // Have an exclusive logging thread.
   static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
 };
diff --git a/runtime/bit_memory_region.h b/runtime/bit_memory_region.h
index c3b5be4..3a696f1 100644
--- a/runtime/bit_memory_region.h
+++ b/runtime/bit_memory_region.h
@@ -40,6 +40,10 @@
     return region_.size_in_bits();
   }
 
+  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_size) const {
+    return BitMemoryRegion(region_, bit_start_ + bit_offset, bit_size);
+  }
+
   // Load a single bit in the region. The bit at offset 0 is the least
   // significant bit in the first byte.
   ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
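
Subregion simply composes bit offsets, so bit 0 of the subregion is bit bit_start_ + bit_offset of the backing region. A self-contained analogue of the idea (not the ART class):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class BitView {
     public:
      BitView(const std::vector<uint8_t>* bytes, size_t bit_start, size_t bit_size)
          : bytes_(bytes), bit_start_(bit_start), bit_size_(bit_size) {}
      // Offsets add up, exactly like BitMemoryRegion::Subregion above.
      BitView Subregion(size_t bit_offset, size_t bit_size) const {
        return BitView(bytes_, bit_start_ + bit_offset, bit_size);
      }
      // Bit 0 is the least significant bit of the first byte.
      bool LoadBit(size_t bit_offset) const {
        size_t i = bit_start_ + bit_offset;
        return ((*bytes_)[i / 8] >> (i % 8)) & 1;
      }
      size_t size_in_bits() const { return bit_size_; }

     private:
      const std::vector<uint8_t>* bytes_;
      size_t bit_start_;
      size_t bit_size_;
    };

    int main() {
      std::vector<uint8_t> data{0x01, 0x02};       // bits 0 and 9 are set
      BitView whole(&data, 0, 16);
      BitView sub = whole.Subregion(8, 8);
      assert(whole.LoadBit(9) == sub.LoadBit(1));  // same underlying bit
      return 0;
    }
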
diff --git a/runtime/cha.cc b/runtime/cha.cc
index e726bdb..d11b12f 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -16,6 +16,7 @@
 
 #include "cha.h"
 
+#include "art_method-inl.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "runtime.h"
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index edd6e3b..7db8368 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1322,16 +1322,8 @@
         // Make sure to do this after we update the arrays since we store the resolved types array
         // in DexCacheData in RegisterDexFileLocked. We need the array pointer to be the one in the
         // BSS.
-        ObjPtr<mirror::DexCache> existing_dex_cache = FindDexCacheLocked(self,
-                                                                         *dex_file,
-                                                                         /*allow_failure*/true);
-        CHECK(existing_dex_cache == nullptr);
-        StackHandleScope<1> hs3(self);
-        Handle<mirror::DexCache> h_dex_cache = hs3.NewHandle(dex_cache);
-        RegisterDexFileLocked(*dex_file, h_dex_cache);
-        if (kIsDebugBuild) {
-          dex_cache.Assign(h_dex_cache.Get());  // Update dex_cache, used below in debug build.
-        }
+        CHECK(!FindDexCacheDataLocked(*dex_file).IsValid());
+        RegisterDexFileLocked(*dex_file, dex_cache, class_loader.Get());
       }
       if (kIsDebugBuild) {
         CHECK(new_class_set != nullptr);
@@ -1675,11 +1667,9 @@
     return false;
   }
 
-  StackHandleScope<1> hs2(self);
-  MutableHandle<mirror::DexCache> h_dex_cache(hs2.NewHandle<mirror::DexCache>(nullptr));
   for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
-    h_dex_cache.Assign(dex_caches->Get(i));
-    std::string dex_file_location(h_dex_cache->GetLocation()->ToModifiedUtf8());
+    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+    std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
     // TODO: Only store qualified paths.
     // If non qualified, qualify it.
     if (dex_file_location.find('/') == std::string::npos) {
@@ -1699,9 +1689,9 @@
     if (app_image) {
       // The current dex file field is bogus, overwrite it so that we can get the dex file in the
       // loop below.
-      h_dex_cache->SetDexFile(dex_file.get());
-      GcRoot<mirror::Class>* const types = h_dex_cache->GetResolvedTypes();
-      for (int32_t j = 0, num_types = h_dex_cache->NumResolvedTypes(); j < num_types; j++) {
+      dex_cache->SetDexFile(dex_file.get());
+      GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+      for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
         ObjPtr<mirror::Class> klass = types[j].Read();
         if (klass != nullptr) {
           DCHECK(!klass->IsErroneous()) << klass->GetStatus();
@@ -1711,11 +1701,11 @@
       if (kSanityCheckObjects) {
         ImageSanityChecks::CheckPointerArray(heap,
                                              this,
-                                             h_dex_cache->GetResolvedMethods(),
-                                             h_dex_cache->NumResolvedMethods());
+                                             dex_cache->GetResolvedMethods(),
+                                             dex_cache->NumResolvedMethods());
       }
       // Register dex files, keep track of existing ones that are conflicts.
-      AppendToBootClassPath(*dex_file.get(), h_dex_cache);
+      AppendToBootClassPath(*dex_file.get(), dex_cache);
     }
     out_dex_files->push_back(std::move(dex_file));
   }
@@ -2656,7 +2646,7 @@
   }
   ObjPtr<mirror::DexCache> dex_cache = RegisterDexFile(*new_dex_file, class_loader.Get());
   if (dex_cache == nullptr) {
-    self->AssertPendingOOMException();
+    self->AssertPendingException();
     return nullptr;
   }
   klass->SetDexCache(dex_cache);
@@ -3264,28 +3254,27 @@
 }
 
 void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
-  StackHandleScope<1> hs(self);
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocAndInitializeDexCache(
+  ObjPtr<mirror::DexCache> dex_cache = AllocAndInitializeDexCache(
       self,
       dex_file,
-      Runtime::Current()->GetLinearAlloc())));
-  CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
-                                    << dex_file.GetLocation();
+      Runtime::Current()->GetLinearAlloc());
+  CHECK(dex_cache != nullptr) << "Failed to allocate dex cache for " << dex_file.GetLocation();
   AppendToBootClassPath(dex_file, dex_cache);
 }
 
 void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
-                                        Handle<mirror::DexCache> dex_cache) {
-  CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
+                                        ObjPtr<mirror::DexCache> dex_cache) {
+  CHECK(dex_cache != nullptr) << dex_file.GetLocation();
   boot_class_path_.push_back(&dex_file);
-  RegisterDexFile(dex_file, dex_cache);
+  RegisterBootClassPathDexFile(dex_file, dex_cache);
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
-                                        Handle<mirror::DexCache> dex_cache) {
+                                        ObjPtr<mirror::DexCache> dex_cache,
+                                        ObjPtr<mirror::ClassLoader> class_loader) {
   Thread* const self = Thread::Current();
   Locks::dex_lock_->AssertExclusiveHeld(self);
-  CHECK(dex_cache.Get() != nullptr) << dex_file.GetLocation();
+  CHECK(dex_cache != nullptr) << dex_file.GetLocation();
   // For app images, the dex cache location may be a suffix of the dex file location since the
   // dex file location is an absolute path.
   const std::string dex_cache_location = dex_cache->GetLocation()->ToModifiedUtf8();
@@ -3313,25 +3302,49 @@
       ++it;
     }
   }
-  jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache.Get());
+  jweak dex_cache_jweak = vm->AddWeakGlobalRef(self, dex_cache);
   dex_cache->SetDexFile(&dex_file);
   DexCacheData data;
   data.weak_root = dex_cache_jweak;
   data.dex_file = dex_cache->GetDexFile();
   data.resolved_methods = dex_cache->GetResolvedMethods();
+  data.class_table = ClassTableForClassLoader(class_loader);
+  DCHECK(data.class_table != nullptr);
   dex_caches_.push_back(data);
 }
 
-mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file,
-                                               ObjPtr<mirror::ClassLoader> class_loader) {
+ObjPtr<mirror::DexCache> ClassLinker::DecodeDexCache(Thread* self, const DexCacheData& data) {
+  return data.IsValid()
+      ? ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root))
+      : nullptr;
+}
+
+ObjPtr<mirror::DexCache> ClassLinker::EnsureSameClassLoader(
+    Thread* self,
+    ObjPtr<mirror::DexCache> dex_cache,
+    const DexCacheData& data,
+    ObjPtr<mirror::ClassLoader> class_loader) {
+  DCHECK_EQ(dex_cache->GetDexFile(), data.dex_file);
+  if (data.class_table != ClassTableForClassLoader(class_loader)) {
+    self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+                             "Attempt to register dex file %s with multiple class loaders",
+                             data.dex_file->GetLocation().c_str());
+    return nullptr;
+  }
+  return dex_cache;
+}
+
+ObjPtr<mirror::DexCache> ClassLinker::RegisterDexFile(const DexFile& dex_file,
+                                                      ObjPtr<mirror::ClassLoader> class_loader) {
   Thread* self = Thread::Current();
+  DexCacheData old_data;
   {
     ReaderMutexLock mu(self, *Locks::dex_lock_);
-    ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
-    if (dex_cache != nullptr) {
-      // TODO: Check if the dex file was registered with the same class loader. Bug: 34193123
-      return dex_cache.Ptr();
-    }
+    old_data = FindDexCacheDataLocked(dex_file);
+  }
+  ObjPtr<mirror::DexCache> old_dex_cache = DecodeDexCache(self, old_data);
+  if (old_dex_cache != nullptr) {
+    return EnsureSameClassLoader(self, old_dex_cache, old_data, class_loader);
   }
   LinearAlloc* const linear_alloc = GetOrCreateAllocatorForClassLoader(class_loader);
   DCHECK(linear_alloc != nullptr);
@@ -3343,7 +3356,8 @@
   // Don't alloc while holding the lock, since allocation may need to
   // suspend all threads and another thread may need the dex_lock_ to
   // get to a suspend point.
-  StackHandleScope<2> hs(self);
+  StackHandleScope<3> hs(self);
+  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
   ObjPtr<mirror::String> location;
   Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(/*out*/&location,
                                                                   self,
@@ -3351,77 +3365,94 @@
   Handle<mirror::String> h_location(hs.NewHandle(location));
   {
     WriterMutexLock mu(self, *Locks::dex_lock_);
-    ObjPtr<mirror::DexCache> dex_cache = FindDexCacheLocked(self, dex_file, true);
-    if (dex_cache != nullptr) {
-      // Another thread managed to initialize the dex cache faster, so use that DexCache.
-      // If this thread encountered OOME, ignore it.
-      DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
-      self->ClearException();
-      return dex_cache.Ptr();
+    old_data = FindDexCacheDataLocked(dex_file);
+    old_dex_cache = DecodeDexCache(self, old_data);
+    if (old_dex_cache == nullptr && h_dex_cache.Get() != nullptr) {
+      // Do InitializeDexCache while holding dex lock to make sure two threads don't call it at the
+      // same time with the same dex cache. Since the .bss is shared, this could otherwise fail the
+      // DCHECK that the arrays are null.
+      mirror::DexCache::InitializeDexCache(self,
+                                           h_dex_cache.Get(),
+                                           h_location.Get(),
+                                           &dex_file,
+                                           linear_alloc,
+                                           image_pointer_size_);
+      RegisterDexFileLocked(dex_file, h_dex_cache.Get(), h_class_loader.Get());
     }
-    if (h_dex_cache.Get() == nullptr) {
-      self->AssertPendingOOMException();
-      return nullptr;
-    }
-    // Do InitializeDexCache while holding dex lock to make sure two threads don't call it at the
-    // same time with the same dex cache. Since the .bss is shared this can cause failing DCHECK
-    // that the arrays are null.
-    mirror::DexCache::InitializeDexCache(self,
-                                         h_dex_cache.Get(),
-                                         h_location.Get(),
-                                         &dex_file,
-                                         linear_alloc,
-                                         image_pointer_size_);
-    RegisterDexFileLocked(dex_file, h_dex_cache);
+  }
+  if (old_dex_cache != nullptr) {
+    // Another thread managed to initialize the dex cache faster, so use that DexCache.
+    // If this thread encountered OOME, ignore it.
+    DCHECK_EQ(h_dex_cache.Get() == nullptr, self->IsExceptionPending());
+    self->ClearException();
+    // We cannot call EnsureSameClassLoader() while holding the dex_lock_.
+    return EnsureSameClassLoader(self, old_dex_cache, old_data, h_class_loader.Get());
+  }
+  if (h_dex_cache.Get() == nullptr) {
+    self->AssertPendingOOMException();
+    return nullptr;
   }
   table->InsertStrongRoot(h_dex_cache.Get());
   return h_dex_cache.Get();
 }
 
-void ClassLinker::RegisterDexFile(const DexFile& dex_file,
-                                  Handle<mirror::DexCache> dex_cache) {
+void ClassLinker::RegisterBootClassPathDexFile(const DexFile& dex_file,
+                                               ObjPtr<mirror::DexCache> dex_cache) {
   WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
-  RegisterDexFileLocked(dex_file, dex_cache);
+  RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr);
 }
 
-mirror::DexCache* ClassLinker::FindDexCache(Thread* self,
-                                            const DexFile& dex_file,
-                                            bool allow_failure) {
+bool ClassLinker::IsDexFileRegistered(Thread* self, const DexFile& dex_file) {
   ReaderMutexLock mu(self, *Locks::dex_lock_);
-  return FindDexCacheLocked(self, dex_file, allow_failure);
+  return DecodeDexCache(self, FindDexCacheDataLocked(dex_file)) != nullptr;
 }
 
-mirror::DexCache* ClassLinker::FindDexCacheLocked(Thread* self,
-                                                  const DexFile& dex_file,
-                                                  bool allow_failure) {
-  // Search assuming unique-ness of dex file.
-  for (const DexCacheData& data : dex_caches_) {
-    // Avoid decoding (and read barriers) other unrelated dex caches.
-    if (data.dex_file == &dex_file) {
-      ObjPtr<mirror::DexCache> dex_cache =
-          ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-      if (dex_cache != nullptr) {
-        return dex_cache.Ptr();
-      }
-      break;
-    }
+ObjPtr<mirror::DexCache> ClassLinker::FindDexCache(Thread* self, const DexFile& dex_file) {
+  ReaderMutexLock mu(self, *Locks::dex_lock_);
+  ObjPtr<mirror::DexCache> dex_cache = DecodeDexCache(self, FindDexCacheDataLocked(dex_file));
+  if (dex_cache != nullptr) {
+    return dex_cache;
   }
-  if (allow_failure) {
-    return nullptr;
-  }
-  std::string location(dex_file.GetLocation());
   // Failure, dump diagnostic and abort.
+  std::string location(dex_file.GetLocation());
   for (const DexCacheData& data : dex_caches_) {
-    ObjPtr<mirror::DexCache> dex_cache =
-        ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-    if (dex_cache != nullptr) {
-      LOG(ERROR) << "Registered dex file " << dex_cache->GetDexFile()->GetLocation();
+    if (DecodeDexCache(self, data) != nullptr) {
+      LOG(ERROR) << "Registered dex file " << data.dex_file->GetLocation();
     }
   }
   LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
   UNREACHABLE();
 }
 
+ClassTable* ClassLinker::FindClassTable(Thread* self, ObjPtr<mirror::DexCache> dex_cache) {
+  const DexFile* dex_file = dex_cache->GetDexFile();
+  DCHECK(dex_file != nullptr);
+  ReaderMutexLock mu(self, *Locks::dex_lock_);
+  // Search assuming unique-ness of dex file.
+  for (const DexCacheData& data : dex_caches_) {
+    // Avoid decoding (and read barriers) other unrelated dex caches.
+    if (data.dex_file == dex_file) {
+      ObjPtr<mirror::DexCache> registered_dex_cache = DecodeDexCache(self, data);
+      if (registered_dex_cache != nullptr) {
+        CHECK_EQ(registered_dex_cache, dex_cache) << dex_file->GetLocation();
+        return data.class_table;
+      }
+    }
+  }
+  return nullptr;
+}
+
+ClassLinker::DexCacheData ClassLinker::FindDexCacheDataLocked(const DexFile& dex_file) {
+  // Search assuming uniqueness of dex file.
+  for (const DexCacheData& data : dex_caches_) {
+    // Avoid decoding (and read barriers for) unrelated dex caches.
+    if (data.dex_file == &dex_file) {
+      return data;
+    }
+  }
+  return DexCacheData();
+}
+
 void ClassLinker::FixupDexCaches(ArtMethod* resolution_method) {
   Thread* const self = Thread::Current();
   ReaderMutexLock mu(self, *Locks::dex_lock_);
@@ -5679,14 +5710,7 @@
 const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
 const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
 
-// b/30419309
-#if defined(__i386__)
-#define X86_OPTNONE __attribute__((optnone))
-#else
-#define X86_OPTNONE
-#endif
-
-X86_OPTNONE bool ClassLinker::LinkVirtualMethods(
+bool ClassLinker::LinkVirtualMethods(
     Thread* self,
     Handle<mirror::Class> klass,
     /*out*/std::unordered_map<size_t, ClassLinker::MethodTranslation>* default_translations) {
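
The DexCache lookup above is now split into a raw table scan (FindDexCacheDataLocked)
and a decode step. A minimal sketch of the decode step, assuming a valid entry simply
decodes its weak root (the actual body is not part of this hunk):

    ObjPtr<mirror::DexCache> ClassLinker::DecodeDexCache(Thread* self,
                                                         const DexCacheData& data) {
      // An invalid entry yields null; a valid entry decodes its weak root,
      // which is null if the DexCache has been unloaded in the meantime.
      return data.IsValid()
          ? ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root))
          : nullptr;
    }
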
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 5042fb7..62d3c29 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -382,11 +382,11 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
-  mirror::DexCache* RegisterDexFile(const DexFile& dex_file,
-                                    ObjPtr<mirror::ClassLoader> class_loader)
+  ObjPtr<mirror::DexCache> RegisterDexFile(const DexFile& dex_file,
+                                           ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES(!Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+  void RegisterBootClassPathDexFile(const DexFile& dex_file, ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES(!Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -413,9 +413,13 @@
       REQUIRES(!Locks::dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  mirror::DexCache* FindDexCache(Thread* self,
-                                 const DexFile& dex_file,
-                                 bool allow_failure = false)
+  bool IsDexFileRegistered(Thread* self, const DexFile& dex_file)
+      REQUIRES(!Locks::dex_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<mirror::DexCache> FindDexCache(Thread* self, const DexFile& dex_file)
+      REQUIRES(!Locks::dex_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ClassTable* FindClassTable(Thread* self, ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES(!Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void FixupDexCaches(ArtMethod* resolution_method)
@@ -655,6 +659,18 @@
       REQUIRES(!Locks::dex_lock_);
 
   struct DexCacheData {
+    // Construct an invalid data object.
+    DexCacheData()
+        : weak_root(nullptr),
+          dex_file(nullptr),
+          resolved_methods(nullptr),
+          class_table(nullptr) { }
+
+    // Check if the data is valid.
+    bool IsValid() const {
+      return dex_file != nullptr;
+    }
+
     // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
     // not work properly.
     jweak weak_root;
@@ -663,6 +679,11 @@
     // class unloading.)
     const DexFile* dex_file;
     ArtMethod** resolved_methods;
+    // Identify the associated class loader's class table. This is used to make sure that
+    // the Java call to native DexCache.setResolvedType() inserts the resolved type in that
+    // class table. It is also used to make sure we don't register the same dex cache with
+    // multiple class loaders.
+    ClassTable* class_table;
   };
 
  private:
@@ -749,7 +770,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
-  void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+  void AppendToBootClassPath(const DexFile& dex_file, ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
@@ -810,12 +831,24 @@
       REQUIRES(!Locks::classlinker_classes_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+  void RegisterDexFileLocked(const DexFile& dex_file,
+                             ObjPtr<mirror::DexCache> dex_cache,
+                             ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES(Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  mirror::DexCache* FindDexCacheLocked(Thread* self, const DexFile& dex_file, bool allow_failure)
+  DexCacheData FindDexCacheDataLocked(const DexFile& dex_file)
       REQUIRES(Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  static ObjPtr<mirror::DexCache> DecodeDexCache(Thread* self, const DexCacheData& data)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  // Called to ensure that the dex cache has been registered with the same class loader.
+  // If so, returns the dex cache; otherwise throws InternalError and returns null.
+  ObjPtr<mirror::DexCache> EnsureSameClassLoader(Thread* self,
+                                                 ObjPtr<mirror::DexCache> dex_cache,
+                                                 const DexCacheData& data,
+                                                 ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES(!Locks::dex_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool InitializeClass(Thread* self,
                        Handle<mirror::Class> klass,
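
A plausible shape for EnsureSameClassLoader, inferred from its comment and from the
class_table field added to DexCacheData above (illustrative, not the actual body):

    ObjPtr<mirror::DexCache> ClassLinker::EnsureSameClassLoader(
        Thread* self,
        ObjPtr<mirror::DexCache> dex_cache,
        const DexCacheData& data,
        ObjPtr<mirror::ClassLoader> class_loader) {
      // A dex cache registered under a different loader carries that loader's
      // class table, so a simple pointer comparison detects the mismatch.
      if (data.class_table != ClassTableForClassLoader(class_loader)) {
        self->ThrowNewExceptionF("Ljava/lang/InternalError;",
                                 "Attempt to register dex file %s with multiple class loaders",
                                 data.dex_file->GetLocation().c_str());
        return nullptr;
      }
      return dex_cache;
    }
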
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 17510bb..03105cb 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -439,7 +439,7 @@
     TestRootVisitor visitor;
     class_linker_->VisitRoots(&visitor, kVisitRootFlagAllRoots);
     // Verify the dex cache has resolution methods in all resolved method slots
-    mirror::DexCache* dex_cache = class_linker_->FindDexCache(Thread::Current(), dex);
+    ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(Thread::Current(), dex);
     auto* resolved_methods = dex_cache->GetResolvedMethods();
     for (size_t i = 0, num_methods = dex_cache->NumResolvedMethods(); i != num_methods; ++i) {
       EXPECT_TRUE(
@@ -1454,7 +1454,7 @@
   {
     WriterMutexLock mu(soa.Self(), *Locks::dex_lock_);
     // Check that inserting with a UTF16 name works.
-    class_linker->RegisterDexFileLocked(*dex_file, dex_cache);
+    class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr);
   }
 }
 
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index ff846a7..af4f998 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -55,10 +55,6 @@
   return nullptr;
 }
 
-// Bug: http://b/31104323 Ignore -Wunreachable-code from the for loop below
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunreachable-code"
-
 mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
   WriterMutexLock mu(Thread::Current(), lock_);
   // Should only be updating latest table.
@@ -84,8 +80,6 @@
   return existing;
 }
 
-#pragma clang diagnostic pop  // http://b/31104323
-
 size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
                                               const ClassSet& set) const {
   size_t count = 0;
@@ -123,6 +117,19 @@
   return nullptr;
 }
 
+ObjPtr<mirror::Class> ClassTable::TryInsert(ObjPtr<mirror::Class> klass) {
+  TableSlot slot(klass);
+  WriterMutexLock mu(Thread::Current(), lock_);
+  for (ClassSet& class_set : classes_) {
+    auto it = class_set.Find(slot);
+    if (it != class_set.end()) {
+      return it->Read();
+    }
+  }
+  classes_.back().Insert(slot);
+  return klass;
+}
+
 void ClassTable::Insert(ObjPtr<mirror::Class> klass) {
   const uint32_t hash = TableSlot::HashDescriptor(klass);
   WriterMutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index c8ec28e..711eae4 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -192,6 +192,12 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Try to insert a class and return the inserted class if successful. If another class
+  // with the same descriptor is already in the table, return the existing entry.
+  ObjPtr<mirror::Class> TryInsert(ObjPtr<mirror::Class> klass)
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   void Insert(ObjPtr<mirror::Class> klass)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
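
A hypothetical caller resolving a racy class definition with the new TryInsert; the
names are illustrative:

    // Whichever thread wins the race, everyone ends up using the same class.
    ObjPtr<mirror::Class> winner = table->TryInsert(klass);
    if (winner != klass) {
      // Another thread inserted a class with the same descriptor first;
      // discard ours and adopt the existing entry.
      klass = winner;
    }
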
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 0fec856..9dca4c0 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -338,13 +338,16 @@
   ScopedObjectAccess soa(Thread::Current());
   std::unique_ptr<const DexFile> raw(OpenTestDexFile("Nested"));
   ASSERT_TRUE(raw.get() != nullptr);
-  EXPECT_EQ(2U, raw->NumClassDefs());
+  EXPECT_EQ(3U, raw->NumClassDefs());
 
   const DexFile::ClassDef& c0 = raw->GetClassDef(0);
-  EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c0));
+  EXPECT_STREQ("LNested$1;", raw->GetClassDescriptor(c0));
 
   const DexFile::ClassDef& c1 = raw->GetClassDef(1);
-  EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c1));
+  EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c1));
+
+  const DexFile::ClassDef& c2 = raw->GetClassDef(2);
+  EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c2));
 }
 
 TEST_F(DexFileTest, GetMethodSignature) {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 06c11f5..fb8139b 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -204,12 +204,12 @@
       CodeInfoEncoding encoding = code_info.ExtractEncoding();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
       DCHECK(stack_map.IsValid());
-      if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
         InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
         caller = GetResolvedMethod(outer_method,
                                    inline_info,
-                                   encoding.inline_info_encoding,
-                                   inline_info.GetDepth(encoding.inline_info_encoding) - 1);
+                                   encoding.inline_info.encoding,
+                                   inline_info.GetDepth(encoding.inline_info.encoding) - 1);
       }
     }
     if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 670dadc..158c1d6 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -20,7 +20,7 @@
 #include "indirect_reference_table.h"
 #include "mirror/object-inl.h"
 #include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index bde9009..3ef47c4 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -346,12 +346,12 @@
       CodeInfoEncoding encoding = code_info.ExtractEncoding();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
       DCHECK(stack_map.IsValid());
-      if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
         InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-        return inline_info.GetDexPcAtDepth(encoding.inline_info_encoding,
-                                           inline_info.GetDepth(encoding.inline_info_encoding)-1);
+        return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
+                                           inline_info.GetDepth(encoding.inline_info.encoding)-1);
       } else {
-        return stack_map.GetDexPc(encoding.stack_map_encoding);
+        return stack_map.GetDexPc(encoding.stack_map.encoding);
       }
     } else {
       return current_code->ToDexPc(*caller_sp, outer_pc);
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 7c64952..854d0a5 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -22,6 +22,7 @@
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
 #include "gc/space/region_space.h"
+#include "mirror/object-inl.h"
 #include "lock_word.h"
 
 namespace art {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 0819ba0..f12ad80 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1875,8 +1875,10 @@
 
 // Scan ref fields of an object.
 inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
-  if (kDisallowReadBarrierDuringScan) {
+  if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     // Avoid all read barriers while visiting references to help performance.
+    // Don't do this in transaction mode, because we may read the old value of a field, which may
+    // trigger read barriers.
     Thread::Current()->ModifyDebugDisallowReadBarrier(1);
   }
   DCHECK(!region_space_->IsInFromSpace(to_ref));
@@ -1885,7 +1887,7 @@
   // Disable the read barrier for a performance reason.
   to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
       visitor, visitor);
-  if (kDisallowReadBarrierDuringScan) {
+  if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
     Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
   }
 }
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 54f2210..394e541 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -34,7 +34,7 @@
 #include "handle_scope-inl.h"
 #include "thread-inl.h"
 #include "utils.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index aa15714..051f3f7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -78,6 +78,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "handle_scope-inl.h"
 #include "thread_list.h"
+#include "verify_object-inl.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -127,8 +128,6 @@
 // Dump the rosalloc stats on SIGQUIT.
 static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
 
-static constexpr size_t kNativeAllocationHistogramBuckets = 16;
-
 // Extra added to the heap growth multiplier. Used to adjust the GC ergonomics for the read barrier
 // config.
 static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
@@ -194,18 +193,12 @@
       capacity_(capacity),
       growth_limit_(growth_limit),
       max_allowed_footprint_(initial_size),
-      native_footprint_gc_watermark_(initial_size),
-      native_need_to_run_finalization_(false),
       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       num_bytes_allocated_(0),
-      native_bytes_allocated_(0),
-      native_histogram_lock_("Native allocation lock"),
-      native_allocation_histogram_("Native allocation sizes",
-                                   1U,
-                                   kNativeAllocationHistogramBuckets),
-      native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
+      new_native_bytes_allocated_(0),
+      old_native_bytes_allocated_(0),
       num_bytes_freed_revoke_(0),
       verify_missing_card_marks_(false),
       verify_system_weaks_(false),
@@ -544,6 +537,12 @@
   gc_complete_lock_ = new Mutex("GC complete lock");
   gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
                                                 *gc_complete_lock_));
+  native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
+  native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
+                                                       *native_blocking_gc_lock_));
+  native_blocking_gc_in_progress_ = false;
+  native_blocking_gcs_finished_ = 0;
+
   thread_flip_lock_ = new Mutex("GC thread flip lock");
   thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
                                                 *thread_flip_lock_));
@@ -1111,19 +1110,9 @@
     rosalloc_space_->DumpStats(os);
   }
 
-  {
-    MutexLock mu(Thread::Current(), native_histogram_lock_);
-    if (native_allocation_histogram_.SampleSize() > 0u) {
-      os << "Histogram of native allocation ";
-      native_allocation_histogram_.DumpBins(os);
-      os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
-    }
-    if (native_free_histogram_.SampleSize() > 0u) {
-      os << "Histogram of native free ";
-      native_free_histogram_.DumpBins(os);
-      os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
-    }
-  }
+  os << "Registered native bytes allocated: "
+     << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
+     << "\n";
 
   BaseMutex::DumpAll(os);
 }
@@ -1208,6 +1197,7 @@
   STLDeleteElements(&continuous_spaces_);
   STLDeleteElements(&discontinuous_spaces_);
   delete gc_complete_lock_;
+  delete native_blocking_gc_lock_;
   delete thread_flip_lock_;
   delete pending_task_lock_;
   delete backtrace_lock_;
@@ -2655,6 +2645,13 @@
   // Approximate heap size.
   ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
 
+  if (gc_type == NonStickyGcType()) {
+    // Move all bytes from new_native_bytes_allocated_ to
+    // old_native_bytes_allocated_ now that GC has been triggered, resetting
+    // new_native_bytes_allocated_ to zero in the process.
+    old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+  }
+
   DCHECK_LT(gc_type, collector::kGcTypeMax);
   DCHECK_NE(gc_type, collector::kGcTypeNone);
 
@@ -3514,18 +3511,6 @@
   return false;
 }
 
-void Heap::UpdateMaxNativeFootprint() {
-  size_t native_size = native_bytes_allocated_.LoadRelaxed();
-  // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
-  size_t target_size = native_size / GetTargetHeapUtilization();
-  if (target_size > native_size + max_free_) {
-    target_size = native_size + max_free_;
-  } else if (target_size < native_size + min_free_) {
-    target_size = native_size + min_free_;
-  }
-  native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
-}
-
 collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
   for (const auto& collector : garbage_collectors_) {
     if (collector->GetCollectorType() == collector_type_ &&
@@ -3565,11 +3550,9 @@
     target_size = bytes_allocated + delta * multiplier;
     target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
     target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
-    native_need_to_run_finalization_ = true;
     next_gc_type_ = collector::kGcTypeSticky;
   } else {
-    collector::GcType non_sticky_gc_type =
-        HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+    collector::GcType non_sticky_gc_type = NonStickyGcType();
     // Find what the next non sticky collector will be.
     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3720,7 +3703,7 @@
       collector::GcType next_gc_type = next_gc_type_;
       // If forcing full and next gc type is sticky, override with a non-sticky type.
       if (force_full && next_gc_type == collector::kGcTypeSticky) {
-        next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+        next_gc_type = NonStickyGcType();
       }
       if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
           collector::kGcTypeNone) {
@@ -3877,70 +3860,79 @@
 }
 
 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
-  Thread* self = ThreadForEnv(env);
-  {
-    MutexLock mu(self, native_histogram_lock_);
-    native_allocation_histogram_.AddValue(bytes);
-  }
-  if (native_need_to_run_finalization_) {
-    RunFinalization(env, kNativeAllocationFinalizeTimeout);
-    UpdateMaxNativeFootprint();
-    native_need_to_run_finalization_ = false;
-  }
-  // Total number of native bytes allocated.
-  size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
-  new_native_bytes_allocated += bytes;
-  if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
-    collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
-        collector::kGcTypeFull;
+  // See the REDESIGN section of go/understanding-register-native-allocation
+  // for an explanation of how RegisterNativeAllocation works.
+  size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
+  if (new_value > NativeAllocationBlockingGcWatermark()) {
+    // Wait for a new GC to finish and finalizers to run, because the
+    // allocation rate is too high.
+    Thread* self = ThreadForEnv(env);
 
-    // The second watermark is higher than the gc watermark. If you hit this it means you are
-    // allocating native objects faster than the GC can keep up with.
-    if (new_native_bytes_allocated > growth_limit_) {
-      if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
-        // Just finished a GC, attempt to run finalizers.
-        RunFinalization(env, kNativeAllocationFinalizeTimeout);
-        CHECK(!env->ExceptionCheck());
-        // Native bytes allocated may be updated by finalization, refresh it.
-        new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
+    bool run_gc = false;
+    {
+      MutexLock mu(self, *native_blocking_gc_lock_);
+      uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
+      if (native_blocking_gc_in_progress_) {
+        // A native blocking GC is in progress from the last time the native
+        // allocation blocking GC watermark was exceeded. Wait for that GC to
+        // finish before addressing the fact that we exceeded the blocking
+        // watermark again.
+        do {
+          native_blocking_gc_cond_->Wait(self);
+        } while (native_blocking_gcs_finished_ == initial_gcs_finished);
+        initial_gcs_finished++;
       }
-      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
-      if (new_native_bytes_allocated > growth_limit_) {
-        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
-        RunFinalization(env, kNativeAllocationFinalizeTimeout);
-        native_need_to_run_finalization_ = false;
-        CHECK(!env->ExceptionCheck());
-      }
-      // We have just run finalizers, update the native watermark since it is very likely that
-      // finalizers released native managed allocations.
-      UpdateMaxNativeFootprint();
-    } else if (!IsGCRequestPending()) {
-      if (IsGcConcurrent()) {
-        RequestConcurrentGC(self, true);  // Request non-sticky type.
+
+      // It's possible multiple threads have seen that we exceeded the
+      // blocking watermark. Ensure that only one of those threads runs the
+      // blocking GC. The rest of the threads should instead wait for the
+      // blocking GC to complete.
+      if (native_blocking_gc_in_progress_) {
+        do {
+          native_blocking_gc_cond_->Wait(self);
+        } while (native_blocking_gcs_finished_ == initial_gcs_finished);
       } else {
-        CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
+        native_blocking_gc_in_progress_ = true;
+        run_gc = true;
       }
     }
+
+    if (run_gc) {
+      CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+      RunFinalization(env, kNativeAllocationFinalizeTimeout);
+      CHECK(!env->ExceptionCheck());
+
+      MutexLock mu(self, *native_blocking_gc_lock_);
+      native_blocking_gc_in_progress_ = false;
+      native_blocking_gcs_finished_++;
+      native_blocking_gc_cond_->Broadcast(self);
+    }
+  } else if (new_value > NativeAllocationGcWatermark() && !IsGCRequestPending()) {
+    // Trigger another GC because there have been enough native bytes
+    // allocated since the last GC.
+    if (IsGcConcurrent()) {
+      RequestConcurrentGC(ThreadForEnv(env), /*force_full*/true);
+    } else {
+      CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+    }
   }
 }
 
-void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
-  size_t expected_size;
-  {
-    MutexLock mu(Thread::Current(), native_histogram_lock_);
-    native_free_histogram_.AddValue(bytes);
-  }
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+  // Take the bytes freed out of new_native_bytes_allocated_ first. If
+  // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
+  // out of old_native_bytes_allocated_ to ensure all freed bytes are
+  // accounted for.
+  size_t allocated;
+  size_t new_freed_bytes;
   do {
-    expected_size = native_bytes_allocated_.LoadRelaxed();
-    if (UNLIKELY(bytes > expected_size)) {
-      ScopedObjectAccess soa(env);
-      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
-                    StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
-                    "registered as allocated", bytes, expected_size).c_str());
-      break;
-    }
-  } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
-                                                               expected_size - bytes));
+    allocated = new_native_bytes_allocated_.LoadRelaxed();
+    new_freed_bytes = std::min(allocated, bytes);
+  } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
+                                                                   allocated - new_freed_bytes));
+  if (new_freed_bytes < bytes) {
+    old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
+  }
 }
 
 size_t Heap::GetTotalMemory() const {
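
RegisterNativeFree drains the "new" counter first and only spills the remainder into
the "old" counter. A self-contained sketch of the same drain-then-spill pattern on
std::atomic (the patch uses ART's Atomic<> with relaxed ordering; the starting values
are illustrative):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> new_bytes{100};  // Stands in for new_native_bytes_allocated_.
    std::atomic<size_t> old_bytes{400};  // Stands in for old_native_bytes_allocated_.

    void RegisterFree(size_t bytes) {
      size_t allocated;
      size_t from_new;
      do {
        allocated = new_bytes.load(std::memory_order_relaxed);
        from_new = std::min(allocated, bytes);
      } while (!new_bytes.compare_exchange_weak(allocated, allocated - from_new,
                                                std::memory_order_relaxed));
      if (from_new < bytes) {
        // The remainder was registered before the last GC moved the counters.
        old_bytes.fetch_sub(bytes - from_new, std::memory_order_relaxed);
      }
    }
    // RegisterFree(150) leaves new_bytes == 0 and old_bytes == 350.
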
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3a8e29b..a4d300b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -260,9 +260,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
-  void RegisterNativeFree(JNIEnv* env, size_t bytes)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+  void RegisterNativeFree(JNIEnv* env, size_t bytes);
 
   // Change the allocator, updates entrypoints.
   void ChangeAllocator(AllocatorType allocator)
@@ -562,7 +561,7 @@
   space::Space* FindSpaceFromAddress(const void* ptr) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
 
   // Do a pending collector transition.
   void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
@@ -679,7 +678,7 @@
 
   // GC performance measuring
   void DumpGcPerformanceInfo(std::ostream& os)
-      REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+      REQUIRES(!*gc_complete_lock_);
   void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
 
   // Thread pool.
@@ -979,10 +978,6 @@
   void PostGcVerificationPaused(collector::GarbageCollector* gc)
       REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
 
-  // Update the watermark for the native allocated bytes based on the current number of native
-  // bytes allocated and the target utilization ratio.
-  void UpdateMaxNativeFootprint();
-
   // Find a collector based on GC type.
   collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
 
@@ -1066,6 +1061,31 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
 
+  collector::GcType NonStickyGcType() const {
+    return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+  }
+
+  // How large new_native_bytes_allocated_ can grow before we trigger a new
+  // GC.
+  ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
+    // Reuse max_free_ for the native allocation gc watermark, so that the
+    // native heap is treated in the same way as the Java heap in the case
+    // where the gc watermark update would exceed max_free_. Using max_free_
+    // instead of the target utilization means the watermark doesn't depend on
+    // the current number of registered native allocations.
+    return max_free_;
+  }
+
+  // How large new_native_bytes_allocated_ can grow while GC is in progress
+  // before we block the allocating thread to allow GC to catch up.
+  ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
+    // Historically the native allocations were bounded by growth_limit_. This
+    // uses that same value, dividing growth_limit_ by 2 to account for
+    // the fact that the bound is now relative to the number of retained
+    // registered native allocations rather than absolute.
+    return growth_limit_ / 2;
+  }
+
   // All-known continuous spaces, where objects lie within fixed bounds.
   std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
 
@@ -1184,12 +1204,6 @@
   // a GC should be triggered.
   size_t max_allowed_footprint_;
 
-  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
-  size_t native_footprint_gc_watermark_;
-
-  // Whether or not we need to run finalizers in the next native allocation.
-  bool native_need_to_run_finalization_;
-
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
   size_t concurrent_start_bytes_;
@@ -1203,13 +1217,25 @@
   // Number of bytes allocated.  Adjusted after each allocation and free.
   Atomic<size_t> num_bytes_allocated_;
 
-  // Bytes which are allocated and managed by native code but still need to be accounted for.
-  Atomic<size_t> native_bytes_allocated_;
+  // Number of registered native bytes allocated since the last time GC was
+  // triggered. Adjusted after each RegisterNativeAllocation and
+  // RegisterNativeFree. Used to determine when to trigger GC for native
+  // allocations.
+  // See the REDESIGN section of go/understanding-register-native-allocation.
+  Atomic<size_t> new_native_bytes_allocated_;
 
-  // Native allocation stats.
-  Mutex native_histogram_lock_;
-  Histogram<uint64_t> native_allocation_histogram_;
-  Histogram<uint64_t> native_free_histogram_;
+  // Number of registered native bytes allocated prior to the last time GC was
+  // triggered, for debugging purposes. The current number of registered
+  // native bytes is determined by taking the sum of
+  // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+  Atomic<size_t> old_native_bytes_allocated_;
+
+  // Used for synchronization of blocking GCs triggered by
+  // RegisterNativeAllocation.
+  Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+  bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
+  uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
 
   // Number of bytes freed by thread local buffer revokes. This will
   // cancel out the ahead-of-time bulk counting of bytes allocated in
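
Waiters on the blocking GC compare against a saved generation count rather than just
the in-progress flag, so a thread cannot miss a finish/start pair that happens between
wakeups. A simplified standalone sketch with std::mutex and std::condition_variable
standing in for ART's types (names are illustrative):

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    std::mutex gc_mutex;              // Stands in for native_blocking_gc_lock_.
    std::condition_variable gc_cond;  // Stands in for native_blocking_gc_cond_.
    uint32_t gcs_finished = 0;        // Stands in for native_blocking_gcs_finished_.

    // Block until at least one blocking GC completes after this call.
    void WaitForOneBlockingGc() {
      std::unique_lock<std::mutex> lock(gc_mutex);
      uint32_t initial = gcs_finished;
      while (gcs_finished == initial) {
        gc_cond.wait(lock);
      }
    }

    // Counterpart of the Broadcast performed after each blocking GC.
    void OnBlockingGcFinished() {
      std::lock_guard<std::mutex> lock(gc_mutex);
      ++gcs_finished;
      gc_cond.notify_all();
    }
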
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index b212d09..077f45e 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -23,7 +23,7 @@
 #include "handle.h"
 #include "obj_ptr-inl.h"
 #include "thread-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index aab1d9c..f888482 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -17,10 +17,12 @@
 #include <type_traits>
 
 #include "base/enums.h"
+#include "class_linker-inl.h"
 #include "common_runtime_test.h"
 #include "gtest/gtest.h"
 #include "handle.h"
 #include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
 #include "mirror/object.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index 0e66ae9..24ee227 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -25,7 +25,7 @@
 #include "gc_root-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c737119..9fbb2e9 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -25,7 +25,6 @@
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "utils.h"
-#include "verify_object-inl.h"
 
 #include <cstdlib>
 
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index feb6e08..545cc1a 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -21,6 +21,7 @@
 #include <stdlib.h>
 
 #include <cmath>
+#include <initializer_list>
 #include <limits>
 #include <locale>
 #include <unordered_map>
@@ -401,6 +402,25 @@
   result->SetL(constructor);
 }
 
+void UnstartedRuntime::UnstartedClassGetDeclaringClass(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> klass(hs.NewHandle(
+      reinterpret_cast<mirror::Class*>(shadow_frame->GetVRegReference(arg_offset))));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    result->SetL(nullptr);
+    return;
+  }
+  // Return null for anonymous classes.
+  JValue is_anon_result;
+  UnstartedClassIsAnonymousClass(self, shadow_frame, &is_anon_result, arg_offset);
+  if (is_anon_result.GetZ() != 0) {
+    result->SetL(nullptr);
+    return;
+  }
+  result->SetL(annotations::GetDeclaringClass(klass));
+}
+
 void UnstartedRuntime::UnstartedClassGetEnclosingClass(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
   StackHandleScope<1> hs(self);
@@ -420,6 +440,23 @@
   result->SetI(mirror::Class::GetInnerClassFlags(klass, default_value));
 }
 
+void UnstartedRuntime::UnstartedClassIsAnonymousClass(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> klass(hs.NewHandle(
+      reinterpret_cast<mirror::Class*>(shadow_frame->GetVRegReference(arg_offset))));
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+    result->SetZ(false);
+    return;
+  }
+  mirror::String* class_name = nullptr;
+  if (!annotations::GetInnerClass(klass, &class_name)) {
+    result->SetZ(false);
+    return;
+  }
+  result->SetZ(class_name == nullptr);
+}
+
 static std::unique_ptr<MemMap> FindAndExtractEntry(const std::string& jar_file,
                                                    const char* entry_name,
                                                    size_t* size,
@@ -847,43 +884,74 @@
   GetSystemProperty(self, shadow_frame, result, arg_offset, true);
 }
 
-void UnstartedRuntime::UnstartedThreadLocalGet(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
-  std::string caller(ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod()));
-  bool ok = false;
-  if (caller == "void java.lang.FloatingDecimal.developLongDigits(int, long, long)" ||
-      caller == "java.lang.String java.lang.FloatingDecimal.toJavaFormatString()") {
-    // Allocate non-threadlocal buffer.
-    result->SetL(mirror::CharArray::Alloc(self, 26));
-    ok = true;
-  } else if (caller ==
-             "java.lang.FloatingDecimal java.lang.FloatingDecimal.getThreadLocalInstance()") {
-    // Allocate new object.
-    StackHandleScope<2> hs(self);
-    Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
-        shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
-    Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
-        h_real_to_string_class->AllocObject(self)));
-    if (h_real_to_string_obj.Get() != nullptr) {
-      auto* cl = Runtime::Current()->GetClassLinker();
-      ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
-          "<init>", "()V", cl->GetImagePointerSize());
-      if (init_method == nullptr) {
-        h_real_to_string_class->DumpClass(LOG_STREAM(FATAL), mirror::Class::kDumpClassFullDetail);
-      } else {
-        JValue invoke_result;
-        EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
-                                   nullptr);
-        if (!self->IsExceptionPending()) {
-          result->SetL(h_real_to_string_obj.Get());
-          ok = true;
-        }
-      }
+static std::string GetImmediateCaller(ShadowFrame* shadow_frame)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (shadow_frame->GetLink() == nullptr) {
+    return "<no caller>";
+  }
+  return ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod());
+}
+
+static bool CheckCallers(ShadowFrame* shadow_frame,
+                         std::initializer_list<std::string> allowed_call_stack)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  for (const std::string& allowed_caller : allowed_call_stack) {
+    if (shadow_frame->GetLink() == nullptr) {
+      return false;
     }
+
+    std::string found_caller = ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod());
+    if (allowed_caller != found_caller) {
+      return false;
+    }
+
+    shadow_frame = shadow_frame->GetLink();
+  }
+  return true;
+}
+
+static ObjPtr<mirror::Object> CreateInstanceOf(Thread* self, const char* class_descriptor)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Find the requested class.
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ObjPtr<mirror::Class> klass =
+      class_linker->FindClass(self, class_descriptor, ScopedNullHandle<mirror::ClassLoader>());
+  if (klass == nullptr) {
+    AbortTransactionOrFail(self, "Could not load class %s", class_descriptor);
+    return nullptr;
   }
 
-  if (!ok) {
-    AbortTransactionOrFail(self, "Could not create RealToString object");
+  StackHandleScope<2> hs(self);
+  Handle<mirror::Class> h_class(hs.NewHandle(klass));
+  Handle<mirror::Object> h_obj(hs.NewHandle(h_class->AllocObject(self)));
+  if (h_obj.Get() != nullptr) {
+    ArtMethod* init_method = h_class->FindDirectMethod(
+        "<init>", "()V", class_linker->GetImagePointerSize());
+    if (init_method == nullptr) {
+      AbortTransactionOrFail(self, "Could not find <init> for %s", class_descriptor);
+      return nullptr;
+    } else {
+      JValue invoke_result;
+      EnterInterpreterFromInvoke(self, init_method, h_obj.Get(), nullptr, nullptr);
+      if (!self->IsExceptionPending()) {
+        return h_obj.Get();
+      }
+      AbortTransactionOrFail(self, "Could not run <init> for %s", class_descriptor);
+    }
+  }
+  AbortTransactionOrFail(self, "Could not allocate instance of %s", class_descriptor);
+  return nullptr;
+}
+
+void UnstartedRuntime::UnstartedThreadLocalGet(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+  if (CheckCallers(shadow_frame, { "sun.misc.FloatingDecimal$BinaryToASCIIBuffer "
+                                       "sun.misc.FloatingDecimal.getBinaryToASCIIBuffer()" })) {
+    result->SetL(CreateInstanceOf(self, "Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;"));
+  } else {
+    AbortTransactionOrFail(self,
+                           "ThreadLocal.get() does not support %s",
+                           GetImmediateCaller(shadow_frame).c_str());
   }
 }
 
@@ -1216,12 +1284,12 @@
 //       initialization of other classes, so will *use* the value.
 void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
-  std::string caller(ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod()));
-  if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+  if (CheckCallers(shadow_frame, { "void java.util.concurrent.SynchronousQueue.<clinit>()" })) {
     // SynchronousQueue really only separates between single- and multiprocessor case. Return
     // 8 as a conservative upper approximation.
     result->SetI(8);
-  } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+  } else if (CheckCallers(shadow_frame,
+                          { "void java.util.concurrent.ConcurrentHashMap.<clinit>()" })) {
     // ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
     // a good upper bound.
     // TODO: Consider resetting in the zygote?
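
CheckCallers walks the shadow-frame links outward, matching each frame's
pretty-printed method against the allowed stack, innermost caller first. A
hypothetical guard for a two-deep call stack (the method names are made up):

    if (CheckCallers(shadow_frame,
                     { "java.lang.String java.lang.Example.format()",  // Immediate caller.
                       "void java.lang.Example.<clinit>()" })) {       // Caller's caller.
      // Only reached when format() was itself called from Example.<clinit>().
    }
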
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index b8553b5..96b35e4 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -28,8 +28,10 @@
   V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
   V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
   V(ClassGetDeclaredConstructor, "java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructorInternal(java.lang.Class[])") \
+  V(ClassGetDeclaringClass, "java.lang.Class java.lang.Class.getDeclaringClass()") \
   V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
   V(ClassGetInnerClassFlags, "int java.lang.Class.getInnerClassFlags(int)") \
+  V(ClassIsAnonymousClass, "boolean java.lang.Class.isAnonymousClass()") \
   V(ClassLoaderGetResourceAsStream, "java.io.InputStream java.lang.ClassLoader.getResourceAsStream(java.lang.String)") \
   V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
   V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index b190c81..31be587 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -885,5 +885,159 @@
   ShadowFrame::DeleteDeoptimizedFrame(tmp);
 }
 
+TEST_F(UnstartedRuntimeTest, IsAnonymousClass) {
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+
+  JValue result;
+  ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+  mirror::Class* class_klass = mirror::Class::GetJavaLangClass();
+  shadow_frame->SetVRegReference(0, class_klass);
+  UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0);
+  EXPECT_EQ(result.GetZ(), 0);
+
+  jobject class_loader = LoadDex("Nested");
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::ClassLoader> loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+  mirror::Class* c = class_linker_->FindClass(soa.Self(), "LNested$1;", loader);
+  ASSERT_TRUE(c != nullptr);
+  shadow_frame->SetVRegReference(0, c);
+  UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0);
+  EXPECT_EQ(result.GetZ(), 1);
+
+  ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
+TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+
+  JValue result;
+  ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+  jobject class_loader = LoadDex("Nested");
+  StackHandleScope<4> hs(self);
+  Handle<mirror::ClassLoader> loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+
+  Handle<mirror::Class> nested_klass(hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "LNested;", loader)));
+  Handle<mirror::Class> inner_klass(hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "LNested$Inner;", loader)));
+  Handle<mirror::Class> anon_klass(hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "LNested$1;", loader)));
+
+  shadow_frame->SetVRegReference(0, nested_klass.Get());
+  UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+  EXPECT_EQ(result.GetL(), nullptr);
+
+  shadow_frame->SetVRegReference(0, inner_klass.Get());
+  UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+  EXPECT_EQ(result.GetL(), nested_klass.Get());
+
+  shadow_frame->SetVRegReference(0, anon_klass.Get());
+  UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+  EXPECT_EQ(result.GetL(), nullptr);
+
+  ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
+TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+
+  JValue result;
+  ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+  StackHandleScope<1> hs(self);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+  // Positive test: check that we get something for float conversion.
+  {
+    Handle<mirror::Class> floating_decimal = hs.NewHandle(
+        class_linker->FindClass(self,
+                                "Lsun/misc/FloatingDecimal;",
+                                ScopedNullHandle<mirror::ClassLoader>()));
+    ASSERT_TRUE(floating_decimal.Get() != nullptr);
+    ASSERT_TRUE(class_linker->EnsureInitialized(self, floating_decimal, true, true));
+
+    ArtMethod* caller_method = floating_decimal->FindDeclaredDirectMethod(
+        "getBinaryToASCIIBuffer",
+        "()Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;",
+        class_linker->GetImagePointerSize());
+    // floating_decimal->DumpClass(LOG_STREAM(ERROR), mirror::Class::kDumpClassFullDetail);
+    ASSERT_TRUE(caller_method != nullptr);
+    ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
+    shadow_frame->SetLink(caller_frame);
+
+    UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+    EXPECT_TRUE(result.GetL() != nullptr);
+    EXPECT_FALSE(self->IsExceptionPending());
+
+    ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+  }
+
+  // Negative test.
+  PrepareForAborts();
+
+  {
+    // Just use a method in Class.
+    ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+    ArtMethod* caller_method =
+        &*class_class->GetDeclaredMethods(class_linker->GetImagePointerSize()).begin();
+    ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
+    shadow_frame->SetLink(caller_frame);
+
+    Transaction transaction;
+    Runtime::Current()->EnterTransactionMode(&transaction);
+    UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+    Runtime::Current()->ExitTransactionMode();
+    ASSERT_TRUE(self->IsExceptionPending());
+    ASSERT_TRUE(transaction.IsAborted());
+    self->ClearException();
+
+    ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+  }
+
+  ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
+TEST_F(UnstartedRuntimeTest, FloatConversion) {
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+
+  StackHandleScope<1> hs(self);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  Handle<mirror::Class> double_class = hs.NewHandle(
+          class_linker->FindClass(self,
+                                  "Ljava/lang/Double;",
+                                  ScopedNullHandle<mirror::ClassLoader>()));
+  ASSERT_TRUE(double_class.Get() != nullptr);
+  ASSERT_TRUE(class_linker->EnsureInitialized(self, double_class, true, true));
+
+  ArtMethod* method = double_class->FindDeclaredDirectMethod("toString",
+                                                             "(D)Ljava/lang/String;",
+                                                             class_linker->GetImagePointerSize());
+  ASSERT_TRUE(method != nullptr);
+
+  // Create instruction data for invoke-direct {v0, v1} of a method with a fake index.
+  uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
+  const Instruction* inst = Instruction::At(inst_data);
+
+  JValue result;
+  ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
+  shadow_frame->SetVRegDouble(0, 1.23);
+  interpreter::DoCall<false, false>(method, self, *shadow_frame, inst, inst_data[0], &result);
+  ObjPtr<mirror::String> string_result = reinterpret_cast<mirror::String*>(result.GetL());
+  ASSERT_TRUE(string_result != nullptr);
+
+  std::string mod_utf = string_result->ToModifiedUtf8();
+  EXPECT_EQ("1.23", mod_utf);
+
+  ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
 }  // namespace interpreter
 }  // namespace art
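
For reference, the raw units in the FloatConversion test decode as a Dalvik format 35c
instruction; the fake method index is harmless here because DoCall is handed the
ArtMethod directly, so the index is never resolved:

    // Format 35c: A|G|op BBBB F|E|D|C
    // 0x2070 -> A = 2 (argument count), G = 0, op = 0x70 (invoke-direct)
    // 0x0000 -> BBBB: method index 0 (fake)
    // 0x0010 -> F|E|D|C nibbles: C = 0 (v0), D = 1 (v1)
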
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 170887e..4615574 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -19,6 +19,7 @@
 #include "handle_scope-inl.h"
 #include "jni_internal.h"
 #include "mirror/class.h"
+#include "mirror/throwable.h"
 #include "obj_ptr-inl.h"
 #include "scoped_thread_state_change-inl.h"
 
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6deb03d..1ec4749 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -145,7 +145,12 @@
              cumulative_timings_("JIT timings"),
              memory_use_("Memory used for compilation", 16),
              lock_("JIT memory use lock"),
-             use_jit_compilation_(true) {}
+             use_jit_compilation_(true),
+             hot_method_threshold_(0),
+             warm_method_threshold_(0),
+             osr_method_threshold_(0),
+             priority_thread_weight_(0),
+             invoke_transition_weight_(0) {}
 
 Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
   DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
@@ -289,7 +294,11 @@
 void Jit::CreateThreadPool() {
   // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
   // is not null when we instrument.
-  thread_pool_.reset(new ThreadPool("Jit thread pool", 1));
+
+  // We need peers as we may report the JIT thread, e.g., in the debugger.
+  constexpr bool kJitPoolNeedsPeers = true;
+  thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
+
   thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
   Start();
 }
@@ -514,7 +523,7 @@
       }
     }
 
-    native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA) +
+    native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
         osr_method->GetEntryPoint();
     VLOG(jit) << "Jumping to "
               << method_name
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 1405c40..9ba2d1a 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -597,6 +597,24 @@
   return total;
 }
 
+// Produce a non-owning vector of raw pointers from a vector of unique_ptr.
+template<typename T>
+const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
+  auto non_owning_vector = new std::vector<T*>();
+  for (auto& element : *owning_vector) {
+    non_owning_vector->push_back(element.get());
+  }
+  return non_owning_vector;
+}
+
+std::string ProfileCompilationInfo::DumpInfo(
+    const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+    bool print_full_dex_location) const {
+  std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
+      MakeNonOwningVector(dex_files));
+  return DumpInfo(non_owning_dex_files.get(), print_full_dex_location);
+}
+
 std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
                                              bool print_full_dex_location) const {
   std::ostringstream os;
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index f8061bc..b1587c0 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
 #define ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
 
+#include <memory>
 #include <set>
 #include <vector>
 
@@ -72,6 +73,8 @@
   // If dex_files is not null then the method indices will be resolved to their
   // names.
   // This is intended for testing and debugging.
+  std::string DumpInfo(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+                       bool print_full_dex_location = true) const;
   std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
                        bool print_full_dex_location = true) const;
 
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 19a65bb..dce56b3 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -70,6 +70,7 @@
   return os;
 }
 
+std::mutex* MemMap::mem_maps_lock_ = nullptr;
 MemMap::Maps* MemMap::maps_ = nullptr;
 
 #if USE_ART_LOW_4G_ALLOCATOR
@@ -139,7 +140,7 @@
   // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
   // further.
   {
-    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
     for (auto& pair : *maps_) {
       MemMap* const map = pair.second;
       if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
@@ -490,7 +491,7 @@
   }
 
   // Remove it from maps_.
-  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
   bool found = false;
   DCHECK(maps_ != nullptr);
   for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
@@ -518,7 +519,7 @@
     CHECK_NE(base_size_, 0U);
 
     // Add it to maps_.
-    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
     DCHECK(maps_ != nullptr);
     maps_->insert(std::make_pair(base_begin_, this));
   }
@@ -637,7 +638,7 @@
 }
 
 bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
-  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
   CHECK(begin_map != nullptr);
   CHECK(end_map != nullptr);
   CHECK(HasMemMap(begin_map));
@@ -656,7 +657,7 @@
 }
 
 void MemMap::DumpMaps(std::ostream& os, bool terse) {
-  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
   DumpMapsLocked(os, terse);
 }
 
@@ -747,17 +748,31 @@
 }
 
 void MemMap::Init() {
-  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
-  if (maps_ == nullptr) {
+  if (mem_maps_lock_ != nullptr) {
     // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
-    maps_ = new Maps;
+    return;
   }
+  mem_maps_lock_ = new std::mutex();
+  // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_).
+  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+  DCHECK(maps_ == nullptr);
+  maps_ = new Maps;
 }
 
 void MemMap::Shutdown() {
-  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
-  delete maps_;
-  maps_ = nullptr;
+  if (mem_maps_lock_ == nullptr) {
+    // If MemMap::Shutdown is called more than once, there is no effect.
+    return;
+  }
+  {
+    // Not for thread safety, but for the annotation that maps_ is GUARDED_BY(mem_maps_lock_).
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+    DCHECK(maps_ != nullptr);
+    delete maps_;
+    maps_ = nullptr;
+  }
+  delete mem_maps_lock_;
+  mem_maps_lock_ = nullptr;
 }
 
 void MemMap::SetSize(size_t new_size) {
@@ -813,7 +828,7 @@
   if (low_4gb && addr == nullptr) {
     bool first_run = true;
 
-    MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
       // Use maps_ as an optimization to skip over large maps.
       // Find the first map which is address > ptr.
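
The Init/Shutdown rewrite above leans on the null/non-null state of the heap-allocated std::mutex as the "already initialized" flag, making both calls idempotent. A minimal compilable sketch of the same pattern; Registry, lock_, and entries_ are hypothetical stand-ins for MemMap's statics:

#include <map>
#include <mutex>

class Registry {
 public:
  static void Init() {
    if (lock_ != nullptr) {
      return;  // Second call is a no-op, mirroring MemMap::Init.
    }
    lock_ = new std::mutex();
    // Not needed for thread safety; taken so an annotated map can be written.
    std::lock_guard<std::mutex> guard(*lock_);
    entries_ = new std::map<int, int>();
  }

  static void Shutdown() {
    if (lock_ == nullptr) {
      return;  // Never initialized, or already shut down.
    }
    {
      std::lock_guard<std::mutex> guard(*lock_);
      delete entries_;
      entries_ = nullptr;
    }
    delete lock_;
    lock_ = nullptr;
  }

 private:
  static std::mutex* lock_;
  static std::map<int, int>* entries_;
};

std::mutex* Registry::lock_ = nullptr;
std::map<int, int>* Registry::entries_ = nullptr;
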
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 0fea1a5..71db3f7 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -21,6 +21,7 @@
 
 #include <string>
 #include <map>
+#include <mutex>
 
 #include <stddef.h>
 #include <sys/mman.h>  // For the PROT_* and MAP_* constants.
@@ -120,7 +121,7 @@
                                   std::string* error_msg);
 
   // Releases the memory mapping.
-  ~MemMap() REQUIRES(!Locks::mem_maps_lock_);
+  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
 
   const std::string& GetName() const {
     return name_;
@@ -175,14 +176,17 @@
                      bool use_ashmem = true);
 
   static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
-      REQUIRES(!Locks::mem_maps_lock_);
+      REQUIRES(!MemMap::mem_maps_lock_);
   static void DumpMaps(std::ostream& os, bool terse = false)
-      REQUIRES(!Locks::mem_maps_lock_);
+      REQUIRES(!MemMap::mem_maps_lock_);
 
   typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
 
-  static void Init() REQUIRES(!Locks::mem_maps_lock_);
-  static void Shutdown() REQUIRES(!Locks::mem_maps_lock_);
+  // Init and Shutdown are NOT thread safe.
+  // Both may be called multiple times and MemMap objects may be created any
+  // time after the first call to Init and before the first call to Shutdown.
+  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
+  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
 
   // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
   // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
@@ -197,16 +201,16 @@
          size_t base_size,
          int prot,
          bool reuse,
-         size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_);
+         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
 
   static void DumpMapsLocked(std::ostream& os, bool terse)
-      REQUIRES(Locks::mem_maps_lock_);
+      REQUIRES(MemMap::mem_maps_lock_);
   static bool HasMemMap(MemMap* map)
-      REQUIRES(Locks::mem_maps_lock_);
+      REQUIRES(MemMap::mem_maps_lock_);
   static MemMap* GetLargestMemMapAt(void* address)
-      REQUIRES(Locks::mem_maps_lock_);
+      REQUIRES(MemMap::mem_maps_lock_);
   static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
-      REQUIRES(!Locks::mem_maps_lock_);
+      REQUIRES(!MemMap::mem_maps_lock_);
 
   // Internal version of mmap that supports low 4gb emulation.
   static void* MapInternal(void* addr,
@@ -236,8 +240,10 @@
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif
 
+  static std::mutex* mem_maps_lock_;
+
   // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
-  static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
+  static Maps* maps_ GUARDED_BY(MemMap::mem_maps_lock_);
 
   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
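
Dropping Locks::mem_maps_lock_ for a class-local std::mutex only preserves the REQUIRES/GUARDED_BY checking because libc++ marks std::mutex as a capability once its thread-safety annotations are enabled. A minimal sketch of that interaction under clang with -Wthread-safety; the macro definitions and the Counter class are illustrative rather than ART's headers, and the std::mutex checks assume an annotated libc++:

#include <mutex>

#if defined(__clang__)
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#else
#define GUARDED_BY(x)
#define REQUIRES(...)
#endif

class Counter {
 public:
  int Get() {
    std::lock_guard<std::mutex> guard(lock_);
    return UnsafeGet();  // OK: the analysis sees lock_ held via lock_guard.
  }

  int UnsafeGet() REQUIRES(lock_) {
    return value_;  // Callers must hold lock_; clang checks the call sites.
  }

 private:
  std::mutex lock_;
  int value_ GUARDED_BY(lock_) = 0;
};
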
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 6a65e12..2cff47e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -65,6 +65,17 @@
       OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
 }
 
+inline void Class::SetSuperClass(ObjPtr<Class> new_super_class) {
+  // Super class is assigned once, except during class linker initialization.
+  if (kIsDebugBuild) {
+    ObjPtr<Class> old_super_class =
+        GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
+    DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
+  }
+  DCHECK(new_super_class != nullptr);
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
+}
+
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline ClassLoader* Class::GetClassLoader() {
   return GetFieldObject<ClassLoader, kVerifyFlags, kReadBarrierOption>(
@@ -635,23 +646,6 @@
   }
 }
 
-template<VerifyObjectFlags kVerifyFlags>
-inline uint32_t Class::GetAccessFlags() {
-  // Check class is loaded/retired or this is java.lang.String that has a
-  // circularity issue during loading the names of its members
-  DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
-         IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
-         this == String::GetJavaLangString())
-      << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
-      << " IsRetired=" << IsRetired<kVerifyFlags>()
-      << " IsErroneous=" <<
-          IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
-      << " IsString=" << (this == String::GetJavaLangString())
-      << " status= " << GetStatus<kVerifyFlags>()
-      << " descriptor=" << PrettyDescriptor();
-  return GetField32<kVerifyFlags>(AccessFlagsOffset());
-}
-
 inline String* Class::GetName() {
   return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
 }
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f08d4da..1b8f3f8 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1345,5 +1345,26 @@
   return result;
 }
 
+template<VerifyObjectFlags kVerifyFlags> void Class::GetAccessFlagsDCheck() {
+  // Check that the class is loaded/retired, or that this is java.lang.String,
+  // which has a circularity issue while loading the names of its members.
+  DCHECK(IsIdxLoaded<kVerifyFlags>() || IsRetired<kVerifyFlags>() ||
+         IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
+         this == String::GetJavaLangString())
+              << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
+              << " IsRetired=" << IsRetired<kVerifyFlags>()
+              << " IsErroneous=" <<
+              IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
+              << " IsString=" << (this == String::GetJavaLangString())
+              << " status= " << GetStatus<kVerifyFlags>()
+              << " descriptor=" << PrettyDescriptor();
+}
+// Instantiate the common cases.
+template void Class::GetAccessFlagsDCheck<kVerifyNone>();
+template void Class::GetAccessFlagsDCheck<kVerifyThis>();
+template void Class::GetAccessFlagsDCheck<kVerifyReads>();
+template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
+template void Class::GetAccessFlagsDCheck<kVerifyAll>();
+
 }  // namespace mirror
 }  // namespace art
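
GetAccessFlags can stay ALWAYS_INLINE and small because the heavyweight DCHECK body moves out of line, with explicit instantiations covering the verify-flag combinations callers actually use. A compilable sketch of the technique; Widget and GetValueDCheck are hypothetical, and both file halves are shown in one listing:

#include <cassert>

#ifdef NDEBUG
constexpr bool kIsDebugBuild = false;
#else
constexpr bool kIsDebugBuild = true;
#endif

// widget.h: the hot accessor stays tiny and inlinable.
class Widget {
 public:
  template <int kVerifyFlags>
  int GetValue() {
    if (kIsDebugBuild) {
      GetValueDCheck<kVerifyFlags>();  // Out of line; dead code in release.
    }
    return value_;
  }

 private:
  template <int kVerifyFlags>
  void GetValueDCheck();  // Defined in the .cc, as class.cc does above.

  int value_ = 42;
};

// widget.cc: the heavyweight check, plus explicit instantiations so the
// linker can resolve the cases callers actually use.
template <int kVerifyFlags>
void Widget::GetValueDCheck() {
  assert(value_ >= 0);  // Stand-in for the loaded/retired/String checks.
}

template void Widget::GetValueDCheck<0>();
template void Widget::GetValueDCheck<1>();
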
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index c9f27ad..d34f09c 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -231,7 +231,13 @@
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (kIsDebugBuild) {
+      GetAccessFlagsDCheck<kVerifyFlags>();
+    }
+    return GetField32<kVerifyFlags>(AccessFlagsOffset());
+  }
+
   static MemberOffset AccessFlagsOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
   }
@@ -683,14 +689,7 @@
   // `This` and `klass` must be classes.
   ObjPtr<Class> GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Super class is assigned once, except during class linker initialization.
-    ObjPtr<Class> old_super_class =
-        GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
-    DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
-    DCHECK(new_super_class != nullptr);
-    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
-  }
+  void SetSuperClass(ObjPtr<Class> new_super_class) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return GetSuperClass() != nullptr;
@@ -1397,6 +1396,9 @@
 
   bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template<VerifyObjectFlags kVerifyFlags>
+  void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
 
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index dca3062..53d267b 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -19,7 +19,7 @@
 
 #include "class.h"
 #include "gc_root.h"
-#include "object.h"
+#include "object-inl.h"
 #include "method_handles.h"
 #include "method_type.h"
 
diff --git a/runtime/mirror/method_type_test.cc b/runtime/mirror/method_type_test.cc
index 03ab930..637bafd 100644
--- a/runtime/mirror/method_type_test.cc
+++ b/runtime/mirror/method_type_test.cc
@@ -19,12 +19,13 @@
 #include <string>
 #include <vector>
 
+#include "class-inl.h"
 #include "class_linker.h"
+#include "class_loader.h"
 #include "common_runtime_test.h"
 #include "handle_scope-inl.h"
-#include "runtime/mirror/class.h"
-#include "runtime/mirror/class_loader.h"
-#include "scoped_thread_state_change.h"
+#include "object_array-inl.h"
+#include "scoped_thread_state_change-inl.h"
 
 namespace art {
 namespace mirror {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 354410e..8e591e4 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -142,8 +142,10 @@
 }
 
 inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
-#ifdef USE_BAKER_READ_BARRIER
-  CHECK(kUseBakerReadBarrier);
+  if (!kUseBakerReadBarrier) {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
 #if defined(__arm__)
   uintptr_t obj = reinterpret_cast<uintptr_t>(this);
   uintptr_t result;
@@ -190,37 +192,29 @@
   UNREACHABLE();
   UNUSED(fake_address_dependency);
 #endif
-#else  // !USE_BAKER_READ_BARRIER
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
-  UNUSED(fake_address_dependency);
-#endif
 }
 
 inline uint32_t Object::GetReadBarrierState() {
-#ifdef USE_BAKER_READ_BARRIER
+  if (!kUseBakerReadBarrier) {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
   DCHECK(kUseBakerReadBarrier);
   LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
   uint32_t rb_state = lw.ReadBarrierState();
   DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   return rb_state;
-#else
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
-#endif
 }
 
 inline uint32_t Object::GetReadBarrierStateAcquire() {
-#ifdef USE_BAKER_READ_BARRIER
-  DCHECK(kUseBakerReadBarrier);
+  if (!kUseBakerReadBarrier) {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
   LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
   uint32_t rb_state = lw.ReadBarrierState();
   DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   return rb_state;
-#else
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
-#endif
 }
 
 inline uint32_t Object::GetMarkBit() {
@@ -233,23 +227,22 @@
 }
 
 inline void Object::SetReadBarrierState(uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
-  DCHECK(kUseBakerReadBarrier);
+  if (!kUseBakerReadBarrier) {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
   DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   LockWord lw = GetLockWord(false);
   lw.SetReadBarrierState(rb_state);
   SetLockWord(lw, false);
-#else
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
-  UNUSED(rb_state);
-#endif
 }
 
 template<bool kCasRelease>
 inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
-#ifdef USE_BAKER_READ_BARRIER
-  DCHECK(kUseBakerReadBarrier);
+  if (!kUseBakerReadBarrier) {
+    LOG(FATAL) << "Unreachable";
+    UNREACHABLE();
+  }
   DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
   DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
   LockWord expected_lw;
@@ -272,11 +265,6 @@
              CasLockWordWeakRelease(expected_lw, new_lw) :
              CasLockWordWeakRelaxed(expected_lw, new_lw)));
   return true;
-#else
-  UNUSED(expected_rb_state, rb_state);
-  LOG(FATAL) << "Unreachable";
-  UNREACHABLE();
-#endif
 }
 
 inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
@@ -691,19 +679,6 @@
       field_offset, new_value);
 }
 
-template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
-inline int32_t Object::GetField32(MemberOffset field_offset) {
-  if (kVerifyFlags & kVerifyThis) {
-    VerifyObject(this);
-  }
-  return GetField<int32_t, kIsVolatile>(field_offset);
-}
-
-template<VerifyObjectFlags kVerifyFlags>
-inline int32_t Object::GetField32Volatile(MemberOffset field_offset) {
-  return GetField32<kVerifyFlags, true>(field_offset);
-}
-
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
     bool kIsVolatile>
 inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
@@ -854,28 +829,6 @@
                                                                                new_value);
 }
 
-template<typename kSize, bool kIsVolatile>
-inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
-  uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
-  kSize* addr = reinterpret_cast<kSize*>(raw_addr);
-  if (kIsVolatile) {
-    reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
-  } else {
-    reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
-  }
-}
-
-template<typename kSize, bool kIsVolatile>
-inline kSize Object::GetField(MemberOffset field_offset) {
-  const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
-  const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
-  if (kIsVolatile) {
-    return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
-  } else {
-    return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
-  }
-}
-
 template<typename kSize>
 inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
   const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
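
The read-barrier accessors above trade #ifdef blocks for a guard on a compile-time constant, so every configuration parses both branches (no more UNUSED() markers) while the optimizer still deletes the dead one. A minimal sketch, with kUseFeature and Fatal standing in for kUseBakerReadBarrier and LOG(FATAL):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr bool kUseFeature = true;  // Stand-in for kUseBakerReadBarrier.

[[noreturn]] void Fatal(const char* msg) {
  std::fprintf(stderr, "%s\n", msg);
  std::abort();
}

uint32_t ReadState(uintptr_t* out_dependency) {
  if (!kUseFeature) {
    // Replaces the old #else branch; no UNUSED(out_dependency) is needed
    // because the parameter is used on the live path below.
    Fatal("Unreachable");
  }
  *out_dependency = 0;
  return 1u;  // Feature-specific fast path.
}
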
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index db58a60..4541ce2 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_OBJECT_H_
 #define ART_RUNTIME_MIRROR_OBJECT_H_
 
+#include "atomic.h"
 #include "base/casts.h"
 #include "base/enums.h"
 #include "globals.h"
@@ -432,11 +433,18 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (kVerifyFlags & kVerifyThis) {
+      VerifyObject(this);
+    }
+    return GetField<int32_t, kIsVolatile>(field_offset);
+  }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetField32<kVerifyFlags, true>(field_offset);
+  }
 
   template<bool kTransactionActive, bool kCheckTransaction = true,
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
@@ -611,10 +619,28 @@
  private:
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+    kSize* addr = reinterpret_cast<kSize*>(raw_addr);
+    if (kIsVolatile) {
+      reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+    } else {
+      reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
+    }
+  }
+
   template<typename kSize, bool kIsVolatile>
   ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+    const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+    if (kIsVolatile) {
+      return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+    } else {
+      return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
+    }
+  }
+
   // Get a field with acquire semantics.
   template<typename kSize>
   ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
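
The SetField/GetField bodies that moved into the header turn an object pointer plus byte offset into a typed location and pick memory ordering from the kIsVolatile template flag. A rough standard-C++ analogue, assuming std::atomic in place of ART's Atomic<>; the cast from raw field memory to std::atomic is part of the sketch, not strictly portable C++:

#include <atomic>
#include <cstddef>
#include <cstdint>

template <typename T, bool kIsVolatile>
T GetFieldAt(void* object, size_t offset) {
  auto* addr = reinterpret_cast<std::atomic<T>*>(
      reinterpret_cast<uint8_t*>(object) + offset);
  // Volatile Java fields need seq_cst ordering; plain fields only need the
  // relaxed "no tearing" guarantee.
  return kIsVolatile ? addr->load(std::memory_order_seq_cst)
                     : addr->load(std::memory_order_relaxed);
}

template <typename T, bool kIsVolatile>
void SetFieldAt(void* object, size_t offset, T new_value) {
  auto* addr = reinterpret_cast<std::atomic<T>*>(
      reinterpret_cast<uint8_t*>(object) + offset);
  if (kIsVolatile) {
    addr->store(new_value, std::memory_order_seq_cst);
  } else {
    addr->store(new_value, std::memory_order_relaxed);
  }
}
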
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0ceb23a..a32003e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -356,40 +356,44 @@
     // Do this before releasing the lock so that we don't get deflated.
     size_t num_waiters = num_waiters_;
     ++num_waiters_;
+
+    // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
+    // lock and then re-acquiring the mutator lock can deadlock.
+    bool started_trace = false;
+    if (ATRACE_ENABLED()) {
+      if (owner_ != nullptr) {  // Did the owner_ give the lock up?
+        std::ostringstream oss;
+        std::string name;
+        owner_->GetThreadName(name);
+        oss << PrettyContentionInfo(name,
+                                    owner_->GetTid(),
+                                    owners_method,
+                                    owners_dex_pc,
+                                    num_waiters);
+        // Add info for contending thread.
+        uint32_t pc;
+        ArtMethod* m = self->GetCurrentMethod(&pc);
+        const char* filename;
+        int32_t line_number;
+        TranslateLocation(m, pc, &filename, &line_number);
+        oss << " blocking from "
+            << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
+            << ":" << line_number << ")";
+        ATRACE_BEGIN(oss.str().c_str());
+        started_trace = true;
+      }
+    }
+
     monitor_lock_.Unlock(self);  // Let go of locks in order.
     self->SetMonitorEnterObject(GetObject());
     {
-      uint32_t original_owner_thread_id = 0u;
       ScopedThreadSuspension tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
+      uint32_t original_owner_thread_id = 0u;
       {
         // Reacquire monitor_lock_ without mutator_lock_ for Wait.
         MutexLock mu2(self, monitor_lock_);
         if (owner_ != nullptr) {  // Did the owner_ give the lock up?
           original_owner_thread_id = owner_->GetThreadId();
-          if (ATRACE_ENABLED()) {
-            std::ostringstream oss;
-            {
-              // Reacquire mutator_lock_ for getting the location info.
-              ScopedObjectAccess soa(self);
-              std::string name;
-              owner_->GetThreadName(name);
-              oss << PrettyContentionInfo(name,
-                                          owner_->GetTid(),
-                                          owners_method,
-                                          owners_dex_pc,
-                                          num_waiters);
-              // Add info for contending thread.
-              uint32_t pc;
-              ArtMethod* m = self->GetCurrentMethod(&pc);
-              const char* filename;
-              int32_t line_number;
-              TranslateLocation(m, pc, &filename, &line_number);
-              oss << " blocking from "
-                  << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
-                  << ":" << line_number << ")";
-            }
-            ATRACE_BEGIN(oss.str().c_str());
-          }
           monitor_contenders_.Wait(self);  // Still contended so wait.
         }
       }
@@ -448,9 +452,11 @@
             }
           }
         }
-        ATRACE_END();
       }
     }
+    if (started_trace) {
+      ATRACE_END();
+    }
     self->SetMonitorEnterObject(nullptr);
     monitor_lock_.Lock(self);  // Reacquire locks in order.
     --num_waiters_;
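
The monitor change hoists the systrace section start to before any lock is released, since building the label needs state protected by the locks, and pairs it with a started_trace flag so the end marker fires exactly once and only for a section that was opened. A distilled sketch; TraceEnabled/TraceBegin/TraceEnd are hypothetical stand-ins for the ATRACE macros:

#include <string>

// Stand-ins for ATRACE_ENABLED/ATRACE_BEGIN/ATRACE_END.
bool TraceEnabled() { return true; }
void TraceBegin(const char* label) { (void)label; }
void TraceEnd() {}

void WaitForContendedLock(const std::string& owner_info) {
  bool started_trace = false;
  if (TraceEnabled()) {
    // Build the label while we still hold the locks the info depends on.
    TraceBegin(("Lock contention: " + owner_info).c_str());
    started_trace = true;
  }
  // ... release locks in order, wait for the owner, reacquire locks ...
  if (started_trace) {
    TraceEnd();  // Ends only a section this call actually began.
  }
}
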
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index cd0e55f..1234933 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -188,7 +188,7 @@
     if (array == nullptr) {
       ScopedObjectAccess soa(env);
       for (auto& dex_file : dex_files) {
-        if (linker->FindDexCache(soa.Self(), *dex_file, true) != nullptr) {
+        if (linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
           dex_file.release();
         }
       }
@@ -230,7 +230,7 @@
       if (dex_file != nullptr) {
         // Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
         // are calls to DexFile.close while the ART DexFile is still in use.
-        if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
+        if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
           // Clear the element in the array so that we can call close again.
           long_dex_files->Set(i, 0);
           delete dex_file;
@@ -281,7 +281,13 @@
       StackHandleScope<1> hs(soa.Self());
       Handle<mirror::ClassLoader> class_loader(
           hs.NewHandle(soa.Decode<mirror::ClassLoader>(javaLoader)));
-      class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+      ObjPtr<mirror::DexCache> dex_cache =
+          class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+      if (dex_cache == nullptr) {
+        // OOME or InternalError (dexFile already registered with a different class loader).
+        soa.Self()->AssertPendingException();
+        return nullptr;
+      }
       ObjPtr<mirror::Class> result = class_linker->DefineClass(soa.Self(),
                                                                descriptor.c_str(),
                                                                hash,
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
index 981be68..0795960 100644
--- a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
@@ -128,7 +128,7 @@
   if (kIsDebugBuild) {
     ScopedObjectAccess soa(env);
     ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-    CHECK(class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr);
+    CHECK(!class_linker->IsDexFileRegistered(soa.Self(), *dex_file));
   }
   delete dex_file;
 }
@@ -153,7 +153,13 @@
     StackHandleScope<1> handle_scope(soa.Self());
     Handle<mirror::ClassLoader> class_loader(
         handle_scope.NewHandle(soa.Decode<mirror::ClassLoader>(loader)));
-    class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+    ObjPtr<mirror::DexCache> dex_cache =
+        class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+    if (dex_cache == nullptr) {
+      // OOME or InternalError (dexFile already registered with a different class loader).
+      soa.Self()->AssertPendingException();
+      return nullptr;
+    }
     ObjPtr<mirror::Class> result = class_linker->DefineClass(
         soa.Self(),
         class_descriptor,
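
Both registration call sites above now treat RegisterDexFile as fallible: a null result means an OOME or InternalError is pending and must be propagated instead of pressing on with a null dex cache. A schematic of the caller-side contract, with all names hypothetical:

#include <cstdio>

struct DexCacheLike { int id; };

// Hypothetical fallible registration: null means an exception is pending.
DexCacheLike* RegisterOrNull(bool fail) {
  static DexCacheLike cache{1};
  return fail ? nullptr : &cache;
}

const DexCacheLike* DefineClassFrom(bool fail_registration) {
  DexCacheLike* cache = RegisterOrNull(fail_registration);
  if (cache == nullptr) {
    // Bail out with the pending exception intact instead of continuing
    // with a null cache, which would have crashed later.
    return nullptr;
  }
  std::printf("defining class with cache %d\n", cache->id);
  return cache;
}
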
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 1af8619..24308d9 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -448,11 +448,8 @@
   Thread* const self = Thread::Current();
   for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
     CHECK(dex_file != nullptr);
-    ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file, true);
-    // If dex cache was deallocated, just continue.
-    if (dex_cache == nullptr) {
-      continue;
-    }
+    ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file);
+    CHECK(dex_cache != nullptr);  // Boot class path dex caches are never unloaded.
     for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
       ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(j));
       if (string != nullptr) {
@@ -515,7 +512,7 @@
     CHECK(dex_file != nullptr);
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file, nullptr)));
-
+    CHECK(dex_cache.Get() != nullptr);  // Boot class path dex caches are never unloaded.
     if (kPreloadDexCachesStrings) {
       for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
         PreloadDexCachesResolveString(dex_cache, dex::StringIndex(j), strings);
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index f1c350f..b1ed74a 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -65,12 +65,22 @@
       dex_cache->GetResolvedString(dex::StringIndex(string_index)));
 }
 
-static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index,
+static void DexCache_setResolvedType(JNIEnv* env,
+                                     jobject javaDexCache,
+                                     jint type_index,
                                      jobject type) {
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
-  CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
-  dex_cache->SetResolvedType(dex::TypeIndex(type_index), soa.Decode<mirror::Class>(type));
+  const DexFile& dex_file = *dex_cache->GetDexFile();
+  CHECK_LT(static_cast<size_t>(type_index), dex_file.NumTypeIds());
+  ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type);
+  if (t != nullptr && t->DescriptorEquals(dex_file.StringByTypeIdx(dex::TypeIndex(type_index)))) {
+    ClassTable* table =
+        Runtime::Current()->GetClassLinker()->FindClassTable(soa.Self(), dex_cache);
+    if (table != nullptr && table->TryInsert(t) == t) {
+      dex_cache->SetResolvedType(dex::TypeIndex(type_index), t);
+    }
+  }
 }
 
 static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
@@ -78,7 +88,10 @@
   ScopedFastNativeObjectAccess soa(env);
   ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
   CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
-  dex_cache->SetResolvedString(dex::StringIndex(string_index), soa.Decode<mirror::String>(string));
+  ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string);
+  if (s != nullptr) {
+    dex_cache->SetResolvedString(dex::StringIndex(string_index), s);
+  }
 }
 
 static JNINativeMethod gMethods[] = {
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index ea266d1..f1d6ff5 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -25,7 +25,7 @@
 #include "scoped_fast_native_object_access-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "ScopedLocalRef.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index fcb0175..195091f 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -25,7 +25,7 @@
 #include "ScopedUtfChars.h"
 #include "thread.h"
 #include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index ece0338..70cd6aa 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -22,7 +22,7 @@
 #include "mirror/object_array.h"
 #include "mirror/string.h"
 #include "scoped_fast_native_object_access-inl.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 
diff --git a/runtime/oat.h b/runtime/oat.h
index 532c968..e7e8328 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  static constexpr uint8_t kOatVersion[] = { '1', '0', '9', '\0' };  // Register mask change.
+  static constexpr uint8_t kOatVersion[] = { '1', '1', '0', '\0' };  // Clean up code info change.
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 33bd0f3..a46b470 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -22,6 +22,7 @@
 
 #include "android-base/stringprintf.h"
 
+#include "art_field-inl.h"
 #include "base/logging.h"
 #include "base/stl_util.h"
 #include "base/systrace.h"
@@ -32,11 +33,13 @@
 #include "handle_scope-inl.h"
 #include "jni_internal.h"
 #include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
 #include "oat_file_assistant.h"
 #include "obj_ptr-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
 #include "thread_list.h"
+#include "well_known_classes.h"
 
 namespace art {
 
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index fd84426..b4e4285 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -44,7 +44,7 @@
     CodeInfoEncoding encoding = code_info.ExtractEncoding();
     StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
     if (stack_map.IsValid()) {
-      return stack_map.GetDexPc(encoding.stack_map_encoding);
+      return stack_map.GetDexPc(encoding.stack_map.encoding);
     }
   } else {
     DCHECK(method->IsNative());
@@ -80,7 +80,7 @@
                                    : code_info.GetStackMapForDexPc(dex_pc, encoding);
   if (stack_map.IsValid()) {
     return reinterpret_cast<uintptr_t>(entry_point) +
-           stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA);
+           stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA);
   }
   if (abort_on_failure) {
     ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 2f51e27..bdaad20 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -46,7 +46,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "ScopedUtfChars.h"
 #include "mirror/class_loader.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "../../libcore/ojluni/src/main/native/jvm.h"  // TODO(narayan): fix it
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
index c2f1792..afec0bf 100644
--- a/runtime/openjdkjvmti/ti_class_loader.cc
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -62,7 +62,7 @@
                                          art::Handle<art::mirror::ClassLoader> loader,
                                          const art::DexFile* dex_file) {
   art::ScopedObjectAccessUnchecked soa(self);
-  art::StackHandleScope<2> hs(self);
+  art::StackHandleScope<3> hs(self);
   if (art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
     art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, *dex_file);
     return true;
@@ -72,8 +72,9 @@
   if (java_dex_file_obj.IsNull()) {
     return false;
   }
+  art::Handle<art::mirror::LongArray> old_cookie(hs.NewHandle(GetDexFileCookie(java_dex_file_obj)));
   art::Handle<art::mirror::LongArray> cookie(hs.NewHandle(
-      AllocateNewDexFileCookie(self, java_dex_file_obj, dex_file)));
+      AllocateNewDexFileCookie(self, old_cookie, dex_file)));
   if (cookie.IsNull()) {
     return false;
   }
@@ -99,12 +100,8 @@
   }
 }
 
-// TODO Really wishing I had that mirror of java.lang.DexFile now.
-art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie(
-    art::Thread* self,
-    art::Handle<art::mirror::Object> java_dex_file_obj,
-    const art::DexFile* dex_file) {
-  art::StackHandleScope<2> hs(self);
+art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::GetDexFileCookie(
+    art::Handle<art::mirror::Object> java_dex_file_obj) {
   // mCookie is nulled out if the DexFile has been closed but mInternalCookie sticks around until
   // the object is finalized. Since they always point to the same array if mCookie is not null we
   // just use the mInternalCookie field. We will update one or both of these fields later.
@@ -113,9 +110,15 @@
       "mInternalCookie", "Ljava/lang/Object;");
   // TODO Add check that mCookie is either null or same as mInternalCookie
   CHECK(internal_cookie_field != nullptr);
-  art::Handle<art::mirror::LongArray> cookie(
-      hs.NewHandle(internal_cookie_field->GetObject(java_dex_file_obj.Get())->AsLongArray()));
-  // TODO Maybe make these non-fatal.
+  return internal_cookie_field->GetObject(java_dex_file_obj.Get())->AsLongArray();
+}
+
+// TODO Really wishing I had that mirror of java.lang.DexFile now.
+art::ObjPtr<art::mirror::LongArray> ClassLoaderHelper::AllocateNewDexFileCookie(
+    art::Thread* self,
+    art::Handle<art::mirror::LongArray> cookie,
+    const art::DexFile* dex_file) {
+  art::StackHandleScope<1> hs(self);
   CHECK(cookie.Get() != nullptr);
   CHECK_GE(cookie->GetLength(), 1);
   art::Handle<art::mirror::LongArray> new_cookie(
@@ -128,8 +131,9 @@
   // TODO Should I clear this field?
   // TODO This is a really crappy thing here with the first element being different.
   new_cookie->SetWithoutChecks<false>(0, cookie->GetWithoutChecks(0));
+  // This must match the casts in runtime/native/dalvik_system_DexFile.cc:ConvertDexFilesToJavaArray
   new_cookie->SetWithoutChecks<false>(
-      1, static_cast<int64_t>(reinterpret_cast<intptr_t>(dex_file)));
+      1, static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_file)));
   new_cookie->Memcpy(2, cookie.Get(), 1, cookie->GetLength() - 1);
   return new_cookie.Get();
 }
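
With GetDexFileCookie split out, the allocation itself is pure array surgery: keep slot 0, write the new DexFile pointer (via uintptr_t, matching the reader's cast) into slot 1, and shift the old entries up by one. The same layout in miniature, with std::vector<int64_t> standing in for mirror::LongArray:

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> AllocateNewCookie(const std::vector<int64_t>& old_cookie,
                                       const void* new_dex_file) {
  assert(!old_cookie.empty());  // Mirrors the CHECK_GE on the array length.
  std::vector<int64_t> new_cookie;
  new_cookie.reserve(old_cookie.size() + 1);
  new_cookie.push_back(old_cookie[0]);  // Slot 0 is special; keep it first.
  // Same cast chain as the writer above, so readers can round-trip it.
  new_cookie.push_back(
      static_cast<int64_t>(reinterpret_cast<uintptr_t>(new_dex_file)));
  // Shift the remaining old entries up by one slot.
  new_cookie.insert(new_cookie.end(), old_cookie.begin() + 1,
                    old_cookie.end());
  return new_cookie;
}
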
diff --git a/runtime/openjdkjvmti/ti_class_loader.h b/runtime/openjdkjvmti/ti_class_loader.h
index 17ed0eb..1ac4988 100644
--- a/runtime/openjdkjvmti/ti_class_loader.h
+++ b/runtime/openjdkjvmti/ti_class_loader.h
@@ -82,9 +82,12 @@
       art::Thread* self, art::Handle<art::mirror::ClassLoader> loader)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+  static art::ObjPtr<art::mirror::LongArray> GetDexFileCookie(
+      art::Handle<art::mirror::Object> java_dex_file) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
   static art::ObjPtr<art::mirror::LongArray> AllocateNewDexFileCookie(
       art::Thread* self,
-      art::Handle<art::mirror::Object> java_dex_file,
+      art::Handle<art::mirror::LongArray> old_dex_file_cookie,
       const art::DexFile* new_dex_file) REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   static void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 7b2521d..fe3e52b 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -303,11 +303,11 @@
 
           art::Thread* thread = FindThread(info);
           if (thread != nullptr) {
-            art::mirror::Object* thread_obj = thread->GetPeer();
+            art::mirror::Object* thread_obj;
             if (thread->IsStillStarting()) {
               thread_obj = nullptr;
             } else {
-              thread_obj = thread->GetPeer();
+              thread_obj = thread->GetPeerFromOtherThread();
             }
             if (thread_obj != nullptr) {
               ref_info->jni_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
@@ -333,11 +333,11 @@
 
           art::Thread* thread = FindThread(info);
           if (thread != nullptr) {
-            art::mirror::Object* thread_obj = thread->GetPeer();
+            art::mirror::Object* thread_obj;
             if (thread->IsStillStarting()) {
               thread_obj = nullptr;
             } else {
-              thread_obj = thread->GetPeer();
+              thread_obj = thread->GetPeerFromOtherThread();
             }
             if (thread_obj != nullptr) {
               ref_info->stack_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
diff --git a/runtime/openjdkjvmti/ti_monitor.cc b/runtime/openjdkjvmti/ti_monitor.cc
index b827683..645faea 100644
--- a/runtime/openjdkjvmti/ti_monitor.cc
+++ b/runtime/openjdkjvmti/ti_monitor.cc
@@ -54,7 +54,7 @@
   JvmtiMonitor() : owner_(nullptr), count_(0) {
   }
 
-  static bool Destroy(art::Thread* self, JvmtiMonitor* monitor) {
+  static bool Destroy(art::Thread* self, JvmtiMonitor* monitor) NO_THREAD_SAFETY_ANALYSIS {
     // Check whether this thread holds the monitor, or nobody does.
     art::Thread* owner_thread = monitor->owner_.load(std::memory_order_relaxed);
     if (owner_thread != nullptr && self != owner_thread) {
@@ -71,7 +71,7 @@
     return true;
   }
 
-  void MonitorEnter(art::Thread* self) {
+  void MonitorEnter(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
     // Check for recursive enter.
     if (IsOwner(self)) {
       count_++;
@@ -86,7 +86,7 @@
     count_ = 1;
   }
 
-  bool MonitorExit(art::Thread* self) {
+  bool MonitorExit(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
     if (!IsOwner(self)) {
       return false;
     }
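
The NO_THREAD_SAFETY_ANALYSIS annotations are needed once std::mutex is annotated: JvmtiMonitor acquires in MonitorEnter and releases in MonitorExit, and clang's analysis cannot match an acquire in one function with a release in another. A minimal sketch of the opt-out, assuming clang's attribute spelling:

#include <mutex>

#if defined(__clang__)
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define NO_THREAD_SAFETY_ANALYSIS
#endif

class RecursiveGate {
 public:
  void Enter() NO_THREAD_SAFETY_ANALYSIS {
    lock_.lock();    // Held until a later Exit() call, not until scope end.
  }
  void Exit() NO_THREAD_SAFETY_ANALYSIS {
    lock_.unlock();  // Releases a lock this function never acquired.
  }

 private:
  std::mutex lock_;
};
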
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index 4b8108a..74c9aed8 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -48,7 +48,7 @@
 #include "jit/jit_code_cache.h"
 #include "jni_env_ext-inl.h"
 #include "jvmti_allocator.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
 #include "mirror/object.h"
 #include "object_lock.h"
@@ -56,6 +56,8 @@
 #include "ScopedLocalRef.h"
 #include "ti_class_loader.h"
 #include "transform.h"
+#include "verifier/method_verifier.h"
+#include "verifier/verifier_log_mode.h"
 
 namespace openjdkjvmti {
 
@@ -121,6 +123,7 @@
         new_obsolete_method->CopyFrom(old_method, ptr_size);
         DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
         new_obsolete_method->SetIsObsolete();
+        new_obsolete_method->SetDontCompile();
         obsolete_maps_->insert({old_method, new_obsolete_method});
         // Update JIT Data structures to point to the new method.
         art::jit::Jit* jit = art::Runtime::Current()->GetJit();
@@ -378,7 +381,7 @@
 
 art::mirror::DexCache* Redefiner::ClassRedefinition::CreateNewDexCache(
     art::Handle<art::mirror::ClassLoader> loader) {
-  return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get());
+  return driver_->runtime_->GetClassLinker()->RegisterDexFile(*dex_file_, loader.Get()).Ptr();
 }
 
 void Redefiner::RecordFailure(jvmtiError result,
@@ -449,9 +452,13 @@
   CallbackCtx ctx(linker->GetAllocatorForClassLoader(art_klass->GetClassLoader()));
   // Add all the declared methods to the map
   for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
-    ctx.obsolete_methods.insert(&m);
-    // TODO Allow this or check in IsModifiableClass.
-    DCHECK(!m.IsIntrinsic());
+    // Since native methods cannot have their implementation changed, we shouldn't bother making
+    // obsolete versions of them.
+    if (!m.IsNative()) {
+      ctx.obsolete_methods.insert(&m);
+      // TODO Allow this or check in IsModifiableClass.
+      DCHECK(!m.IsIntrinsic());
+    }
   }
   {
     art::MutexLock mu(driver_->self_, *art::Locks::thread_list_lock_);
@@ -490,6 +497,143 @@
   }
 }
 
+// Try to get the declared method: first look for a virtual method, then fall back
+// to a direct method if that's not found.
+static art::ArtMethod* FindMethod(art::Handle<art::mirror::Class> klass,
+                                  const char* name,
+                                  art::Signature sig) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  art::ArtMethod* m = klass->FindDeclaredVirtualMethod(name, sig, art::kRuntimePointerSize);
+  if (m == nullptr) {
+    m = klass->FindDeclaredDirectMethod(name, sig, art::kRuntimePointerSize);
+  }
+  return m;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameMethods() {
+  art::StackHandleScope<1> hs(driver_->self_);
+  art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+  DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+
+  art::ClassDataItemIterator new_iter(*dex_file_,
+                                      dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+
+  // Make sure we have the same number of methods.
+  uint32_t num_new_method = new_iter.NumVirtualMethods() + new_iter.NumDirectMethods();
+  uint32_t num_old_method = h_klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size();
+  if (num_new_method != num_old_method) {
+    bool bigger = num_new_method > num_old_method;
+    RecordFailure(bigger ? ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+                         : ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+                  StringPrintf("Total number of declared methods changed from %d to %d",
+                               num_old_method, num_new_method));
+    return false;
+  }
+
+  // Skip all of the fields. We should have already checked this.
+  while (new_iter.HasNextStaticField() || new_iter.HasNextInstanceField()) {
+    new_iter.Next();
+  }
+  // Check each of the methods. NB we don't need to specifically check for removals since the two
+  // dex files have the same number of methods, which means there must be an equal number of
+  // additions and removals.
+  for (; new_iter.HasNextVirtualMethod() || new_iter.HasNextDirectMethod(); new_iter.Next()) {
+    // Get the data on the method we are searching for
+    const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(new_iter.GetMemberIndex());
+    const char* new_method_name = dex_file_->GetMethodName(new_method_id);
+    art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
+    art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
+    // If we got past the check for the same number of methods above, that means there must be at
+    // least one added and one removed method. We return the ADDED failure message since it is
+    // easier to produce a useful error report for it.
+    if (old_method == nullptr) {
+      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+                    StringPrintf("Unknown method '%s' (sig: %s) was added!",
+                                  new_method_name,
+                                  new_method_signature.ToString().c_str()));
+      return false;
+    }
+    // Since direct methods have different flags than virtual ones (specifically direct methods must
+    // have kAccPrivate or kAccStatic or kAccConstructor flags), we can tell if a method changes from
+    // virtual to direct.
+    uint32_t new_flags = new_iter.GetMethodAccessFlags();
+    if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
+      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
+                    StringPrintf("method '%s' (sig: %s) had different access flags",
+                                 new_method_name,
+                                 new_method_signature.ToString().c_str()));
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameFields() {
+  art::StackHandleScope<1> hs(driver_->self_);
+  art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+  DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+  art::ClassDataItemIterator new_iter(*dex_file_,
+                                      dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+  const art::DexFile& old_dex_file = h_klass->GetDexFile();
+  art::ClassDataItemIterator old_iter(old_dex_file,
+                                      old_dex_file.GetClassData(*h_klass->GetClassDef()));
+  // Instance and static fields can be differentiated by their flags so no need to check them
+  // separately.
+  while (new_iter.HasNextInstanceField() || new_iter.HasNextStaticField()) {
+    // Get the data on the field we are searching for.
+    const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_iter.GetMemberIndex());
+    const char* new_field_name = dex_file_->GetFieldName(new_field_id);
+    const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
+
+    if (!(old_iter.HasNextInstanceField() || old_iter.HasNextStaticField())) {
+      // We are missing the old version of this field!
+      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+                    StringPrintf("Unknown field '%s' (type: %s) added!",
+                                  new_field_name,
+                                  new_field_type));
+      return false;
+    }
+
+    const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter.GetMemberIndex());
+    const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
+    const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
+
+    // Check name and type.
+    if (strcmp(old_field_name, new_field_name) != 0 ||
+        strcmp(old_field_type, new_field_type) != 0) {
+      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+                    StringPrintf("Field changed from '%s' (sig: %s) to '%s' (sig: %s)!",
+                                  old_field_name,
+                                  old_field_type,
+                                  new_field_name,
+                                  new_field_type));
+      return false;
+    }
+
+    // Since static fields have different flags than instance ones (specifically static fields must
+    // have the kAccStatic flag), we can tell if a field changes from static to instance.
+    if (new_iter.GetFieldAccessFlags() != old_iter.GetFieldAccessFlags()) {
+      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+                    StringPrintf("Field '%s' (sig: %s) had different access flags",
+                                  new_field_name,
+                                  new_field_type));
+      return false;
+    }
+
+    new_iter.Next();
+    old_iter.Next();
+  }
+  if (old_iter.HasNextInstanceField() || old_iter.HasNextStaticField()) {
+    RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+                  StringPrintf("field '%s' (sig: %s) is missing!",
+                                old_dex_file.GetFieldName(old_dex_file.GetFieldId(
+                                    old_iter.GetMemberIndex())),
+                                old_dex_file.GetFieldTypeDescriptor(old_dex_file.GetFieldId(
+                                    old_iter.GetMemberIndex()))));
+    return false;
+  }
+  return true;
+}
+
 bool Redefiner::ClassRedefinition::CheckClass() {
   // TODO Might just want to put it in a ObjPtr and NoSuspend assert.
   art::StackHandleScope<1> hs(driver_->self_);
@@ -565,7 +709,6 @@
     }
   }
   LOG(WARNING) << "No verification is done on annotations of redefined classes.";
-  LOG(WARNING) << "Bytecodes of redefinitions are not verified.";
 
   return true;
 }
@@ -628,26 +771,28 @@
   }
 
   // TODO Maybe make an iterable view type to simplify using this.
-  art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index)
+  art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index) const
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
   }
-  art::mirror::Object* GetJavaDexFile(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  art::mirror::Object* GetJavaDexFile(jint klass_index) const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return GetSlot(klass_index, kSlotJavaDexFile);
   }
-  art::mirror::LongArray* GetNewDexFileCookie(jint klass_index)
+  art::mirror::LongArray* GetNewDexFileCookie(jint klass_index) const
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return art::down_cast<art::mirror::LongArray*>(GetSlot(klass_index, kSlotNewDexFileCookie));
   }
-  art::mirror::DexCache* GetNewDexCache(jint klass_index)
+  art::mirror::DexCache* GetNewDexCache(jint klass_index) const
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return art::down_cast<art::mirror::DexCache*>(GetSlot(klass_index, kSlotNewDexCache));
   }
-  art::mirror::Class* GetMirrorClass(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  art::mirror::Class* GetMirrorClass(jint klass_index) const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
   }
 
-  art::mirror::ByteArray* GetOriginalDexFileBytes(jint klass_index)
+  art::mirror::ByteArray* GetOriginalDexFileBytes(jint klass_index) const
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return art::down_cast<art::mirror::ByteArray*>(GetSlot(klass_index, kSlotOrigDexFile));
   }
@@ -677,15 +822,15 @@
     SetSlot(klass_index, kSlotOrigDexFile, bytes);
   }
 
-  int32_t Length() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  int32_t Length() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return arr_->GetLength() / kNumSlots;
   }
 
  private:
-  art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+  mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
 
   art::mirror::Object* GetSlot(jint klass_index,
-                               DataSlot slot) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                               DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
     DCHECK_LT(klass_index, Length());
     return arr_->Get((kNumSlots * klass_index) + slot);
   }
@@ -701,6 +846,85 @@
   DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
 };
 
+// TODO Stash and update soft failure state
+bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
+                                                     const RedefinitionDataHolder& holder) {
+  DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+  art::StackHandleScope<2> hs(driver_->self_);
+  std::string error;
+  // TODO Make verification log level lower
+  art::verifier::MethodVerifier::FailureKind failure =
+      art::verifier::MethodVerifier::VerifyClass(driver_->self_,
+                                                 dex_file_.get(),
+                                                 hs.NewHandle(holder.GetNewDexCache(klass_index)),
+                                                 hs.NewHandle(GetClassLoader()),
+                                                 dex_file_->GetClassDef(0), /*class_def*/
+                                                 nullptr, /*compiler_callbacks*/
+                                                 false, /*allow_soft_failures*/
+                                                 /*log_level*/
+                                                 art::verifier::HardFailLogMode::kLogWarning,
+                                                 &error);
+  bool passes = failure == art::verifier::MethodVerifier::kNoFailure;
+  if (!passes) {
+    RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
+  }
+  return passes;
+}
+
+// Looks through the previously allocated cookies to see if we need to update them with another new
+// dex file. This ensures that even if multiple classes with the same class loader are redefined at
+// once, they are all added to the class loader.
+bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
+    int32_t klass_index,
+    art::Handle<art::mirror::ClassLoader> source_class_loader,
+    art::Handle<art::mirror::Object> dex_file_obj,
+    /*out*/RedefinitionDataHolder* holder) {
+  art::StackHandleScope<2> hs(driver_->self_);
+  art::MutableHandle<art::mirror::LongArray> old_cookie(
+      hs.NewHandle<art::mirror::LongArray>(nullptr));
+  bool has_older_cookie = false;
+  // See if we already have a cookie that a previous redefinition got from the same classloader.
+  for (int32_t i = 0; i < klass_index; i++) {
+    if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+      // Since every instance of this classloader should have the same cookie associated with it we
+      // can stop looking here.
+      has_older_cookie = true;
+      old_cookie.Assign(holder->GetNewDexFileCookie(i));
+      break;
+    }
+  }
+  if (old_cookie.IsNull()) {
+    // No older cookie; get it directly from the dex_file_obj.
+    // We should not have seen this classloader elsewhere.
+    CHECK(!has_older_cookie);
+    old_cookie.Assign(ClassLoaderHelper::GetDexFileCookie(dex_file_obj));
+  }
+  // Use the old cookie to generate the new one with the new DexFile* added in.
+  art::Handle<art::mirror::LongArray>
+      new_cookie(hs.NewHandle(ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
+                                                                          old_cookie,
+                                                                          dex_file_.get())));
+  // Make sure the allocation worked.
+  if (new_cookie.IsNull()) {
+    return false;
+  }
+
+  // Save the cookie.
+  holder->SetNewDexFileCookie(klass_index, new_cookie.Get());
+  // If there are other copies of this same classloader we need to make sure that we all have the
+  // same cookie.
+  if (has_older_cookie) {
+    for (int32_t i = 0; i < klass_index; i++) {
+      // We will let the GC take care of the cookie we allocated for this one.
+      if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+        holder->SetNewDexFileCookie(i, new_cookie.Get());
+      }
+    }
+  }
+
+  return true;
+}
+
 bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
     int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
   art::ScopedObjectAccessUnchecked soa(driver_->self_);
@@ -719,11 +943,8 @@
       RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
       return false;
     }
-    holder->SetNewDexFileCookie(klass_index,
-                                ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
-                                                                            dex_file_obj,
-                                                                            dex_file_.get()).Ptr());
-    if (holder->GetNewDexFileCookie(klass_index) == nullptr) {
+    // Allocate the new dex file cookie.
+    if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) {
       driver_->self_->AssertPendingOOMException();
       driver_->self_->ClearException();
       RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
@@ -732,7 +953,7 @@
   }
   holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
   if (holder->GetNewDexCache(klass_index) == nullptr) {
-    driver_->self_->AssertPendingOOMException();
+    driver_->self_->AssertPendingException();
     driver_->self_->ClearException();
     RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
     return false;
@@ -789,6 +1010,17 @@
   }
 }
 
+bool Redefiner::CheckAllClassesAreVerified(const RedefinitionDataHolder& holder) {
+  int32_t cnt = 0;
+  for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+    if (!redef.CheckVerification(cnt, holder)) {
+      return false;
+    }
+    cnt++;
+  }
+  return true;
+}
+
 jvmtiError Redefiner::Run() {
   art::StackHandleScope<1> hs(self_);
   // Allocate an array to hold onto all java temporary objects associated with this redefinition.
@@ -808,7 +1040,8 @@
   // try loop.
   if (!CheckAllRedefinitionAreValid() ||
       !EnsureAllClassAllocationsFinished() ||
-      !FinishAllRemainingAllocations(holder)) {
+      !FinishAllRemainingAllocations(holder) ||
+      !CheckAllClassesAreVerified(holder)) {
     // TODO Null out the ClassExt fields we allocated (if possible, might be racing with another
     // redefineclass call which made it even bigger. Leak shouldn't be huge (2x array of size
     // declared_methods_.length) but would be good to get rid of. All other allocations should be
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5bcaef8..421d22e 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -145,6 +145,13 @@
     bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder)
         REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+    bool AllocateAndRememberNewDexFileCookie(
+        int32_t klass_index,
+        art::Handle<art::mirror::ClassLoader> source_class_loader,
+        art::Handle<art::mirror::Object> dex_file_obj,
+        /*out*/RedefinitionDataHolder* holder)
+          REQUIRES_SHARED(art::Locks::mutator_lock_);
+
     void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
         REQUIRES(art::Locks::mutator_lock_);
 
@@ -158,6 +165,11 @@
     // data has not been modified in an incompatible manner.
     bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+    // Checks that the contained class can be successfully verified.
+    bool CheckVerification(int32_t klass_index,
+                           const RedefinitionDataHolder& holder)
+        REQUIRES_SHARED(art::Locks::mutator_lock_);
+
     // Preallocates all needed allocations in klass so that we can pause execution safely.
     // TODO We should be able to free the arrays if they end up not being used. Investigate doing
     // this in the future. For now we will just take the memory hit.
@@ -170,17 +182,11 @@
     // Checks that the class can even be redefined.
     bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
-    // Checks that the dex file does not add/remove methods.
-    bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
-      LOG(WARNING) << "methods are not checked for modification currently";
-      return true;
-    }
+    // Checks that the dex file does not add/remove methods, or change their modifiers or types.
+    bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
-    // Checks that the dex file does not modify fields
-    bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
-      LOG(WARNING) << "Fields are not checked for modification currently";
-      return true;
-    }
+    // Checks that the dex file does not modify field types or modifiers.
+    bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
     void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
                            art::ObjPtr<art::mirror::LongArray> new_cookie)
@@ -238,6 +244,8 @@
   jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
   bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
+  bool CheckAllClassesAreVerified(const RedefinitionDataHolder& holder)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
   bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
   bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 4cf55a6..b5a6c6e 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -377,7 +377,8 @@
     jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
     jvmtiStackInfo& new_stack_info = stack_info[i];
 
-    jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+    jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
+        threads[i]->GetPeerFromOtherThread());
     new_stack_info.thread = thread_peer;
 
     if (old_stack_info.frame_count > 0) {
@@ -453,7 +454,7 @@
         }
 
         // Get the peer, and check whether we know it.
-        art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+        art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
         for (size_t index = 0; index != handles.size(); ++index) {
           if (peer == handles[index].Get()) {
             // Found the thread.
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 8327b5d..f8f8fa6 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -202,7 +202,7 @@
 
     info_ptr->is_daemon = self->IsDaemon();
 
-    art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+    art::ObjPtr<art::mirror::Object> peer = self->GetPeerFromOtherThread();
 
     // ThreadGroup.
     if (peer != nullptr) {
@@ -460,7 +460,7 @@
       continue;
     }
 
-    art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+    art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
     if (peer != nullptr) {
       peers.push_back(peer);
     }
diff --git a/runtime/openjdkjvmti/ti_threadgroup.cc b/runtime/openjdkjvmti/ti_threadgroup.cc
index 35b1bfd..e63ce65 100644
--- a/runtime/openjdkjvmti/ti_threadgroup.cc
+++ b/runtime/openjdkjvmti/ti_threadgroup.cc
@@ -174,7 +174,7 @@
     if (t->IsStillStarting()) {
       continue;
     }
-    art::ObjPtr<art::mirror::Object> peer = t->GetPeer();
+    art::ObjPtr<art::mirror::Object> peer = t->GetPeerFromOtherThread();
     if (peer == nullptr) {
       continue;
     }
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index bf99509..72e0500 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -441,7 +441,7 @@
           const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
           value = *reinterpret_cast<const uint32_t*>(addr);
           uint32_t bit = (offset >> 2);
-          if (bit < encoding.stack_mask_size_in_bits && stack_mask.LoadBit(bit)) {
+          if (bit < encoding.stack_mask.encoding.BitSize() && stack_mask.LoadBit(bit)) {
             is_reference = true;
           }
           break;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 693b8f4..9609bee 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -672,24 +672,6 @@
 
   started_ = true;
 
-  // Create the JIT either if we have to use JIT compilation or save profiling info.
-  // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
-  // recoding profiles. Maybe we should consider changing the name to be more clear it's
-  // not only about compiling. b/28295073.
-  if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
-    std::string error_msg;
-    if (!IsZygote()) {
-    // If we are the zygote then we need to wait until after forking to create the code cache
-    // due to SELinux restrictions on r/w/x memory regions.
-      CreateJit();
-    } else if (jit_options_->UseJitCompilation()) {
-      if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
-        // Try to load compiler pre zygote to reduce PSS. b/27744947
-        LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
-      }
-    }
-  }
-
   if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
     ScopedObjectAccess soa(self);
     StackHandleScope<2> hs(soa.Self());
@@ -714,6 +696,27 @@
 
   Thread::FinishStartup();
 
+  // Create the JIT either if we have to use JIT compilation or save profiling info. This is
+  // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
+  // ThreadGroup to exist.
+  //
+  // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
+  // recording profiles. Maybe we should consider changing the name to make it clearer that
+  // it's not only about compiling. b/28295073.
+  if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
+    std::string error_msg;
+    if (!IsZygote()) {
+      // The zygote must wait until after forking to create the code cache due to SELinux
+      // restrictions on r/w/x memory regions, so only create the JIT now if we are not the
+      // zygote.
+      CreateJit();
+    } else if (jit_options_->UseJitCompilation()) {
+      if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
+        // Try to load compiler pre zygote to reduce PSS. b/27744947
+        LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
+      }
+    }
+  }
+
   // Send the start phase event. We have to wait till here as this is when the main thread peer
   // has just been generated, important root clinits have been run and JNI is completely functional.
   {
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index d4469f4..000da59 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -110,6 +110,10 @@
   Locks::mutator_lock_->AssertSharedHeld(Self());
 }
 
+inline ScopedObjectAccess::ScopedObjectAccess(JNIEnv* env) : ScopedObjectAccessUnchecked(env) {}
+inline ScopedObjectAccess::ScopedObjectAccess(Thread* self) : ScopedObjectAccessUnchecked(self) {}
+inline ScopedObjectAccess::~ScopedObjectAccess() {}
+
 inline ScopedThreadSuspension::ScopedThreadSuspension(Thread* self, ThreadState suspended_state)
     : self_(self), suspended_state_(suspended_state) {
   DCHECK(self_ != nullptr);
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b499258..24199f7 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -159,16 +159,14 @@
  public:
   ALWAYS_INLINE explicit ScopedObjectAccess(JNIEnv* env)
       REQUIRES(!Locks::thread_suspend_count_lock_)
-      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
-      : ScopedObjectAccessUnchecked(env) {}
+      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
 
   ALWAYS_INLINE explicit ScopedObjectAccess(Thread* self)
       REQUIRES(!Locks::thread_suspend_count_lock_)
-      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
-      : ScopedObjectAccessUnchecked(self) {}
+      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
 
   // Base class will release share of lock. Invoked after this destructor.
-  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE {}
+  ~ScopedObjectAccess() UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE;
 
  private:
   // TODO: remove this constructor. It is used by check JNI's ScopedCheck to make it believe that
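Moving these constructor and destructor bodies out of line keeps the lock annotations on the declarations while the definitions live in scoped_thread_state_change-inl.h. A standalone sketch of the same clang thread-safety pattern, written with raw attributes instead of ART's macros so it compiles on its own under clang -Wthread-safety:

    struct __attribute__((lockable)) FakeMutex {};  // Stand-in for Locks::mutator_lock_.
    FakeMutex fake_mutator_lock;

    // The annotations live on the declarations...
    struct __attribute__((scoped_lockable)) ScopedShared {
      ScopedShared() __attribute__((shared_lock_function(fake_mutator_lock)));
      ~ScopedShared() __attribute__((unlock_function(fake_mutator_lock)));
    };

    // ...while the bodies may be defined elsewhere without repeating the attributes,
    // exactly as the diff does for ScopedObjectAccess.
    ScopedShared::ScopedShared() {}
    ScopedShared::~ScopedShared() {}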
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 5ad00a4..d7ba1d7 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -37,7 +37,7 @@
 #include "runtime.h"
 #include "thread.h"
 #include "thread_list.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 
 namespace art {
 
@@ -96,13 +96,17 @@
   return false;
 }
 
-StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
-    : StackVisitor(thread, context, walk_kind, 0) {}
+StackVisitor::StackVisitor(Thread* thread,
+                           Context* context,
+                           StackWalkKind walk_kind,
+                           bool check_suspended)
+    : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}
 
 StackVisitor::StackVisitor(Thread* thread,
                            Context* context,
                            StackWalkKind walk_kind,
-                           size_t num_frames)
+                           size_t num_frames,
+                           bool check_suspended)
     : thread_(thread),
       walk_kind_(walk_kind),
       cur_shadow_frame_(nullptr),
@@ -112,8 +116,11 @@
       num_frames_(num_frames),
       cur_depth_(0),
       current_inlining_depth_(0),
-      context_(context) {
-  DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+      context_(context),
+      check_suspended_(check_suspended) {
+  if (check_suspended_) {
+    DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+  }
 }
 
 InlineInfo StackVisitor::GetCurrentInlineInfo() const {
@@ -138,7 +145,7 @@
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
       return GetResolvedMethod(*GetCurrentQuickFrame(),
                                inline_info,
-                               encoding.inline_info_encoding,
+                               encoding.inline_info.encoding,
                                depth_in_stack_map);
     } else {
       return *cur_quick_frame_;
@@ -155,7 +162,7 @@
       size_t depth_in_stack_map = current_inlining_depth_ - 1;
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
       CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
-      return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info_encoding,
+      return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info.encoding,
                                                     depth_in_stack_map);
     } else if (cur_oat_quick_method_header_ == nullptr) {
       return DexFile::kDexNoIndex;
@@ -788,7 +795,9 @@
 
 template <StackVisitor::CountTransitions kCount>
 void StackVisitor::WalkStack(bool include_transitions) {
-  DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+  if (check_suspended_) {
+    DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+  }
   CHECK_EQ(cur_depth_, 0U);
   bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
   uint32_t instrumentation_stack_depth = 0;
@@ -817,10 +826,10 @@
           uint32_t native_pc_offset =
               cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
           StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
-          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
+          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
             InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
             DCHECK_EQ(current_inlining_depth_, 0u);
-            for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info_encoding);
+            for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
                  current_inlining_depth_ != 0;
                  --current_inlining_depth_) {
               bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index 9dceb29..90a0aee 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -590,7 +590,10 @@
   };
 
  protected:
-  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind);
+  StackVisitor(Thread* thread,
+               Context* context,
+               StackWalkKind walk_kind,
+               bool check_suspended = true);
 
   bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -797,7 +800,11 @@
 
  private:
   // Private constructor known in the case that num_frames_ has already been computed.
-  StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
+  StackVisitor(Thread* thread,
+               Context* context,
+               StackWalkKind walk_kind,
+               size_t num_frames,
+               bool check_suspended = true)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
@@ -851,6 +858,7 @@
 
  protected:
   Context* const context_;
+  const bool check_suspended_;
 };
 
 }  // namespace art
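The new check_suspended flag threads through both constructors so that callers such as the force-dump path can walk a thread that is not formally suspended. A hypothetical visitor built on it (assumes the usual ART headers; names are illustrative):

    class FrameCounter : public art::StackVisitor {
     public:
      explicit FrameCounter(art::Thread* thread)
          REQUIRES_SHARED(art::Locks::mutator_lock_)
          : art::StackVisitor(thread,
                              /* context */ nullptr,
                              art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                              /* check_suspended */ false),  // Skip the IsSuspended() DCHECKs.
            count_(0) {}

      bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
        count_++;
        return true;  // Keep walking.
      }

      size_t count_;
    };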
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 4e7c3f4..d657311 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -126,9 +126,9 @@
       << ", number_of_stack_maps=" << number_of_stack_maps
       << ")\n";
   ScopedIndentation indent1(vios);
-  encoding.stack_map_encoding.Dump(vios);
+  encoding.stack_map.encoding.Dump(vios);
   if (HasInlineInfo(encoding)) {
-    encoding.inline_info_encoding.Dump(vios);
+    encoding.inline_info.encoding.Dump(vios);
   }
   // Display the Dex register location catalog.
   GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
@@ -193,22 +193,22 @@
                     uint16_t number_of_dex_registers,
                     InstructionSet instruction_set,
                     const std::string& header_suffix) const {
-  StackMapEncoding stack_map_encoding = encoding.stack_map_encoding;
+  StackMapEncoding stack_map_encoding = encoding.stack_map.encoding;
   const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
   vios->Stream()
       << "StackMap" << header_suffix
       << std::hex
       << " [native_pc=0x" << code_offset + pc_offset << "]"
-      << " [entry_size=0x" << encoding.stack_map_encoding.BitSize() << " bits]"
+      << " [entry_size=0x" << encoding.stack_map.encoding.BitSize() << " bits]"
       << " (dex_pc=0x" << GetDexPc(stack_map_encoding)
       << ", native_pc_offset=0x" << pc_offset
       << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
-      << ", inline_info_offset=0x" << GetInlineDescriptorOffset(stack_map_encoding)
+      << ", inline_info_offset=0x" << GetInlineInfoIndex(stack_map_encoding)
       << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
       << std::dec
       << ", stack_mask=0b";
   BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
-  for (size_t i = 0, e = encoding.stack_mask_size_in_bits; i < e; ++i) {
+  for (size_t i = 0, e = encoding.stack_mask.encoding.BitSize(); i < e; ++i) {
     vios->Stream() << stack_mask.LoadBit(e - i - 1);
   }
   vios->Stream() << ")\n";
@@ -229,7 +229,7 @@
 void InlineInfo::Dump(VariableIndentationOutputStream* vios,
                       const CodeInfo& code_info,
                       uint16_t number_of_dex_registers[]) const {
-  InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info_encoding;
+  InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
   vios->Stream() << "InlineInfo with depth "
                  << static_cast<uint32_t>(GetDepth(inline_info_encoding))
                  << "\n";
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 062404d..61d6a58 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -693,7 +693,7 @@
   size_t SetFromSizes(size_t native_pc_max,
                       size_t dex_pc_max,
                       size_t dex_register_map_size,
-                      size_t inline_info_size,
+                      size_t number_of_inline_info,
                       size_t number_of_register_masks,
                       size_t number_of_stack_masks) {
     total_bit_size_ = 0;
@@ -712,9 +712,7 @@
     // greater than the offset we might try to encode, we already implicitly have it.
     // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
     inline_info_bit_offset_ = total_bit_size_;
-    if (inline_info_size != 0) {
-      total_bit_size_ += MinimumBitsToStore(dex_register_map_size + inline_info_size);
-    }
+    total_bit_size_ += MinimumBitsToStore(number_of_inline_info);
 
     register_mask_index_bit_offset_ = total_bit_size_;
     total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
@@ -749,6 +747,18 @@
     return total_bit_size_;
   }
 
+  template<typename Vector>
+  void Encode(Vector* dest) const {
+    static_assert(alignof(StackMapEncoding) == 1, "Should not require alignment");
+    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
+    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
+  }
+
+  void Decode(const uint8_t** ptr) {
+    *this = *reinterpret_cast<const StackMapEncoding*>(*ptr);
+    *ptr += sizeof(*this);
+  }
+
   void Dump(VariableIndentationOutputStream* vios) const;
 
  private:
@@ -771,7 +781,7 @@
  *
  * The information is of the form:
  *
- *   [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask_index,
+ *   [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_index, register_mask_index,
  *   stack_mask_index].
  */
 class StackMap {
@@ -809,12 +819,12 @@
     encoding.GetDexRegisterMapEncoding().Store(region_, offset);
   }
 
-  ALWAYS_INLINE uint32_t GetInlineDescriptorOffset(const StackMapEncoding& encoding) const {
+  ALWAYS_INLINE uint32_t GetInlineInfoIndex(const StackMapEncoding& encoding) const {
     return encoding.GetInlineInfoEncoding().Load(region_);
   }
 
-  ALWAYS_INLINE void SetInlineDescriptorOffset(const StackMapEncoding& encoding, uint32_t offset) {
-    encoding.GetInlineInfoEncoding().Store(region_, offset);
+  ALWAYS_INLINE void SetInlineInfoIndex(const StackMapEncoding& encoding, uint32_t index) {
+    encoding.GetInlineInfoEncoding().Store(region_, index);
   }
 
   ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
@@ -838,7 +848,7 @@
   }
 
   ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
-    return GetInlineDescriptorOffset(encoding) != kNoInlineInfo;
+    return GetInlineInfoIndex(encoding) != kNoInlineInfo;
   }
 
   ALWAYS_INLINE bool Equals(const StackMap& other) const {
@@ -908,12 +918,24 @@
   ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
     return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
   }
-  ALWAYS_INLINE size_t GetEntrySize() const {
-    return RoundUp(total_bit_size_, kBitsPerByte) / kBitsPerByte;
+  ALWAYS_INLINE size_t BitSize() const {
+    return total_bit_size_;
   }
 
   void Dump(VariableIndentationOutputStream* vios) const;
 
+  template<typename Vector>
+  void Encode(Vector* dest) const {
+    static_assert(alignof(InlineInfoEncoding) == 1, "Should not require alignment");
+    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
+    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
+  }
+
+  void Decode(const uint8_t** ptr) {
+    *this = *reinterpret_cast<const InlineInfoEncoding*>(*ptr);
+    *ptr += sizeof(*this);
+  }
+
  private:
   static constexpr uint8_t kIsLastBitOffset = 0;
   static constexpr uint8_t kMethodIndexBitOffset = 1;
@@ -934,8 +956,7 @@
  */
 class InlineInfo {
  public:
-  explicit InlineInfo(MemoryRegion region) : region_(region) {
-  }
+  explicit InlineInfo(BitMemoryRegion region) : region_(region) {}
 
   ALWAYS_INLINE uint32_t GetDepth(const InlineInfoEncoding& encoding) const {
     size_t depth = 0;
@@ -1018,83 +1039,189 @@
             uint16_t* number_of_dex_registers) const;
 
  private:
-  ALWAYS_INLINE MemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
-                                              uint32_t depth) const {
-    size_t entry_size = encoding.GetEntrySize();
+  ALWAYS_INLINE BitMemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
+                                                 uint32_t depth) const {
+    size_t entry_size = encoding.BitSize();
     DCHECK_GT(entry_size, 0u);
     return region_.Subregion(depth * entry_size, entry_size);
   }
 
-  MemoryRegion region_;
+  BitMemoryRegion region_;
+};
+
+// Bit-sized region encoding; may be more than 255 bits.
+class BitRegionEncoding {
+ public:
+  uint32_t num_bits = 0;
+
+  ALWAYS_INLINE size_t BitSize() const {
+    return num_bits;
+  }
+
+  template<typename Vector>
+  void Encode(Vector* dest) const {
+    EncodeUnsignedLeb128(dest, num_bits);  // Use leb in case num_bits is greater than 255.
+  }
+
+  void Decode(const uint8_t** ptr) {
+    num_bits = DecodeUnsignedLeb128(ptr);
+  }
+};
+
+// A table of bit sized encodings.
+template <typename Encoding>
+struct BitEncodingTable {
+  static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
+  // How the encoding is laid out (serialized).
+  Encoding encoding;
+
+  // Number of entries in the table (serialized).
+  size_t num_entries;
+
+  // Bit offset for the base of the table (computed).
+  size_t bit_offset = kInvalidOffset;
+
+  template<typename Vector>
+  void Encode(Vector* dest) const {
+    EncodeUnsignedLeb128(dest, num_entries);
+    encoding.Encode(dest);
+  }
+
+  ALWAYS_INLINE void Decode(const uint8_t** ptr) {
+    num_entries = DecodeUnsignedLeb128(ptr);
+    encoding.Decode(ptr);
+  }
+
+  // Set the bit offset of the table and add the space used by the table to *offset.
+  void UpdateBitOffset(size_t* offset) {
+    DCHECK(offset != nullptr);
+    bit_offset = *offset;
+    *offset += encoding.BitSize() * num_entries;
+  }
+
+  // Return the bit region for the map at the given index.
+  ALWAYS_INLINE BitMemoryRegion BitRegion(MemoryRegion region, size_t index) const {
+    DCHECK_NE(bit_offset, kInvalidOffset) << "Invalid table offset";
+    DCHECK_LT(index, num_entries);
+    const size_t map_size = encoding.BitSize();
+    return BitMemoryRegion(region, bit_offset + index * map_size, map_size);
+  }
+};
+
+// A byte-sized table of possibly variable-sized encodings.
+struct ByteSizedTable {
+  static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
+
+  // Number of entries in the table (serialized).
+  size_t num_entries = 0;
+
+  // Number of bytes of the table (serialized).
+  size_t num_bytes;
+
+  // Byte offset for the base of the table (computed).
+  size_t byte_offset = kInvalidOffset;
+
+  template<typename Vector>
+  void Encode(Vector* dest) const {
+    EncodeUnsignedLeb128(dest, num_entries);
+    EncodeUnsignedLeb128(dest, num_bytes);
+  }
+
+  ALWAYS_INLINE void Decode(const uint8_t** ptr) {
+    num_entries = DecodeUnsignedLeb128(ptr);
+    num_bytes = DecodeUnsignedLeb128(ptr);
+  }
+
+  // Set the byte offset of the table from the given bit offset. Adds the total bit size of the
+  // table to *offset.
+  void UpdateBitOffset(size_t* offset) {
+    DCHECK(offset != nullptr);
+    DCHECK_ALIGNED(*offset, kBitsPerByte);
+    byte_offset = *offset / kBitsPerByte;
+    *offset += num_bytes * kBitsPerByte;
+  }
 };
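Both table headers serialize as a pair of ULEB128 values, so a round trip is easy to sanity-check. A small usage sketch (values are arbitrary; EncodeUnsignedLeb128 and DecodeUnsignedLeb128 come from ART's leb128.h):

    std::vector<uint8_t> buffer;
    ByteSizedTable table;
    table.num_entries = 3;
    table.num_bytes = 40;
    table.Encode(&buffer);  // Appends two ULEB128-encoded values.

    ByteSizedTable decoded;
    const uint8_t* ptr = buffer.data();
    decoded.Decode(&ptr);  // ptr now points one past the encoded header.
    // decoded.num_entries == 3 and decoded.num_bytes == 40; byte_offset stays
    // kInvalidOffset until ComputeTableOffsets() lays the tables out.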
 
 // Most of the fields are encoded as ULEB128 to save space.
 struct CodeInfoEncoding {
-  uint32_t non_header_size;
-  uint32_t number_of_stack_maps;
-  uint32_t number_of_stack_masks;
-  uint32_t number_of_register_masks;
-  uint32_t stack_mask_size_in_bits;
-  uint32_t register_mask_size_in_bits;
-  uint32_t number_of_location_catalog_entries;
-  StackMapEncoding stack_map_encoding;
-  InlineInfoEncoding inline_info_encoding;
-  uint8_t header_size;
+  static constexpr uint32_t kInvalidSize = static_cast<uint32_t>(-1);
+  // Byte sized tables go first to avoid unnecessary alignment bits.
+  ByteSizedTable dex_register_map;
+  ByteSizedTable location_catalog;
+  BitEncodingTable<StackMapEncoding> stack_map;
+  BitEncodingTable<BitRegionEncoding> register_mask;
+  BitEncodingTable<BitRegionEncoding> stack_mask;
+  BitEncodingTable<InlineInfoEncoding> inline_info;
 
-  CodeInfoEncoding() { }
+  CodeInfoEncoding() {}
 
   explicit CodeInfoEncoding(const void* data) {
     const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
-    non_header_size = DecodeUnsignedLeb128(&ptr);
-    number_of_stack_maps = DecodeUnsignedLeb128(&ptr);
-    number_of_stack_masks = DecodeUnsignedLeb128(&ptr);
-    number_of_register_masks = DecodeUnsignedLeb128(&ptr);
-    stack_mask_size_in_bits = DecodeUnsignedLeb128(&ptr);
-    register_mask_size_in_bits = DecodeUnsignedLeb128(&ptr);
-    number_of_location_catalog_entries = DecodeUnsignedLeb128(&ptr);
-    static_assert(alignof(StackMapEncoding) == 1,
-                  "StackMapEncoding should not require alignment");
-    stack_map_encoding = *reinterpret_cast<const StackMapEncoding*>(ptr);
-    ptr += sizeof(StackMapEncoding);
-    if (stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      static_assert(alignof(InlineInfoEncoding) == 1,
-                    "InlineInfoEncoding should not require alignment");
-      inline_info_encoding = *reinterpret_cast<const InlineInfoEncoding*>(ptr);
-      ptr += sizeof(InlineInfoEncoding);
+    dex_register_map.Decode(&ptr);
+    location_catalog.Decode(&ptr);
+    stack_map.Decode(&ptr);
+    register_mask.Decode(&ptr);
+    stack_mask.Decode(&ptr);
+    if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
+      inline_info.Decode(&ptr);
     } else {
-      inline_info_encoding = InlineInfoEncoding{};  // NOLINT.
+      inline_info = BitEncodingTable<InlineInfoEncoding>();
     }
-    header_size = dchecked_integral_cast<uint8_t>(ptr - reinterpret_cast<const uint8_t*>(data));
+    cache_header_size =
+        dchecked_integral_cast<uint32_t>(ptr - reinterpret_cast<const uint8_t*>(data));
+    ComputeTableOffsets();
   }
 
   template<typename Vector>
-  void Compress(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, non_header_size);
-    EncodeUnsignedLeb128(dest, number_of_stack_maps);
-    EncodeUnsignedLeb128(dest, number_of_stack_masks);
-    EncodeUnsignedLeb128(dest, number_of_register_masks);
-    EncodeUnsignedLeb128(dest, stack_mask_size_in_bits);
-    EncodeUnsignedLeb128(dest, register_mask_size_in_bits);
-    EncodeUnsignedLeb128(dest, number_of_location_catalog_entries);
-    const uint8_t* stack_map_ptr = reinterpret_cast<const uint8_t*>(&stack_map_encoding);
-    dest->insert(dest->end(), stack_map_ptr, stack_map_ptr + sizeof(StackMapEncoding));
-    if (stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      const uint8_t* inline_info_ptr = reinterpret_cast<const uint8_t*>(&inline_info_encoding);
-      dest->insert(dest->end(), inline_info_ptr, inline_info_ptr + sizeof(InlineInfoEncoding));
+  void Compress(Vector* dest) {
+    dex_register_map.Encode(dest);
+    location_catalog.Encode(dest);
+    stack_map.Encode(dest);
+    register_mask.Encode(dest);
+    stack_mask.Encode(dest);
+    if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
+      inline_info.Encode(dest);
     }
+    cache_header_size = dest->size();
   }
+
+  ALWAYS_INLINE void ComputeTableOffsets() {
+    // Skip the header.
+    size_t bit_offset = HeaderSize() * kBitsPerByte;
+    // The byte tables must be aligned so they must go first.
+    dex_register_map.UpdateBitOffset(&bit_offset);
+    location_catalog.UpdateBitOffset(&bit_offset);
+    // Other tables don't require alignment.
+    stack_map.UpdateBitOffset(&bit_offset);
+    register_mask.UpdateBitOffset(&bit_offset);
+    stack_mask.UpdateBitOffset(&bit_offset);
+    inline_info.UpdateBitOffset(&bit_offset);
+    cache_non_header_size = RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte - HeaderSize();
+  }
+
+  ALWAYS_INLINE size_t HeaderSize() const {
+    DCHECK_NE(cache_header_size, kInvalidSize) << "Uninitialized";
+    return cache_header_size;
+  }
+
+  ALWAYS_INLINE size_t NonHeaderSize() const {
+    DCHECK_NE(cache_non_header_size, kInvalidSize) << "Uninitialized";
+    return cache_non_header_size;
+  }
+
+ private:
+  // Computed fields (not serialized).
+  // Header size in bytes.
+  uint32_t cache_header_size = kInvalidSize;
+  // Non header size in bytes.
+  uint32_t cache_non_header_size = kInvalidSize;
 };
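To make the layout concrete, here is a worked pass through ComputeTableOffsets under assumed sizes: an 8-byte header, 16 bytes of dex register maps, a 4-byte location catalog, 10 stack maps of 24 bits each, 2 register masks of 12 bits, 4 stack masks of 5 bits, and no inline info:

    bit_offset = HeaderSize() * 8                  // 8 * 8                 =  64
    dex_register_map.byte_offset = 64 / 8 = 8      // bit_offset += 16 * 8  -> 192
    location_catalog.byte_offset = 192 / 8 = 24    // bit_offset += 4 * 8   -> 224
    stack_map.bit_offset = 224                     // bit_offset += 10 * 24 -> 464
    register_mask.bit_offset = 464                 // bit_offset += 2 * 12  -> 488
    stack_mask.bit_offset = 488                    // bit_offset += 4 * 5   -> 508
    inline_info.bit_offset = 508                   // bit_offset += 0       -> 508
    cache_non_header_size = RoundUp(508, 8) / 8 - 8  // 512 / 8 - 8 = 56 bytes

Note that both byte tables land on byte boundaries here, satisfying the DCHECK_ALIGNED in ByteSizedTable::UpdateBitOffset.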
 
 /**
  * Wrapper around all compiler information collected for a method.
  * The information is of the form:
  *
- *   [CodeInfoEncoding, StackMap+, DexRegisterLocationCatalog+, DexRegisterMap+, InlineInfo*]
- *
- * where CodeInfoEncoding is of the form:
- *
- *   [non_header_size, number_of_stack_maps, stack_map_size_in_bits,
- *    number_of_location_catalog_entries, StackMapEncoding]
+ *   [CodeInfoEncoding, DexRegisterMap+, DexLocationCatalog+, StackMap+, RegisterMask+, StackMask+,
+ *    InlineInfo*]
  */
 class CodeInfo {
  public:
@@ -1104,7 +1231,7 @@
   explicit CodeInfo(const void* data) {
     CodeInfoEncoding encoding = CodeInfoEncoding(data);
     region_ = MemoryRegion(const_cast<void*>(data),
-                           encoding.header_size + encoding.non_header_size);
+                           encoding.HeaderSize() + encoding.NonHeaderSize());
   }
 
   CodeInfoEncoding ExtractEncoding() const {
@@ -1114,99 +1241,67 @@
   }
 
   bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0;
+    return encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0;
   }
 
   DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
-    return DexRegisterLocationCatalog(region_.Subregion(
-        GetDexRegisterLocationCatalogOffset(encoding),
-        GetDexRegisterLocationCatalogSize(encoding)));
+    return DexRegisterLocationCatalog(region_.Subregion(encoding.location_catalog.byte_offset,
+                                                        encoding.location_catalog.num_bytes));
   }
 
   ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_mask_size_in_bits;
+    return encoding.stack_mask.encoding.BitSize();
   }
 
-  ALWAYS_INLINE StackMap GetStackMapAt(size_t i, const CodeInfoEncoding& encoding) const {
-    const size_t map_size = encoding.stack_map_encoding.BitSize();
-    return StackMap(BitMemoryRegion(GetStackMaps(encoding), i * map_size, map_size));
+  ALWAYS_INLINE StackMap GetStackMapAt(size_t index, const CodeInfoEncoding& encoding) const {
+    return StackMap(encoding.stack_map.BitRegion(region_, index));
   }
 
-  BitMemoryRegion GetStackMask(const CodeInfoEncoding& encoding, size_t stack_mask_index) const {
-    // All stack mask data is stored before register map data (which is at the very end).
-    const size_t entry_size = GetNumberOfStackMaskBits(encoding);
-    const size_t register_mask_bits =
-        encoding.register_mask_size_in_bits * encoding.number_of_register_masks;
-    return BitMemoryRegion(region_,
-                           region_.size_in_bits() - register_mask_bits -
-                               entry_size * (stack_mask_index + 1),
-                           entry_size);
+  BitMemoryRegion GetStackMask(size_t index, const CodeInfoEncoding& encoding) const {
+    return encoding.stack_mask.BitRegion(region_, index);
   }
 
   BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
                                  const StackMap& stack_map) const {
-    return GetStackMask(encoding, stack_map.GetStackMaskIndex(encoding.stack_map_encoding));
+    return GetStackMask(stack_map.GetStackMaskIndex(encoding.stack_map.encoding), encoding);
   }
 
-  BitMemoryRegion GetRegisterMask(const CodeInfoEncoding& encoding, size_t index) const {
-    const size_t entry_size = encoding.register_mask_size_in_bits;
-    return BitMemoryRegion(region_,
-                           region_.size_in_bits() - entry_size * (index + 1),
-                           entry_size);
+  BitMemoryRegion GetRegisterMask(size_t index, const CodeInfoEncoding& encoding) const {
+    return encoding.register_mask.BitRegion(region_, index);
   }
 
   uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
-    size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map_encoding);
-    return GetRegisterMask(encoding, index).LoadBits(0u, encoding.register_mask_size_in_bits);
+    size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map.encoding);
+    return GetRegisterMask(index, encoding).LoadBits(0u, encoding.register_mask.encoding.BitSize());
   }
 
   uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
-    return encoding.number_of_location_catalog_entries;
+    return encoding.location_catalog.num_entries;
   }
 
   uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
-    return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(encoding),
-                                                 GetNumberOfLocationCatalogEntries(encoding));
+    return encoding.location_catalog.num_bytes;
   }
 
   uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
-    return encoding.number_of_stack_maps;
+    return encoding.stack_map.num_entries;
   }
 
   // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
   ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map_encoding.BitSize() * GetNumberOfStackMaps(encoding);
-  }
-
-  // Get the size of all the stack maps of this CodeInfo object, in bytes.
-  size_t GetStackMapsSize(const CodeInfoEncoding& encoding) const {
-    return RoundUp(GetStackMapsSizeInBits(encoding), kBitsPerByte) / kBitsPerByte;
-  }
-
-  uint32_t GetDexRegisterLocationCatalogOffset(const CodeInfoEncoding& encoding) const {
-    return GetStackMapsOffset(encoding) + GetStackMapsSize(encoding);
-  }
-
-  size_t GetDexRegisterMapsOffset(const CodeInfoEncoding& encoding) const {
-    return GetDexRegisterLocationCatalogOffset(encoding)
-         + GetDexRegisterLocationCatalogSize(encoding);
-  }
-
-  uint32_t GetStackMapsOffset(const CodeInfoEncoding& encoding) const {
-    return encoding.header_size;
+    return encoding.stack_map.encoding.BitSize() * GetNumberOfStackMaps(encoding);
   }
 
   DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
                                      const CodeInfoEncoding& encoding,
-                                     uint32_t number_of_dex_registers) const {
-    if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+                                     size_t number_of_dex_registers) const {
+    if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
       return DexRegisterMap();
-    } else {
-      uint32_t offset = GetDexRegisterMapsOffset(encoding)
-                        + stack_map.GetDexRegisterMapOffset(encoding.stack_map_encoding);
-      size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
-      return DexRegisterMap(region_.Subregion(offset, size));
     }
+    const uint32_t offset = encoding.dex_register_map.byte_offset +
+        stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding);
+    size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
+    return DexRegisterMap(region_.Subregion(offset, size));
   }
 
   size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
@@ -1225,27 +1320,34 @@
                                           InlineInfo inline_info,
                                           const CodeInfoEncoding& encoding,
                                           uint32_t number_of_dex_registers) const {
-    if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info_encoding, depth)) {
+    if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, depth)) {
       return DexRegisterMap();
     } else {
-      uint32_t offset = GetDexRegisterMapsOffset(encoding) +
-          inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info_encoding, depth);
+      uint32_t offset = encoding.dex_register_map.byte_offset +
+          inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, depth);
       size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
       return DexRegisterMap(region_.Subregion(offset, size));
     }
   }
 
+  InlineInfo GetInlineInfo(size_t index, const CodeInfoEncoding& encoding) const {
+    // Since we do not know the depth, we just return the whole remaining map.
+    // TODO: Clean this up.
+    const size_t bit_offset = encoding.inline_info.bit_offset +
+        index * encoding.inline_info.encoding.BitSize();
+    return InlineInfo(BitMemoryRegion(region_, bit_offset, region_.size_in_bits() - bit_offset));
+  }
+
   InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
-    DCHECK(stack_map.HasInlineInfo(encoding.stack_map_encoding));
-    uint32_t offset = stack_map.GetInlineDescriptorOffset(encoding.stack_map_encoding)
-                      + GetDexRegisterMapsOffset(encoding);
-    return InlineInfo(region_.Subregion(offset, region_.size() - offset));
+    DCHECK(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    uint32_t index = stack_map.GetInlineInfoIndex(encoding.stack_map.encoding);
+    return GetInlineInfo(index, encoding);
   }
 
   StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
     for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
       StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetDexPc(encoding.stack_map_encoding) == dex_pc) {
+      if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
         return stack_map;
       }
     }
@@ -1257,7 +1359,7 @@
   StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
     for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
       StackMap stack_map = GetStackMapAt(i - 1, encoding);
-      if (stack_map.GetDexPc(encoding.stack_map_encoding) == dex_pc) {
+      if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
         return stack_map;
       }
     }
@@ -1272,7 +1374,7 @@
     }
     // Walk over all stack maps. If two consecutive stack maps are identical, then we
     // have found a stack map suitable for OSR.
-    const StackMapEncoding& stack_map_encoding = encoding.stack_map_encoding;
+    const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
     for (size_t i = 0; i < e - 1; ++i) {
       StackMap stack_map = GetStackMapAt(i, encoding);
       if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
@@ -1303,7 +1405,7 @@
     //       we could do binary search.
     for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
       StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetNativePcOffset(encoding.stack_map_encoding, kRuntimeISA) ==
+      if (stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) ==
           native_pc_offset) {
         return stack_map;
       }
@@ -1324,23 +1426,17 @@
 
   // Check that the code info has valid stack map and abort if it does not.
   void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
-    if (region_.size() != 0 && region_.size() < GetStackMapsSize(encoding)) {
+    if (region_.size() != 0 && region_.size_in_bits() < GetStackMapsSizeInBits(encoding)) {
       LOG(FATAL) << region_.size() << "\n"
-                 << encoding.header_size << "\n"
-                 << encoding.non_header_size << "\n"
-                 << encoding.number_of_location_catalog_entries << "\n"
-                 << encoding.number_of_stack_maps << "\n"
-                 << encoding.stack_map_encoding.BitSize();
+                 << encoding.HeaderSize() << "\n"
+                 << encoding.NonHeaderSize() << "\n"
+                 << encoding.location_catalog.num_entries << "\n"
+                 << encoding.stack_map.num_entries << "\n"
+                 << encoding.stack_map.encoding.BitSize();
     }
   }
 
  private:
-  ALWAYS_INLINE MemoryRegion GetStackMaps(const CodeInfoEncoding& encoding) const {
-    return region_.size() == 0
-        ? MemoryRegion()
-        : region_.Subregion(GetStackMapsOffset(encoding), GetStackMapsSize(encoding));
-  }
-
   // Compute the size of the Dex register map associated to the stack map at
   // `dex_register_map_offset_in_code_info`.
   size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d843de5..7b65404 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -65,6 +65,7 @@
 #include "object_lock.h"
 #include "quick_exception_handler.h"
 #include "quick/quick_method_frame_info.h"
+#include "read_barrier-inl.h"
 #include "reflection.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
@@ -77,7 +78,7 @@
 #include "thread-inl.h"
 #include "utils.h"
 #include "verifier/method_verifier.h"
-#include "verify_object-inl.h"
+#include "verify_object.h"
 #include "well_known_classes.h"
 #include "interpreter/interpreter.h"
 
@@ -1583,15 +1584,24 @@
 }
 
 struct StackDumpVisitor : public StackVisitor {
-  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
+  StackDumpVisitor(std::ostream& os_in,
+                   Thread* thread_in,
+                   Context* context,
+                   bool can_allocate_in,
+                   bool check_suspended = true,
+                   bool dump_locks_in = true)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+      : StackVisitor(thread_in,
+                     context,
+                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+                     check_suspended),
         os(os_in),
         can_allocate(can_allocate_in),
         last_method(nullptr),
         last_line_number(0),
         repetition_count(0),
-        frame_count(0) {}
+        frame_count(0),
+        dump_locks(dump_locks_in) {}
 
   virtual ~StackDumpVisitor() {
     if (frame_count == 0) {
@@ -1636,8 +1646,10 @@
       if (frame_count == 0) {
         Monitor::DescribeWait(os, GetThread());
       }
-      if (can_allocate) {
+      if (can_allocate && dump_locks) {
         // Visit locks, but do not abort on errors. This would trigger a nested abort.
+        // Skip visiting locks if dump_locks is false, as it would cause a bad_mutexes_held
+        // failure in RegTypeCache::RegTypeCache due to thread_list_lock.
         Monitor::VisitLocks(this, DumpLockedObject, &os, false);
       }
     }
@@ -1681,6 +1693,7 @@
   int last_line_number;
   int repetition_count;
   int frame_count;
+  const bool dump_locks;
 };
 
 static bool ShouldShowNativeStack(const Thread* thread)
@@ -1712,7 +1725,7 @@
   return current_method != nullptr && current_method->IsNative();
 }
 
-void Thread::DumpJavaStack(std::ostream& os) const {
+void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
   // If flip_function is not null, it means we have run a checkpoint
   // before the thread wakes up to execute the flip function and the
   // thread roots haven't been forwarded.  So the following access to
@@ -1741,7 +1754,7 @@
 
   std::unique_ptr<Context> context(Context::Create());
   StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
-                          !tls32_.throwing_OutOfMemoryError);
+                          !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
   dumper.WalkStack();
 
   if (have_exception) {
@@ -1767,10 +1780,15 @@
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      ArtMethod* method = GetCurrentMethod(nullptr, !(dump_for_abort || force_dump_stack));
+      ArtMethod* method =
+          GetCurrentMethod(nullptr,
+                           /*check_suspended*/ !force_dump_stack,
+                           /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
       DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
     }
-    DumpJavaStack(os);
+    DumpJavaStack(os,
+                  /*check_suspended*/ !force_dump_stack,
+                  /*dump_locks*/ !force_dump_stack);
   } else {
     os << "Not able to dump stack of thread that isn't suspended";
   }
@@ -1845,6 +1863,7 @@
     : tls32_(daemon),
       wait_monitor_(nullptr),
       interrupted_(false),
+      custom_tls_(nullptr),
       can_call_into_java_(true) {
   wait_mutex_ = new Mutex("a thread wait mutex");
   wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
@@ -2918,9 +2937,12 @@
 // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
 //       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
 struct CurrentMethodVisitor FINAL : public StackVisitor {
-  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
+  CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+      : StackVisitor(thread,
+                     context,
+                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+                     check_suspended),
         this_object_(nullptr),
         method_(nullptr),
         dex_pc_(0),
@@ -2944,8 +2966,13 @@
   const bool abort_on_error_;
 };
 
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
-  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+                                    bool check_suspended,
+                                    bool abort_on_error) const {
+  CurrentMethodVisitor visitor(const_cast<Thread*>(this),
+                               nullptr,
+                               check_suspended,
+                               abort_on_error);
   visitor.WalkStack(false);
   if (dex_pc != nullptr) {
     *dex_pc = visitor.dex_pc_;
@@ -3457,4 +3484,15 @@
   return Runtime::Current()->IsAotCompiler();
 }
 
+mirror::Object* Thread::GetPeerFromOtherThread() const {
+  mirror::Object* peer = GetPeer();
+  if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+    // We may call Thread::Dump() in the middle of the CC thread flip, before this thread's
+    // stack has been flipped, in which case peer may be a from-space (stale) ref. So explicitly
+    // mark/forward it here.
+    peer = art::ReadBarrier::Mark(peer);
+  }
+  return peer;
+}
+
 }  // namespace art
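The jvmti call sites changed earlier in this diff all follow the same shape. A hypothetical example of publishing another thread's peer through JNI ('other' is some art::Thread* that may be mid-flip under the concurrent copying collector):

    art::Thread* current = art::Thread::Current();
    art::mirror::Object* peer = other->GetPeerFromOtherThread();  // Marked/forwarded if needed.
    jthread jpeer = current->GetJniEnv()->AddLocalReference<jthread>(peer);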
diff --git a/runtime/thread.h b/runtime/thread.h
index b59eac6..3a1b7da 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -201,7 +201,9 @@
       REQUIRES(!Locks::thread_suspend_count_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void DumpJavaStack(std::ostream& os) const
+  void DumpJavaStack(std::ostream& os,
+                     bool check_suspended = true,
+                     bool dump_locks = true) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -359,6 +361,10 @@
     CHECK(tlsPtr_.jpeer == nullptr);
     return tlsPtr_.opeer;
   }
+  // GetPeer is not safe to call on another thread in the middle of the CC thread flip, since
+  // the thread's stack may not have been flipped yet and the peer may be a from-space (stale)
+  // ref. This function will explicitly mark/forward it.
+  mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool HasPeer() const {
     return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
@@ -411,7 +417,9 @@
 
   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
   // abort the runtime iff abort_on_error is true.
-  ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
+  ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
+                              bool check_suspended = true,
+                              bool abort_on_error = true) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns whether the given exception was thrown by the current Java method being executed
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 2536968..56ff0a1 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -41,12 +41,12 @@
     MutexLock mu(Thread::Current(), log_lock_);
     size_t objects_count = object_logs_.size();
     size_t field_values_count = 0;
-    for (auto it : object_logs_) {
+    for (const auto& it : object_logs_) {
       field_values_count += it.second.Size();
     }
     size_t array_count = array_logs_.size();
     size_t array_values_count = 0;
-    for (auto it : array_logs_) {
+    for (const auto& it : array_logs_) {
       array_values_count += it.second.Size();
     }
     size_t intern_string_count = intern_string_logs_.size();
@@ -100,24 +100,30 @@
   return abort_message_;
 }
 
-void Transaction::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
-                                          uint8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
+                                          MemberOffset field_offset,
+                                          uint8_t value,
+                                          bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogBooleanValue(field_offset, value, is_volatile);
 }
 
-void Transaction::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
-                                       int8_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldByte(mirror::Object* obj,
+                                       MemberOffset field_offset,
+                                       int8_t value,
+                                       bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogByteValue(field_offset, value, is_volatile);
 }
 
-void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
-                                       uint16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldChar(mirror::Object* obj,
+                                       MemberOffset field_offset,
+                                       uint16_t value,
+                                       bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
   ObjectLog& object_log = object_logs_[obj];
@@ -125,8 +131,10 @@
 }
 
 
-void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
-                                        int16_t value, bool is_volatile) {
+void Transaction::RecordWriteFieldShort(mirror::Object* obj,
+                                        MemberOffset field_offset,
+                                        int16_t value,
+                                        bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
   ObjectLog& object_log = object_logs_[obj];
@@ -134,7 +142,9 @@
 }
 
 
-void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+void Transaction::RecordWriteField32(mirror::Object* obj,
+                                     MemberOffset field_offset,
+                                     uint32_t value,
                                      bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
@@ -142,7 +152,9 @@
   object_log.Log32BitsValue(field_offset, value, is_volatile);
 }
 
-void Transaction::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+void Transaction::RecordWriteField64(mirror::Object* obj,
+                                     MemberOffset field_offset,
+                                     uint64_t value,
                                      bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
@@ -150,8 +162,10 @@
   object_log.Log64BitsValue(field_offset, value, is_volatile);
 }
 
-void Transaction::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
-                                            mirror::Object* value, bool is_volatile) {
+void Transaction::RecordWriteFieldReference(mirror::Object* obj,
+                                            MemberOffset field_offset,
+                                            mirror::Object* value,
+                                            bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
   ObjectLog& object_log = object_logs_[obj];
@@ -163,8 +177,12 @@
   DCHECK(array->IsArrayInstance());
   DCHECK(!array->IsObjectArray());
   MutexLock mu(Thread::Current(), log_lock_);
-  ArrayLog& array_log = array_logs_[array];
-  array_log.LogValue(index, value);
+  auto it = array_logs_.find(array);
+  if (it == array_logs_.end()) {
+    ArrayLog log;
+    it = array_logs_.emplace(array, std::move(log)).first;
+  }
+  it->second.LogValue(index, value);
 }
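The find-then-emplace shape above makes the insertion explicit and moves the freshly built log into the map rather than assigning into a default-constructed slot. The same idiom in isolation (generic names, purely illustrative):

    #include <map>
    #include <utility>
    #include <vector>

    std::map<int, std::vector<int>> logs;

    void LogValue(int key, int value) {
      auto it = logs.find(key);
      if (it == logs.end()) {
        std::vector<int> fresh;
        it = logs.emplace(key, std::move(fresh)).first;  // Move, never copy, the new log.
      }
      it->second.push_back(value);
    }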
 
 void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
@@ -172,33 +190,33 @@
   DCHECK(dex_cache != nullptr);
   DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
   MutexLock mu(Thread::Current(), log_lock_);
-  resolve_string_logs_.push_back(ResolveStringLog(dex_cache, string_idx));
+  resolve_string_logs_.emplace_back(dex_cache, string_idx);
 }
 
 void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
-  LogInternedString(log);
+  LogInternedString(std::move(log));
 }
 
 void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
-  LogInternedString(log);
+  LogInternedString(std::move(log));
 }
 
 void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
-  LogInternedString(log);
+  LogInternedString(std::move(log));
 }
 
 void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
   InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
-  LogInternedString(log);
+  LogInternedString(std::move(log));
 }
 
-void Transaction::LogInternedString(const InternStringLog& log) {
+void Transaction::LogInternedString(InternStringLog&& log) {
   Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
   MutexLock mu(Thread::Current(), log_lock_);
-  intern_string_logs_.push_front(log);
+  intern_string_logs_.push_front(std::move(log));
 }
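
Taking the log by rvalue reference makes LogInternedString a sink: the caller relinquishes ownership and std::list move-constructs the node. A minimal sketch of the same pattern, with a hypothetical Entry type standing in for the real log class:

    #include <list>
    #include <string>
    #include <utility>

    // Hypothetical move-only entry; not the actual InternStringLog.
    struct Entry {
      explicit Entry(std::string s) : payload(std::move(s)) {}
      Entry(Entry&&) = default;
      Entry(const Entry&) = delete;
      Entry& operator=(const Entry&) = delete;
      std::string payload;
    };

    class Journal {
     public:
      // Entry&& documents that the caller hands over ownership; push_front
      // then move-constructs the element instead of copying it.
      void Push(Entry&& e) {
        entries_.push_front(std::move(e));
      }

     private:
      std::list<Entry> entries_;
    };

    // Usage, mirroring the Record*String methods above:
    // Journal j;
    // j.Push(Entry("interned"));
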
 
 void Transaction::Rollback() {
@@ -216,7 +234,7 @@
 void Transaction::UndoObjectModifications() {
   // TODO we may not need to restore objects allocated during this transaction. Or we could directly
   // remove them from the heap.
-  for (auto it : object_logs_) {
+  for (const auto& it : object_logs_) {
     it.second.Undo(it.first);
   }
   object_logs_.clear();
@@ -225,7 +243,7 @@
 void Transaction::UndoArrayModifications() {
  // TODO we may not need to restore arrays allocated during this transaction. Or we could directly
   // remove them from the heap.
-  for (auto it : array_logs_) {
+  for (const auto& it : array_logs_) {
     it.second.Undo(it.first);
   }
   array_logs_.clear();
@@ -235,7 +253,7 @@
   InternTable* const intern_table = Runtime::Current()->GetInternTable();
  // We want to undo each operation from the most recent to the oldest. The list was filled so that
  // the most recent operation is at the beginning, so we just have to iterate over it.
-  for (InternStringLog& string_log : intern_string_logs_) {
+  for (const InternStringLog& string_log : intern_string_logs_) {
     string_log.Undo(intern_table);
   }
   intern_string_logs_.clear();
@@ -262,7 +280,7 @@
   std::list<ObjectPair> moving_roots;
 
   // Visit roots.
-  for (auto it : object_logs_) {
+  for (auto& it : object_logs_) {
     it.second.VisitRoots(visitor);
     mirror::Object* old_root = it.first;
     mirror::Object* new_root = old_root;
@@ -279,7 +297,7 @@
     auto old_root_it = object_logs_.find(old_root);
     CHECK(old_root_it != object_logs_.end());
     CHECK(object_logs_.find(new_root) == object_logs_.end());
-    object_logs_.insert(std::make_pair(new_root, old_root_it->second));
+    object_logs_.emplace(new_root, std::move(old_root_it->second));
     object_logs_.erase(old_root_it);
   }
 }
@@ -289,7 +307,7 @@
   typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
   std::list<ArrayPair> moving_roots;
 
-  for (auto it : array_logs_) {
+  for (auto& it : array_logs_) {
     mirror::Array* old_root = it.first;
     CHECK(!old_root->IsObjectArray());
     mirror::Array* new_root = old_root;
@@ -306,7 +324,7 @@
     auto old_root_it = array_logs_.find(old_root);
     CHECK(old_root_it != array_logs_.end());
     CHECK(array_logs_.find(new_root) == array_logs_.end());
-    array_logs_.insert(std::make_pair(new_root, old_root_it->second));
+    array_logs_.emplace(new_root, std::move(old_root_it->second));
     array_logs_.erase(old_root_it);
   }
 }
@@ -347,23 +365,27 @@
   LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
 }
 
-void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile) {
+void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
+                                               mirror::Object* obj,
+                                               bool is_volatile) {
   LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
 }
 
 void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
-                                      MemberOffset offset, uint64_t value, bool is_volatile) {
+                                      MemberOffset offset,
+                                      uint64_t value,
+                                      bool is_volatile) {
   auto it = field_values_.find(offset.Uint32Value());
   if (it == field_values_.end()) {
     ObjectLog::FieldValue field_value;
     field_value.value = value;
     field_value.is_volatile = is_volatile;
     field_value.kind = kind;
-    field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
+    field_values_.emplace(offset.Uint32Value(), std::move(field_value));
   }
 }
 
-void Transaction::ObjectLog::Undo(mirror::Object* obj) {
+void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
   for (auto& it : field_values_) {
    // The garbage collector needs to access the object's class and the array's length, so we don't
    // roll back these values.
@@ -377,60 +399,71 @@
       // Skip Array::length field.
       continue;
     }
-    FieldValue& field_value = it.second;
+    const FieldValue& field_value = it.second;
     UndoFieldWrite(obj, field_offset, field_value);
   }
 }
 
-void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
-                                            const FieldValue& field_value) {
+void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
+                                            MemberOffset field_offset,
+                                            const FieldValue& field_value) const {
   // TODO We may want to abort a transaction while still being in transaction mode. In this case,
   // we'd need to disable the check.
   constexpr bool kCheckTransaction = true;
   switch (field_value.kind) {
     case kBoolean:
       if (UNLIKELY(field_value.is_volatile)) {
-        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(field_offset,
-                                                         static_cast<bool>(field_value.value));
+        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
+            field_offset,
+            static_cast<bool>(field_value.value));
       } else {
-        obj->SetFieldBoolean<false, kCheckTransaction>(field_offset,
-                                                 static_cast<bool>(field_value.value));
+        obj->SetFieldBoolean<false, kCheckTransaction>(
+            field_offset,
+            static_cast<bool>(field_value.value));
       }
       break;
     case kByte:
       if (UNLIKELY(field_value.is_volatile)) {
-        obj->SetFieldByteVolatile<false, kCheckTransaction>(field_offset,
-                                                         static_cast<int8_t>(field_value.value));
+        obj->SetFieldByteVolatile<false, kCheckTransaction>(
+            field_offset,
+            static_cast<int8_t>(field_value.value));
       } else {
-        obj->SetFieldByte<false, kCheckTransaction>(field_offset,
-                                                 static_cast<int8_t>(field_value.value));
+        obj->SetFieldByte<false, kCheckTransaction>(
+            field_offset,
+            static_cast<int8_t>(field_value.value));
       }
       break;
     case kChar:
       if (UNLIKELY(field_value.is_volatile)) {
-        obj->SetFieldCharVolatile<false, kCheckTransaction>(field_offset,
-                                                          static_cast<uint16_t>(field_value.value));
+        obj->SetFieldCharVolatile<false, kCheckTransaction>(
+            field_offset,
+            static_cast<uint16_t>(field_value.value));
       } else {
-        obj->SetFieldChar<false, kCheckTransaction>(field_offset,
-                                                  static_cast<uint16_t>(field_value.value));
+        obj->SetFieldChar<false, kCheckTransaction>(
+            field_offset,
+            static_cast<uint16_t>(field_value.value));
       }
       break;
     case kShort:
       if (UNLIKELY(field_value.is_volatile)) {
-        obj->SetFieldShortVolatile<false, kCheckTransaction>(field_offset,
-                                                          static_cast<int16_t>(field_value.value));
+        obj->SetFieldShortVolatile<false, kCheckTransaction>(
+            field_offset,
+            static_cast<int16_t>(field_value.value));
       } else {
-        obj->SetFieldShort<false, kCheckTransaction>(field_offset,
-                                                  static_cast<int16_t>(field_value.value));
+        obj->SetFieldShort<false, kCheckTransaction>(
+            field_offset,
+            static_cast<int16_t>(field_value.value));
       }
       break;
     case k32Bits:
       if (UNLIKELY(field_value.is_volatile)) {
-        obj->SetField32Volatile<false, kCheckTransaction>(field_offset,
-                                                          static_cast<uint32_t>(field_value.value));
+        obj->SetField32Volatile<false, kCheckTransaction>(
+            field_offset,
+            static_cast<uint32_t>(field_value.value));
       } else {
-        obj->SetField32<false, kCheckTransaction>(field_offset,
-                                                  static_cast<uint32_t>(field_value.value));
+        obj->SetField32<false, kCheckTransaction>(
+            field_offset,
+            static_cast<uint32_t>(field_value.value));
       }
       break;
     case k64Bits:
@@ -442,11 +475,13 @@
       break;
     case kReference:
       if (UNLIKELY(field_value.is_volatile)) {
-        obj->SetFieldObjectVolatile<false, kCheckTransaction>(field_offset,
-                                                              reinterpret_cast<mirror::Object*>(field_value.value));
+        obj->SetFieldObjectVolatile<false, kCheckTransaction>(
+            field_offset,
+            reinterpret_cast<mirror::Object*>(field_value.value));
       } else {
-        obj->SetFieldObject<false, kCheckTransaction>(field_offset,
-                                                      reinterpret_cast<mirror::Object*>(field_value.value));
+        obj->SetFieldObject<false, kCheckTransaction>(
+            field_offset,
+            reinterpret_cast<mirror::Object*>(field_value.value));
       }
       break;
     default:
@@ -456,7 +491,7 @@
 }
 
 void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
-  for (auto it : field_values_) {
+  for (auto& it : field_values_) {
     FieldValue& field_value = it.second;
     if (field_value.kind == ObjectLog::kReference) {
       visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
@@ -465,7 +500,7 @@
   }
 }
 
-void Transaction::InternStringLog::Undo(InternTable* intern_table) {
+void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
   DCHECK(intern_table != nullptr);
   switch (string_op_) {
     case InternStringLog::kInsert: {
@@ -506,7 +541,7 @@
   str_.VisitRoot(visitor, RootInfo(kRootInternedString));
 }
 
-void Transaction::ResolveStringLog::Undo() {
+void Transaction::ResolveStringLog::Undo() const {
   dex_cache_.Read()->ClearString(string_idx_);
 }
 
@@ -538,7 +573,7 @@
   }
 }
 
-void Transaction::ArrayLog::Undo(mirror::Array* array) {
+void Transaction::ArrayLog::Undo(mirror::Array* array) const {
   DCHECK(array != nullptr);
   DCHECK(array->IsArrayInstance());
   Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
@@ -547,8 +582,10 @@
   }
 }
 
-void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array, Primitive::Type array_type,
-                                           size_t index, uint64_t value) {
+void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
+                                           Primitive::Type array_type,
+                                           size_t index,
+                                           uint64_t value) const {
   // TODO We may want to abort a transaction while still being in transaction mode. In this case,
   // we'd need to disable the check.
   switch (array_type) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 1774657..7aa98cd 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -56,26 +56,40 @@
   bool IsAborted() REQUIRES(!log_lock_);
 
   // Record object field changes.
-  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+  void RecordWriteFieldBoolean(mirror::Object* obj,
+                               MemberOffset field_offset,
+                               uint8_t value,
                                bool is_volatile)
       REQUIRES(!log_lock_);
-  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
-                               bool is_volatile)
-      REQUIRES(!log_lock_);
-  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+  void RecordWriteFieldByte(mirror::Object* obj,
+                            MemberOffset field_offset,
+                            int8_t value,
                             bool is_volatile)
       REQUIRES(!log_lock_);
-  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+  void RecordWriteFieldChar(mirror::Object* obj,
+                            MemberOffset field_offset,
+                            uint16_t value,
+                            bool is_volatile)
+      REQUIRES(!log_lock_);
+  void RecordWriteFieldShort(mirror::Object* obj,
+                             MemberOffset field_offset,
+                             int16_t value,
                              bool is_volatile)
       REQUIRES(!log_lock_);
-  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
+  void RecordWriteField32(mirror::Object* obj,
+                          MemberOffset field_offset,
+                          uint32_t value,
                           bool is_volatile)
       REQUIRES(!log_lock_);
-  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
+  void RecordWriteField64(mirror::Object* obj,
+                          MemberOffset field_offset,
+                          uint64_t value,
                           bool is_volatile)
       REQUIRES(!log_lock_);
-  void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
-                                 mirror::Object* value, bool is_volatile)
+  void RecordWriteFieldReference(mirror::Object* obj,
+                                 MemberOffset field_offset,
+                                 mirror::Object* value,
+                                 bool is_volatile)
       REQUIRES(!log_lock_);
 
   // Record array change.
@@ -122,13 +136,16 @@
     void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
     void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
 
-    void Undo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+    void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
     size_t Size() const {
       return field_values_.size();
     }
 
+    ObjectLog() = default;
+    ObjectLog(ObjectLog&& log) = default;
+
    private:
     enum FieldValueKind {
       kBoolean,
@@ -144,33 +161,49 @@
       uint64_t value;
       FieldValueKind kind;
       bool is_volatile;
+
+      FieldValue() = default;
+      FieldValue(FieldValue&& log) = default;
+
+     private:
+      DISALLOW_COPY_AND_ASSIGN(FieldValue);
     };
 
     void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
-    void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
-                        const FieldValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_);
+    void UndoFieldWrite(mirror::Object* obj,
+                        MemberOffset field_offset,
+                        const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);
 
     // Maps field's offset to its value.
     std::map<uint32_t, FieldValue> field_values_;
+
+    DISALLOW_COPY_AND_ASSIGN(ObjectLog);
   };
 
   class ArrayLog : public ValueObject {
    public:
     void LogValue(size_t index, uint64_t value);
 
-    void Undo(mirror::Array* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+    void Undo(mirror::Array* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
 
     size_t Size() const {
       return array_values_.size();
     }
 
+    ArrayLog() = default;
+    ArrayLog(ArrayLog&& log) = default;
+
    private:
-    void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
-                        uint64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
+    void UndoArrayWrite(mirror::Array* array,
+                        Primitive::Type array_type,
+                        size_t index,
+                        uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);
 
     // Maps index to value.
    // TODO use JValue instead?
     std::map<size_t, uint64_t> array_values_;
+
+    DISALLOW_COPY_AND_ASSIGN(ArrayLog);
   };
 
   class InternStringLog : public ValueObject {
@@ -185,31 +218,38 @@
     };
     InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);
 
-    void Undo(InternTable* intern_table)
+    void Undo(InternTable* intern_table) const
         REQUIRES_SHARED(Locks::mutator_lock_)
         REQUIRES(Locks::intern_table_lock_);
     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
+    InternStringLog() = default;
+    InternStringLog(InternStringLog&& log) = default;
+
    private:
-    GcRoot<mirror::String> str_;
+    mutable GcRoot<mirror::String> str_;
     const StringKind string_kind_;
     const StringOp string_op_;
+
+    DISALLOW_COPY_AND_ASSIGN(InternStringLog);
   };
 
   class ResolveStringLog : public ValueObject {
    public:
     ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);
 
-    void Undo() REQUIRES_SHARED(Locks::mutator_lock_);
+    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);
 
     void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
    private:
     GcRoot<mirror::DexCache> dex_cache_;
     const dex::StringIndex string_idx_;
+
+    DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
   };
 
-  void LogInternedString(const InternStringLog& log)
+  void LogInternedString(InternStringLog&& log)
       REQUIRES(Locks::intern_table_lock_)
       REQUIRES(!log_lock_);
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b915457..5f55f3f 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -415,12 +415,12 @@
       result.kind = kSoftFailure;
       if (method != nullptr &&
           !CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) {
-        method->AddAccessFlags(kAccCompileDontBother);
+        method->SetDontCompile();
       }
     }
     if (method != nullptr) {
       if (verifier.HasInstructionThatWillThrow()) {
-        method->AddAccessFlags(kAccCompileDontBother);
+        method->SetDontCompile();
         if (Runtime::Current()->IsAotCompiler() &&
             (callbacks != nullptr) && !callbacks->IsBootImage()) {
           // When compiling apps, make HasInstructionThatWillThrow a soft error to trigger
diff --git a/runtime/verify_object-inl.h b/runtime/verify_object-inl.h
index 43151dd..363fde2 100644
--- a/runtime/verify_object-inl.h
+++ b/runtime/verify_object-inl.h
@@ -19,33 +19,11 @@
 
 #include "verify_object.h"
 
-#include "gc/heap.h"
 #include "mirror/object-inl.h"
 #include "obj_ptr-inl.h"
 
 namespace art {
 
-inline void VerifyObject(ObjPtr<mirror::Object> obj) {
-  if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
-    if (kVerifyObjectSupport > kVerifyObjectModeFast) {
-      // Slow object verification, try the heap right away.
-      Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
-    } else {
-      // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
-      // print the diagnostic message.
-      bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
-      if (!failed) {
-        mirror::Class* c = obj->GetClass<kVerifyNone>();
-        failed = failed || !IsAligned<kObjectAlignment>(c);
-        failed = failed || !VerifyClassClass(c);
-      }
-      if (UNLIKELY(failed)) {
-        Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
-      }
-    }
-  }
-}
-
 inline bool VerifyClassClass(ObjPtr<mirror::Class> c) {
   if (UNLIKELY(c == nullptr)) {
     return false;
diff --git a/runtime/verify_object.cc b/runtime/verify_object.cc
new file mode 100644
index 0000000..a031a07
--- /dev/null
+++ b/runtime/verify_object.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "verify_object-inl.h"
+
+#include "base/bit_utils.h"
+#include "gc/heap.h"
+#include "globals.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) {
+  if (kVerifyObjectSupport > kVerifyObjectModeFast) {
+    // Slow object verification, try the heap right away.
+    Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+  } else {
+    // Fast object verification, only call the heap if our quick sanity tests fail. The heap will
+    // print the diagnostic message.
+    bool failed = !IsAligned<kObjectAlignment>(obj.Ptr());
+    if (!failed) {
+      mirror::Class* c = obj->GetClass<kVerifyNone>();
+      failed = failed || !IsAligned<kObjectAlignment>(c);
+      failed = failed || !VerifyClassClass(c);
+    }
+    if (UNLIKELY(failed)) {
+      Runtime::Current()->GetHeap()->VerifyObjectBody(obj);
+    }
+  }
+}
+
+}  // namespace art
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 384e56f..519f7f5 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -53,7 +53,16 @@
 static constexpr VerifyObjectMode kVerifyObjectSupport =
     kDefaultVerifyFlags != 0 ? kVerifyObjectModeFast : kVerifyObjectModeDisabled;
 
-ALWAYS_INLINE void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+// Implements the actual object checks.
+void VerifyObjectImpl(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS;
+
+// A front that optimizes out the call entirely when verification is disabled.
+ALWAYS_INLINE
+static inline void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANALYSIS {
+  if (kVerifyObjectSupport > kVerifyObjectModeDisabled && obj != nullptr) {
+    VerifyObjectImpl(obj);
+  }
+}
 
 // Check that c.getClass() == c.getClass().getClass().
 ALWAYS_INLINE bool VerifyClassClass(ObjPtr<mirror::Class> c) NO_THREAD_SAFETY_ANALYSIS;
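
The split above is the classic cheap-front/slow-path layout: an ALWAYS_INLINE wrapper tests a compile-time-known flag plus null, and only VerifyObjectImpl lives out of line, so disabled builds pay no code-size or runtime cost. A minimal, self-contained sketch with illustrative names:

    #include <cstdio>

    constexpr bool kChecksEnabled = true;  // flipped off in release-style builds

    // In the real layout this definition lives out of line in a .cc file.
    void CheckImpl(const void* obj) {
      std::printf("slow-path check of %p\n", obj);
    }

    // The cheap front: when kChecksEnabled is false, every call site folds
    // to nothing.
    inline void Check(const void* obj) {
      if (kChecksEnabled && obj != nullptr) {
        CheckImpl(obj);
      }
    }

    int main() {
      int x = 0;
      Check(&x);
      Check(nullptr);  // null is filtered in the inline front
      return 0;
    }
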
diff --git a/test/004-NativeAllocations/src/Main.java b/test/004-NativeAllocations/src/Main.java
index 92f4e21..8712755 100644
--- a/test/004-NativeAllocations/src/Main.java
+++ b/test/004-NativeAllocations/src/Main.java
@@ -16,6 +16,7 @@
 
 import java.lang.reflect.*;
 import java.lang.Runtime;
+import dalvik.system.VMRuntime;
 
 public class Main {
     static Object nativeLock = new Object();
@@ -33,10 +34,19 @@
         NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {
             this.bytes = bytes;
             register_native_allocation.invoke(runtime, bytes);
+
+            // registerNativeAllocation can only bound the maximum number of
+            // outstanding allocations if finalizers don't time out. In case
+            // finalizers have timed out, wait longer for them to complete
+            // now so we can test the guarantees.
+            if (!testingDeadlock) {
+              VMRuntime.runFinalization(0);
+            }
+
             synchronized (nativeLock) {
                 if (!testingDeadlock) {
                     nativeBytes += bytes;
-                    if (nativeBytes > maxMem) {
+                    if (nativeBytes > 2 * maxMem) {
                         throw new OutOfMemoryError();
                     }
                 }
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index fad8a9f..072f0e6 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -535,6 +535,8 @@
     Assert.assertEquals(Math.min(0.0f, Float.MAX_VALUE), 0.0f);
     Assert.assertEquals(Math.min(Float.MIN_VALUE, 0.0f), 0.0f);
     Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MAX_VALUE), Float.MIN_VALUE);
+    // Should not have flush-to-zero behavior.
+    Assert.assertEquals(Math.min(Float.MIN_VALUE, Float.MIN_VALUE), Float.MIN_VALUE);
   }
 
   public static void test_Math_max_F() {
@@ -548,8 +550,10 @@
     Assert.assertEquals(Math.max(1.0f, 0.0f), 1.0f);
     Assert.assertEquals(Math.max(0.0f, 1.0f), 1.0f);
     Assert.assertEquals(Math.max(0.0f, Float.MAX_VALUE), Float.MAX_VALUE);
-    Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE);
     Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MAX_VALUE), Float.MAX_VALUE);
+    // Should not have flush-to-zero behavior.
+    Assert.assertEquals(Math.max(Float.MIN_VALUE, 0.0f), Float.MIN_VALUE);
+    Assert.assertEquals(Math.max(Float.MIN_VALUE, Float.MIN_VALUE), Float.MIN_VALUE);
   }
 
   public static void test_Math_min_D() {
@@ -565,6 +569,8 @@
     Assert.assertEquals(Math.min(0.0d, Double.MAX_VALUE), 0.0d);
     Assert.assertEquals(Math.min(Double.MIN_VALUE, 0.0d), 0.0d);
     Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MAX_VALUE), Double.MIN_VALUE);
+    // Should not have flush-to-zero behavior.
+    Assert.assertEquals(Math.min(Double.MIN_VALUE, Double.MIN_VALUE), Double.MIN_VALUE);
   }
 
   public static void test_Math_max_D() {
@@ -580,6 +586,9 @@
     Assert.assertEquals(Math.max(0.0d, Double.MAX_VALUE), Double.MAX_VALUE);
     Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE);
     Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
+    // Should not have flush-to-zero behavior.
+    Assert.assertEquals(Math.max(Double.MIN_VALUE, 0.0d), Double.MIN_VALUE);
+    Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MIN_VALUE), Double.MIN_VALUE);
   }
 
   public static void test_Math_sqrt() {
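
The added assertions pin down denormal handling: Float.MIN_VALUE (2^-149) is the smallest subnormal float, and a min/max intrinsic with flush-to-zero behavior would return 0.0 for it. The property under test, sketched here in C++ for illustration:

    #include <algorithm>
    #include <cassert>
    #include <limits>

    int main() {
      const float denorm = std::numeric_limits<float>::denorm_min();
      // With IEEE-754 semantics, min/max involving a subnormal must return
      // the subnormal unchanged, not flush it to zero.
      assert(std::min(denorm, denorm) == denorm);
      assert(std::max(denorm, 0.0f) == denorm);
      return 0;
    }
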
diff --git a/test/155-java-set-resolved-type/expected.txt b/test/155-java-set-resolved-type/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/155-java-set-resolved-type/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/155-java-set-resolved-type/info.txt b/test/155-java-set-resolved-type/info.txt
new file mode 100644
index 0000000..ba5bc0a
--- /dev/null
+++ b/test/155-java-set-resolved-type/info.txt
@@ -0,0 +1,2 @@
+Regression test for Java call to DexCache.setResolvedType() storing the
+type in the dex cache while it was not in the class loader's class table.
diff --git a/test/155-java-set-resolved-type/src-ex/TestInterface.java b/test/155-java-set-resolved-type/src-ex/TestInterface.java
new file mode 100644
index 0000000..037c760
--- /dev/null
+++ b/test/155-java-set-resolved-type/src-ex/TestInterface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface TestInterface {
+  public void foo();
+}
diff --git a/test/155-java-set-resolved-type/src/Main.java b/test/155-java-set-resolved-type/src/Main.java
new file mode 100644
index 0000000..f92363e
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/Main.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+    public static final String TEST_NAME = "155-java-set-resolved-type";
+
+    public static void main(String[] args) {
+        try {
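+            // Check if we're running dalvik or RI.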
+            Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+            System.loadLibrary(args[0]);
+        } catch (ClassNotFoundException e) {
+            usingRI = true;
+            // Add expected JNI_OnLoad log line to match expected.txt.
+            System.out.println("JNI_OnLoad called");
+        }
+        try {
+            String dex_location = System.getenv("DEX_LOCATION");
+            ClassLoader systemLoader = ClassLoader.getSystemClassLoader().getParent();
+            ClassLoader exLoader = getClassLoaderFor(dex_location, systemLoader, /* ex */ true);
+            ClassLoader mainLoader = getClassLoaderFor(dex_location, exLoader, /* ex */ false);
+
+            // Resolve TestParameter class. It shall be defined by mainLoader.
+            // This does not resolve method parameter types.
+            Class<?> tpc = Class.forName("TestParameter", false, mainLoader);
+            // Get declared methods of TestParameter.
+            // This still does not resolve method parameter types.
+            Method[] ms = tpc.getDeclaredMethods();
+            if (ms == null || ms.length != 1) { throw new Error("Unexpected methods"); }
+            // Call getParameterTypes() to resolve parameter types. The parameter type
+            // TestInterface shall be defined by the exLoader. This used to store the
+            // TestInterface class in the dex cache resolved types for the mainLoader
+            // but not in the mainLoader's class table. This discrepancy used to cause
+            // a crash further down.
+            ms[0].getParameterTypes();
+
+            // Resolve but do not initialize TestImplementation. During the resolution,
+            // we see the TestInterface in the dex cache, so we do not try to look it up
+            // or resolve it using the mainLoader.
+            Class<?> timpl = Class.forName("TestImplementation", false, mainLoader);
+            // Clear the dex cache resolved types to force a proper lookup the next time
+            // we need to find TestInterface.
+            // TODO: Enable clearing the dex cache when we switch to the hash-based type array
+            // and do a proper lookup. Currently, ClassLinker fully relies on the DexCache.
+            if (false) {
+                clearResolvedTypes(timpl);
+            }
+
+            // Force initialization of TestImplementation. This expects the interface type to be
+            // resolved and found through simple lookup.
+            timpl.newInstance();
+        } catch (Throwable t) {
+            t.printStackTrace();
+        }
+    }
+
+    public static ClassLoader getClassLoaderFor(String location, ClassLoader parent, boolean ex)
+            throws Exception {
+        try {
+            Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
+            Constructor<?> ctor =
+                    class_loader_class.getConstructor(String.class, ClassLoader.class);
+            // Build the path to the test's main jar or -ex jar file.
+            String path = location + "/" + TEST_NAME + (ex ? "-ex.jar" : ".jar");
+            return (ClassLoader)ctor.newInstance(path, parent);
+        } catch (ClassNotFoundException e) {
+            // Running on RI. Use URLClassLoader.
+            String url = "file://" + location + (ex ? "/classes-ex/" : "/classes/");
+            return new java.net.URLClassLoader(
+                    new java.net.URL[] { new java.net.URL(url) }, parent);
+        }
+    }
+
+    public static void clearResolvedTypes(Class<?> c) {
+        if (!usingRI) {
+            nativeClearResolvedTypes(c);
+        }
+    }
+
+    private static boolean usingRI = false;
+
+    public static native void nativeClearResolvedTypes(Class<?> c);
+}
diff --git a/test/155-java-set-resolved-type/src/TestImplementation.java b/test/155-java-set-resolved-type/src/TestImplementation.java
new file mode 100644
index 0000000..4a3e74d
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/TestImplementation.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestImplementation implements TestInterface {
+  public void foo() { }
+}
diff --git a/test/155-java-set-resolved-type/src/TestInterface.java b/test/155-java-set-resolved-type/src/TestInterface.java
new file mode 100644
index 0000000..037c760
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/TestInterface.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface TestInterface {
+  public void foo();
+}
diff --git a/test/155-java-set-resolved-type/src/TestParameter.java b/test/155-java-set-resolved-type/src/TestParameter.java
new file mode 100644
index 0000000..c881f3f
--- /dev/null
+++ b/test/155-java-set-resolved-type/src/TestParameter.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestParameter {
+  public void bar(TestInterface ti) { }
+}
diff --git a/test/156-register-dex-file-multi-loader/expected.txt b/test/156-register-dex-file-multi-loader/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/156-register-dex-file-multi-loader/expected.txt
diff --git a/test/156-register-dex-file-multi-loader/info.txt b/test/156-register-dex-file-multi-loader/info.txt
new file mode 100644
index 0000000..49d153c
--- /dev/null
+++ b/test/156-register-dex-file-multi-loader/info.txt
@@ -0,0 +1,2 @@
+Regression test to check that we do not allow registering the same dex file
+with multiple class loaders.
diff --git a/test/156-register-dex-file-multi-loader/src/Main.java b/test/156-register-dex-file-multi-loader/src/Main.java
new file mode 100644
index 0000000..ff5a2bd
--- /dev/null
+++ b/test/156-register-dex-file-multi-loader/src/Main.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.List;
+
+class MyClassLoader extends ClassLoader {
+  MyClassLoader() throws Exception {
+    super(MyClassLoader.class.getClassLoader());
+
+    // Some magic to get access to the pathList field of BaseDexClassLoader.
+    ClassLoader loader = getClass().getClassLoader();
+    Class<?> baseDexClassLoader = loader.getClass().getSuperclass();
+    Field f = baseDexClassLoader.getDeclaredField("pathList");
+    f.setAccessible(true);
+    Object pathList = f.get(loader);
+
+    // Some magic to get access to the dexElements field of pathList.
+    f = pathList.getClass().getDeclaredField("dexElements");
+    f.setAccessible(true);
+    dexElements = (Object[]) f.get(pathList);
+    dexFileField = dexElements[0].getClass().getDeclaredField("dexFile");
+    dexFileField.setAccessible(true);
+  }
+
+  Object[] dexElements;
+  Field dexFileField;
+
+  protected Class<?> loadClass(String className, boolean resolve) throws ClassNotFoundException {
+    // Mimic what DexPathList.findClass is doing.
+    try {
+      for (Object element : dexElements) {
+        Object dex = dexFileField.get(element);
+        if (dex != null) {
+          // Look up loadClassBinaryName only after the null check to avoid an NPE.
+          Method method = dex.getClass().getDeclaredMethod(
+              "loadClassBinaryName", String.class, ClassLoader.class, List.class);
+          Class<?> clazz = (Class<?>)method.invoke(dex, className, this, null);
+          if (clazz != null) {
+            return clazz;
+          }
+        }
+      }
+    } catch (InvocationTargetException ite) {
+      throw new ClassNotFoundException(className, ite.getCause());
+    } catch (Exception e) {
+      throw new Error(e);
+    }
+    return getParent().loadClass(className);
+  }
+}
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    MyClassLoader o = new MyClassLoader();
+    try {
+      Class<?> foo = o.loadClass("Main");
+      throw new Error("Unreachable");
+    } catch (ClassNotFoundException cnfe) {
+      boolean unexpected = false;
+      if (!(cnfe.getCause() instanceof InternalError)) {
+        unexpected = true;
+      } else {
+        String message = cnfe.getCause().getMessage();
+        unexpected = !message.startsWith("Attempt to register dex file ") ||
+                     !message.endsWith(" with multiple class loaders");
+      }
+      if (unexpected) {
+        cnfe.getCause().printStackTrace();
+      }
+    }
+  }
+}
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 65dfd41..86977d1 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -164,6 +164,12 @@
     }
   }
 
+
+  static boolean $opt$noinline$ensureSideEffects() {
+    if (doThrow) throw new Error("");
+    return true;
+  }
+
   /// CHECK-START: void Main.loop9() liveness (after)
   /// CHECK:         <<Arg:z\d+>>  StaticFieldGet  liveness:<<ArgLiv:\d+>> ranges:{[<<ArgLiv>>,<<ArgLoopUse:\d+>>)} uses:[<<ArgUse:\d+>>,<<ArgLoopUse>>]
   /// CHECK:                       If [<<Arg>>]    liveness:<<IfLiv:\d+>>
@@ -178,7 +184,7 @@
     // Add some code at entry to avoid having the entry block be a pre-header.
     // This avoids having to create a synthesized block.
     System.out.println("Enter");
-    while (Runtime.getRuntime() != null) {
+    while ($opt$noinline$ensureSideEffects()) {
       // 'incoming' must only have a use in the inner loop.
       boolean incoming = field;
       while (incoming) {}
@@ -189,4 +195,5 @@
   }
 
   static boolean field;
+  static boolean doThrow = false;
 }
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index bf0cbe6..dd77423 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -283,9 +283,6 @@
     return "non-boot-image-string";
   }
 
-  /// CHECK-START: java.lang.Class Main.$noinline$getStringClass() sharpening (before)
-  /// CHECK:                LoadClass load_kind:DexCacheViaMethod class_name:java.lang.String
-
   /// CHECK-START-X86: java.lang.Class Main.$noinline$getStringClass() sharpening (after)
   // Note: load kind depends on PIC/non-PIC
   // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress.
@@ -323,9 +320,6 @@
     return String.class;
   }
 
-  /// CHECK-START: java.lang.Class Main.$noinline$getOtherClass() sharpening (before)
-  /// CHECK:                LoadClass load_kind:DexCacheViaMethod class_name:Other
-
   /// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() sharpening (after)
   /// CHECK:                LoadClass load_kind:BssEntry class_name:Other
 
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index b035896..c0aedc1 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -15,6 +15,9 @@
  */
 
 #include "jni.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache-inl.h"
 #include "object_lock.h"
 #include "scoped_thread_state_change-inl.h"
 
diff --git a/test/626-const-class-linking/src/Main.java b/test/626-const-class-linking/src/Main.java
index 0029428..1bc94a7 100644
--- a/test/626-const-class-linking/src/Main.java
+++ b/test/626-const-class-linking/src/Main.java
@@ -23,8 +23,10 @@
 public class Main {
     public static void main(String[] args) throws Exception {
         try {
+            // Check if we're running dalvik or RI.
+            Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader");
             System.loadLibrary(args[0]);
-        } catch (UnsatisfiedLinkError ule) {
+        } catch (ClassNotFoundException e) {
             usingRI = true;
             // Add expected JNI_OnLoad log line to match expected.txt.
             System.out.println("JNI_OnLoad called");
diff --git a/test/636-wrong-static-access/expected.txt b/test/636-wrong-static-access/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/636-wrong-static-access/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/636-wrong-static-access/info.txt b/test/636-wrong-static-access/info.txt
new file mode 100644
index 0000000..184d858
--- /dev/null
+++ b/test/636-wrong-static-access/info.txt
@@ -0,0 +1,2 @@
+Test that the compiler checks if a resolved field is
+of the expected static/instance kind.
diff --git a/test/636-wrong-static-access/run b/test/636-wrong-static-access/run
new file mode 100755
index 0000000..5e99920
--- /dev/null
+++ b/test/636-wrong-static-access/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make verification soft fail, to ensure the verifier does not flag
+# the method we want to compile as "non-compilable" because it sees
+# the method will throw IncompatibleClassChangeError.
+exec ${RUN} "$@" --verify-soft-fail
diff --git a/test/636-wrong-static-access/src-ex/Foo.java b/test/636-wrong-static-access/src-ex/Foo.java
new file mode 100644
index 0000000..9e3b7a7
--- /dev/null
+++ b/test/636-wrong-static-access/src-ex/Foo.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Foo {
+  public static void doTest() {
+    // Execute foo once to make sure the dex cache will be updated.
+    try {
+      foo();
+      throw new Error("Expected IncompatibleClassChangeError");
+    } catch (IncompatibleClassChangeError e) {
+      // Expected.
+    }
+    Main.ensureJitCompiled(Foo.class, "foo");
+    try {
+      foo();
+      throw new Error("Expected IncompatibleClassChangeError");
+    } catch (IncompatibleClassChangeError e) {
+      // Expected.
+    }
+  }
+
+  public static void foo() {
+    System.out.println(Holder.field);
+  }
+}
diff --git a/test/636-wrong-static-access/src/Holder.java b/test/636-wrong-static-access/src/Holder.java
new file mode 100644
index 0000000..f3b1c57
--- /dev/null
+++ b/test/636-wrong-static-access/src/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+  public static int field = 42;
+}
diff --git a/test/636-wrong-static-access/src/Main.java b/test/636-wrong-static-access/src/Main.java
new file mode 100644
index 0000000..bd8548e
--- /dev/null
+++ b/test/636-wrong-static-access/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+
+public class Main {
+    static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/636-wrong-static-access-ex.jar";
+
+    public static void main(String[] args) throws Exception {
+        System.loadLibrary(args[0]);
+        Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
+        if (pathClassLoader == null) {
+            throw new AssertionError("Couldn't find path class loader class");
+        }
+        Constructor<?> constructor =
+            pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class);
+        ClassLoader loader = (ClassLoader) constructor.newInstance(
+            DEX_FILE, ClassLoader.getSystemClassLoader());
+        Class<?> foo = loader.loadClass("Foo");
+        Method doTest = foo.getDeclaredMethod("doTest");
+        doTest.invoke(null);
+    }
+
+    public static native void ensureJitCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/636-wrong-static-access/src2/Holder.java b/test/636-wrong-static-access/src2/Holder.java
new file mode 100644
index 0000000..a26da24
--- /dev/null
+++ b/test/636-wrong-static-access/src2/Holder.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Holder {
+  public int field = 42;
+}
diff --git a/test/911-get-stack-trace/src/PrintThread.java b/test/911-get-stack-trace/src/PrintThread.java
index 97815cc..136fd80 100644
--- a/test/911-get-stack-trace/src/PrintThread.java
+++ b/test/911-get-stack-trace/src/PrintThread.java
@@ -44,6 +44,9 @@
       if (name.contains("Daemon")) {
         // Do not print daemon stacks, as they're non-deterministic.
         stackSerialization = "<not printed>";
+      } else if (name.startsWith("Jit thread pool worker")) {
+        // Skip JIT thread pool. It may or may not be there depending on configuration.
+        continue;
       } else {
         StringBuilder sb = new StringBuilder();
         for (String[] stackElement : (String[][])stackInfo[1]) {
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index d13436e..e659ea3 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -17,9 +17,14 @@
 #include <stdio.h>
 
 #include "base/macros.h"
+#include "class_linker.h"
 #include "jni.h"
+#include "mirror/class_loader.h"
 #include "openjdkjvmti/jvmti.h"
+#include "runtime.h"
 #include "ScopedLocalRef.h"
+#include "ScopedUtfChars.h"
+#include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
 
 #include "ti-agent/common_helper.h"
@@ -278,69 +283,11 @@
   return tmp;
 }
 
-static std::string GetThreadName(jvmtiEnv* jenv, JNIEnv* jni_env, jthread thread) {
-  jvmtiThreadInfo info;
-  jvmtiError result = jenv->GetThreadInfo(thread, &info);
-  if (result != JVMTI_ERROR_NONE) {
-    if (jni_env != nullptr) {
-      JvmtiErrorToException(jni_env, result);
-    } else {
-      printf("Failed to get thread name.\n");
-    }
-    return "";
-  }
-
-  std::string tmp(info.name);
-  jenv->Deallocate(reinterpret_cast<unsigned char*>(info.name));
-  jni_env->DeleteLocalRef(info.context_class_loader);
-  jni_env->DeleteLocalRef(info.thread_group);
-
-  return tmp;
-}
-
-static std::string GetThreadName(Thread* thread) {
-  std::string tmp;
-  thread->GetThreadName(tmp);
-  return tmp;
-}
-
-static void JNICALL ClassPrepareCallback(jvmtiEnv* jenv,
-                                         JNIEnv* jni_env,
-                                         jthread thread,
-                                         jclass klass) {
-  std::string name = GetClassName(jenv, jni_env, klass);
-  if (name == "") {
-    return;
-  }
-  std::string thread_name = GetThreadName(jenv, jni_env, thread);
-  if (thread_name == "") {
-    return;
-  }
-  std::string cur_thread_name = GetThreadName(Thread::Current());
-  printf("Prepare: %s on %s (cur=%s)\n",
-         name.c_str(),
-         thread_name.c_str(),
-         cur_thread_name.c_str());
-}
-
-static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
-                                      JNIEnv* jni_env,
-                                      jthread thread,
-                                      jclass klass) {
-  std::string name = GetClassName(jenv, jni_env, klass);
-  if (name == "") {
-    return;
-  }
-  std::string thread_name = GetThreadName(jenv, jni_env, thread);
-  if (thread_name == "") {
-    return;
-  }
-  printf("Load: %s on %s\n", name.c_str(), thread_name.c_str());
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadEvents(
-    JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
-  if (b == JNI_FALSE) {
+static void EnableEvents(JNIEnv* env,
+                         jboolean enable,
+                         decltype(jvmtiEventCallbacks().ClassLoad) class_load,
+                         decltype(jvmtiEventCallbacks().ClassPrepare) class_prepare) {
+  if (enable == JNI_FALSE) {
     jvmtiError ret = jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
                                                          JVMTI_EVENT_CLASS_LOAD,
                                                          nullptr);
@@ -356,8 +303,8 @@
 
   jvmtiEventCallbacks callbacks;
   memset(&callbacks, 0, sizeof(jvmtiEventCallbacks));
-  callbacks.ClassLoad = ClassLoadCallback;
-  callbacks.ClassPrepare = ClassPrepareCallback;
+  callbacks.ClassLoad = class_load;
+  callbacks.ClassPrepare = class_prepare;
   jvmtiError ret = jvmti_env->SetEventCallbacks(&callbacks, sizeof(callbacks));
   if (JvmtiErrorToException(env, ret)) {
     return;
@@ -375,5 +322,113 @@
   JvmtiErrorToException(env, ret);
 }
 
+class ClassLoadPreparePrinter {
+ public:
+  static void JNICALL ClassLoadCallback(jvmtiEnv* jenv,
+                                        JNIEnv* jni_env,
+                                        jthread thread,
+                                        jclass klass) {
+    std::string name = GetClassName(jenv, jni_env, klass);
+    if (name == "") {
+      return;
+    }
+    std::string thread_name = GetThreadName(jenv, jni_env, thread);
+    if (thread_name == "") {
+      return;
+    }
+    printf("Load: %s on %s\n", name.c_str(), thread_name.c_str());
+  }
+
+  static void JNICALL ClassPrepareCallback(jvmtiEnv* jenv,
+                                           JNIEnv* jni_env,
+                                           jthread thread,
+                                           jclass klass) {
+    std::string name = GetClassName(jenv, jni_env, klass);
+    if (name == "") {
+      return;
+    }
+    std::string thread_name = GetThreadName(jenv, jni_env, thread);
+    if (thread_name == "") {
+      return;
+    }
+    std::string cur_thread_name = GetThreadName(Thread::Current());
+    printf("Prepare: %s on %s (cur=%s)\n",
+           name.c_str(),
+           thread_name.c_str(),
+           cur_thread_name.c_str());
+  }
+
+ private:
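+  // GetThreadInfo allocates the name through the JVMTI allocator and returns local
+  // references for the thread group and context class loader; all of them must be
+  // released to avoid leaks.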
+  static std::string GetThreadName(jvmtiEnv* jenv, JNIEnv* jni_env, jthread thread) {
+    jvmtiThreadInfo info;
+    jvmtiError result = jenv->GetThreadInfo(thread, &info);
+    if (result != JVMTI_ERROR_NONE) {
+      if (jni_env != nullptr) {
+        JvmtiErrorToException(jni_env, result);
+      } else {
+        printf("Failed to get thread name.\n");
+      }
+      return "";
+    }
+
+    std::string tmp(info.name);
+    jenv->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+    // jni_env may be null (see the error path above), so only release the local
+    // references when a JNI environment is available.
+    if (jni_env != nullptr) {
+      jni_env->DeleteLocalRef(info.context_class_loader);
+      jni_env->DeleteLocalRef(info.thread_group);
+    }
+
+    return tmp;
+  }
+
+  static std::string GetThreadName(Thread* thread) {
+    std::string tmp;
+    thread->GetThreadName(tmp);
+    return tmp;
+  }
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadPreparePrintEvents(
+    JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean enable) {
+  EnableEvents(env,
+               enable,
+               ClassLoadPreparePrinter::ClassLoadCallback,
+               ClassLoadPreparePrinter::ClassPrepareCallback);
+}
+
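+// Records, via a static flag, whether any ClassLoad event fired while the events
+// were enabled.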
+struct ClassLoadSeen {
+  static void JNICALL ClassLoadSeenCallback(jvmtiEnv* jenv ATTRIBUTE_UNUSED,
+                                            JNIEnv* jni_env ATTRIBUTE_UNUSED,
+                                            jthread thread ATTRIBUTE_UNUSED,
+                                            jclass klass ATTRIBUTE_UNUSED) {
+    saw_event = true;
+  }
+
+  static bool saw_event;
+};
+bool ClassLoadSeen::saw_event = false;
+
+extern "C" JNIEXPORT void JNICALL Java_Main_enableClassLoadSeenEvents(
+    JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jboolean b) {
+  EnableEvents(env, b, ClassLoadSeen::ClassLoadSeenCallback, nullptr);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hadLoadEvent(
+    JNIEnv* env ATTRIBUTE_UNUSED, jclass Main_klass ATTRIBUTE_UNUSED) {
+  return ClassLoadSeen::saw_event ? JNI_TRUE : JNI_FALSE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isLoadedClass(
+    JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jstring class_name) {
+  ScopedUtfChars name(env, class_name);
+  ScopedObjectAccess soa(Thread::Current());
+  Runtime* current = Runtime::Current();
+  ClassLinker* class_linker = current->GetClassLinker();
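+  // LookupClass only searches already-loaded classes; it does not trigger class
+  // loading, so this check has no side effects.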
+  bool found =
+      class_linker->LookupClass(
+          soa.Self(),
+          name.c_str(),
+          soa.Decode<mirror::ClassLoader>(current->GetSystemClassLoader())) != nullptr;
+  return found ? JNI_TRUE : JNI_FALSE;
+}
+
 }  // namespace Test912Classes
 }  // namespace art
diff --git a/test/912-classes/src/Main.java b/test/912-classes/src/Main.java
index 6ad23a4..e3aceb9 100644
--- a/test/912-classes/src/Main.java
+++ b/test/912-classes/src/Main.java
@@ -219,6 +219,15 @@
     }
     final ClassLoader boot = cl;
 
+    // The JIT may deeply inline and load some classes. Preload these for test determinism.
+    final String[] PRELOAD_FOR_JIT = {
+        "java.nio.charset.CoderMalfunctionError",
+        "java.util.NoSuchElementException"
+    };
+    for (String s : PRELOAD_FOR_JIT) {
+      Class.forName(s);
+    }
+
     Runnable r = new Runnable() {
       @Override
       public void run() {
@@ -238,7 +247,7 @@
 
     ensureJitCompiled(Main.class, "testClassEvents");
 
-    enableClassLoadEvents(true);
+    enableClassLoadPreparePrintEvents(true);
 
     ClassLoader cl1 = create(boot, DEX1, DEX2);
     System.out.println("B, false");
@@ -270,7 +279,37 @@
     t.start();
     t.join();
 
-    enableClassLoadEvents(false);
+    enableClassLoadPreparePrintEvents(false);
+
+    // Note: the JIT part of this test is about the JIT pulling in a class not yet touched by
+    //       anything else in the system. Other candidates for touching it first are the verifier
+    //       and the interpreter. We block the interpreter by calling ensureJitCompiled. The
+    //       verifier, however, still runs in configurations where dex2oat didn't verify the
+    //       class itself. So explicitly check whether the class has already been loaded, and
+    //       skip the JIT part if it has.
+    // TODO: Add multiple configurations to the run script once that becomes easier to do.
+    if (hasJit() && !isLoadedClass("Main$ClassD")) {
+      testClassEventsJit();
+    }
+  }
+
+  private static void testClassEventsJit() throws Exception {
+    enableClassLoadSeenEvents(true);
+
+    testClassEventsJitImpl();
+
+    enableClassLoadSeenEvents(false);
+
+    if (!hadLoadEvent()) {
+      throw new RuntimeException("Did not get expected load event.");
+    }
+  }
+
+  private static void testClassEventsJitImpl() throws Exception {
+    ensureJitCompiled(Main.class, "testClassEventsJitImpl");
+
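+    // The field access below is the first use of ClassD anywhere; JIT-compiling this
+    // method is expected to pull the class in and so trigger the ClassLoad event
+    // (see the note in testClassEvents).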
+    if (ClassD.x != 1) {
+      throw new RuntimeException("Unexpected value");
+    }
   }
 
   private static void printClassLoaderClasses(ClassLoader cl) {
@@ -335,9 +374,14 @@
 
   private static native int[] getClassVersion(Class<?> c);
 
-  private static native void enableClassLoadEvents(boolean b);
+  private static native void enableClassLoadPreparePrintEvents(boolean b);
 
-  private static native void ensureJitCompiled(Class c, String name);
+  private static native void ensureJitCompiled(Class<?> c, String name);
+
+  private static native boolean hasJit();
+  private static native boolean isLoadedClass(String name);
+  private static native void enableClassLoadSeenEvents(boolean b);
+  private static native boolean hadLoadEvent();
 
   private static class TestForNonInit {
     public static double dummy = Math.random();  // So it can't be compile-time initialized.
@@ -361,6 +405,10 @@
   public abstract static class ClassC implements InfA, InfC {
   }
 
+  public static class ClassD {
+    static int x = 1;
+  }
+
   private static final String DEX1 = System.getenv("DEX_LOCATION") + "/912-classes.jar";
   private static final String DEX2 = System.getenv("DEX_LOCATION") + "/912-classes-ex.jar";
 
diff --git a/test/921-hello-failure/expected.txt b/test/921-hello-failure/expected.txt
index 9615e6b..a5dc10d 100644
--- a/test/921-hello-failure/expected.txt
+++ b/test/921-hello-failure/expected.txt
@@ -1,3 +1,6 @@
+hello - Verification
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_FAILS_VERIFICATION)
+hello - Verification
 hello - NewName
 Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
 hello - NewName
@@ -29,3 +32,21 @@
 Transformation error : java.lang.Exception(Failed to retransform classes <LTransform;, LTransform2;> due to JVMTI_ERROR_NAMES_DONT_MATCH)
 hello - MultiRetrans
 hello2 - MultiRetrans
+hello - NewMethod
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+hello - NewMethod
+hello2 - MissingMethod
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform3;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_DELETED)
+hello2 - MissingMethod
+hello - MethodChange
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED)
+hello - MethodChange
+hello - NewField
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello - NewField
+hello there - MissingField
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello there - MissingField
+hello there again - FieldChange
+Transformation error : java.lang.Exception(Failed to redefine class <LTransform4;> due to JVMTI_ERROR_UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED)
+hello there again - FieldChange
diff --git a/test/921-hello-failure/src/FieldChange.java b/test/921-hello-failure/src/FieldChange.java
new file mode 100644
index 0000000..cc2ea28
--- /dev/null
+++ b/test/921-hello-failure/src/FieldChange.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class FieldChange {
+  // The following is a base64 encoding of the following class.
+  // class Transform4 {
+  //   private Object greeting;
+  //   public Transform4(String hi) { }
+  //   public void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAFwoABgAQBwARCAASCgACABMHABQHABUBAAhncmVldGluZwEAEkxqYXZhL2xhbmcv" +
+    "T2JqZWN0OwEABjxpbml0PgEAFShMamF2YS9sYW5nL1N0cmluZzspVgEABENvZGUBAA9MaW5lTnVt" +
+    "YmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA9UcmFuc2Zvcm00LmphdmEMAAkAFgEAD2ph" +
+    "dmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3QgYmUgY2FsbGVkIQwACQAKAQAKVHJhbnNmb3JtNAEA" +
+    "EGphdmEvbGFuZy9PYmplY3QBAAMoKVYAIAAFAAYAAAABAAIABwAIAAAAAgABAAkACgABAAsAAAAd" +
+    "AAEAAgAAAAUqtwABsQAAAAEADAAAAAYAAQAAAAMAAQANAAoAAQALAAAAIgADAAIAAAAKuwACWRID" +
+    "twAEvwAAAAEADAAAAAYAAQAAAAUAAQAOAAAAAgAP");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQASXs5yszuhud+/w4q07495k9eO7Yb+l8u4AgAAcAAAAHhWNBIAAAAAAAAAABgCAAAM" +
+    "AAAAcAAAAAUAAACgAAAAAgAAALQAAAABAAAAzAAAAAQAAADUAAAAAQAAAPQAAACkAQAAFAEAAFYB" +
+    "AABeAQAAbAEAAH8BAACTAQAApwEAAL4BAADPAQAA0gEAANYBAADqAQAA9AEAAAEAAAACAAAAAwAA" +
+    "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABQAQAAAAACAAoAAAAAAAEAAAAAAAAAAQALAAAA" +
+    "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAAcCAAAAAAAAAgACAAEAAAD7" +
+    "AQAABAAAAHAQAwAAAA4ABAACAAIAAAABAgAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMA" +
+    "Bjxpbml0PgAMTFRyYW5zZm9ybTQ7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmpl" +
+    "Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3Jt" +
+    "NC5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIyAAhncmVldGluZwAFc2F5SGkAAwEABw4A" +
+    "BQEABw4AAAEBAQACAIGABJQCAQGsAgANAAAAAAAAAAEAAAAAAAAAAQAAAAwAAABwAAAAAgAAAAUA" +
+    "AACgAAAAAwAAAAIAAAC0AAAABAAAAAEAAADMAAAABQAAAAQAAADUAAAABgAAAAEAAAD0AAAAASAA" +
+    "AAIAAAAUAQAAARAAAAEAAABQAQAAAiAAAAwAAABWAQAAAyAAAAIAAAD7AQAAACAAAAEAAAAHAgAA" +
+    "ABAAAAEAAAAYAgAA");
+
+  public static void doTest(Transform4 t) {
+    t.sayHi("FieldChange");
+    try {
+      Main.doCommonClassRedefinition(Transform4.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("FieldChange");
+  }
+}
diff --git a/test/921-hello-failure/src/Main.java b/test/921-hello-failure/src/Main.java
index 67ca1e1..5bbe2b5 100644
--- a/test/921-hello-failure/src/Main.java
+++ b/test/921-hello-failure/src/Main.java
@@ -18,6 +18,7 @@
 public class Main {
 
   public static void main(String[] args) {
+    Verification.doTest(new Transform());
     NewName.doTest(new Transform());
     DifferentAccess.doTest(new Transform());
     NewInterface.doTest(new Transform2());
@@ -25,6 +26,12 @@
     ReorderInterface.doTest(new Transform2());
     MultiRedef.doTest(new Transform(), new Transform2());
     MultiRetrans.doTest(new Transform(), new Transform2());
+    NewMethod.doTest(new Transform());
+    MissingMethod.doTest(new Transform3());
+    MethodChange.doTest(new Transform());
+    NewField.doTest(new Transform());
+    MissingField.doTest(new Transform4("there"));
+    FieldChange.doTest(new Transform4("there again"));
   }
 
   // Transforms the class. This throws an exception if something goes wrong.
diff --git a/test/921-hello-failure/src/MethodChange.java b/test/921-hello-failure/src/MethodChange.java
new file mode 100644
index 0000000..16f5778
--- /dev/null
+++ b/test/921-hello-failure/src/MethodChange.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MethodChange {
+  // The following is a base64 encoding of the following class.
+  // class Transform {
+  //   void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+    "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+    "aWxlAQAOVHJhbnNmb3JtLmphdmEMAAcACAEAD2phdmEvbGFuZy9FcnJvcgEAFVNob3VsZCBub3Qg" +
+    "YmUgY2FsbGVkIQwABwAMAQAJVHJhbnNmb3JtAQAQamF2YS9sYW5nL09iamVjdAAgAAUABgAAAAAA" +
+    "AgAAAAcACAABAAkAAAAdAAEAAQAAAAUqtwABsQAAAAEACgAAAAYAAQAAAAIAAAALAAwAAQAJAAAA" +
+    "IgADAAIAAAAKuwACWRIDtwAEvwAAAAEACgAAAAYAAQAAAAQAAQANAAAAAgAO");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQCrV81cy4Q+YKMMMqc0bZEO5Y1X5u7irPeQAgAAcAAAAHhWNBIAAAAAAAAAAPwBAAAL" +
+    "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACIAQAACAEAAEoB" +
+    "AABSAQAAXwEAAHIBAACGAQAAmgEAALEBAADBAQAAxAEAAMgBAADcAQAAAQAAAAIAAAADAAAABAAA" +
+    "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+    "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAO4BAAAAAAAAAQABAAEAAADjAQAABAAAAHAQAwAA" +
+    "AA4ABAACAAIAAADoAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgALTFRy" +
+    "YW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xh" +
+    "bmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0uamF2YQABVgACVkwA" +
+    "EmVtaXR0ZXI6IGphY2stNC4yNAAFc2F5SGkAAgAHDgAEAQAHDgAAAAEBAICABIgCAQCgAgwAAAAA" +
+    "AAAAAQAAAAAAAAABAAAACwAAAHAAAAACAAAABQAAAJwAAAADAAAAAgAAALAAAAAFAAAABAAAAMgA" +
+    "AAAGAAAAAQAAAOgAAAABIAAAAgAAAAgBAAABEAAAAQAAAEQBAAACIAAACwAAAEoBAAADIAAAAgAA" +
+    "AOMBAAAAIAAAAQAAAO4BAAAAEAAAAQAAAPwBAAA=");
+
+  public static void doTest(Transform t) {
+    t.sayHi("MethodChange");
+    try {
+      Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("MethodChange");
+  }
+}
diff --git a/test/921-hello-failure/src/MissingField.java b/test/921-hello-failure/src/MissingField.java
new file mode 100644
index 0000000..2f643cc
--- /dev/null
+++ b/test/921-hello-failure/src/MissingField.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MissingField {
+  // The following is a base64 encoding of the following class.
+  // class Transform4 {
+  //   public Transform4(String s) { }
+  //   public void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAFQoABgAOBwAPCAAQCgACABEHABIHABMBAAY8aW5pdD4BABUoTGphdmEvbGFuZy9T" +
+    "dHJpbmc7KVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAFc2F5SGkBAApTb3VyY2VGaWxlAQAP" +
+    "VHJhbnNmb3JtNC5qYXZhDAAHABQBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQgbm90IGJlIGNh" +
+    "bGxlZCEMAAcACAEAClRyYW5zZm9ybTQBABBqYXZhL2xhbmcvT2JqZWN0AQADKClWACAABQAGAAAA" +
+    "AAACAAEABwAIAAEACQAAAB0AAQACAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAgABAAsACAABAAkA" +
+    "AAAiAAMAAgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAAwAAAACAA0=");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQDBVUVrMUEFx3lYkgJF54evq9vHvOUDZveUAgAAcAAAAHhWNBIAAAAAAAAAAAACAAAL" +
+    "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACMAQAACAEAAEoB" +
+    "AABSAQAAYAEAAHMBAACHAQAAmwEAALIBAADDAQAAxgEAAMoBAADeAQAAAQAAAAIAAAADAAAABAAA" +
+    "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAEAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+    "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAPEBAAAAAAAAAgACAAEAAADlAQAABAAAAHAQAwAA" +
+    "AA4ABAACAAIAAADrAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAMTFRy" +
+    "YW5zZm9ybTQ7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+    "YW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtNC5qYXZhAAFWAAJW" +
+    "TAASZW1pdHRlcjogamFjay00LjIyAAVzYXlIaQACAQAHDgAEAQAHDgAAAAEBAIGABIgCAQGgAgAM" +
+    "AAAAAAAAAAEAAAAAAAAAAQAAAAsAAABwAAAAAgAAAAUAAACcAAAAAwAAAAIAAACwAAAABQAAAAQA" +
+    "AADIAAAABgAAAAEAAADoAAAAASAAAAIAAAAIAQAAARAAAAEAAABEAQAAAiAAAAsAAABKAQAAAyAA" +
+    "AAIAAADlAQAAACAAAAEAAADxAQAAABAAAAEAAAAAAgAA");
+
+  public static void doTest(Transform4 t) {
+    t.sayHi("MissingField");
+    try {
+      Main.doCommonClassRedefinition(Transform4.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("MissingField");
+  }
+}
diff --git a/test/921-hello-failure/src/MissingMethod.java b/test/921-hello-failure/src/MissingMethod.java
new file mode 100644
index 0000000..3f1925c
--- /dev/null
+++ b/test/921-hello-failure/src/MissingMethod.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class MissingMethod {
+  // The following is a base64 encoding of the following class.
+  // class Transform3 {
+  //   public void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAFQoABgAPBwAQCAARCgACABIHABMHABQBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+    "TGluZU51bWJlclRhYmxlAQAFc2F5SGkBABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAApTb3VyY2VG" +
+    "aWxlAQAPVHJhbnNmb3JtMy5qYXZhDAAHAAgBAA9qYXZhL2xhbmcvRXJyb3IBABVTaG91bGQgbm90" +
+    "IGJlIGNhbGxlZCEMAAcADAEAClRyYW5zZm9ybTMBABBqYXZhL2xhbmcvT2JqZWN0ACAABQAGAAAA" +
+    "AAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAgABAAsADAABAAkA" +
+    "AAAiAAMAAgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAA0AAAACAA4=");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQDnVQvyn7XrwDiCC/SE55zBCtEqk4pzA2mUAgAAcAAAAHhWNBIAAAAAAAAAAAACAAAL" +
+    "AAAAcAAAAAUAAACcAAAAAgAAALAAAAAAAAAAAAAAAAQAAADIAAAAAQAAAOgAAACMAQAACAEAAEoB" +
+    "AABSAQAAYAEAAHMBAACHAQAAmwEAALIBAADDAQAAxgEAAMoBAADeAQAAAQAAAAIAAAADAAAABAAA" +
+    "AAcAAAAHAAAABAAAAAAAAAAIAAAABAAAAEQBAAAAAAAAAAAAAAAAAQAKAAAAAQABAAAAAAACAAAA" +
+    "AAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAPABAAAAAAAAAQABAAEAAADlAQAABAAAAHAQAwAA" +
+    "AA4ABAACAAIAAADqAQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMABjxpbml0PgAMTFRy" +
+    "YW5zZm9ybTM7ABFMamF2YS9sYW5nL0Vycm9yOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+    "YW5nL1N0cmluZzsAFVNob3VsZCBub3QgYmUgY2FsbGVkIQAPVHJhbnNmb3JtMy5qYXZhAAFWAAJW" +
+    "TAASZW1pdHRlcjogamFjay00LjI0AAVzYXlIaQACAAcOAAQBAAcOAAAAAQEAgIAEiAIBAaACAAAM" +
+    "AAAAAAAAAAEAAAAAAAAAAQAAAAsAAABwAAAAAgAAAAUAAACcAAAAAwAAAAIAAACwAAAABQAAAAQA" +
+    "AADIAAAABgAAAAEAAADoAAAAASAAAAIAAAAIAQAAARAAAAEAAABEAQAAAiAAAAsAAABKAQAAAyAA" +
+    "AAIAAADlAQAAACAAAAEAAADwAQAAABAAAAEAAAAAAgAA");
+
+  public static void doTest(Transform3 t) {
+    t.sayHi("MissingMethod");
+    try {
+      Main.doCommonClassRedefinition(Transform3.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("MissingMethod");
+  }
+}
diff --git a/test/921-hello-failure/src/NewField.java b/test/921-hello-failure/src/NewField.java
new file mode 100644
index 0000000..c85b79e
--- /dev/null
+++ b/test/921-hello-failure/src/NewField.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class NewField {
+  // The following is a base64 encoding of the following class.
+  // class Transform {
+  //   private Object field;
+  //   public void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAFwoABgARBwASCAATCgACABQHABUHABYBAAVmaWVsZAEAEkxqYXZhL2xhbmcvT2Jq" +
+    "ZWN0OwEABjxpbml0PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAFShM" +
+    "amF2YS9sYW5nL1N0cmluZzspVgEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwACQAKAQAP" +
+    "amF2YS9sYW5nL0Vycm9yAQAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhDAAJAA4BAAlUcmFuc2Zvcm0B" +
+    "ABBqYXZhL2xhbmcvT2JqZWN0ACAABQAGAAAAAQACAAcACAAAAAIAAAAJAAoAAQALAAAAHQABAAEA" +
+    "AAAFKrcAAbEAAAABAAwAAAAGAAEAAAABAAEADQAOAAEACwAAACIAAwACAAAACrsAAlkSA7cABL8A" +
+    "AAABAAwAAAAGAAEAAAAEAAEADwAAAAIAEA==");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQBNWknL2iyjim487p0EIH/8V5OjOeLgw5e0AgAAcAAAAHhWNBIAAAAAAAAAABQCAAAM" +
+    "AAAAcAAAAAUAAACgAAAAAgAAALQAAAABAAAAzAAAAAQAAADUAAAAAQAAAPQAAACgAQAAFAEAAFYB" +
+    "AABeAQAAawEAAH4BAACSAQAApgEAAL0BAADNAQAA0AEAANQBAADoAQAA7wEAAAEAAAACAAAAAwAA" +
+    "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABQAQAAAAACAAoAAAAAAAAAAAAAAAAAAQALAAAA" +
+    "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAAAECAAAAAAAAAQABAAEAAAD2" +
+    "AQAABAAAAHAQAwAAAA4ABAACAAIAAAD7AQAACQAAACIAAQAbAQUAAABwIAIAEAAnAAAAAQAAAAMA" +
+    "Bjxpbml0PgALTFRyYW5zZm9ybTsAEUxqYXZhL2xhbmcvRXJyb3I7ABJMamF2YS9sYW5nL09iamVj" +
+    "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhAA5UcmFuc2Zvcm0u" +
+    "amF2YQABVgACVkwAEmVtaXR0ZXI6IGphY2stNC4yMgAFZmllbGQABXNheUhpAAEABw4ABAEABw4A" +
+    "AAEBAQACAICABJQCAQGsAgAAAA0AAAAAAAAAAQAAAAAAAAABAAAADAAAAHAAAAACAAAABQAAAKAA" +
+    "AAADAAAAAgAAALQAAAAEAAAAAQAAAMwAAAAFAAAABAAAANQAAAAGAAAAAQAAAPQAAAABIAAAAgAA" +
+    "ABQBAAABEAAAAQAAAFABAAACIAAADAAAAFYBAAADIAAAAgAAAPYBAAAAIAAAAQAAAAECAAAAEAAA" +
+    "AQAAABQCAAA=");
+
+  public static void doTest(Transform t) {
+    t.sayHi("NewField");
+    try {
+      Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("NewField");
+  }
+}
diff --git a/test/921-hello-failure/src/NewMethod.java b/test/921-hello-failure/src/NewMethod.java
new file mode 100644
index 0000000..5eac670
--- /dev/null
+++ b/test/921-hello-failure/src/NewMethod.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class NewMethod {
+  // The following is a base64 encoding of the following class.
+  // class Transform {
+  //   public void extraMethod() {}
+  //   public void sayHi(String name) {
+  //     throw new Error("Should not be called!");
+  //   }
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAFgoABgAQBwARCAASCgACABMHABQHABUBAAY8aW5pdD4BAAMoKVYBAARDb2RlAQAP" +
+    "TGluZU51bWJlclRhYmxlAQALZXh0cmFNZXRob2QBAAVzYXlIaQEAFShMamF2YS9sYW5nL1N0cmlu" +
+    "ZzspVgEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwABwAIAQAPamF2YS9sYW5nL0Vycm9y" +
+    "AQAVU2hvdWxkIG5vdCBiZSBjYWxsZWQhDAAHAA0BAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+    "ZWN0ACAABQAGAAAAAAADAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAA" +
+    "AQABAAsACAABAAkAAAAZAAAAAQAAAAGxAAAAAQAKAAAABgABAAAAAgABAAwADQABAAkAAAAiAAMA" +
+    "AgAAAAq7AAJZEgO3AAS/AAAAAQAKAAAABgABAAAABAABAA4AAAACAA8=");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQBeV7dLAwN1GBTa/yRlkuiIQatNHghVdrnIAgAAcAAAAHhWNBIAAAAAAAAAADQCAAAM" +
+    "AAAAcAAAAAUAAACgAAAAAgAAALQAAAAAAAAAAAAAAAUAAADMAAAAAQAAAPQAAAC0AQAAFAEAAGoB" +
+    "AAByAQAAfwEAAJIBAACmAQAAugEAANEBAADhAQAA5AEAAOgBAAD8AQAACQIAAAEAAAACAAAAAwAA" +
+    "AAQAAAAHAAAABwAAAAQAAAAAAAAACAAAAAQAAABkAQAAAAAAAAAAAAAAAAAACgAAAAAAAQALAAAA" +
+    "AQABAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAGAAAAAAAAACACAAAAAAAAAQABAAEAAAAQ" +
+    "AgAABAAAAHAQBAAAAA4AAQABAAAAAAAVAgAAAQAAAA4AAAAEAAIAAgAAABoCAAAJAAAAIgABABsB" +
+    "BQAAAHAgAwAQACcAAAABAAAAAwAGPGluaXQ+AAtMVHJhbnNmb3JtOwARTGphdmEvbGFuZy9FcnJv" +
+    "cjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABVTaG91bGQgbm90IGJl" +
+    "IGNhbGxlZCEADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAASZW1pdHRlcjogamFjay00LjIyAAtleHRy" +
+    "YU1ldGhvZAAFc2F5SGkAAQAHDgACAAcOAAQBAAcOAAAAAQIAgIAElAIBAawCAQHAAgAADAAAAAAA" +
+    "AAABAAAAAAAAAAEAAAAMAAAAcAAAAAIAAAAFAAAAoAAAAAMAAAACAAAAtAAAAAUAAAAFAAAAzAAA" +
+    "AAYAAAABAAAA9AAAAAEgAAADAAAAFAEAAAEQAAABAAAAZAEAAAIgAAAMAAAAagEAAAMgAAADAAAA" +
+    "EAIAAAAgAAABAAAAIAIAAAAQAAABAAAANAIAAA==");
+
+  public static void doTest(Transform t) {
+    t.sayHi("NewMethod");
+    try {
+      Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("NewMethod");
+  }
+}
diff --git a/test/921-hello-failure/src/Transform3.java b/test/921-hello-failure/src/Transform3.java
new file mode 100644
index 0000000..d2cb064
--- /dev/null
+++ b/test/921-hello-failure/src/Transform3.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform3 {
+  public void extraMethod(String name) {
+    System.out.println("extraMethod - " + name);
+  }
+  public void sayHi(String name) {
+    System.out.println("hello2 - " + name);
+  }
+}
diff --git a/test/921-hello-failure/src/Transform4.java b/test/921-hello-failure/src/Transform4.java
new file mode 100644
index 0000000..fd76338
--- /dev/null
+++ b/test/921-hello-failure/src/Transform4.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform4 {
+  private String greeting;
+  public Transform4(String hi) {
+    greeting = hi;
+  }
+  public void sayHi(String name) {
+    System.out.println("hello " + greeting + " - " + name);
+  }
+}
diff --git a/test/921-hello-failure/src/Verification.java b/test/921-hello-failure/src/Verification.java
new file mode 100644
index 0000000..242b5d2
--- /dev/null
+++ b/test/921-hello-failure/src/Verification.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+class Verification {
+  // Jasmin program:
+  //
+  // .source                  Transform.java
+  // .class                   Transform
+  // .super                   java/lang/Object
+  // .method                  <init>()V
+  //    .limit stack          1
+  //    .limit locals         1
+  //    aload_0
+  //    invokespecial         java/lang/Object/<init>()V
+  //    return
+  // .end method
+  // .method                  sayHi(Ljava/lang/String;)V
+  //    .limit stack          1
+  //    .limit locals         2
+  //    aload_1
+  //    areturn
+  // .end method
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgADAC0ADgoADQAHBwAIAQAQamF2YS9sYW5nL09iamVjdAEAClNvdXJjZUZpbGUBAAY8aW5p" +
+    "dD4BAAVzYXlIaQwABQAKAQAJVHJhbnNmb3JtAQAEQ29kZQEAAygpVgEADlRyYW5zZm9ybS5qYXZh" +
+    "AQAVKExqYXZhL2xhbmcvU3RyaW5nOylWBwADACAAAgANAAAAAAACAAAABQAKAAEACQAAABEAAQAB" +
+    "AAAABSq3AAGxAAAAAAABAAYADAABAAkAAAAOAAEAAgAAAAIrsAAAAAAAAQAEAAAAAgAL");
+
+  // Smali program:
+  //
+  // .class LTransform;
+  // .super Ljava/lang/Object;
+  // .source "Transform.java"
+  // # direct methods
+  // .method constructor <init>()V
+  //     .registers 1
+  //     invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+  //     return-void
+  // .end method
+  // # virtual methods
+  // .method public sayHi(Ljava/lang/String;)V
+  //     .registers 2
+  //     return-object p1
+  // .end method
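+  //
+  // Both versions declare sayHi(Ljava/lang/String;)V but return the String argument
+  // (areturn / return-object) from a method with a void return type, so the
+  // redefinition fails verification with JVMTI_ERROR_FAILS_VERIFICATION.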
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQClOAc4ZDMXaHMezhYcqZxcjUeVCWRYUkooAgAAcAAAAHhWNBIAAAAAAAAAAJQBAAAI" +
+    "AAAAcAAAAAQAAACQAAAAAgAAAKAAAAAAAAAAAAAAAAMAAAC4AAAAAQAAANAAAAA4AQAA8AAAAPAA" +
+    "AAD4AAAABQEAABkBAAAtAQAAPQEAAEABAABEAQAAAQAAAAIAAAADAAAABQAAAAUAAAADAAAAAAAA" +
+    "AAYAAAADAAAATAEAAAAAAAAAAAAAAAABAAcAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAEAAAA" +
+    "AAAAAIYBAAAAAAAABjxpbml0PgALTFRyYW5zZm9ybTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGph" +
+    "dmEvbGFuZy9TdHJpbmc7AA5UcmFuc2Zvcm0uamF2YQABVgACVkwABXNheUhpAAABAAAAAgAAAAAA" +
+    "AAAAAAAAAQABAAEAAAAAAAAABAAAAHAQAgAAAA4AAgACAAAAAAAAAAAAAQAAABEBAAABAQCAgATc" +
+    "AgEB9AIMAAAAAAAAAAEAAAAAAAAAAQAAAAgAAABwAAAAAgAAAAQAAACQAAAAAwAAAAIAAACgAAAA" +
+    "BQAAAAMAAAC4AAAABgAAAAEAAADQAAAAAiAAAAgAAADwAAAAARAAAAEAAABMAQAAAxAAAAIAAABU" +
+    "AQAAASAAAAIAAABcAQAAACAAAAEAAACGAQAAABAAAAEAAACUAQAA");
+
+  public static void doTest(Transform t) {
+    t.sayHi("Verification");
+    try {
+      Main.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    } catch (Exception e) {
+      System.out.println(
+          "Transformation error : " + e.getClass().getName() + "(" + e.getMessage() + ")");
+    }
+    t.sayHi("Verification");
+  }
+}
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
index 29c4aa3..f18d70e 100644
--- a/test/924-threads/src/Main.java
+++ b/test/924-threads/src/Main.java
@@ -20,6 +20,7 @@
 import java.util.Comparator;
 import java.util.concurrent.CountDownLatch;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -162,8 +163,20 @@
 
   private static void doAllThreadsTests() {
     Thread[] threads = getAllThreads();
-    Arrays.sort(threads, THREAD_COMP);
-    System.out.println(Arrays.toString(threads));
+    List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+    // Filter out the JIT thread. It may or may not be there depending on the configuration.
+    Iterator<Thread> it = threadList.iterator();
+    while (it.hasNext()) {
+      Thread t = it.next();
+      if (t.getName().startsWith("Jit thread pool worker")) {
+        it.remove();
+        break;
+      }
+    }
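+    // (The break assumes there is at most one JIT thread pool worker thread.)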
+
+    Collections.sort(threadList, THREAD_COMP);
+    System.out.println(threadList);
   }
 
   private static void doTLSTests() throws Exception {
diff --git a/test/925-threadgroups/src/Main.java b/test/925-threadgroups/src/Main.java
index 3d7a4ca..bf7441f 100644
--- a/test/925-threadgroups/src/Main.java
+++ b/test/925-threadgroups/src/Main.java
@@ -14,8 +14,12 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
 
 public class Main {
   public static void main(String[] args) throws Exception {
@@ -64,10 +68,23 @@
     Thread[] threads = (Thread[])data[0];
     ThreadGroup[] groups = (ThreadGroup[])data[1];
 
-    Arrays.sort(threads, THREAD_COMP);
+    List<Thread> threadList = new ArrayList<>(Arrays.asList(threads));
+
+    // Filter out the JIT thread. It may or may not be there depending on the configuration.
+    Iterator<Thread> it = threadList.iterator();
+    while (it.hasNext()) {
+      Thread t = it.next();
+      if (t.getName().startsWith("Jit thread pool worker")) {
+        it.remove();
+        break;
+      }
+    }
+
+    Collections.sort(threadList, THREAD_COMP);
+
     Arrays.sort(groups, THREADGROUP_COMP);
     System.out.println(tg.getName() + ":");
-    System.out.println("  " + Arrays.toString(threads));
+    System.out.println("  " + threadList);
     System.out.println("  " + Arrays.toString(groups));
 
     if (tg.getParent() != null) {
diff --git a/test/931-agent-thread/agent_thread.cc b/test/931-agent-thread/agent_thread.cc
index 6ace4ce..a488d9a 100644
--- a/test/931-agent-thread/agent_thread.cc
+++ b/test/931-agent-thread/agent_thread.cc
@@ -15,6 +15,7 @@
  */
 
 #include <inttypes.h>
+#include <sched.h>
 
 #include "barrier.h"
 #include "base/logging.h"
@@ -125,6 +126,24 @@
 
   data.b.Wait(Thread::Current());
 
+  // Scheduling may mean that the agent thread is put to sleep and is still running when this
+  // function returns. Wait until it is dead so that the plugin is not unloaded while the
+  // thread is still alive, which would crash.
+  for (;;) {
+    NanoSleep(1000 * 1000);
+    jint thread_state;
+    jvmtiError state_result = jvmti_env->GetThreadState(thread.get(), &thread_state);
+    if (JvmtiErrorToException(env, state_result)) {
+      return;
+    }
+    if (thread_state == 0 ||                                    // Was never alive.
+        (thread_state & JVMTI_THREAD_STATE_TERMINATED) != 0) {  // Was alive and died.
+      break;
+    }
+  }
+  // Yield and sleep a bit more, to give the plugin time to tear down the native thread structure.
+  sched_yield();
+  NanoSleep(100 * 1000 * 1000);
+
   env->DeleteGlobalRef(data.main_thread);
 }
 
diff --git a/test/942-private-recursive/src/Transform.java b/test/942-private-recursive/src/Transform.java
index dd5452c..7714326 100644
--- a/test/942-private-recursive/src/Transform.java
+++ b/test/942-private-recursive/src/Transform.java
@@ -15,10 +15,6 @@
  */
 
 class Transform {
-  public void sayHi(int recur, Runnable r) {
-    privateSayHi(recur, r);
-  }
-
   private void privateSayHi(int recur, Runnable r) {
     System.out.println("hello" + recur);
     if (recur == 1) {
@@ -29,4 +25,8 @@
     }
     System.out.println("goodbye" + recur);
   }
+
+  public void sayHi(int recur, Runnable r) {
+    privateSayHi(recur, r);
+  }
 }
diff --git a/test/944-transform-classloaders/build b/test/944-transform-classloaders/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/944-transform-classloaders/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/944-transform-classloaders/classloader.cc b/test/944-transform-classloaders/classloader.cc
new file mode 100644
index 0000000..5fbd8e1
--- /dev/null
+++ b/test/944-transform-classloaders/classloader.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "jni.h"
+#include "mirror/class-inl.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test944TransformClassloaders {
+
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_getDexFilePointer(JNIEnv* env, jclass, jclass klass) {
+  if (Runtime::Current() == nullptr) {
+    env->ThrowNew(env->FindClass("java/lang/Exception"),
+                  "We do not seem to be running in ART! Unable to get dex file.");
+    return 0;
+  }
+  ScopedObjectAccess soa(env);
+  // This sequence of casts must be the same as those done in
+  // runtime/native/dalvik_system_DexFile.cc in order to ensure that we get the same results.
+  return static_cast<jlong>(reinterpret_cast<uintptr_t>(
+      &soa.Decode<mirror::Class>(klass)->GetDexFile()));
+}
+
+}  // namespace Test944TransformClassloaders
+}  // namespace art
diff --git a/test/944-transform-classloaders/expected.txt b/test/944-transform-classloaders/expected.txt
new file mode 100644
index 0000000..7952247
--- /dev/null
+++ b/test/944-transform-classloaders/expected.txt
@@ -0,0 +1,5 @@
+hello
+hello2
+Goodbye
+Goodbye2
+Passed
diff --git a/test/944-transform-classloaders/info.txt b/test/944-transform-classloaders/info.txt
new file mode 100644
index 0000000..9155564
--- /dev/null
+++ b/test/944-transform-classloaders/info.txt
@@ -0,0 +1,7 @@
+Tests that redefined dex files are stored in the appropriate classloader.
+
+This test cannot run on the RI.
+
+We use reflection with setAccessible(true) to examine the private internals of
+classloaders. Changes to the internal operation or definition of
+dalvik.system.BaseDexClassLoader might cause this test to fail.
diff --git a/test/944-transform-classloaders/run b/test/944-transform-classloaders/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/944-transform-classloaders/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/944-transform-classloaders/src/CommonClassDefinition.java b/test/944-transform-classloaders/src/CommonClassDefinition.java
new file mode 100644
index 0000000..62602a0
--- /dev/null
+++ b/test/944-transform-classloaders/src/CommonClassDefinition.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
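+// Bundles a class with the class-file and dex-file bytes used to redefine it.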
+class CommonClassDefinition {
+  public final Class<?> target;
+  public final byte[] class_file_bytes;
+  public final byte[] dex_file_bytes;
+
+  CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+    this.target = target;
+    this.class_file_bytes = class_file_bytes;
+    this.dex_file_bytes = dex_file_bytes;
+  }
+}
diff --git a/test/944-transform-classloaders/src/Main.java b/test/944-transform-classloaders/src/Main.java
new file mode 100644
index 0000000..4911e00
--- /dev/null
+++ b/test/944-transform-classloaders/src/Main.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+
+public class Main {
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform {
+   *   public void sayHi() {
+   *    System.out.println("Goodbye");
+   *   }
+   * }
+   */
+  private static CommonClassDefinition TRANSFORM_DEFINITION = new CommonClassDefinition(
+      Transform.class,
+      Base64.getDecoder().decode(
+        "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+        "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA" +
+        "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq" +
+        "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph" +
+        "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG" +
+        "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB" +
+        "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0="),
+      Base64.getDecoder().decode(
+        "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO" +
+        "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB" +
+        "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA" +
+        "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+        "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA" +
+        "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+        "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50" +
+        "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh" +
+        "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291" +
+        "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA" +
+        "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA" +
+        "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA" +
+        "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA="));
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform2 {
+   *   public void sayHi() {
+   *    System.out.println("Goodbye2");
+   *   }
+   * }
+   */
+  private static CommonClassDefinition TRANSFORM2_DEFINITION = new CommonClassDefinition(
+      Transform2.class,
+      Base64.getDecoder().decode(
+        "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB" +
+        "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA9UcmFuc2Zvcm0yLmphdmEM" +
+        "AAcACAcAFgwAFwAYAQAIR29vZGJ5ZTIHABkMABoAGwEAClRyYW5zZm9ybTIBABBqYXZhL2xhbmcv" +
+        "T2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEA" +
+        "E2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAA" +
+        "BQAGAAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAAQABAAsA" +
+        "CAABAAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAADAAgABAABAAwAAAACAA0="),
+      Base64.getDecoder().decode(
+        "ZGV4CjAzNQABX6vL8OT7aGLjbzFBEfCM9Aaz+zzGzVnQAgAAcAAAAHhWNBIAAAAAAAAAADACAAAO" +
+        "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACwAQAAIAEAAGIB" +
+        "AABqAQAAdAEAAIIBAACZAQAArQEAAMEBAADVAQAA5gEAAOkBAADtAQAAAQIAAAYCAAAPAgAAAgAA" +
+        "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA" +
+        "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAACECAAAA" +
+        "AAAAAQABAAEAAAAWAgAABAAAAHAQAwAAAA4AAwABAAIAAAAbAgAACQAAAGIAAAAbAQEAAABuIAIA" +
+        "EAAOAAAAAQAAAAMABjxpbml0PgAIR29vZGJ5ZTIADExUcmFuc2Zvcm0yOwAVTGphdmEvaW8vUHJp" +
+        "bnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEv" +
+        "bGFuZy9TeXN0ZW07AA9UcmFuc2Zvcm0yLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTQuMjQA" +
+        "A291dAAHcHJpbnRsbgAFc2F5SGkAAQAHDgADAAcOhwAAAAEBAICABKACAQG4AgANAAAAAAAAAAEA" +
+        "AAAAAAAAAQAAAA4AAABwAAAAAgAAAAYAAACoAAAAAwAAAAIAAADAAAAABAAAAAEAAADYAAAABQAA" +
+        "AAQAAADgAAAABgAAAAEAAAAAAQAAASAAAAIAAAAgAQAAARAAAAEAAABcAQAAAiAAAA4AAABiAQAA" +
+        "AyAAAAIAAAAWAgAAACAAAAEAAAAhAgAAABAAAAEAAAAwAgAA"));
+
+  public static void main(String[] args) throws Exception {
+    doTest();
+    System.out.println("Passed");
+  }
+
+  private static void checkIsInstance(Class<?> klass, Object o) throws Exception {
+    if (!klass.isInstance(o)) {
+      throw new Exception(klass + " is not the class of " + o);
+    }
+  }
+
+  private static boolean arrayContains(long[] arr, long value) {
+    if (arr == null) {
+      return false;
+    }
+    for (int i = 0; i < arr.length; i++) {
+      if (arr[i] == value) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Checks that we can find the dex-file for the given class in its classloader.
+   *
+   * Throws if it fails.
+   */
+  private static void checkDexFileInClassLoader(Class<?> klass) throws Exception {
+    // If all the Android BCP classes were available when compiling this test, and there were
+    // no access checks, this function would be written as follows:
+    //
+    // long dexFilePtr = getDexFilePointer(klass);
+    // dalvik.system.BaseDexClassLoader loader =
+    //     (dalvik.system.BaseDexClassLoader)klass.getClassLoader();
+    // dalvik.system.DexPathList pathListValue = loader.pathList;
+    // dalvik.system.DexPathList.Element[] elementArrayValue = pathListValue.dexElements;
+    // int array_length = elementArrayValue.length;
+    // for (int i = 0; i < array_length; i++) {
+    //   dalvik.system.DexPathList.Element curElement = elementArrayValue[i];
+    //   dalvik.system.DexFile curDexFile = curElement.dexFile;
+    //   if (curDexFile == null) {
+    //     continue;
+    //   }
+    //   long[] curCookie = (long[])curDexFile.mCookie;
+    //   long[] curInternalCookie = (long[])curDexFile.mInternalCookie;
+    //   if (arrayContains(curCookie, dexFilePtr) || arrayContains(curInternalCookie, dexFilePtr)) {
+    //     return;
+    //   }
+    // }
+    // throw new Exception(
+    //     "Unable to find dex file pointer " + dexFilePtr + " in class loader for " + klass);
+
+    // Get all the fields and classes we need by reflection.
+    Class<?> baseDexClassLoaderClass = Class.forName("dalvik.system.BaseDexClassLoader");
+    Field pathListField = baseDexClassLoaderClass.getDeclaredField("pathList");
+
+    Class<?> dexPathListClass = Class.forName("dalvik.system.DexPathList");
+    Field elementArrayField = dexPathListClass.getDeclaredField("dexElements");
+
+    Class<?> dexPathListElementClass = Class.forName("dalvik.system.DexPathList$Element");
+    Field dexFileField = dexPathListElementClass.getDeclaredField("dexFile");
+
+    Class<?> dexFileClass = Class.forName("dalvik.system.DexFile");
+    Field dexFileCookieField = dexFileClass.getDeclaredField("mCookie");
+    Field dexFileInternalCookieField = dexFileClass.getDeclaredField("mInternalCookie");
+
+    // Make all the fields accessible
+    AccessibleObject.setAccessible(new AccessibleObject[] { pathListField,
+                                                            elementArrayField,
+                                                            dexFileField,
+                                                            dexFileCookieField,
+                                                            dexFileInternalCookieField }, true);
+
+    long dexFilePtr = getDexFilePointer(klass);
+
+    ClassLoader loader = klass.getClassLoader();
+    checkIsInstance(baseDexClassLoaderClass, loader);
+    // DexPathList pathListValue = ((BaseDexClassLoader) loader).pathList;
+    Object pathListValue = pathListField.get(loader);
+
+    checkIsInstance(dexPathListClass, pathListValue);
+
+    // DexPathList.Element[] elementArrayValue = pathListValue.dexElements;
+    Object elementArrayValue = elementArrayField.get(pathListValue);
+    if (!elementArrayValue.getClass().isArray() ||
+        elementArrayValue.getClass().getComponentType() != dexPathListElementClass) {
+      throw new Exception("elementArrayValue is not an " + dexPathListElementClass + " array!");
+    }
+    // int array_length = elementArrayValue.length;
+    int array_length = Array.getLength(elementArrayValue);
+    for (int i = 0; i < array_length; i++) {
+      // DexPathList.Element curElement = elementArrayValue[i];
+      Object curElement = Array.get(elementArrayValue, i);
+      checkIsInstance(dexPathListElementClass, curElement);
+
+      // DexFile curDexFile = curElement.dexFile;
+      Object curDexFile = dexFileField.get(curElement);
+      if (curDexFile == null) {
+        continue;
+      }
+      checkIsInstance(dexFileClass, curDexFile);
+
+      // long[] curCookie = (long[])curDexFile.mCookie;
+      long[] curCookie = (long[])dexFileCookieField.get(curDexFile);
+      // long[] curInternalCookie = (long[])curDexFile.mInternalCookie;
+      long[] curInternalCookie = (long[])dexFileInternalCookieField.get(curDexFile);
+
+      if (arrayContains(curCookie, dexFilePtr) || arrayContains(curInternalCookie, dexFilePtr)) {
+        return;
+      }
+    }
+    throw new Exception(
+        "Unable to find dex file pointer " + dexFilePtr + " in class loader for " + klass);
+  }
+
+  private static void doTest() throws Exception {
+    Transform t = new Transform();
+    Transform2 t2 = new Transform2();
+
+    long initial_t1_dex = getDexFilePointer(Transform.class);
+    long initial_t2_dex = getDexFilePointer(Transform2.class);
+    if (initial_t2_dex != initial_t1_dex) {
+      throw new Exception("The classes " + Transform.class + " and " + Transform2.class + " " +
+                          "have different initial dex files!");
+    }
+    checkDexFileInClassLoader(Transform.class);
+    checkDexFileInClassLoader(Transform2.class);
+
+    // Make sure they are loaded
+    t.sayHi();
+    t2.sayHi();
+    // Redefine both of the classes.
+    doMultiClassRedefinition(TRANSFORM_DEFINITION, TRANSFORM2_DEFINITION);
+    // Make sure we actually transformed them!
+    t.sayHi();
+    t2.sayHi();
+
+    long final_t1_dex = getDexFilePointer(Transform.class);
+    long final_t2_dex = getDexFilePointer(Transform2.class);
+    if (final_t2_dex == final_t1_dex) {
+      throw new Exception("The classes " + Transform.class + " and " + Transform2.class + " " +
+                          "have the same initial dex files!");
+    } else if (final_t1_dex == initial_t1_dex) {
+      throw new Exception("The class " + Transform.class + " did not get a new dex file!");
+    } else if (final_t2_dex == initial_t2_dex) {
+      throw new Exception("The class " + Transform2.class + " did not get a new dex file!");
+    }
+    // Check to make sure the new dex files are in the class loader.
+    checkDexFileInClassLoader(Transform.class);
+    checkDexFileInClassLoader(Transform2.class);
+  }
+
+  private static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  // Gets the 'long' (really a native pointer) that is stored in the ClassLoader representing the
+  // DexFile a class is loaded from. This is converted from the DexFile* in the same way it is done
+  // in runtime/native/dalvik_system_DexFile.cc
+  private static native long getDexFilePointer(Class<?> target);
+  // Redefines the given classes using the provided class-file and dex-file bytes.
+  private static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                            byte[][] classfiles,
+                                                            byte[][] dexfiles);
+}
diff --git a/test/944-transform-classloaders/src/Transform.java b/test/944-transform-classloaders/src/Transform.java
new file mode 100644
index 0000000..8e8af35
--- /dev/null
+++ b/test/944-transform-classloaders/src/Transform.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+  public void sayHi() {
+    // Use lower 'h' to make sure the string will have a different string id
+    // than the transformation (the transformation code is the same except
+    // the actual printed String, which was making the test inaccurately pass
+    // in JIT mode when loading the string from the dex cache, as the string ids
+    // of the two different strings were the same).
+    // We know the string ids will be different because lexicographically:
+    // "Goodbye" < "LTransform;" < "hello".
+    System.out.println("hello");
+  }
+}
diff --git a/test/944-transform-classloaders/src/Transform2.java b/test/944-transform-classloaders/src/Transform2.java
new file mode 100644
index 0000000..eb22842
--- /dev/null
+++ b/test/944-transform-classloaders/src/Transform2.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform2 {
+  public void sayHi() {
+    System.out.println("hello2");
+  }
+}
diff --git a/test/945-obsolete-native/build b/test/945-obsolete-native/build
new file mode 100755
index 0000000..898e2e5
--- /dev/null
+++ b/test/945-obsolete-native/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/945-obsolete-native/expected.txt b/test/945-obsolete-native/expected.txt
new file mode 100644
index 0000000..83efda1
--- /dev/null
+++ b/test/945-obsolete-native/expected.txt
@@ -0,0 +1,9 @@
+hello
+Not doing anything here
+goodbye
+hello
+transforming calling function
+goodbye
+Hello - Transformed
+Not doing anything here
+Goodbye - Transformed
diff --git a/test/945-obsolete-native/info.txt b/test/945-obsolete-native/info.txt
new file mode 100644
index 0000000..c8b892c
--- /dev/null
+++ b/test/945-obsolete-native/info.txt
@@ -0,0 +1 @@
+Tests basic obsolete method support
diff --git a/test/945-obsolete-native/obsolete_native.cc b/test/945-obsolete-native/obsolete_native.cc
new file mode 100644
index 0000000..061e7af
--- /dev/null
+++ b/test/945-obsolete-native/obsolete_native.cc
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <memory>
+#include <stdio.h>
+
+#include "android-base/stringprintf.h"
+
+#include "android-base/stringprintf.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test945ObsoleteNative {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_bindTest945ObsoleteNative(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  BindFunctions(jvmti_env, env, "Transform");
+}
+
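+// doExecute invokes runnable.run() via JNI while Transform.sayHi() is still
+// on the stack. If run() redefines Transform, the remainder of sayHi()
+// executes as an obsolete method, which is what this test exercises.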
+extern "C" JNIEXPORT void JNICALL Java_Transform_doExecute(JNIEnv* env,
+                                                           jclass klass ATTRIBUTE_UNUSED,
+                                                           jobject runnable) {
+  jclass runnable_klass = env->FindClass("java/lang/Runnable");
+  DCHECK(runnable_klass != nullptr);
+  jmethodID run_method = env->GetMethodID(runnable_klass, "run", "()V");
+  env->CallVoidMethod(runnable, run_method);
+}
+
+}  // namespace Test945ObsoleteNative
+}  // namespace art
diff --git a/test/945-obsolete-native/run b/test/945-obsolete-native/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/945-obsolete-native/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/945-obsolete-native/src/Main.java b/test/945-obsolete-native/src/Main.java
new file mode 100644
index 0000000..5e2154e
--- /dev/null
+++ b/test/945-obsolete-native/src/Main.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Base64;
+
+public class Main {
+  // class Transform {
+  //   public void sayHi(Runnable r) {
+  //     System.out.println("Hello - Transformed");
+  //     doExecute(r);
+  //     System.out.println("Goodbye - Transformed");
+  //   }
+  //
+  //   private static native void doExecute(Runnable r);
+  // }
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADQAIgoACAASCQATABQIABUKABYAFwoABwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW" +
+    "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAXKExqYXZhL2xhbmcvUnVubmFibGU7" +
+    "KVYBAAlkb0V4ZWN1dGUBAApTb3VyY2VGaWxlAQAOVHJhbnNmb3JtLmphdmEMAAkACgcAHAwAHQAe" +
+    "AQATSGVsbG8gLSBUcmFuc2Zvcm1lZAcAHwwAIAAhDAAPAA4BABVHb29kYnllIC0gVHJhbnNmb3Jt" +
+    "ZWQBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291" +
+    "dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxu" +
+    "AQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABwAIAAAAAAADAAAACQAKAAEACwAAAB0AAQABAAAA" +
+    "BSq3AAGxAAAAAQAMAAAABgABAAAAEQABAA0ADgABAAsAAAA5AAIAAgAAABWyAAISA7YABCu4AAWy" +
+    "AAISBrYABLEAAAABAAwAAAASAAQAAAATAAgAFAAMABUAFAAWAQoADwAOAAAAAQAQAAAAAgAR");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQB1fZcJR/opPuXacK8mIla5shH0LSg72qJYAwAAcAAAAHhWNBIAAAAAAAAAALgCAAAR" +
+    "AAAAcAAAAAcAAAC0AAAAAwAAANAAAAABAAAA9AAAAAUAAAD8AAAAAQAAACQBAAAUAgAARAEAAKIB" +
+    "AACqAQAAwQEAANYBAADjAQAA+gEAAA4CAAAkAgAAOAIAAEwCAABcAgAAXwIAAGMCAABuAgAAggIA" +
+    "AIcCAACQAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA" +
+    "lAEAAAsAAAAGAAAAnAEAAAUAAQAOAAAAAAAAAAAAAAAAAAEADAAAAAAAAQAQAAAAAQACAA8AAAAC" +
+    "AAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAJAAAAAAAAAKUCAAAAAAAAAQABAAEAAACXAgAABAAAAHAQ" +
+    "BAAAAA4ABAACAAIAAACcAgAAFAAAAGIAAAAbAQIAAABuIAMAEABxEAEAAwBiAAAAGwEBAAAAbiAD" +
+    "ABAADgABAAAAAwAAAAEAAAAEAAY8aW5pdD4AFUdvb2RieWUgLSBUcmFuc2Zvcm1lZAATSGVsbG8g" +
+    "LSBUcmFuc2Zvcm1lZAALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEv" +
+    "bGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABJM" +
+    "amF2YS9sYW5nL1N5c3RlbTsADlRyYW5zZm9ybS5qYXZhAAFWAAJWTAAJZG9FeGVjdXRlABJlbWl0" +
+    "dGVyOiBqYWNrLTQuMjUAA291dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAQAHDoc8hwAAAAIBAICA" +
+    "BMQCAYoCAAIB3AIADQAAAAAAAAABAAAAAAAAAAEAAAARAAAAcAAAAAIAAAAHAAAAtAAAAAMAAAAD" +
+    "AAAA0AAAAAQAAAABAAAA9AAAAAUAAAAFAAAA/AAAAAYAAAABAAAAJAEAAAEgAAACAAAARAEAAAEQ" +
+    "AAACAAAAlAEAAAIgAAARAAAAogEAAAMgAAACAAAAlwIAAAAgAAABAAAApQIAAAAQAAABAAAAuAIA" +
+    "AA==");
+
+  public static void main(String[] args) {
+    bindTest945ObsoleteNative();
+    doTest(new Transform());
+  }
+
+  public static void doTest(Transform t) {
+    t.sayHi(() -> { System.out.println("Not doing anything here"); });
+    t.sayHi(() -> {
+      System.out.println("transforming calling function");
+      doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    });
+    t.sayHi(() -> { System.out.println("Not doing anything here"); });
+  }
+
+  // Transforms the class
+  private static native void doCommonClassRedefinition(Class<?> target,
+                                                       byte[] classfile,
+                                                       byte[] dexfile);
+
+  private static native void bindTest945ObsoleteNative();
+}
diff --git a/test/945-obsolete-native/src/Transform.java b/test/945-obsolete-native/src/Transform.java
new file mode 100644
index 0000000..2b7cc1b
--- /dev/null
+++ b/test/945-obsolete-native/src/Transform.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+  public void sayHi(Runnable r) {
+    System.out.println("hello");
+    doExecute(r);
+    System.out.println("goodbye");
+  }
+
+  private static native void doExecute(Runnable r);
+}
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index 801904d..fc9f030 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -676,6 +676,13 @@
             Integer.class, MethodType.methodType(Integer.class, Integer.class));
         fail("Unexpected success for non-void type for findConstructor");
     } catch (NoSuchMethodException e) {}
+
+    // Array class constructor.
+    try {
+        MethodHandle foo = MethodHandles.lookup().findConstructor(
+            Object[].class, MethodType.methodType(void.class));
+        fail("Unexpected success for array class type for findConstructor");
+    } catch (NoSuchMethodException e) {}
   }
 
   public static void testStringConstructors() throws Throwable {
diff --git a/test/Android.bp b/test/Android.bp
index 1070645..00c890a 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -273,6 +273,8 @@
         "931-agent-thread/agent_thread.cc",
         "933-misc-events/misc_events.cc",
         "936-search-onload/search_onload.cc",
+        "944-transform-classloaders/classloader.cc",
+        "945-obsolete-native/obsolete_native.cc",
     ],
     shared_libs: [
         "libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 742353d..1938b92 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -533,26 +533,13 @@
 # also uses Generic JNI instead of the JNI compiler.
 # Test 906 iterates the heap filtering with different options. No instances should be created
 # between those runs to be able to have precise checks.
-# Test 902 hits races with the JIT compiler. b/32821077
 # Test 629 requires compilation.
-# Test 914, 915, 917, & 919 are very sensitive to the exact state of the stack,
-# including the jit-inserted runtime frames. This causes them to be somewhat
-# flaky as JIT tests. This should be fixed once b/33630159 or b/33616143 are
-# resolved but until then just disable them. Test 916 already checks this
-# feature for JIT use cases in a way that is resilient to the jit frames.
 # 912: b/34655682
 TEST_ART_BROKEN_JIT_RUN_TESTS := \
   137-cfi \
   629-vdex-speed \
-  902-hello-transformation \
   904-object-allocation \
   906-iterate-heap \
-  912-classes \
-  914-hello-obsolescence \
-  915-obsolete-2 \
-  917-fields-transformation \
-  919-obsolete-fields \
-  926-multi-obsolescence \
 
 ifneq (,$(filter jit,$(COMPILER_TYPES)))
   ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -863,6 +850,69 @@
 
 endif
 
+# Host executables.
+host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+
+# Classpath for Jack compilation for host.
+host_prereq_rules += $(HOST_JACK_CLASSPATH_DEPENDENCIES)
+
+# Required for dx, jasmin, smali, dexmerger, jack.
+host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
+
+host_prereq_rules += $(HOST_OUT_EXECUTABLES)/hprof-conv
+
+# Classpath for Jack compilation for target.
+target_prereq_rules := $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
+
+# Sync test files to the target, depends upon all things that must be pushed
+# to the target.
+target_prereq_rules += test-art-target-sync
+
+define core-image-dependencies
+  image_suffix := $(3)
+  ifeq ($(3),regalloc_gc)
+    image_suffix:=optimizing
+  else
+    ifeq ($(3),jit)
+      image_suffix:=interpreter
+    endif
+  endif
+  ifeq ($(2),no-image)
+    $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_pic_$(4))
+  else
+    ifeq ($(2),npicimage)
+      $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_no-pic_$(4))
+    else
+      ifeq ($(2),picimage)
+        $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_pic_$(4))
+      else
+        ifeq ($(2),multinpicimage)
+          $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(4))
+        else
+          ifeq ($(2),multipicimage)
+             $(1)_prereq_rules += $$($(call name-to-var,$(1))_CORE_IMAGE_$$(image_suffix)_pic_multi_$(4))
+          endif
+        endif
+      endif
+    endif
+  endif
+endef
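+
+# Illustrative expansion: $(eval $(call core-image-dependencies,host,picimage,optimizing,64))
+# appends $(HOST_CORE_IMAGE_optimizing_pic_64) to host_prereq_rules.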
+
+# Add the core image dependencies required for each combination of target
+# (host or target), IMAGE_TYPE, COMPILER_TYPE and ADDRESS_SIZE to the
+# corresponding prereq_rules variable.
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach image, $(IMAGE_TYPES), \
+    $(foreach compiler, $(COMPILER_TYPES), \
+      $(foreach address_size, $(ALL_ADDRESS_SIZES), $(eval \
+        $(call core-image-dependencies,$(target),$(image),$(compiler),$(address_size)))))))
+
+test-art-host-run-test-dependencies : $(host_prereq_rules)
+test-art-target-run-test-dependencies : $(target_prereq_rules)
+test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
+
+host_prereq_rules :=
+target_prereq_rules :=
+
 # Create a rule to build and run tests following the form:
 # test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
 #    {4: interpreter optimizing jit interp-ac}-
diff --git a/test/Nested/Nested.java b/test/Nested/Nested.java
index 78b273b..f493989 100644
--- a/test/Nested/Nested.java
+++ b/test/Nested/Nested.java
@@ -17,4 +17,6 @@
 class Nested {
     class Inner {
     }
+    Object x = new Object() {
+    };
 }
diff --git a/test/knownfailures.json b/test/knownfailures.json
new file mode 100644
index 0000000..84df924
--- /dev/null
+++ b/test/knownfailures.json
@@ -0,0 +1,343 @@
+[
+    {
+        "test": "153-reference-stress",
+        "description": ["Disable 153-reference-stress temporarily until a fix",
+                        "arrives."],
+        "bug": "http://b/33389022"
+    },
+    {
+        "test": "080-oom-fragmentation",
+        "description": "Disable 080-oom-fragmentation due to flakes.",
+        "bug": "http://b/33795328"
+    },
+    {
+        "tests": ["497-inlining-and-class-loader",
+                  "542-unresolved-access-check"],
+        "description": ["Disable 497-inlining-and-class-loader and ",
+                        "542-unresolved-access-check until they are rewritten.",
+                        "These tests use a broken class loader that tries to",
+                        "register a dex file that's already registered with a",
+                        "different loader."],
+        "bug": "http://b/34193123"
+    },
+    {
+        "test": "149-suspend-all-stress",
+        "description": "Disable 149-suspend-all-stress, its output is flaky",
+        "bug": "http://b/28988206"
+    },
+    {
+        "test": "577-profile-foreign-dex",
+        "description": "Disable 577-profile-foreign-dex",
+        "bug": "http://b/27454772"
+    },
+    {
+        "tests": ["002-sleep",
+                  "053-wait-some",
+                  "055-enum-performance",
+                  "133-static-invoke-super"],
+        "description": ["Tests that are timing sensitive and flaky on heavily",
+                        "loaded systems."]
+    },
+    {
+        "test": "147-stripped-dex-fallback",
+        "variant": "target",
+        "description": ["147-stripped-dex-fallback isn't supported on device",
+                        "because --strip-dex  requires the zip command."]
+    },
+    {
+        "test": "569-checker-pattern-replacement",
+        "variant": "target",
+        "description": ["569-checker-pattern-replacement tests behaviour",
+                        "present only on host."]
+    },
+    {
+        "tests": ["116-nodex2oat",
+                  "118-noimage-dex2oat",
+                  "134-nodex2oat-nofallback"],
+        "variant": "prebuild",
+        "description": ["Note 116-nodex2oat is not broken per-se it just",
+                        "doesn't (and isn't meant to) work with --prebuild."]
+    },
+    {
+        "test": "554-jit-profile-file",
+        "variant": "no-prebuild | interpreter",
+        "description": ["554-jit-profile-file is disabled because it needs a",
+                        "primary oat file to know what it should save."]
+    },
+    {
+        "tests": ["529-checker-unresolved", "555-checker-regression-x86const"],
+        "variant": "no-prebuild",
+        "bug": "http://b/27784033"
+    },
+    {
+        "tests": ["117-nopatchoat",
+                  "147-stripped-dex-fallback",
+                  "608-checker-unresolved-lse"],
+        "variant": "no-prebuild"
+    },
+    {
+        "tests": ["117-nopatchoat",
+                  "118-noimage-dex2oat",
+                  "119-noimage-patchoat",
+                  "554-jit-profile-file"],
+        "variant": "no-relocate",
+        "description": ["117-nopatchoat is not broken per-se it just doesn't",
+                        "work (and isn't meant to) without --prebuild",
+                        "--relocate"]
+    },
+    {
+        "test": "137-cfi",
+        "variant": "interp-ac",
+        "description": ["Temporarily disable some broken tests when forcing",
+                        "access checks in interpreter"],
+        "bug": "http://b/22414682"
+    },
+    {
+        "test" : "629-vdex-speed",
+        "variant": "interp-ac | no-dex2oat | interpreter | jit | relocate-npatchoat",
+        "description": "629 requires compilation."
+    },
+    {
+        "test": "137-cfi",
+        "variant": "gcstress",
+        "description": ["137-cfi needs to unwind a second forked process. We're",
+                        "using a primitive sleep to wait till we hope the",
+                        "second process got into the expected state. The",
+                        "slowness of gcstress makes this bad."]
+    },
+    {
+        "tests": ["908-gc-start-finish",
+                  "913-heaps"],
+        "variant": "gcstress",
+        "description": ["908-gc-start-finish expects GCs only to be run at",
+                        "clear points. The reduced heap size makes this",
+                        "non-deterministic. Same for 913."]
+    },
+    {
+        "test": "961-default-iface-resolution-gen",
+        "variant": "gcstress",
+        "description": ["961-default-iface-resolution-gen and",
+                        "964-default-iface-init-genare very long tests that",
+                        "often will take more than the timeout to run when",
+                        "gcstress is enabled. This is because gcstress slows",
+                        "down allocations significantly which these tests do a",
+                        "lot."]
+    },
+    {
+        "tests": ["964-default-iface-init-gen",
+                 "154-gc-loop"],
+        "variant": "gcstress"
+    },
+    {
+        "test": "115-native-bridge",
+        "variant": "target",
+        "description": ["115-native-bridge setup is complicated. Need to",
+                        "implement it correctly for the target."]
+    },
+    {
+        "test": "130-hprof",
+        "variant": "target",
+        "desription": ["130-hprof dumps the heap and runs hprof-conv to check",
+                       "whether the file is somewhat readable. Thi is only",
+                       "possible on the host. TODO: Turn off all the other",
+                       "combinations, this is more about testing actual ART",
+                       "code. A gtest is very hard to write here, as (for a",
+                       "complete test) JDWP must be set up."]
+    },
+    {
+        "test": "131-structural-change",
+        "variant": "debug",
+        "description": ["131 is an old test. The functionality has been",
+                        "implemented at an earlier stage and is checked",
+                        "in tests 138. Blacklisted for debug builds since",
+                        "these builds have duplicate classes checks which",
+                        "punt to interpreter"]
+    },
+    {
+        "test": "138-duplicate-classes-check",
+        "variant": "ndebug",
+        "description": ["Turned on for debug builds since debug builds have",
+                        "duplicate classes checks enabled"],
+        "bug": "http://b/2133391"
+    },
+    {
+        "test": "147-stripped-dex-fallback",
+        "variant": "no-dex2oat | no-image | relocate-npatchoat",
+        "description": ["147-stripped-dex-fallback is disabled because it",
+                        "requires --prebuild."]
+    },
+    {
+        "test": "554-jit-profile-file",
+        "variant": "no-dex2oat | no-image | relocate-npatchoat",
+        "description": ["554-jit-profile-file is disabled because it needs a",
+                        "primary oat file to know what it should save."]
+    },
+    {
+        "tests": ["116-nodex2oat",
+                  "117-nopatchoat",
+                  "118-noimage-dex2oat",
+                  "119-noimage-patchoat",
+                  "137-cfi",
+                  "138-duplicate-classes-check2"],
+        "variant": "no-dex2oat | no-image | relocate-npatchoat",
+        "description": ["All these tests check that we have sane behavior if we",
+                        "don't have a patchoat or dex2oat. Therefore we",
+                        "shouldn't run them in situations where we actually",
+                        "don't have these since they explicitly test for them.",
+                        "These all also assume we have an image."]
+    },
+    {
+        "tests": ["137-cfi",
+                  "138-duplicate-classes-check",
+                  "018-stack-overflow",
+                  "961-default-iface-resolution-gen",
+                  "964-default-iface-init"],
+        "variant": "no-image",
+        "description": ["This test fails without an image. 018, 961, 964 often",
+                        "time out."],
+        "bug": "http://b/34369284"
+    },
+    {
+        "test": "137-cfi",
+        "description": ["This test unrolls and expects managed frames, but",
+                        "tracing means we run the interpreter."],
+        "variant": "trace | stream"
+    },
+    {
+        "tests": ["802-deoptimization",
+                 "570-checker-osr"],
+        "description": ["This test dynamically enables tracing to force a",
+                        "deoptimization. This makes the test meaningless",
+                        "when already tracing, and writes an error message",
+                        "that we do not want to check for."],
+        "variant": "trace | stream"
+    },
+    {
+        "test": "130-hprof",
+        "description": "130 occasional timeout",
+        "bug": "http://b/32383962",
+        "variant": "trace | stream"
+    },
+    {
+        "tests": ["087-gc-after-link",
+                  "141-class-unload"],
+        "variant": "trace | stream"
+    },
+    {
+        "tests": ["604-hot-static-interface",
+                  "612-jit-dex-cache",
+                  "613-inlining-dex-cache",
+                  "616-cha",
+                  "626-set-resolved-string"],
+        "variant": "trace  | stream",
+        "description": ["These tests expect JIT compilation, which is",
+                        "suppressed when tracing."]
+    },
+    {
+        "test": "137-cfi",
+        "description": ["CFI unwinding expects managed frames, and the test",
+                        "does not iterate enough to even compile. JIT also",
+                        "uses Generic JNI instead of the JNI compiler."],
+        "variant": "interpreter | jit"
+    },
+    {
+        "test": "906-iterate-heap",
+        "description": ["Test 906 iterates the heap filtering with different",
+                        "options. No instances should be created between those",
+                        "runs to be able to have precise checks."],
+        "variant": "jit"
+    },
+    {
+        "tests": ["904-object-allocation"],
+        "variant": "jit"
+    },
+    {
+        "test": "912-classes",
+        "variant": "jit",
+        "bug": "http://b/34655682"
+    },
+    {
+        "tests": ["570-checker-select",
+                  "484-checker-register-hints"],
+        "description": ["These tests were based on the linear scan allocator,",
+                        "which makes different decisions than the graph",
+                        "coloring allocator. (These attempt to test for code",
+                        "quality, not correctness.)"],
+        "variant": "regalloc_gc"
+    },
+    {
+        "tests": ["454-get-vreg",
+                  "457-regs",
+                  "602-deoptimizeable"],
+        "description": ["Tests that should fail when the optimizing compiler ",
+                        "compiles them non-debuggable."],
+        "variant": "optimizing &  ndebuggable | regalloc_gc & ndebuggable"
+    },
+    {
+        "test": "596-app-images",
+        "variant": "npictest"
+    },
+    {
+        "test": "055-enum-performance",
+        "variant": "optimizing | regalloc_gc",
+        "description": ["055: Exceeds run time limits due to heap poisoning ",
+                        "instrumentation (on ARM and ARM64 devices)."]
+    },
+    {
+        "test": "909-attach-agent",
+        "variant": "debuggable",
+        "description": "Tests that check semantics for a non-debuggable app."
+    },
+    {
+        "test": "137-cfi",
+        "variant": "debuggable",
+        "description": ["The test relies on AOT code and debuggable makes us",
+                        "JIT always."]
+    },
+    {
+        "tests": ["000-nop",
+                  "134-nodex2oat-nofallback",
+                  "147-stripped-dex-fallback",
+                 "595-profile-saving"],
+        "description": "The doesn't compile anything",
+        "env_vars": {"ART_TEST_BISECTION": "true"},
+        "variant": "optimizing | regalloc_gc"
+    },
+    {
+        "tests": "089-many-methods",
+        "description": "The test tests a build failure",
+        "env_vars": {"ART_TEST_BISECTION": "true"},
+        "variant": "optimizing | regalloc_gc"
+    },
+    {
+        "tests": ["018-stack-overflow",
+                  "116-nodex2oat",
+                  "117-nopatchoat",
+                  "118-noimage-dex2oat",
+                  "119-noimage-patchoat",
+                  "126-miranda-multidex",
+                  "137-cfi"],
+        "description": "The test run dalvikvm more than once.",
+        "env_vars": {"ART_TEST_BISECTION": "true"},
+        "variant": "optimizing | regalloc_gc"
+    },
+    {
+        "tests": ["115-native-bridge",
+                 "088-monitor-verification"],
+        "description": "The test assume they are always compiled.",
+        "env_vars": {"ART_TEST_BISECTION": "true"},
+        "variant": "optimizing | regalloc_gc"
+    },
+    {
+        "test": "055-enum-performance",
+        "description": ["The test tests performance which degrades during",
+                        "bisecting."],
+        "env_vars": {"ART_TEST_BISECTION": "true"},
+        "variant": "optimizing | regalloc_gc"
+    },
+    {
+        "test": "537-checker-arraycopy",
+        "env_vars": {"ART_USE_READ_BARRIER": "true"},
+        "variant": "interpreter | optimizing | regalloc_gc | jit"
+    }
+]
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
new file mode 100644
index 0000000..278980f
--- /dev/null
+++ b/test/testrunner/env.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+import subprocess
+
+env = dict(os.environ)
+
+def getEnvBoolean(var, default):
+  val = env.get(var)
+  if val:
+    if val == "True" or val == "true":
+      return True
+    if val == "False" or val == "false":
+      return False
+  return default
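+
+# Example: getEnvBoolean('ART_TEST_FULL', False) returns True only when the
+# environment sets ART_TEST_FULL to "true" or "True"; unset or unrecognized
+# values fall back to the default.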
+
+def get_build_var(var_name):
+  # The command is taken from build/envsetup.sh to fetch build variables.
+  command = ("CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core "
+             "make --no-print-directory -C \"%s\" -f build/core/config.mk "
+             "dumpvar-%s") % (ANDROID_BUILD_TOP, var_name)
+  config = subprocess.Popen(command, stdout=subprocess.PIPE,
+                            shell=True).communicate()[0]
+  return config.strip()
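+
+# Example (illustrative): get_build_var('TARGET_ARCH') might return 'arm64'
+# on a 64-bit ARM target.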
+
+def get_env(key):
+  return env.get(key)
+
+ANDROID_BUILD_TOP = env.get('ANDROID_BUILD_TOP', os.getcwd())
+
+# Directory used for temporary test files on the host.
+ART_HOST_TEST_DIR = tempfile.mkdtemp(prefix='test-art-')
+
+# Keep going after encountering a test failure?
+ART_TEST_KEEP_GOING = getEnvBoolean('ART_TEST_KEEP_GOING', True)
+
+# Do you want all tests, even those that are time consuming?
+ART_TEST_FULL = getEnvBoolean('ART_TEST_FULL', False)
+
+# Do you want interpreter tests run?
+ART_TEST_INTERPRETER = getEnvBoolean('ART_TEST_INTERPRETER', ART_TEST_FULL)
+ART_TEST_INTERPRETER_ACCESS_CHECKS = getEnvBoolean('ART_TEST_INTERPRETER_ACCESS_CHECKS',
+                                                   ART_TEST_FULL)
+
+# Do you want JIT tests run?
+ART_TEST_JIT = getEnvBoolean('ART_TEST_JIT', ART_TEST_FULL)
+
+# Do you want optimizing compiler tests run?
+ART_TEST_OPTIMIZING = getEnvBoolean('ART_TEST_OPTIMIZING', True)
+
+# Do you want to test the optimizing compiler with graph coloring register allocation?
+ART_TEST_OPTIMIZING_GRAPH_COLOR = getEnvBoolean('ART_TEST_OPTIMIZING_GRAPH_COLOR', ART_TEST_FULL)
+
+# Do we want to test a non-PIC-compiled core image?
+ART_TEST_NPIC_IMAGE = getEnvBoolean('ART_TEST_NPIC_IMAGE', ART_TEST_FULL)
+
+# Do we want to test PIC-compiled tests ("apps")?
+ART_TEST_PIC_TEST = getEnvBoolean('ART_TEST_PIC_TEST', ART_TEST_FULL)
+# Do you want tracing tests run?
+ART_TEST_TRACE = getEnvBoolean('ART_TEST_TRACE', ART_TEST_FULL)
+
+# Do you want tracing tests (streaming mode) run?
+ART_TEST_TRACE_STREAM = getEnvBoolean('ART_TEST_TRACE_STREAM', ART_TEST_FULL)
+
+# Do you want tests with GC verification enabled run?
+ART_TEST_GC_VERIFY = getEnvBoolean('ART_TEST_GC_VERIFY', ART_TEST_FULL)
+
+# Do you want tests with the GC stress mode enabled run?
+ART_TEST_GC_STRESS = getEnvBoolean('ART_TEST_GC_STRESS', ART_TEST_FULL)
+
+# Do you want tests with the JNI forcecopy mode enabled run?
+ART_TEST_JNI_FORCECOPY = getEnvBoolean('ART_TEST_JNI_FORCECOPY', ART_TEST_FULL)
+
+# Do you want run-tests with relocation disabled run?
+ART_TEST_RUN_TEST_RELOCATE = getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE', ART_TEST_FULL)
+
+# Do you want run-tests with prebuilding?
+ART_TEST_RUN_TEST_PREBUILD = getEnvBoolean('ART_TEST_RUN_TEST_PREBUILD', True)
+
+# Do you want run-tests with no prebuilding enabled run?
+ART_TEST_RUN_TEST_NO_PREBUILD = getEnvBoolean('ART_TEST_RUN_TEST_NO_PREBUILD', ART_TEST_FULL)
+
+# Do you want run-tests with a pregenerated core.art?
+ART_TEST_RUN_TEST_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_IMAGE', True)
+
+# Do you want run-tests without a pregenerated core.art?
+ART_TEST_RUN_TEST_NO_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_NO_IMAGE', ART_TEST_FULL)
+
+# Do you want run-tests with relocation enabled but patchoat failing?
+ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT = getEnvBoolean('ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT',
+                                                       ART_TEST_FULL)
+
+# Do you want run-tests without a dex2oat?
+ART_TEST_RUN_TEST_NO_DEX2OAT = getEnvBoolean('ART_TEST_RUN_TEST_NO_DEX2OAT', ART_TEST_FULL)
+
+# Do you want run-tests with libartd.so?
+ART_TEST_RUN_TEST_DEBUG = getEnvBoolean('ART_TEST_RUN_TEST_DEBUG', True)
+
+# Do you want run-tests with libart.so?
+ART_TEST_RUN_TEST_NDEBUG = getEnvBoolean('ART_TEST_RUN_TEST_NDEBUG', ART_TEST_FULL)
+
+# Do you want failed tests to have their artifacts cleaned up?
+ART_TEST_RUN_TEST_ALWAYS_CLEAN = getEnvBoolean('ART_TEST_RUN_TEST_ALWAYS_CLEAN', True)
+
+# Do you want run-tests with the --debuggable flag
+ART_TEST_RUN_TEST_DEBUGGABLE = getEnvBoolean('ART_TEST_RUN_TEST_DEBUGGABLE', ART_TEST_FULL)
+
+# Do you want to test multi-part boot-image functionality?
+ART_TEST_RUN_TEST_MULTI_IMAGE = getEnvBoolean('ART_TEST_RUN_TEST_MULTI_IMAGE', ART_TEST_FULL)
+
+ART_TEST_DEBUG_GC = getEnvBoolean('ART_TEST_DEBUG_GC', False)
+
+ART_TEST_BISECTION = getEnvBoolean('ART_TEST_BISECTION', False)
+
+DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = env.get('DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
+
+# Do you want run-tests with the host/target's second arch?
+ART_TEST_RUN_TEST_2ND_ARCH = getEnvBoolean('ART_TEST_RUN_TEST_2ND_ARCH', True)
+
+HOST_2ND_ARCH_PREFIX = get_build_var('HOST_2ND_ARCH_PREFIX')
+HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES = env.get(
+  HOST_2ND_ARCH_PREFIX + 'DEX2OAT_HOST_INSTRUCTION_SET_FEATURES')
+
+ART_TEST_ANDROID_ROOT = env.get('ART_TEST_ANDROID_ROOT')
+
+ART_TEST_WITH_STRACE = getEnvBoolean('ART_TEST_WITH_STRACE', False)
+
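+# Phony test suffixes: the primary arch gets '64' or '32'; the secondary
+# suffix is '32' only when a 64-bit target also has a 32-bit second arch,
+# and is empty otherwise.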
+TARGET_2ND_ARCH = get_build_var('TARGET_2ND_ARCH')
+TARGET_ARCH = get_build_var('TARGET_ARCH')
+if TARGET_2ND_ARCH:
+  if "64" in TARGET_ARCH:
+    ART_PHONY_TEST_TARGET_SUFFIX = "64"
+    _2ND_ART_PHONY_TEST_TARGET_SUFFIX = "32"
+  else:
+    ART_PHONY_TEST_TARGET_SUFFIX = "32"
+    _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+else:
+  if "64" in TARGET_ARCH:
+    ART_PHONY_TEST_TARGET_SUFFIX = "64"
+    _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+  else:
+    ART_PHONY_TEST_TARGET_SUFFIX = "32"
+    _2ND_ART_PHONY_TEST_TARGET_SUFFIX = ""
+
+HOST_PREFER_32_BIT = get_build_var('HOST_PREFER_32_BIT')
+if HOST_PREFER_32_BIT == "true":
+  ART_PHONY_TEST_HOST_SUFFIX = "32"
+  _2ND_ART_PHONY_TEST_HOST_SUFFIX = ""
+else:
+  ART_PHONY_TEST_HOST_SUFFIX = "64"
+  _2ND_ART_PHONY_TEST_HOST_SUFFIX = "32"
+
+HOST_OUT_EXECUTABLES = os.path.join(ANDROID_BUILD_TOP,
+                                    get_build_var("HOST_OUT_EXECUTABLES"))
+os.environ['JACK'] = HOST_OUT_EXECUTABLES + '/jack'
+os.environ['DX'] = HOST_OUT_EXECUTABLES + '/dx'
+os.environ['SMALI'] = HOST_OUT_EXECUTABLES + '/smali'
+os.environ['JASMIN'] = HOST_OUT_EXECUTABLES + '/jasmin'
+os.environ['DXMERGER'] = HOST_OUT_EXECUTABLES + '/dexmerger'
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
new file mode 100755
index 0000000..f60a6c9
--- /dev/null
+++ b/test/testrunner/testrunner.py
@@ -0,0 +1,795 @@
+#!/usr/bin/env python
+#
+# Copyright 2017, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""ART Run-Test TestRunner
+
+The testrunner runs the ART run-tests by simply invoking the script.
+It fetches the list of eligible tests from the art/test directory, and the
+list of disabled tests from art/test/knownfailures.json. It runs each test by
+invoking the art/test/run-test script and checks the exit value to decide
+whether the test passed or failed.
+
+Before invoking the script, first build all the test dependencies.
+There are two major build targets for building the target and host test
+dependencies:
+1) test-art-host-run-test
+2) test-art-target-run-test
+
+The script accepts the following options:
+-t: Either the test name as in art/test or the test name including the variant
+    information. E.g., "-t 001-HelloWorld",
+    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32"
+-j: Number of thread workers to be used. E.g., "-j64"
+--dry-run: Instead of running the tests, just print their names.
+--verbose
+-b / --build-dependencies: Build the dependencies before running the tests.
+
+To specify particular variants for the test, use --<variant-name>.
+E.g., for the optimizing compiler, use --optimizing. Example invocations are
+shown below.
+
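+Example invocations (illustrative):
+  ./testrunner.py -t 001-HelloWorld
+  ./testrunner.py -j64 --host --optimizing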
+
+In the end, the script will print the failed and skipped tests if any.
+
+"""
+import fnmatch
+import itertools
+import json
+from optparse import OptionParser
+import os
+import re
+import subprocess
+import sys
+import threading
+import time
+
+import env
+
+TARGET_TYPES = set()
+RUN_TYPES = set()
+PREBUILD_TYPES = set()
+COMPILER_TYPES = set()
+RELOCATE_TYPES = set()
+TRACE_TYPES = set()
+GC_TYPES = set()
+JNI_TYPES = set()
+IMAGE_TYPES = set()
+PICTEST_TYPES = set()
+DEBUGGABLE_TYPES = set()
+ADDRESS_SIZES = set()
+OPTIMIZING_COMPILER_TYPES = set()
+ADDRESS_SIZES_TARGET = {'host': set(), 'target': set()}
+
+# DISABLED_TEST_CONTAINER holds information about the disabled tests: a map
+# keyed by test name (like 001-HelloWorld), whose value is the set of variant
+# combinations for which the test is disabled.
+DISABLED_TEST_CONTAINER = {}
+
+# The dict maps each variant type to the set of all its possible variants. For
+# example, the key 'target' maps to {'target', 'host'}. It is used to parse
+# the test name given as the argument to run.
+VARIANT_TYPE_DICT = {}
+
+# The set contains all the variants of each type.
+TOTAL_VARIANTS_SET = set()
+
+# The colors are used in the output. When a test passes, COLOR_PASS is used,
+# and so on.
+COLOR_ERROR = '\033[91m'
+COLOR_PASS = '\033[92m'
+COLOR_SKIP = '\033[93m'
+COLOR_NORMAL = '\033[0m'
+
+# The mutex object is used by the threads to serialize updates to the value
+# of test_count.
+test_count_mutex = threading.Lock()
+# The set contains all the run-tests found in the art/test directory.
+RUN_TEST_SET = set()
+# The semaphore object is used by the testrunner to limit the number of
+# threads to the user-requested concurrency value.
+semaphore = threading.Semaphore(1)
+# The mutex object is used to provide exclusive access to a thread to print
+# its output.
+print_mutex = threading.Lock()
+failed_tests = []
+skipped_tests = []
+
+# Flags
+n_thread = 1
+test_count = 0
+total_test_count = 0
+verbose = False
+last_print_length = 0
+dry_run = False
+build = False
+gdb = False
+gdb_arg = ''
+stop_testrunner = False
+
+def gather_test_info():
+  """The method gathers test information about the test to be run which includes
+  generating the list of total tests from the art/test directory and the list
+  of disabled test. It also maps various variants to types.
+  """
+  global TOTAL_VARIANTS_SET
+  global DISABLED_TEST_CONTAINER
+  # TODO: Avoid duplication of the variant names in different lists.
+  VARIANT_TYPE_DICT['pictest'] = {'pictest', 'npictest'}
+  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
+  VARIANT_TYPE_DICT['target'] = {'target', 'host'}
+  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
+  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image', 'npicimage',
+                                'multinpicimage', 'multipicimage'}
+  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
+  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
+  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'no-dex2oat', 'prebuild'}
+  VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
+  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
+  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
+  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
+                                   'regalloc_gc'}
+
+  for v_type in VARIANT_TYPE_DICT:
+    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))
+
+  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
+  for f in os.listdir(test_dir):
+    if fnmatch.fnmatch(f, '[0-9]*'):
+      RUN_TEST_SET.add(f)
+  DISABLED_TEST_CONTAINER = get_disabled_test_info()
+
+
+def setup_test_env():
+  """The method sets default value for the various variants of the tests if they
+  are already not set.
+  """
+  if env.ART_TEST_BISECTION:
+    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
+    env.ART_TEST_RUN_TEST_PREBUILD = False
+    # Bisection search writes to standard output.
+    env.ART_TEST_QUIET = False
+
+  if not TARGET_TYPES:
+    TARGET_TYPES.add('host')
+    TARGET_TYPES.add('target')
+
+  if env.ART_TEST_RUN_TEST_PREBUILD:
+    PREBUILD_TYPES.add('prebuild')
+  if env.ART_TEST_RUN_TEST_NO_PREBUILD:
+    PREBUILD_TYPES.add('no-prebuild')
+  if env.ART_TEST_RUN_TEST_NO_DEX2OAT:
+    PREBUILD_TYPES.add('no-dex2oat')
+
+  if env.ART_TEST_INTERPRETER_ACCESS_CHECKS:
+    COMPILER_TYPES.add('interp-ac')
+  if env.ART_TEST_INTERPRETER:
+    COMPILER_TYPES.add('interpreter')
+  if env.ART_TEST_JIT:
+    COMPILER_TYPES.add('jit')
+
+  if env.ART_TEST_OPTIMIZING:
+    COMPILER_TYPES.add('optimizing')
+    OPTIMIZING_COMPILER_TYPES.add('optimizing')
+  if env.ART_TEST_OPTIMIZING_GRAPH_COLOR:
+    COMPILER_TYPES.add('regalloc_gc')
+    OPTIMIZING_COMPILER_TYPES.add('regalloc_gc')
+
+  if not RELOCATE_TYPES:
+    RELOCATE_TYPES.add('no-relocate')
+  if env.ART_TEST_RUN_TEST_RELOCATE:
+    RELOCATE_TYPES.add('relocate')
+  if env.ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT:
+    RELOCATE_TYPES.add('relocate-npatchoat')
+
+  if not TRACE_TYPES:
+    TRACE_TYPES.add('ntrace')
+  if env.ART_TEST_TRACE:
+    TRACE_TYPES.add('trace')
+  if env.ART_TEST_TRACE_STREAM:
+    TRACE_TYPES.add('stream')
+
+  if not GC_TYPES:
+    GC_TYPES.add('cms')
+  if env.ART_TEST_GC_STRESS:
+    GC_TYPES.add('gcstress')
+  if env.ART_TEST_GC_VERIFY:
+    GC_TYPES.add('gcverify')
+
+  if not JNI_TYPES:
+    JNI_TYPES.add('checkjni')
+  if env.ART_TEST_JNI_FORCECOPY:
+    JNI_TYPES.add('forcecopy')
+
+  if env.ART_TEST_RUN_TEST_IMAGE:
+    IMAGE_TYPES.add('picimage')
+  if env.ART_TEST_RUN_TEST_NO_IMAGE:
+    IMAGE_TYPES.add('no-image')
+  if env.ART_TEST_RUN_TEST_MULTI_IMAGE:
+    IMAGE_TYPES.add('multipicimage')
+  if env.ART_TEST_NPIC_IMAGE:
+    IMAGE_TYPES.add('npicimage')
+  if env.ART_TEST_RUN_TEST_MULTI_IMAGE:
+    IMAGE_TYPES.add('multinpicimage')
+
+  if not PICTEST_TYPES:
+    PICTEST_TYPES.add('npictest')
+  if env.ART_TEST_PIC_TEST:
+    PICTEST_TYPES.add('pictest')
+
+  if env.ART_TEST_RUN_TEST_DEBUG:
+    RUN_TYPES.add('debug')
+  if env.ART_TEST_RUN_TEST_NDEBUG:
+    RUN_TYPES.add('ndebug')
+
+  if not DEBUGGABLE_TYPES:
+    DEBUGGABLE_TYPES.add('ndebuggable')
+
+  if env.ART_TEST_RUN_TEST_DEBUGGABLE:
+    DEBUGGABLE_TYPES.add('debuggable')
+
+  if not ADDRESS_SIZES:
+    ADDRESS_SIZES_TARGET['target'].add(env.ART_PHONY_TEST_TARGET_SUFFIX)
+    ADDRESS_SIZES_TARGET['host'].add(env.ART_PHONY_TEST_HOST_SUFFIX)
+    if env.ART_TEST_RUN_TEST_2ND_ARCH:
+      ADDRESS_SIZES_TARGET['host'].add(env._2ND_ART_PHONY_TEST_HOST_SUFFIX)
+      ADDRESS_SIZES_TARGET['target'].add(env._2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+  else:
+    ADDRESS_SIZES_TARGET['host'] = ADDRESS_SIZES_TARGET['host'].union(ADDRESS_SIZES)
+    ADDRESS_SIZES_TARGET['target'] = ADDRESS_SIZES_TARGET['target'].union(ADDRESS_SIZES)
+
+  global semaphore
+  semaphore = threading.Semaphore(n_thread)
+
+
+def run_tests(tests):
+  """Creates thread workers to run the tests.
+
+  The method generates the commands and spawns thread workers to run the
+  tests. A semaphore caps the number of live workers at the user-requested
+  concurrency: each new worker acquires the semaphore, and once the maximum
+  is reached the method waits for an existing worker to release it. A worker
+  releases the semaphore after printing its output.
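+
+  total_test_count is the full cross product of the selected variant sets; it
+  is computed up front so that workers can print "[ n% k/total ]" progress
+  lines.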
+
+  Args:
+    tests: The set of tests to be run.
+  """
+  options_all = ''
+  global total_test_count
+  total_test_count = len(tests)
+  total_test_count *= len(RUN_TYPES)
+  total_test_count *= len(PREBUILD_TYPES)
+  total_test_count *= len(RELOCATE_TYPES)
+  total_test_count *= len(TRACE_TYPES)
+  total_test_count *= len(GC_TYPES)
+  total_test_count *= len(JNI_TYPES)
+  total_test_count *= len(IMAGE_TYPES)
+  total_test_count *= len(PICTEST_TYPES)
+  total_test_count *= len(DEBUGGABLE_TYPES)
+  total_test_count *= len(COMPILER_TYPES)
+  target_address_combinations = 0
+  for target in TARGET_TYPES:
+    for address_size in ADDRESS_SIZES_TARGET[target]:
+      target_address_combinations += 1
+  total_test_count *= target_address_combinations
+
+  if env.ART_TEST_WITH_STRACE:
+    options_all += ' --strace'
+
+  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
+    options_all += ' --always-clean'
+
+  if env.ART_TEST_BISECTION:
+    options_all += ' --bisection-search'
+
+  if env.ART_TEST_ANDROID_ROOT:
+    options_all += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
+
+  if gdb:
+    options_all += ' --gdb'
+    if gdb_arg:
+      options_all += ' --gdb-arg ' + gdb_arg
+
+  config = itertools.product(tests, TARGET_TYPES, RUN_TYPES, PREBUILD_TYPES,
+                             COMPILER_TYPES, RELOCATE_TYPES, TRACE_TYPES,
+                             GC_TYPES, JNI_TYPES, IMAGE_TYPES, PICTEST_TYPES,
+                             DEBUGGABLE_TYPES)
+
+  for test, target, run, prebuild, compiler, relocate, trace, gc, \
+      jni, image, pictest, debuggable in config:
+    for address_size in ADDRESS_SIZES_TARGET[target]:
+      if stop_testrunner:
+        # When ART_TEST_KEEP_GOING is set to false, then as soon as a test
+        # fails, stop_testrunner is set to True. When this happens, the method
+        # stops creating any new threads and waits for all the existing
+        # threads to end.
+        while threading.active_count() > 2:
+          time.sleep(0.1)
+        return
+      test_name = 'test-art-'
+      test_name += target + '-run-test-'
+      test_name += run + '-'
+      test_name += prebuild + '-'
+      test_name += compiler + '-'
+      test_name += relocate + '-'
+      test_name += trace + '-'
+      test_name += gc + '-'
+      test_name += jni + '-'
+      test_name += image + '-'
+      test_name += pictest + '-'
+      test_name += debuggable + '-'
+      test_name += test
+      test_name += address_size
+
+      variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
+                     image, pictest, debuggable, address_size}
+
+      options_test = options_all
+
+      if target == 'host':
+        options_test += ' --host'
+
+      if run == 'ndebug':
+        options_test += ' -O'
+
+      if prebuild == 'prebuild':
+        options_test += ' --prebuild'
+      elif prebuild == 'no-prebuild':
+        options_test += ' --no-prebuild'
+      elif prebuild == 'no-dex2oat':
+        options_test += ' --no-prebuild --no-dex2oat'
+
+      if compiler == 'optimizing':
+        options_test += ' --optimizing'
+      elif compiler == 'regalloc_gc':
+        options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
+      elif compiler == 'interpreter':
+        options_test += ' --interpreter'
+      elif compiler == 'interp-ac':
+        options_test += ' --interpreter --verify-soft-fail'
+      elif compiler == 'jit':
+        options_test += ' --jit'
+
+      if relocate == 'relocate':
+        options_test += ' --relocate'
+      elif relocate == 'no-relocate':
+        options_test += ' --no-relocate'
+      elif relocate == 'relocate-npatchoat':
+        options_test += ' --relocate --no-patchoat'
+
+      if trace == 'trace':
+        options_test += ' --trace'
+      elif trace == 'stream':
+        options_test += ' --trace --stream'
+
+      if gc == 'gcverify':
+        options_test += ' --gcverify'
+      elif gc == 'gcstress':
+        options_test += ' --gcstress'
+
+      if jni == 'forcecopy':
+        options_test += ' --runtime-option -Xjniopts:forcecopy'
+      elif jni == 'checkjni':
+        options_test += ' --runtime-option -Xcheck:jni'
+
+      if image == 'no-image':
+        options_test += ' --no-image'
+      elif image == 'npicimage':
+        options_test += ' --npic-image'
+      elif image == 'multinpicimage':
+        options_test += ' --npic-image --multi-image'
+      elif image == 'multipicimage':
+        options_test += ' --multi-image'
+
+      if pictest == 'pictest':
+        options_test += ' --pic-test'
+
+      if debuggable == 'debuggable':
+        options_test += ' --debuggable'
+
+      if address_size == '64':
+        options_test += ' --64'
+
+        if env.DEX2OAT_HOST_INSTRUCTION_SET_FEATURES:
+          options_test += ' --instruction-set-features ' + env.DEX2OAT_HOST_INSTRUCTION_SET_FEATURES
+
+      elif address_size == '32':
+        if env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES:
+          options_test += ' --instruction-set-features ' + \
+                          env.HOST_2ND_ARCH_PREFIX_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES
+
+      options_test = (' --output-path %s/run-test-output/%s') % (
+        env.ART_HOST_TEST_DIR, test_name) + options_test
+
+      run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
+      command = run_test_sh + ' ' + options_test + ' ' + test
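+      # The assembled command looks like (illustrative):
+      #   .../art/test/run-test --output-path ... --host --optimizing 001-HelloWorld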
+
+      semaphore.acquire()
+      worker = threading.Thread(target=run_test, args=(command, test, variant_set, test_name))
+      worker.daemon = True
+      worker.start()
+
+  while threading.active_count() > 2:
+    time.sleep(0.1)
+
+
+def run_test(command, test, test_variant, test_name):
+  """Runs the test.
+
+  It invokes the art/test/run-test script to run the test. The exit value of
+  the script is checked: zero means the test passed, otherwise the test is
+  added to the list of failed tests. Before actually running the test, it
+  also checks whether the test appears in the list of disabled tests for this
+  variant combination; if so, it skips running it and adds the test to the
+  list of skipped tests. The method uses print_text to actually print the
+  output. After running the test and capturing its output, it releases the
+  semaphore object.
+
+  Args:
+    command: The command to be used to invoke the script
+    test: The name of the test without the variant information.
+    test_variant: The set of variant for the test.
+    test_name: The name of the test along with the variants.
+  """
+  global last_print_length
+  global test_count
+  global stop_testrunner
+  if is_test_disabled(test, test_variant):
+    test_skipped = True
+  else:
+    test_skipped = False
+    proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+    script_output = proc.stdout.read().strip()
+    test_passed = not proc.wait()
+
+  # If verbose is True, each test's result is printed on a new line. If not,
+  # the result is printed on the same line, overwriting the previous test's
+  # output.
+  if not verbose:
+    suffix = '\r'
+    prefix = ' ' * last_print_length + '\r'
+  else:
+    suffix = '\n'
+    prefix = ''
+  test_count_mutex.acquire()
+  test_count += 1
+  percent = (test_count * 100) / total_test_count
+  out = '[ ' + str(percent) + '% ' + str(test_count) + '/' + str(total_test_count) + ' ] '
+  test_count_mutex.release()
+  out += test_name + ' '
+  if not test_skipped:
+    if test_passed:
+      out += COLOR_PASS + 'PASS' + COLOR_NORMAL
+      last_print_length = len(out)
+    else:
+      failed_tests.append(test_name)
+      out += COLOR_ERROR + 'FAIL' + COLOR_NORMAL
+      out += '\n' + command + '\n' + script_output
+      if not env.ART_TEST_KEEP_GOING:
+        stop_testrunner = True
+      last_print_length = 0
+  elif not dry_run:
+    out += COLOR_SKIP + 'SKIP' + COLOR_NORMAL
+    last_print_length = len(out)
+    skipped_tests.append(test_name)
+  print_mutex.acquire()
+  print_text(prefix + out + suffix)
+  print_mutex.release()
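+  # Pairs with the acquire() in run_tests(); releasing here keeps the number
+  # of live workers at or below the requested concurrency.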
+  semaphore.release()
+
+
+def get_disabled_test_info():
+  """Generate set of known failures.
+
+  It parses the art/test/knownfailures.json file to generate the list of
+  disabled tests.
+
+  Returns:
+    A dict mapping each test name to the set of variant combinations for
+    which the test should not be run.
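+
+    Example: the entry {"test": "137-cfi", "variant": "trace | stream"}
+    yields {'137-cfi': {frozenset({'trace'}), frozenset({'stream'})}}.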
+  """
+  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
+  with open(known_failures_file) as known_failures_json:
+    known_failures_info = json.loads(known_failures_json.read())
+
+  disabled_test_info = {}
+  for failure in known_failures_info:
+    tests = failure.get('test')
+    if tests:
+      tests = [tests]
+    else:
+      tests = failure.get('tests', [])
+    variants = parse_variants(failure.get('variant'))
+    env_vars = failure.get('env_vars')
+    if check_env_vars(env_vars):
+      for test in tests:
+        if test in disabled_test_info:
+          disabled_test_info[test] = disabled_test_info[test].union(variants)
+        else:
+          disabled_test_info[test] = variants
+  return disabled_test_info
+
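+# For reference, a knownfailures.json entry that the parser above understands
+# looks like the following (hypothetical test name and values, using the
+# 'tests', 'variant' and 'env_vars' keys read in get_disabled_test_info):
+#   {
+#     "tests": ["001-Example"],
+#     "variant": "gcstress & target",
+#     "env_vars": {"SOME_ENV_VAR": "true"}
+#   }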
+
+def check_env_vars(env_vars):
+  """Checks if the env variables are set as required to run the test.
+
+  Returns:
+    True if all the env variables are set as required, otherwise False.
+  """
+
+  if not env_vars:
+    return True
+  for key in env_vars:
+    if env.get_env(key) != env_vars.get(key):
+      return False
+  return True
+
+
+def is_test_disabled(test, variant_set):
+  """Checks if the test along with the variant_set is disabled.
+
+  Args:
+    test: The name of the test as in art/test directory.
+    variant_set: Variants to be used for the test.
+  Returns:
+    True if the test is disabled, otherwise False.
+  """
+  if dry_run:
+    return True
+  variants_list = DISABLED_TEST_CONTAINER.get(test, {})
+  for variants in variants_list:
+    variants_present = True
+    for variant in variants:
+      if variant not in variant_set:
+        variants_present = False
+        break
+    if variants_present:
+      return True
+  return False
+
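+# Example of the matching above: if DISABLED_TEST_CONTAINER maps a test to
+# {frozenset({'gcstress', 'target'})}, the test is skipped only when the
+# requested variant_set contains both 'gcstress' and 'target'.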
+
+def parse_variants(variants):
+  """Parses a variant expression fetched from art/test/knownfailures.json.
+
+  The expression is a '|'-separated list of '&'-separated variant names, i.e.
+  an OR of AND-combinations. An empty expression matches every variant.
+
+  Returns:
+    A set of frozensets, one frozenset per AND-combination.
+  """
+  if not variants:
+    variants = ''
+    for variant in TOTAL_VARIANTS_SET:
+      variants += variant
+      variants += '|'
+    variants = variants[:-1]
+  variant_list = set()
+  or_variants = variants.split('|')
+  for or_variant in or_variants:
+    and_variants = or_variant.split('&')
+    variant = set()
+    for and_variant in and_variants:
+      and_variant = and_variant.strip()
+      variant.add(and_variant)
+    variant_list.add(frozenset(variant))
+  return variant_list
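+
+# Example: parse_variants('gcstress & target | trace') returns
+# {frozenset({'gcstress', 'target'}), frozenset({'trace'})}, i.e. an OR of
+# AND-combinations.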
+
+
+def print_text(output):
+  sys.stdout.write(output)
+  sys.stdout.flush()
+
+
+def print_analysis():
+  if not verbose:
+    print_text(' ' * last_print_length + '\r')
+  if skipped_tests:
+    print_text(COLOR_SKIP + 'SKIPPED TESTS' + COLOR_NORMAL + '\n')
+    for test in skipped_tests:
+      print_text(test + '\n')
+    print_text('\n')
+
+  if failed_tests:
+    print_text(COLOR_ERROR + 'FAILED TESTS' + COLOR_NORMAL + '\n')
+    for test in failed_tests:
+      print_text(test + '\n')
+
+
+def parse_test_name(test_name):
+  """Parses the testname provided by the user.
+  It supports two types of test_name:
+  1) Like 001-HelloWorld. In this case, it will just verify if the test actually
+  exists and if it does, it returns the testname.
+  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32
+  In this case, it will parse all the variants and check if they are placed
+  correctly. If yes, it will set the various VARIANT_TYPES to use the
+  variants required to run the test. Again, it returns the test_name
+  without the variant information like 001-HelloWorld.
+  """
+  if test_name in RUN_TEST_SET:
+    return {test_name}
+
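+  # Groups 1-11 capture the variant types in the fixed order below, group 12
+  # the test name, and group 13 the address size; the group indices used
+  # after the match rely on this ordering.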
+  regex = '^test-art-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
+  regex += 'run-test-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['pictest']) + ')-'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
+  regex += '(' + '|'.join(RUN_TEST_SET) + ')'
+  regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
+  match = re.match(regex, test_name)
+  if match:
+    TARGET_TYPES.add(match.group(1))
+    RUN_TYPES.add(match.group(2))
+    PREBUILD_TYPES.add(match.group(3))
+    COMPILER_TYPES.add(match.group(4))
+    RELOCATE_TYPES.add(match.group(5))
+    TRACE_TYPES.add(match.group(6))
+    GC_TYPES.add(match.group(7))
+    JNI_TYPES.add(match.group(8))
+    IMAGE_TYPES.add(match.group(9))
+    PICTEST_TYPES.add(match.group(10))
+    DEBUGGABLE_TYPES.add(match.group(11))
+    ADDRESS_SIZES.add(match.group(13))
+    return {match.group(12)}
+  # Neither a plain test name nor a fully-qualified one: reject it instead of
+  # silently returning None and letting the runner fall back to all tests.
+  raise ValueError(test_name + ' is not a valid test name')
+
+
+def parse_option():
+  global verbose
+  global dry_run
+  global n_thread
+  global build
+  global gdb
+  global gdb_arg
+
+  parser = OptionParser()
+  parser.add_option('-t', '--test', dest='test', help='name of the test')
+  parser.add_option('-j', type='int', dest='n_thread')
+  for variant in TOTAL_VARIANTS_SET:
+    flag = '--' + variant
+    flag_dest = variant.replace('-', '_')
+    if variant == '32' or variant == '64':
+      flag_dest = 'n' + flag_dest
+    parser.add_option(flag, action='store_true', dest=flag_dest)
+  parser.add_option('--verbose', '-v', action='store_true', dest='verbose')
+  parser.add_option('--dry-run', action='store_true', dest='dry_run')
+  parser.add_option('-b', '--build-dependencies', action='store_true', dest='build')
+  parser.add_option('--gdb', action='store_true', dest='gdb')
+  parser.add_option('--gdb-arg', dest='gdb_arg')
+
+  options = parser.parse_args()[0]
+  test = ''
+  if options.test:
+    test = parse_test_name(options.test)
+  if options.pictest:
+    PICTEST_TYPES.add('pictest')
+  if options.ndebug:
+    RUN_TYPES.add('ndebug')
+  if options.interp_ac:
+    COMPILER_TYPES.add('interp-ac')
+  if options.picimage:
+    IMAGE_TYPES.add('picimage')
+  if options.n64:
+    ADDRESS_SIZES.add('64')
+  if options.interpreter:
+    COMPILER_TYPES.add('interpreter')
+  if options.jni:
+    JNI_TYPES.add('jni')
+  if options.relocate_npatchoat:
+    RELOCATE_TYPES.add('relocate-npatchoat')
+  if options.no_prebuild:
+    PREBUILD_TYPES.add('no-prebuild')
+  if options.npictest:
+    PICTEST_TYPES.add('npictest')
+  if options.no_dex2oat:
+    PREBUILD_TYPES.add('no-dex2oat')
+  if options.jit:
+    COMPILER_TYPES.add('jit')
+  if options.relocate:
+    RELOCATE_TYPES.add('relocate')
+  if options.ndebuggable:
+    DEBUGGABLE_TYPES.add('ndebuggable')
+  if options.no_image:
+    IMAGE_TYPES.add('no-image')
+  if options.optimizing:
+    COMPILER_TYPES.add('optimizing')
+  if options.trace:
+    TRACE_TYPES.add('trace')
+  if options.gcstress:
+    GC_TYPES.add('gcstress')
+  if options.no_relocate:
+    RELOCATE_TYPES.add('no-relocate')
+  if options.target:
+    TARGET_TYPES.add('target')
+  if options.forcecopy:
+    JNI_TYPES.add('forcecopy')
+  if options.n32:
+    ADDRESS_SIZES.add('32')
+  if options.host:
+    TARGET_TYPES.add('host')
+  if options.gcverify:
+    GC_TYPES.add('gcverify')
+  if options.debuggable:
+    DEBUGGABLE_TYPES.add('debuggable')
+  if options.prebuild:
+    PREBUILD_TYPES.add('prebuild')
+  if options.debug:
+    RUN_TYPES.add('debug')
+  if options.checkjni:
+    JNI_TYPES.add('checkjni')
+  if options.ntrace:
+    TRACE_TYPES.add('ntrace')
+  if options.cms:
+    GC_TYPES.add('cms')
+  if options.npicimage:
+    IMAGE_TYPES.add('npicimage')
+  if options.multinpicimage:
+    IMAGE_TYPES.add('multinpicimage')
+  if options.multipicimage:
+    IMAGE_TYPES.add('multipicimage')
+  if options.verbose:
+    verbose = True
+  if options.n_thread:
+    n_thread = max(1, options.n_thread)
+  if options.dry_run:
+    dry_run = True
+    verbose = True
+  if options.build:
+    build = True
+  if options.gdb:
+    n_thread = 1
+    gdb = True
+    if options.gdb_arg:
+      gdb_arg = options.gdb_arg
+
+  return test
+
+
+def main():
+  gather_test_info()
+  user_requested_test = parse_option()
+  setup_test_env()
+  if build:
+    build_targets = ''
+    if 'host' in TARGET_TYPES:
+      # The trailing space keeps the two targets separated when both are
+      # selected.
+      build_targets += 'test-art-host-run-test-dependencies '
+    if 'target' in TARGET_TYPES:
+      build_targets += 'test-art-target-run-test-dependencies '
+    build_command = 'make -j' + str(n_thread) + ' ' + build_targets
+    if subprocess.call(build_command.split()):
+      sys.exit(1)
+  if user_requested_test:
+    test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_test,))
+  else:
+    test_runner_thread = threading.Thread(target=run_tests, args=(RUN_TEST_SET,))
+  test_runner_thread.daemon = True
+  try:
+    test_runner_thread.start()
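+    # Poll with a short sleep instead of join() so the main thread stays
+    # responsive to KeyboardInterrupt while tests run.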
+    while threading.active_count() > 1:
+      time.sleep(0.1)
+    print_analysis()
+    if failed_tests:
+      sys.exit(1)
+    sys.exit(0)
+  except SystemExit:
+    pass
+  except:
+    # Report whatever has completed so far before exiting on an unexpected
+    # error.
+    print_analysis()
+    sys.exit(1)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index 008e2e5..351857d 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -121,6 +121,8 @@
   { "941-recursive-obsolete-jit", common_redefine::OnLoad, nullptr },
   { "942-private-recursive", common_redefine::OnLoad, nullptr },
   { "943-private-recursive-jit", common_redefine::OnLoad, nullptr },
+  { "944-transform-classloaders", common_redefine::OnLoad, nullptr },
+  { "945-obsolete-native", common_redefine::OnLoad, nullptr },
 };
 
 static AgentLib* FindAgent(char* name) {
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index d792e90..59c5211 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -20,7 +20,7 @@
 //
 // In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
 
-art_cc_binary {
+cc_binary {  // Do not use art_cc_binary because HOST_PREFER_32_BIT is incompatible with genrule.
     name: "cpp-define-generator-data",
     host_supported: true,
     device_supported: false,
@@ -34,3 +34,14 @@
         "libbase",
     ],
 }
+
+// Note: See $OUT_DIR/soong/build.ninja for the exact file name this rule
+// generates, in case you want to run make on just this rule later.
+genrule {
+    name: "cpp-define-generator-asm-support",
+    out: ["asm_support_gen.h"],
+    tools: ["cpp-define-generator-data"],
+    tool_files: ["verify-asm-support"],
+    cmd: "$(location verify-asm-support) --quiet \"$(location cpp-define-generator-data)\" \"$(out)\"",
+}
diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def
index 82cdbb2..5fa5194 100644
--- a/tools/cpp-define-generator/constant_jit.def
+++ b/tools/cpp-define-generator/constant_jit.def
@@ -25,6 +25,5 @@
 
 DEFINE_JIT_CONSTANT(CHECK_OSR,       int16_t, art::jit::kJitCheckForOSR)
 DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled)
-DEFINE_JIT_CONSTANT(CHECK_OSR_THRESHOLD, int16_t, art::jit::Jit::kJitRecheckOSRThreshold)
 
 #undef DEFINE_JIT_CONSTANT
diff --git a/tools/cpp-define-generator/presubmit-check-files-up-to-date b/tools/cpp-define-generator/presubmit-check-files-up-to-date
new file mode 100755
index 0000000..67a702a
--- /dev/null
+++ b/tools/cpp-define-generator/presubmit-check-files-up-to-date
@@ -0,0 +1,67 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ---------------------------------------------------------------------------
+
+# Generates asm_support_gen.h into a temporary location.
+# Then verifies it is the same as our local stored copy.
+
+GEN_TOOL=cpp-define-generator-data
+
+# Redirect which's output so a successful check stays silent.
+if ! which "$GEN_TOOL" >/dev/null; then
+  echo "ERROR: Please build cpp-define-generator-data or source build/envsetup.sh" >&2
+  exit 1
+fi
+
+#######################
+#######################
+
+PREUPLOAD_COMMIT_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
+BUILD_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
+
+function finish() {
+  # Delete temp files.
+  [[ -f "$PREUPLOAD_COMMIT_COPY" ]] && rm "$PREUPLOAD_COMMIT_COPY"
+  [[ -f "$BUILD_COPY" ]] && rm "$BUILD_COPY"
+}
+trap finish EXIT
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ART_DIR="$( cd "$DIR/../.." && pwd )"
+ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
+
+# Repo upload hook runs inside of the top-level git directory.
+# If we run this script manually, be in the right place for git.
+cd "$ART_DIR"
+
+if [[ -z $PREUPLOAD_COMMIT ]]; then
+  echo "WARNING: Not running as a pre-upload hook. Assuming commit to check = 'HEAD'"
+  PREUPLOAD_COMMIT=HEAD
+fi
+
+# Get version we are about to push into git.
+git show "$PREUPLOAD_COMMIT:$ASM_SUPPORT_GEN_CHECKED_IN_COPY" > "$PREUPLOAD_COMMIT_COPY" || exit 1
+# Get version that our build would have made.
+"$GEN_TOOL" > "$BUILD_COPY" || exit 1
+
+if ! diff "$PREUPLOAD_COMMIT_COPY" "$BUILD_COPY"; then
+  echo "asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
+  echo "             has diverged from the build copy." >&2
+  echo "             Please re-run the 'generate-asm-support' command to resync the header." >&2
+  exit 1
+fi
+
+# Success. Print nothing to avoid spamming users.
diff --git a/tools/cpp-define-generator/verify-asm-support b/tools/cpp-define-generator/verify-asm-support
new file mode 100755
index 0000000..745b115
--- /dev/null
+++ b/tools/cpp-define-generator/verify-asm-support
@@ -0,0 +1,101 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ---------------------------------------------------------------------------
+
+# Generates asm_support_gen.h into the $OUT directory in the build.
+# Then verifies that it is the same as in runtime/generated/asm_support_gen.h
+
+# Validates that art/runtime/generated/asm_support_gen.h is up to date.
+# - This must be run after a build since it uses cpp-define-generator-data.
+
+# Path to asm_support_gen.h that we check into our git repository.
+ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
+# If set to "y", overwrite the local checked-in copy when it differs from the
+# generated version instead of producing an error.
+OVERWRITE_CHECKED_IN_COPY_IF_CHANGED="n"
+
+#######################
+#######################
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ART_DIR="$( cd "$DIR/../.." && pwd )"
+ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY="$ART_DIR/runtime/generated/asm_support_gen.h"
+
+# Sanity check that we haven't moved the file around.
+# If we did, perhaps the above constant should be updated.
+if ! [[ -f "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" ]]; then
+  echo "ERROR: Missing asm_support_gen.h, expected to be in '$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY'" >&2
+  exit 1
+fi
+
+# The absolute path to cpp-define-generator is in $1
+# Generate the file as part of the build into the out location specified by $2.
+
+# Compare that the generated file matches our golden copy that's checked into git.
+# If not, it is a fatal error and the user needs to run 'generate-asm-support' to rebuild.
+
+if [[ $# -lt 2 ]]; then
+  echo "Usage: $0 [--quiet] [--presubmit] <path-to-cpp-define-generator-data-binary> <output-file>'" >&2
+  exit 1
+fi
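+
+# Illustrative invocation (the genrule in Android.bp passes the tool and
+# output locations via $(location ...) and $(out); the paths below are
+# placeholders):
+#   verify-asm-support --quiet /path/to/cpp-define-generator-data /tmp/asm_support_gen.h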
+
+# Suppress 'chatty' messages during the build.
+# If anything is printed in a success case then
+# the main Android build can't reuse the same line for
+# showing multiple commands being executed.
+QUIET=false
+if [[ "$1" == "--quiet" ]]; then
+  QUIET=true
+  shift
+fi
+
+CPP_DEFINE_GENERATOR_TOOL="$1"
+OUTPUT_FILE="$2"
+
+function pecho() {
+  if ! $QUIET; then
+    echo "$@"
+  fi
+}
+
+# Generate the header. Print the command we're running to console for readability.
+pecho "cpp-define-generator-data > \"$OUTPUT_FILE\""
+"$CPP_DEFINE_GENERATOR_TOOL" > "$OUTPUT_FILE"
+retval="$?"
+
+if [[ $retval -ne 0 ]]; then
+  echo "verify-asm-support: FATAL: Error while running cpp-define-generator-data" >&2
+  exit $retval
+fi
+
+if ! diff "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" "$OUTPUT_FILE"; then
+
+  if [[ $OVERWRITE_CHECKED_IN_COPY_IF_CHANGED == "y" ]]; then
+    cp "$OUTPUT_FILE" "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY"
+    echo "verify-asm-support: OK: Overwrote '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' with build copy."
+    echo "                        Please 'git add $ASM_SUPPORT_GEN_CHECKED_IN_COPY'."
+  else
+    echo "---------------------------------------------------------------------------------------------" >&2
+    echo "verify-asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
+    echo "                    has diverged from the build copy." >&2
+    echo "                    Please re-run the 'generate-asm-support' command to resync the header." >&2
+    [[ -f "$OUTPUT_FILE" ]] && rm "$OUTPUT_FILE"
+    exit 1
+  fi
+fi
+
+pecho "verify-asm-support: SUCCESS. Built '$OUTPUT_FILE' which matches our checked in copy."