Merge "Do not use sa_restorer if not defined"
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 40c676c..c4d538f 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -256,7 +256,14 @@
         instrumentation: true,
         profile_file: "art/dex2oat.profdata",
         benchmarks: ["dex2oat"],
-    }
+    },
+    target: {
+        android: {
+            lto: {
+                thin: true,
+            },
+        },
+    },
 }
 
 art_cc_library {
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 52a0796..b268204 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -60,13 +60,12 @@
     case LinkerPatch::Type::kCallRelative:
     case LinkerPatch::Type::kBakerReadBarrierBranch:
       return false;
+    case LinkerPatch::Type::kDataBimgRelRo:
     case LinkerPatch::Type::kMethodRelative:
     case LinkerPatch::Type::kMethodBssEntry:
     case LinkerPatch::Type::kTypeRelative:
-    case LinkerPatch::Type::kTypeClassTable:
     case LinkerPatch::Type::kTypeBssEntry:
     case LinkerPatch::Type::kStringRelative:
-    case LinkerPatch::Type::kStringInternTable:
     case LinkerPatch::Type::kStringBssEntry:
       return patch.LiteralOffset() == patch.PcInsnOffset();
   }
@@ -271,10 +270,9 @@
       shift = 0u;  // No shift for ADD.
     } else {
       // LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
-      DCHECK(patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
-             patch.GetType() == LinkerPatch::Type::kTypeClassTable ||
+      DCHECK(patch.GetType() == LinkerPatch::Type::kDataBimgRelRo ||
+             patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
              patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
-             patch.GetType() == LinkerPatch::Type::kStringInternTable ||
              patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
       DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
     }
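
The DCHECK_EQ mask above accepts exactly the AArch64 32-/64-bit LDR/STR (immediate, unsigned offset) encodings whose imm12 field is still zero: bit 30 (operand size) and bit 22 (load vs. store) are excluded from the mask. A standalone check with example encodings, illustrative only and not part of the patcher:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the DCHECK_EQ above: accept a 32- or 64-bit LDR/STR (immediate,
// unsigned offset) whose imm12 field is still zero/unset. Bit 30 (operand
// size) and bit 22 (load vs. store) are excluded from the mask.
bool IsLdrStrWithZeroImm12(uint32_t insn) {
  return (insn & 0xbfbffc00u) == 0xb9000000u;
}

int main() {
  assert(IsLdrStrWithZeroImm12(0xb9400000u));   // ldr w0, [x0]
  assert(IsLdrStrWithZeroImm12(0xf9400000u));   // ldr x0, [x0]
  assert(IsLdrStrWithZeroImm12(0xb9000000u));   // str w0, [x0]
  assert(!IsLdrStrWithZeroImm12(0xb9400400u));  // ldr w0, [x0, #4]: imm12 != 0
  return 0;
}
```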
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index a5f6099..3da7a43 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -529,6 +529,8 @@
         stream_(output),
         rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
         text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),
+        data_bimg_rel_ro_(
+            this, ".data.bimg.rel.ro", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
         bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
         dex_(this, ".dex", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
         dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),
@@ -552,6 +554,7 @@
         loaded_size_(0u),
         virtual_address_(0) {
     text_.phdr_flags_ = PF_R | PF_X;
+    data_bimg_rel_ro_.phdr_flags_ = PF_R | PF_W;  // Shall be made read-only at run time.
     bss_.phdr_flags_ = PF_R | PF_W;
     dex_.phdr_flags_ = PF_R;
     dynamic_.phdr_flags_ = PF_R | PF_W;
@@ -566,6 +569,7 @@
   BuildIdSection* GetBuildId() { return &build_id_; }
   Section* GetRoData() { return &rodata_; }
   Section* GetText() { return &text_; }
+  Section* GetDataBimgRelRo() { return &data_bimg_rel_ro_; }
   Section* GetBss() { return &bss_; }
   Section* GetDex() { return &dex_; }
   StringSection* GetStrTab() { return &strtab_; }
@@ -694,6 +698,7 @@
   void PrepareDynamicSection(const std::string& elf_file_path,
                              Elf_Word rodata_size,
                              Elf_Word text_size,
+                             Elf_Word data_bimg_rel_ro_size,
                              Elf_Word bss_size,
                              Elf_Word bss_methods_offset,
                              Elf_Word bss_roots_offset,
@@ -707,6 +712,9 @@
     // Allocate all pre-dynamic sections.
     rodata_.AllocateVirtualMemory(rodata_size);
     text_.AllocateVirtualMemory(text_size);
+    if (data_bimg_rel_ro_size != 0) {
+      data_bimg_rel_ro_.AllocateVirtualMemory(data_bimg_rel_ro_size);
+    }
     if (bss_size != 0) {
       bss_.AllocateVirtualMemory(bss_size);
     }
@@ -735,6 +743,24 @@
       Elf_Word oatlastword_address = rodata_.GetAddress() + rodata_size - 4;
       dynsym_.Add(oatlastword, &rodata_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
     }
+    if (data_bimg_rel_ro_size != 0u) {
+      Elf_Word oatdatabimgrelro = dynstr_.Add("oatdatabimgrelro");
+      dynsym_.Add(oatdatabimgrelro,
+                  &data_bimg_rel_ro_,
+                  data_bimg_rel_ro_.GetAddress(),
+                  data_bimg_rel_ro_size,
+                  STB_GLOBAL,
+                  STT_OBJECT);
+      Elf_Word oatdatabimgrelrolastword = dynstr_.Add("oatdatabimgrelrolastword");
+      Elf_Word oatdatabimgrelrolastword_address =
+          data_bimg_rel_ro_.GetAddress() + data_bimg_rel_ro_size - 4;
+      dynsym_.Add(oatdatabimgrelrolastword,
+                  &data_bimg_rel_ro_,
+                  oatdatabimgrelrolastword_address,
+                  4,
+                  STB_GLOBAL,
+                  STT_OBJECT);
+    }
     DCHECK_LE(bss_roots_offset, bss_size);
     if (bss_size != 0u) {
       Elf_Word oatbss = dynstr_.Add("oatbss");
@@ -1010,6 +1036,7 @@
 
   Section rodata_;
   Section text_;
+  Section data_bimg_rel_ro_;
   Section bss_;
   Section dex_;
   CachedStringSection dynstr_;
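
The PF_R | PF_W flags plus the "read-only at run time" note describe a relro-style life cycle for the new section. A minimal sketch of that idea with invented names, assuming a page-aligned section (matching the kPageSize alignment the section is created with); this is not ART's actual loader code:

```cpp
#include <sys/mman.h>

#include <cstddef>
#include <cstdint>

// Hypothetical relro-style life cycle for .data.bimg.rel.ro: map writable,
// relocate each 32-bit entry by the boot image base, then drop write access
// so the section behaves like .rodata from then on.
void RelocateAndSeal(uint32_t* section, size_t num_entries,
                     uint32_t boot_image_begin, size_t section_size) {
  for (size_t i = 0; i != num_entries; ++i) {
    section[i] += boot_image_begin;  // Offset -> absolute address.
  }
  mprotect(section, section_size, PROT_READ);
}
```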
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 6f4e774..36051d2 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -41,19 +41,27 @@
   // choose to squeeze the Type into fewer than 8 bits, we'll have to declare
   // patch_type_ as an uintN_t and do explicit static_cast<>s.
   enum class Type : uint8_t {
+    kDataBimgRelRo,           // NOTE: Actual patching is instruction_set-dependent.
     kMethodRelative,          // NOTE: Actual patching is instruction_set-dependent.
     kMethodBssEntry,          // NOTE: Actual patching is instruction_set-dependent.
     kCall,
     kCallRelative,            // NOTE: Actual patching is instruction_set-dependent.
     kTypeRelative,            // NOTE: Actual patching is instruction_set-dependent.
-    kTypeClassTable,          // NOTE: Actual patching is instruction_set-dependent.
     kTypeBssEntry,            // NOTE: Actual patching is instruction_set-dependent.
     kStringRelative,          // NOTE: Actual patching is instruction_set-dependent.
-    kStringInternTable,       // NOTE: Actual patching is instruction_set-dependent.
     kStringBssEntry,          // NOTE: Actual patching is instruction_set-dependent.
     kBakerReadBarrierBranch,  // NOTE: Actual patching is instruction_set-dependent.
   };
 
+  static LinkerPatch DataBimgRelRoPatch(size_t literal_offset,
+                                        uint32_t pc_insn_offset,
+                                        uint32_t boot_image_offset) {
+    LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file */ nullptr);
+    patch.boot_image_offset_ = boot_image_offset;
+    patch.pc_insn_offset_ = pc_insn_offset;
+    return patch;
+  }
+
   static LinkerPatch RelativeMethodPatch(size_t literal_offset,
                                          const DexFile* target_dex_file,
                                          uint32_t pc_insn_offset,
@@ -100,16 +108,6 @@
     return patch;
   }
 
-  static LinkerPatch TypeClassTablePatch(size_t literal_offset,
-                                         const DexFile* target_dex_file,
-                                         uint32_t pc_insn_offset,
-                                         uint32_t target_type_idx) {
-    LinkerPatch patch(literal_offset, Type::kTypeClassTable, target_dex_file);
-    patch.type_idx_ = target_type_idx;
-    patch.pc_insn_offset_ = pc_insn_offset;
-    return patch;
-  }
-
   static LinkerPatch TypeBssEntryPatch(size_t literal_offset,
                                        const DexFile* target_dex_file,
                                        uint32_t pc_insn_offset,
@@ -130,16 +128,6 @@
     return patch;
   }
 
-  static LinkerPatch StringInternTablePatch(size_t literal_offset,
-                                            const DexFile* target_dex_file,
-                                            uint32_t pc_insn_offset,
-                                            uint32_t target_string_idx) {
-    LinkerPatch patch(literal_offset, Type::kStringInternTable, target_dex_file);
-    patch.string_idx_ = target_string_idx;
-    patch.pc_insn_offset_ = pc_insn_offset;
-    return patch;
-  }
-
   static LinkerPatch StringBssEntryPatch(size_t literal_offset,
                                          const DexFile* target_dex_file,
                                          uint32_t pc_insn_offset,
@@ -172,14 +160,13 @@
 
   bool IsPcRelative() const {
     switch (GetType()) {
+      case Type::kDataBimgRelRo:
       case Type::kMethodRelative:
       case Type::kMethodBssEntry:
       case Type::kCallRelative:
       case Type::kTypeRelative:
-      case Type::kTypeClassTable:
       case Type::kTypeBssEntry:
       case Type::kStringRelative:
-      case Type::kStringInternTable:
       case Type::kStringBssEntry:
       case Type::kBakerReadBarrierBranch:
         return true;
@@ -188,6 +175,11 @@
     }
   }
 
+  uint32_t BootImageOffset() const {
+    DCHECK(patch_type_ == Type::kDataBimgRelRo);
+    return boot_image_offset_;
+  }
+
   MethodReference TargetMethod() const {
     DCHECK(patch_type_ == Type::kMethodRelative ||
            patch_type_ == Type::kMethodBssEntry ||
@@ -198,40 +190,35 @@
 
   const DexFile* TargetTypeDexFile() const {
     DCHECK(patch_type_ == Type::kTypeRelative ||
-           patch_type_ == Type::kTypeClassTable ||
            patch_type_ == Type::kTypeBssEntry);
     return target_dex_file_;
   }
 
   dex::TypeIndex TargetTypeIndex() const {
     DCHECK(patch_type_ == Type::kTypeRelative ||
-           patch_type_ == Type::kTypeClassTable ||
            patch_type_ == Type::kTypeBssEntry);
     return dex::TypeIndex(type_idx_);
   }
 
   const DexFile* TargetStringDexFile() const {
     DCHECK(patch_type_ == Type::kStringRelative ||
-           patch_type_ == Type::kStringInternTable ||
            patch_type_ == Type::kStringBssEntry);
     return target_dex_file_;
   }
 
   dex::StringIndex TargetStringIndex() const {
     DCHECK(patch_type_ == Type::kStringRelative ||
-           patch_type_ == Type::kStringInternTable ||
            patch_type_ == Type::kStringBssEntry);
     return dex::StringIndex(string_idx_);
   }
 
   uint32_t PcInsnOffset() const {
-    DCHECK(patch_type_ == Type::kMethodRelative ||
+    DCHECK(patch_type_ == Type::kDataBimgRelRo ||
+           patch_type_ == Type::kMethodRelative ||
            patch_type_ == Type::kMethodBssEntry ||
            patch_type_ == Type::kTypeRelative ||
-           patch_type_ == Type::kTypeClassTable ||
            patch_type_ == Type::kTypeBssEntry ||
            patch_type_ == Type::kStringRelative ||
-           patch_type_ == Type::kStringInternTable ||
            patch_type_ == Type::kStringBssEntry);
     return pc_insn_offset_;
   }
@@ -263,10 +250,11 @@
   uint32_t literal_offset_ : 24;  // Method code size up to 16MiB.
   Type patch_type_ : 8;
   union {
-    uint32_t cmp1_;             // Used for relational operators.
-    uint32_t method_idx_;       // Method index for Call/Method patches.
-    uint32_t type_idx_;         // Type index for Type patches.
-    uint32_t string_idx_;       // String index for String patches.
+    uint32_t cmp1_;               // Used for relational operators.
+    uint32_t boot_image_offset_;  // Data to write to the .data.bimg.rel.ro entry.
+    uint32_t method_idx_;         // Method index for Call/Method patches.
+    uint32_t type_idx_;           // Type index for Type patches.
+    uint32_t string_idx_;         // String index for String patches.
     uint32_t baker_custom_value1_;
     static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
     static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
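
A small usage sketch of the new factory and accessors, using only the API visible in this header; the constant values are invented:

```cpp
linker::LinkerPatch patch = linker::LinkerPatch::DataBimgRelRoPatch(
    /* literal_offset */ 8u,           // Offset of the LDR in the method code.
    /* pc_insn_offset */ 4u,           // Offset of the matching ADRP.
    /* boot_image_offset */ 0x1000u);  // Data for the .data.bimg.rel.ro entry.
CHECK_EQ(patch.GetType(), linker::LinkerPatch::Type::kDataBimgRelRo);
CHECK(patch.IsPcRelative());
CHECK_EQ(patch.BootImageOffset(), 0x1000u);
CHECK_EQ(patch.PcInsnOffset(), 4u);
```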
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ff59173..0fcc9c6 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -51,6 +51,8 @@
 #include "dex/verified_method.h"
 #include "driver/compiler_driver.h"
 #include "graph_visualizer.h"
+#include "image.h"
+#include "gc/space/image_space.h"
 #include "intern_table.h"
 #include "intrinsics.h"
 #include "mirror/array-inl.h"
@@ -722,6 +724,47 @@
   }
 }
 
+static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
+  Runtime* runtime = Runtime::Current();
+  DCHECK(runtime->IsAotCompiler());
+  const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
+      runtime->GetHeap()->GetBootImageSpaces();
+  // Check that the `object` is in the expected section of one of the boot image files.
+  DCHECK(std::any_of(boot_image_spaces.begin(),
+                     boot_image_spaces.end(),
+                     [object, section](gc::space::ImageSpace* space) {
+                       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+                       uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
+                       return space->GetImageHeader().GetImageSection(section).Contains(offset);
+                     }));
+  uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
+  uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
+  return dchecked_integral_cast<uint32_t>(offset);
+}
+
+// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
+uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
+  DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
+  ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
+  DCHECK(klass != nullptr);
+  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
+}
+
+// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
+uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
+  DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
+  ObjPtr<mirror::String> string = load_string->GetString().Get();
+  DCHECK(string != nullptr);
+  return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
+}
+
+uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) {
+  DCHECK_EQ(invoke->GetMethodLoadKind(), HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo);
+  ArtMethod* method = invoke->GetResolvedMethod();
+  DCHECK(method != nullptr);
+  return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
+}
+
 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
   // The DCHECKS below check that a register is not specified twice in
   // the summary. The out location can overlap with an input, so we need
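
GetBootImageOffsetImpl() subtracts the begin of the *first* boot image space even for objects that live in a later space, which is valid because the boot image spaces are mapped contiguously. A worked example with invented addresses:

```cpp
// Invented layout: first boot image space at 0x70000000, second space mapped
// contiguously at 0x70400000.
uintptr_t space0_begin = 0x70000000u;
uintptr_t method_addr  = 0x70401230u;  // An ArtMethod inside the second space.

// The subtraction always uses the first space's begin, so objects in later
// spaces still get a single flat offset into the boot image.
uint32_t boot_image_offset =
    static_cast<uint32_t>(method_addr - space0_begin);  // == 0x00401230
```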
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 60de722..7031483 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -556,6 +556,10 @@
                                                         Location runtime_return_location);
   void GenerateLoadClassRuntimeCall(HLoadClass* cls);
 
+  uint32_t GetBootImageOffset(HLoadClass* load_class);
+  uint32_t GetBootImageOffset(HLoadString* load_string);
+  uint32_t GetBootImageOffset(HInvokeStaticOrDirect* invoke);
+
   static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
 
   void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1f9c554..a024df8 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -78,6 +78,7 @@
 using helpers::OutputRegister;
 using helpers::QRegisterFrom;
 using helpers::RegisterFrom;
+using helpers::SRegisterFrom;
 using helpers::StackOperandFrom;
 using helpers::VIXLRegCodeFromART;
 using helpers::WRegisterFrom;
@@ -4459,12 +4460,23 @@
       // Load method address from literal pool.
       __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+      uint32_t boot_image_offset = GetBootImageOffset(invoke);
+      vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_offset);
+      EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
+      // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+      vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_offset, adrp_label);
+      // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+      EmitLdrOffsetPlaceholder(ldr_label, WRegisterFrom(temp), XRegisterFrom(temp));
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
-      // Add ADRP with its PC-relative DexCache access patch.
+      // Add ADRP with its PC-relative .bss entry patch.
       MethodReference target_method(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex());
       vixl::aarch64::Label* adrp_label = NewMethodBssEntryPatch(target_method);
       EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
-      // Add LDR with its PC-relative DexCache access patch.
+      // Add LDR with its PC-relative .bss entry patch.
       vixl::aarch64::Label* ldr_label =
           NewMethodBssEntryPatch(target_method, adrp_label);
       EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
@@ -4559,6 +4571,13 @@
   codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
 }
 
+vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
+    uint32_t boot_image_offset,
+    vixl::aarch64::Label* adrp_label) {
+  return NewPcRelativePatch(
+      /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+}
+
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
     MethodReference target_method,
     vixl::aarch64::Label* adrp_label) {
@@ -4681,6 +4700,14 @@
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -4700,11 +4727,10 @@
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -4779,7 +4805,7 @@
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -4888,23 +4914,16 @@
       __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      // Add ADRP with its PC-relative type patch.
-      const DexFile& dex_file = cls->GetDexFile();
-      dex::TypeIndex type_index = cls->GetTypeIndex();
-      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageTypePatch(dex_file, type_index);
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
+      // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset);
       codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
-      // Add LDR with its PC-relative type patch.
+      // Add LDR with its PC-relative .data.bimg.rel.ro patch.
       vixl::aarch64::Label* ldr_label =
-          codegen_->NewBootImageTypePatch(dex_file, type_index, adrp_label);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
       codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X());
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(dex_file.StringByTypeIdx(type_index)));
-      if (masked_hash != 0) {
-        __ Sub(out.W(), out.W(), Operand(masked_hash));
-      }
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -4914,7 +4933,7 @@
       vixl::aarch64::Register temp = XRegisterFrom(out_loc);
       vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
       codegen_->EmitAdrpPlaceholder(adrp_label, temp);
-      // Add LDR with its PC-relative Class patch.
+      // Add LDR with its PC-relative Class .bss entry patch.
       vixl::aarch64::Label* ldr_label =
           codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
       // /* GcRoot<mirror::Class> */ out = *(base_address + offset)  /* PC-relative */
@@ -4989,7 +5008,7 @@
     HLoadString::LoadKind desired_string_load_kind) {
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5055,16 +5074,15 @@
       __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      // Add ADRP with its PC-relative String patch.
-      const DexFile& dex_file = load->GetDexFile();
-      const dex::StringIndex string_index = load->GetStringIndex();
-      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageStringPatch(dex_file, string_index);
+      // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
+      vixl::aarch64::Label* adrp_label = codegen_->NewBootImageRelRoPatch(boot_image_offset);
       codegen_->EmitAdrpPlaceholder(adrp_label, out.X());
-      // Add LDR with its PC-relative String patch.
+      // Add LDR with its PC-relative .data.bimg.rel.ro patch.
       vixl::aarch64::Label* ldr_label =
-          codegen_->NewBootImageStringPatch(dex_file, string_index, adrp_label);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
       codegen_->EmitLdrOffsetPlaceholder(ldr_label, out.W(), out.X());
       return;
     }
@@ -5076,7 +5094,7 @@
       Register temp = XRegisterFrom(out_loc);
       vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index);
       codegen_->EmitAdrpPlaceholder(adrp_label, temp);
-      // Add LDR with its .bss entry String patch.
+      // Add LDR with its PC-relative String .bss entry patch.
       vixl::aarch64::Label* ldr_label =
           codegen_->NewStringBssEntryPatch(dex_file, string_index, adrp_label);
       // /* GcRoot<mirror::String> */ out = *(base_address + offset)  /* PC-relative */
@@ -5462,6 +5480,113 @@
   }
 }
 
+// TODO: integrate with HandleBinaryOp?
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARM64::GenerateMinMaxInt(LocationSummary* locations,
+                                                      bool is_min,
+                                                      DataType::Type type) {
+  Location op1 = locations->InAt(0);
+  Location op2 = locations->InAt(1);
+  Location out = locations->Out();
+
+  Register op1_reg;
+  Register op2_reg;
+  Register out_reg;
+  if (type == DataType::Type::kInt64) {
+    op1_reg = XRegisterFrom(op1);
+    op2_reg = XRegisterFrom(op2);
+    out_reg = XRegisterFrom(out);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kInt32);
+    op1_reg = WRegisterFrom(op1);
+    op2_reg = WRegisterFrom(op2);
+    out_reg = WRegisterFrom(out);
+  }
+
+  __ Cmp(op1_reg, op2_reg);
+  __ Csel(out_reg, op1_reg, op2_reg, is_min ? lt : gt);
+}
+
+void InstructionCodeGeneratorARM64::GenerateMinMaxFP(LocationSummary* locations,
+                                                     bool is_min,
+                                                     DataType::Type type) {
+  Location op1 = locations->InAt(0);
+  Location op2 = locations->InAt(1);
+  Location out = locations->Out();
+
+  FPRegister op1_reg;
+  FPRegister op2_reg;
+  FPRegister out_reg;
+  if (type == DataType::Type::kFloat64) {
+    op1_reg = DRegisterFrom(op1);
+    op2_reg = DRegisterFrom(op2);
+    out_reg = DRegisterFrom(out);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kFloat32);
+    op1_reg = SRegisterFrom(op1);
+    op2_reg = SRegisterFrom(op2);
+    out_reg = SRegisterFrom(out);
+  }
+
+  if (is_min) {
+    __ Fmin(out_reg, op1_reg, op2_reg);
+  } else {
+    __ Fmax(out_reg, op1_reg, op2_reg);
+  }
+}
+
+// TODO: integrate with HandleBinaryOp?
+void InstructionCodeGeneratorARM64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min, type);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderARM64::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorARM64::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderARM64::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorARM64::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
 void LocationsBuilderARM64::VisitAbs(HAbs* abs) {
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
   switch (abs->GetResultType()) {
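
For reference, the Cmp/Csel pair in GenerateMinMaxInt() above computes the following branch-free selection; this C++ model is illustrative, not generated code:

```cpp
#include <cstdint>

// is_min ? lt : gt is the Csel condition; on equality neither holds, so the
// Csel falls through to op2, which is fine because the operands are equal.
int64_t MinMaxIntReference(int64_t op1, int64_t op2, bool is_min) {
  bool take_op1 = is_min ? (op1 < op2) : (op1 > op2);  // Cmp + condition.
  return take_op1 ? op1 : op2;                         // Csel out, op1, op2.
}
```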
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e34f799..b59ccd9 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -273,6 +273,10 @@
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
   void HandleCondition(HCondition* instruction);
 
+  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
+
   // Generate a heap reference load using one register `out`:
   //
   //   out <- *(out + offset)
@@ -561,7 +565,14 @@
     UNIMPLEMENTED(FATAL);
   }
 
-  // Add a new PC-relative method patch for an instruction and return the label
+  // Add a new boot image relocation patch for an instruction and return the label
+  // to be bound before the instruction. The instruction will be either the
+  // ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
+  // to the associated ADRP patch label).
+  vixl::aarch64::Label* NewBootImageRelRoPatch(uint32_t boot_image_offset,
+                                               vixl::aarch64::Label* adrp_label = nullptr);
+
+  // Add a new boot image method patch for an instruction and return the label
   // to be bound before the instruction. The instruction will be either the
   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
   // to the associated ADRP patch label).
@@ -575,7 +586,7 @@
   vixl::aarch64::Label* NewMethodBssEntryPatch(MethodReference target_method,
                                                vixl::aarch64::Label* adrp_label = nullptr);
 
-  // Add a new PC-relative type patch for an instruction and return the label
+  // Add a new boot image type patch for an instruction and return the label
   // to be bound before the instruction. The instruction will be either the
   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
   // to the associated ADRP patch label).
@@ -591,7 +602,7 @@
                                              dex::TypeIndex type_index,
                                              vixl::aarch64::Label* adrp_label = nullptr);
 
-  // Add a new PC-relative string patch for an instruction and return the label
+  // Add a new boot image string patch for an instruction and return the label
   // to be bound before the instruction. The instruction will be either the
   // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
   // to the associated ADRP patch label).
@@ -820,7 +831,8 @@
   Uint32ToLiteralMap uint32_literals_;
   // Deduplication map for 64-bit literals, used for non-patchable method address or method code.
   Uint64ToLiteralMap uint64_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative.
+  // PC-relative method patch info for kBootImageLinkTimePcRelative/BootImageRelRo.
+  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
@@ -828,7 +840,7 @@
   ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
   // PC-relative type patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
-  // PC-relative String patch info; type depends on configuration (intern table or boot image PIC).
+  // PC-relative String patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
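
Putting the two NewBootImageRelRoPatch() overloads together, the kBootImageRelRo call sites in code_generator_arm64.cc above follow this pattern; `codegen`, `temp`, and `boot_image_offset` are assumed to be in scope:

```cpp
// First call: create the ADRP patch (no adrp_label yet).
vixl::aarch64::Label* adrp_label = codegen->NewBootImageRelRoPatch(boot_image_offset);
codegen->EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
// Second call: pass the ADRP label to create the paired LDR patch.
vixl::aarch64::Label* ldr_label =
    codegen->NewBootImageRelRoPatch(boot_image_offset, adrp_label);
// The entry is 32-bit (boot image lives in the low 4GiB), so load into a W register.
codegen->EmitLdrOffsetPlaceholder(ldr_label, WRegisterFrom(temp), XRegisterFrom(temp));
```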
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 13518ad..6ebcc67 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -4690,6 +4690,244 @@
   }
 }
 
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    case DataType::Type::kFloat32:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      locations->AddTemp(Location::RequiresRegister());
+      break;
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMaxInt(LocationSummary* locations, bool is_min) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+
+  vixl32::Register op1 = RegisterFrom(op1_loc);
+  vixl32::Register op2 = RegisterFrom(op2_loc);
+  vixl32::Register out = RegisterFrom(out_loc);
+
+  __ Cmp(op1, op2);
+
+  {
+    ExactAssemblyScope aas(GetVIXLAssembler(),
+                           3 * kMaxInstructionSizeInBytes,
+                           CodeBufferCheckScope::kMaximumSize);
+
+    __ ite(is_min ? lt : gt);
+    __ mov(is_min ? lt : gt, out, op1);
+    __ mov(is_min ? ge : le, out, op2);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMaxLong(LocationSummary* locations, bool is_min) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::Register op1_lo = LowRegisterFrom(op1_loc);
+  vixl32::Register op1_hi = HighRegisterFrom(op1_loc);
+  vixl32::Register op2_lo = LowRegisterFrom(op2_loc);
+  vixl32::Register op2_hi = HighRegisterFrom(op2_loc);
+  vixl32::Register out_lo = LowRegisterFrom(out_loc);
+  vixl32::Register out_hi = HighRegisterFrom(out_loc);
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  const vixl32::Register temp = temps.Acquire();
+
+  DCHECK(op1_lo.Is(out_lo));
+  DCHECK(op1_hi.Is(out_hi));
+
+  // Compare op1 >= op2, or op1 < op2.
+  __ Cmp(out_lo, op2_lo);
+  __ Sbcs(temp, out_hi, op2_hi);
+
+  // Now GE/LT condition code is correct for the long comparison.
+  {
+    vixl32::ConditionType cond = is_min ? ge : lt;
+    ExactAssemblyScope it_scope(GetVIXLAssembler(),
+                                3 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ itt(cond);
+    __ mov(cond, out_lo, op2_lo);
+    __ mov(cond, out_hi, op2_hi);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax, bool is_min) {
+  LocationSummary* locations = minmax->GetLocations();
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::SRegister op1 = SRegisterFrom(op1_loc);
+  vixl32::SRegister op2 = SRegisterFrom(op2_loc);
+  vixl32::SRegister out = SRegisterFrom(out_loc);
+
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  const vixl32::Register temp1 = temps.Acquire();
+  vixl32::Register temp2 = RegisterFrom(locations->GetTemp(0));
+  vixl32::Label nan, done;
+  vixl32::Label* final_label = codegen_->GetFinalLabel(minmax, &done);
+
+  DCHECK(op1.Is(out));
+
+  __ Vcmp(op1, op2);
+  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+  __ B(vs, &nan, /* far_target */ false);  // if unordered, go to NaN handling.
+
+  // op1 <> op2
+  vixl32::ConditionType cond = is_min ? gt : lt;
+  {
+    ExactAssemblyScope it_scope(GetVIXLAssembler(),
+                                2 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ it(cond);
+    __ vmov(cond, F32, out, op2);
+  }
+  // for <> (not equal), the min/max calculation is done.
+  __ B(ne, final_label, /* far_target */ false);
+
+  // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
+  __ Vmov(temp1, op1);
+  __ Vmov(temp2, op2);
+  if (is_min) {
+    __ Orr(temp1, temp1, temp2);
+  } else {
+    __ And(temp1, temp1, temp2);
+  }
+  __ Vmov(out, temp1);
+  __ B(final_label);
+
+  // handle NaN input.
+  __ Bind(&nan);
+  __ Movt(temp1, High16Bits(kNanFloat));  // 0x7FC0xxxx is a NaN.
+  __ Vmov(out, temp1);
+
+  if (done.IsReferenced()) {
+    __ Bind(&done);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax, bool is_min) {
+  LocationSummary* locations = minmax->GetLocations();
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+
+  // Optimization: don't generate any code if inputs are the same.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
+    return;
+  }
+
+  vixl32::DRegister op1 = DRegisterFrom(op1_loc);
+  vixl32::DRegister op2 = DRegisterFrom(op2_loc);
+  vixl32::DRegister out = DRegisterFrom(out_loc);
+  vixl32::Label handle_nan_eq, done;
+  vixl32::Label* final_label = codegen_->GetFinalLabel(minmax, &done);
+
+  DCHECK(op1.Is(out));
+
+  __ Vcmp(op1, op2);
+  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+  __ B(vs, &handle_nan_eq, /* far_target */ false);  // if unordered, go to NaN handling.
+
+  // op1 <> op2
+  vixl32::ConditionType cond = is_min ? gt : lt;
+  {
+    ExactAssemblyScope it_scope(GetVIXLAssembler(),
+                                2 * kMaxInstructionSizeInBytes,
+                                CodeBufferCheckScope::kMaximumSize);
+    __ it(cond);
+    __ vmov(cond, F64, out, op2);
+  }
+  // for <> (not equal), the min/max calculation is done.
+  __ B(ne, final_label, /* far_target */ false);
+
+  // handle op1 == op2, max(+0.0,-0.0).
+  if (!is_min) {
+    __ Vand(F64, out, op1, op2);
+    __ B(final_label);
+  }
+
+  // handle op1 == op2, min(+0.0,-0.0), NaN input.
+  __ Bind(&handle_nan_eq);
+  __ Vorr(F64, out, op1, op2);  // assemble op1/-0.0/NaN.
+
+  if (done.IsReferenced()) {
+    __ Bind(&done);
+  }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min);
+      break;
+    case DataType::Type::kInt64:
+      GenerateMinMaxLong(minmax->GetLocations(), is_min);
+      break;
+    case DataType::Type::kFloat32:
+      GenerateMinMaxFloat(minmax, is_min);
+      break;
+    case DataType::Type::kFloat64:
+      GenerateMinMaxDouble(minmax, is_min);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderARMVIXL::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderARMVIXL::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
 void LocationsBuilderARMVIXL::VisitAbs(HAbs* abs) {
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
   switch (abs->GetResultType()) {
@@ -7088,7 +7326,7 @@
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -7198,18 +7436,12 @@
       __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
+          codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
       codegen_->EmitMovwMovtPlaceholder(labels, out);
       __ Ldr(out, MemOperand(out, /* offset */ 0));
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(cls->GetDexFile().StringByTypeIdx(cls->GetTypeIndex())));
-      if (masked_hash != 0) {
-        __ Sub(out, out, Operand(masked_hash));
-      }
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -7295,7 +7527,7 @@
     HLoadString::LoadKind desired_string_load_kind) {
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -7359,10 +7591,10 @@
       __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
       codegen_->EmitMovwMovtPlaceholder(labels, out);
       __ Ldr(out, MemOperand(out, /* offset */ 0));
       return;
@@ -8956,6 +9188,14 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      uint32_t boot_image_offset = GetBootImageOffset(invoke);
+      PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
+      vixl32::Register temp_reg = RegisterFrom(temp);
+      EmitMovwMovtPlaceholder(labels, temp_reg);
+      GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset */ 0);
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       PcRelativePatchInfo* labels = NewMethodBssEntryPatch(
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
@@ -9053,6 +9293,13 @@
   }
 }
 
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
+    uint32_t boot_image_offset) {
+  return NewPcRelativePatch(/* dex_file */ nullptr,
+                            boot_image_offset,
+                            &boot_image_method_patches_);
+}
+
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageMethodPatch(
     MethodReference target_method) {
   return NewPcRelativePatch(
@@ -9143,6 +9390,14 @@
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -9162,11 +9417,10 @@
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
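
The Orr/And choice in GenerateMinMaxFloat() and GenerateMinMaxDouble() above encodes the signed-zero rules of min/max: ORing the bit patterns yields -0.0 iff either input is -0.0 (correct for min), while ANDing yields +0.0 unless both inputs are -0.0 (correct for max). A standalone check of that bit-level claim, illustrative only:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

uint32_t Bits(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u));
  return u;
}

int main() {
  uint32_t pos_zero = Bits(+0.0f);  // 0x00000000
  uint32_t neg_zero = Bits(-0.0f);  // 0x80000000
  // Orr keeps the sign bit if either input has it: min(+0.0, -0.0) == -0.0.
  assert((pos_zero | neg_zero) == neg_zero);
  // And keeps it only if both inputs have it: max(+0.0, -0.0) == +0.0.
  assert((pos_zero & neg_zero) == pos_zero);
  return 0;
}
```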
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index bbc715c..2d8f6a6 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -349,6 +349,12 @@
                       bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
+  void GenerateMinMaxInt(LocationSummary* locations, bool is_min);
+  void GenerateMinMaxLong(LocationSummary* locations, bool is_min);
+  void GenerateMinMaxFloat(HInstruction* minmax, bool is_min);
+  void GenerateMinMaxDouble(HInstruction* minmax, bool is_min);
+  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
+
   // Generate a heap reference load using one register `out`:
   //
   //   out <- *(out + offset)
@@ -574,6 +580,7 @@
     vixl::aarch32::Label add_pc_label;
   };
 
+  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset);
   PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method);
   PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
   PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file, dex::TypeIndex type_index);
@@ -798,7 +805,8 @@
 
   // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
   Uint32ToLiteralMap uint32_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative.
+  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
+  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
@@ -806,7 +814,7 @@
   ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
   // PC-relative type patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
-  // PC-relative String patch info; type depends on configuration (intern table or boot image PIC).
+  // PC-relative String patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
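
The Cmp/Sbcs pair in GenerateMinMaxLong() (in the .cc above) performs a branchless signed 64-bit comparison on 32-bit register pairs. A plain C++ model of why the LT condition is correct after the Sbcs:

```cpp
#include <cstdint>

// The low-word CMP produces a borrow; the high-word SBCS folds it in. The
// exact value of a_hi - b_hi - borrow (which cannot overflow int64_t) is
// negative precisely when the full signed 64-bit value a is less than b,
// which is what the LT/GE flags encode after the Sbcs.
bool SignedLessThan64(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
  uint32_t borrow = (a_lo < b_lo) ? 1u : 0u;                     // Cmp lo
  int64_t hi_diff = static_cast<int64_t>(a_hi) - b_hi - borrow;  // Sbcs hi
  return hi_diff < 0;                                            // LT
}
```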
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index eb5f72e..be9ff48 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1597,6 +1597,14 @@
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -1615,11 +1623,10 @@
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -1630,6 +1637,13 @@
   DCHECK_EQ(size, linker_patches->size());
 }
 
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
+    uint32_t boot_image_offset,
+    const PcRelativePatchInfo* info_high) {
+  return NewPcRelativePatch(
+      /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+}
+
 CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
     MethodReference target_method,
     const PcRelativePatchInfo* info_high) {
@@ -7725,7 +7739,7 @@
     HLoadString::LoadKind desired_string_load_kind) {
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -7748,7 +7762,7 @@
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -7835,6 +7849,15 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      uint32_t boot_image_offset = GetBootImageOffset(invoke);
+      PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+      PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+      Register temp_reg = temp.AsRegister<Register>();
+      EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
+      __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       PcRelativePatchInfo* info_high = NewMethodBssEntryPatch(
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
@@ -7956,7 +7979,7 @@
     // We need an extra register for PC-relative literals on R2.
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadClass::LoadKind::kBootImageAddress:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       if (isR6) {
         break;
@@ -8008,7 +8031,7 @@
     // We need an extra register for PC-relative literals on R2.
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadClass::LoadKind::kBootImageAddress:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       base_or_current_method_reg =
           (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
@@ -8065,22 +8088,17 @@
       }
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
+          codegen_->NewBootImageRelRoPatch(boot_image_offset);
       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
       __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(cls->GetDexFile().StringByTypeIdx(cls->GetTypeIndex())));
-      if (masked_hash != 0) {
-        __ Addiu(out, out, -masked_hash);
-      }
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -8171,7 +8189,7 @@
     // We need an extra register for PC-relative literals on R2.
     case HLoadString::LoadKind::kBootImageAddress:
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       if (isR6) {
         break;
@@ -8223,7 +8241,7 @@
     // We need an extra register for PC-relative literals on R2.
     case HLoadString::LoadKind::kBootImageAddress:
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       base_or_current_method_reg =
           (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
@@ -8259,12 +8277,13 @@
       }
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
       CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewBootImageRelRoPatch(boot_image_offset);
       CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
                                                      out,
                                                      base_or_current_method_reg);
@@ -8779,6 +8798,390 @@
   }
 }
 
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMinMaxInt(LocationSummary* locations,
+                                                     bool is_min,
+                                                     bool isR6,
+                                                     DataType::Type type) {
+  if (isR6) {
+    // Some architectures, such as ARM and MIPS (prior to r6), have a
+    // conditional move instruction which only changes the target
+    // (output) register if the condition is true (MIPS prior to r6 had
+    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions
+    // always change the target (output) register.  If the condition is
+    // true the output register gets the contents of the "rs" register;
+    // otherwise, the output register is set to zero. One consequence
+    // of this is that to implement something like "rd = c==0 ? rs : rt"
+    // MIPS32r6 needs to use a pair of SELEQZ/SELNEZ instructions.
+    // After executing this pair of instructions one of the output
+    // registers from the pair will necessarily contain zero. Then the
+    // code ORs the output registers from the SELEQZ/SELNEZ instructions
+    // to get the final result.
+    //
+    // The initial test to see if the output register is same as the
+    // first input register is needed to make sure that value in the
+    // first input register isn't clobbered before we've finished
+    // computing the output value. The logic in the corresponding else
+    // clause performs the same task but makes sure the second input
+    // register isn't clobbered in the event that it's the same register
+    // as the output register; the else clause also handles the case
+    // where the output register is distinct from both the first and the
+    // second input registers.
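+    //
+    // As a concrete sketch, the 32-bit is_min case below becomes:
+    //   slt    AT,  b, a      # AT  = (b < a) ? 1 : 0
+    //   seleqz TMP, a, AT     # TMP = (AT == 0) ? a : 0
+    //   selnez AT,  b, AT     # AT  = (AT != 0) ? b : 0
+    //   or     out, TMP, AT   # one side is the min, the other is zero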
+    if (type == DataType::Type::kInt64) {
+      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
+      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
+      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+      MipsLabel compare_done;
+
+      if (a_lo == b_lo) {
+        if (out_lo != a_lo) {
+          __ Move(out_lo, a_lo);
+          __ Move(out_hi, a_hi);
+        }
+      } else {
+        __ Slt(TMP, b_hi, a_hi);
+        __ Bne(b_hi, a_hi, &compare_done);
+
+        __ Sltu(TMP, b_lo, a_lo);
+
+        __ Bind(&compare_done);
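+        // At this point TMP == 1 iff b < a as a signed 64-bit value: the
+        // signed compare of the high words decides, and the unsigned
+        // compare of the low words breaks the tie when the high words
+        // are equal.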
+
+        if (is_min) {
+          __ Seleqz(AT, a_lo, TMP);
+          __ Selnez(out_lo, b_lo, TMP);  // Safe even if out_lo == a_lo/b_lo
+                                         // because at this point we're
+                                         // done using a_lo/b_lo.
+        } else {
+          __ Selnez(AT, a_lo, TMP);
+          __ Seleqz(out_lo, b_lo, TMP);  // ditto
+        }
+        __ Or(out_lo, out_lo, AT);
+        if (is_min) {
+          __ Seleqz(AT, a_hi, TMP);
+          __ Selnez(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
+        } else {
+          __ Selnez(AT, a_hi, TMP);
+          __ Seleqz(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
+        }
+        __ Or(out_hi, out_hi, AT);
+      }
+    } else {
+      DCHECK_EQ(type, DataType::Type::kInt32);
+      Register a = locations->InAt(0).AsRegister<Register>();
+      Register b = locations->InAt(1).AsRegister<Register>();
+      Register out = locations->Out().AsRegister<Register>();
+
+      if (a == b) {
+        if (out != a) {
+          __ Move(out, a);
+        }
+      } else {
+        __ Slt(AT, b, a);
+        if (is_min) {
+          __ Seleqz(TMP, a, AT);
+          __ Selnez(AT, b, AT);
+        } else {
+          __ Selnez(TMP, a, AT);
+          __ Seleqz(AT, b, AT);
+        }
+        __ Or(out, TMP, AT);
+      }
+    }
+  } else {  // !isR6
+    if (type == DataType::Type::kInt64) {
+      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
+      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
+      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
+      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
+
+      MipsLabel compare_done;
+
+      if (a_lo == b_lo) {
+        if (out_lo != a_lo) {
+          __ Move(out_lo, a_lo);
+          __ Move(out_hi, a_hi);
+        }
+      } else {
+        __ Slt(TMP, a_hi, b_hi);
+        __ Bne(a_hi, b_hi, &compare_done);
+
+        __ Sltu(TMP, a_lo, b_lo);
+
+        __ Bind(&compare_done);
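+        // At this point TMP == 1 iff a < b as a signed 64-bit value,
+        // computed as above: a signed compare of the high words with an
+        // unsigned compare of the low words as the tie-breaker.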
+
+        if (is_min) {
+          if (out_lo != a_lo) {
+            __ Movn(out_hi, a_hi, TMP);
+            __ Movn(out_lo, a_lo, TMP);
+          }
+          if (out_lo != b_lo) {
+            __ Movz(out_hi, b_hi, TMP);
+            __ Movz(out_lo, b_lo, TMP);
+          }
+        } else {
+          if (out_lo != a_lo) {
+            __ Movz(out_hi, a_hi, TMP);
+            __ Movz(out_lo, a_lo, TMP);
+          }
+          if (out_lo != b_lo) {
+            __ Movn(out_hi, b_hi, TMP);
+            __ Movn(out_lo, b_lo, TMP);
+          }
+        }
+      }
+    } else {
+      DCHECK_EQ(type, DataType::Type::kInt32);
+      Register a = locations->InAt(0).AsRegister<Register>();
+      Register b = locations->InAt(1).AsRegister<Register>();
+      Register out = locations->Out().AsRegister<Register>();
+
+      if (a == b) {
+        if (out != a) {
+          __ Move(out, a);
+        }
+      } else {
+        __ Slt(AT, a, b);
+        if (is_min) {
+          if (out != a) {
+            __ Movn(out, a, AT);
+          }
+          if (out != b) {
+            __ Movz(out, b, AT);
+          }
+        } else {
+          if (out != a) {
+            __ Movz(out, a, AT);
+          }
+          if (out != b) {
+            __ Movn(out, b, AT);
+          }
+        }
+      }
+    }
+  }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMinMaxFP(LocationSummary* locations,
+                                                    bool is_min,
+                                                    bool isR6,
+                                                    DataType::Type type) {
+  FRegister out = locations->Out().AsFpuRegister<FRegister>();
+  FRegister a = locations->InAt(0).AsFpuRegister<FRegister>();
+  FRegister b = locations->InAt(1).AsFpuRegister<FRegister>();
+
+  if (isR6) {
+    MipsLabel noNaNs;
+    MipsLabel done;
+    FRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
+
+    // When Java computes min/max it prefers a NaN to a number; the
+    // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
+    // the inputs is a NaN and the other is a valid number, the MIPS
+    // instruction will return the number; Java wants the NaN value
+    // returned. This is why there is extra logic preceding the use of
+    // the MIPS min.fmt/max.fmt instructions. If either a or b holds a
+    // NaN, return the NaN, otherwise return the min/max.
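+    // CMP.UN.fmt writes all ones to FTMP when the operands are unordered
+    // (at least one input is a NaN); BC1EQZ then skips the NaN handling
+    // when bit 0 of FTMP is clear.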
+    if (type == DataType::Type::kFloat64) {
+      __ CmpUnD(FTMP, a, b);
+      __ Bc1eqz(FTMP, &noNaNs);
+
+      // One of the inputs is a NaN.
+      __ CmpEqD(ftmp, a, a);
+      // If a == a then b is the NaN, otherwise a is the NaN.
+      __ SelD(ftmp, a, b);
+
+      if (ftmp != out) {
+        __ MovD(out, ftmp);
+      }
+
+      __ B(&done);
+
+      __ Bind(&noNaNs);
+
+      if (is_min) {
+        __ MinD(out, a, b);
+      } else {
+        __ MaxD(out, a, b);
+      }
+    } else {
+      DCHECK_EQ(type, DataType::Type::kFloat32);
+      __ CmpUnS(FTMP, a, b);
+      __ Bc1eqz(FTMP, &noNaNs);
+
+      // One of the inputs is a NaN.
+      __ CmpEqS(ftmp, a, a);
+      // If a == a then b is the NaN, otherwise a is the NaN.
+      __ SelS(ftmp, a, b);
+
+      if (ftmp != out) {
+        __ MovS(out, ftmp);
+      }
+
+      __ B(&done);
+
+      __ Bind(&noNaNs);
+
+      if (is_min) {
+        __ MinS(out, a, b);
+      } else {
+        __ MaxS(out, a, b);
+      }
+    }
+
+    __ Bind(&done);
+
+  } else {  // !isR6
+    MipsLabel ordered;
+    MipsLabel compare;
+    MipsLabel select;
+    MipsLabel done;
+
+    if (type == DataType::Type::kFloat64) {
+      __ CunD(a, b);
+    } else {
+      DCHECK_EQ(type, DataType::Type::kFloat32);
+      __ CunS(a, b);
+    }
+    __ Bc1f(&ordered);
+
+    // At least one of a and b is a NaN. Return the NaN operand
+    // (b is returned when both inputs are NaNs).
+    if (type == DataType::Type::kFloat64) {
+      __ CeqD(b, b);
+    } else {
+      __ CeqS(b, b);
+    }
+    __ B(&select);
+
+    __ Bind(&ordered);
+
+    // Neither is a NaN.
+    // a == b? (-0.0 compares equal with +0.0)
+    // If equal, handle zeroes, else compare further.
+    if (type == DataType::Type::kFloat64) {
+      __ CeqD(a, b);
+    } else {
+      __ CeqS(a, b);
+    }
+    __ Bc1f(&compare);
+
+    // a == b either bit for bit or one is -0.0 and the other is +0.0.
+    if (type == DataType::Type::kFloat64) {
+      __ MoveFromFpuHigh(TMP, a);
+      __ MoveFromFpuHigh(AT, b);
+    } else {
+      __ Mfc1(TMP, a);
+      __ Mfc1(AT, b);
+    }
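+    // a == b numerically at this point, so at most the sign bit differs.
+    // ORing the sign-carrying words therefore yields -0.0 if either input
+    // is -0.0, while ANDing yields +0.0 unless both inputs are -0.0.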
+
+    if (is_min) {
+      // -0.0 prevails over +0.0.
+      __ Or(TMP, TMP, AT);
+    } else {
+      // +0.0 prevails over -0.0.
+      __ And(TMP, TMP, AT);
+    }
+
+    if (type == DataType::Type::kFloat64) {
+      __ Mfc1(AT, a);
+      __ Mtc1(AT, out);
+      __ MoveToFpuHigh(TMP, out);
+    } else {
+      __ Mtc1(TMP, out);
+    }
+    __ B(&done);
+
+    __ Bind(&compare);
+
+    if (type == DataType::Type::kFloat64) {
+      if (is_min) {
+        // return (a <= b) ? a : b;
+        __ ColeD(a, b);
+      } else {
+        // return (a >= b) ? a : b;
+        __ ColeD(b, a);  // b <= a
+      }
+    } else {
+      if (is_min) {
+        // return (a <= b) ? a : b;
+        __ ColeS(a, b);
+      } else {
+        // return (a >= b) ? a : b;
+        __ ColeS(b, a);  // b <= a
+      }
+    }
+
+    __ Bind(&select);
+
+    if (type == DataType::Type::kFloat64) {
+      __ MovtD(out, a);
+      __ MovfD(out, b);
+    } else {
+      __ MovtS(out, a);
+      __ MovfS(out, b);
+    }
+
+    __ Bind(&done);
+  }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min, isR6, type);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      GenerateMinMaxFP(minmax->GetLocations(), is_min, isR6, type);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderMIPS::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorMIPS::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderMIPS::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorMIPS::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
 void LocationsBuilderMIPS::VisitAbs(HAbs* abs) {
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
   switch (abs->GetResultType()) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index d09ab7c..d906896 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -246,6 +246,9 @@
                       bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
 
+  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, bool isR6, DataType::Type type);
+  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, bool isR6, DataType::Type type);
+  void GenerateMinMax(HBinaryOperation*, bool is_min);
   void GenerateAbsFP(LocationSummary* locations, DataType::Type type, bool isR2OrNewer, bool isR6);
 
   // Generate a heap reference load using one register `out`:
@@ -617,6 +620,8 @@
     DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
   };
 
+  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
+                                              const PcRelativePatchInfo* info_high = nullptr);
   PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
                                                const PcRelativePatchInfo* info_high = nullptr);
   PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
@@ -691,7 +696,8 @@
 
   // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
   Uint32ToLiteralMap uint32_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative.
+  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
+  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
@@ -699,7 +705,7 @@
   ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
   // PC-relative type patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
-  // PC-relative String patch info; type depends on configuration (intern table or boot image PIC).
+  // PC-relative String patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 9593eec..f8851b4 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1509,6 +1509,14 @@
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
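+  // This adapter only exists to match the patch-factory signature that
+  // EmitPcRelativeLinkerPatches<> expects.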
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -1527,11 +1535,10 @@
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -1542,6 +1549,13 @@
   DCHECK_EQ(size, linker_patches->size());
 }
 
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
+    uint32_t boot_image_offset,
+    const PcRelativePatchInfo* info_high) {
+  return NewPcRelativePatch(
+      /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+}
+
 CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
     MethodReference target_method,
     const PcRelativePatchInfo* info_high) {
@@ -5839,7 +5853,7 @@
   bool fallback_load = false;
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5866,7 +5880,7 @@
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5926,6 +5940,15 @@
                      kLoadDoubleword,
                      DeduplicateUint64Literal(invoke->GetMethodAddress()));
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      uint32_t boot_image_offset = GetBootImageOffset(invoke);
+      PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
+      PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
+      EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+      // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+      __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       PcRelativePatchInfo* info_high = NewMethodBssEntryPatch(
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
@@ -6113,20 +6136,15 @@
                      codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
+          codegen_->NewBootImageRelRoPatch(boot_image_offset);
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
       __ Lwu(out, AT, /* placeholder */ 0x5678);
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(cls->GetDexFile().StringByTypeIdx(cls->GetTypeIndex())));
-      if (masked_hash != 0) {
-        __ Daddiu(out, out, -masked_hash);
-      }
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -6248,12 +6266,13 @@
                      codegen_->DeduplicateBootImageAddressLiteral(address));
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
+      uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
+          codegen_->NewBootImageRelRoPatch(boot_image_offset);
       CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
+          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
       codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
       __ Lwu(out, AT, /* placeholder */ 0x5678);
       return;
@@ -6665,6 +6684,182 @@
   }
 }
 
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateMinMaxInt(LocationSummary* locations, bool is_min) {
+  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+  GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
+  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+  if (lhs == rhs) {
+    if (out != lhs) {
+      __ Move(out, lhs);
+    }
+  } else {
+    // Some architectures, such as ARM and MIPS (prior to r6), have a
+    // conditional move instruction which only changes the target
+    // (output) register if the condition is true (MIPS prior to r6 had
+    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions always
+    // change the target (output) register.  If the condition is true the
+    // output register gets the contents of the "rs" register; otherwise,
+    // the output register is set to zero. One consequence of this is
+    // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
+    // needs to use a pair of SELEQZ/SELNEZ instructions.  After
+    // executing this pair of instructions one of the output registers
+    // from the pair will necessarily contain zero. Then the code ORs the
+    // output registers from the SELEQZ/SELNEZ instructions to get the
+    // final result.
+    //
+    // The initial test to see if the output register is same as the
+    // first input register is needed to make sure that value in the
+    // first input register isn't clobbered before we've finished
+    // computing the output value. The logic in the corresponding else
+    // clause performs the same task but makes sure the second input
+    // register isn't clobbered in the event that it's the same register
+    // as the output register; the else clause also handles the case
+    // where the output register is distinct from both the first and the
+    // second input registers.
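+    //
+    // As a concrete sketch, the is_min case with out == lhs becomes:
+    //   slt    AT,  rhs, lhs  # AT  = (rhs < lhs) ? 1 : 0
+    //   seleqz out, lhs, AT   # out = (AT == 0) ? lhs : 0
+    //   selnez AT,  rhs, AT   # AT  = (AT != 0) ? rhs : 0
+    //   or     out, out, AT   # one side is the min, the other is zero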
+    if (out == lhs) {
+      __ Slt(AT, rhs, lhs);
+      if (is_min) {
+        __ Seleqz(out, lhs, AT);
+        __ Selnez(AT, rhs, AT);
+      } else {
+        __ Selnez(out, lhs, AT);
+        __ Seleqz(AT, rhs, AT);
+      }
+    } else {
+      __ Slt(AT, lhs, rhs);
+      if (is_min) {
+        __ Seleqz(out, rhs, AT);
+        __ Selnez(AT, lhs, AT);
+      } else {
+        __ Selnez(out, rhs, AT);
+        __ Seleqz(AT, lhs, AT);
+      }
+    }
+    __ Or(out, out, AT);
+  }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateMinMaxFP(LocationSummary* locations,
+                                                      bool is_min,
+                                                      DataType::Type type) {
+  FpuRegister a = locations->InAt(0).AsFpuRegister<FpuRegister>();
+  FpuRegister b = locations->InAt(1).AsFpuRegister<FpuRegister>();
+  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
+
+  Mips64Label noNaNs;
+  Mips64Label done;
+  FpuRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
+
+  // When Java computes min/max it prefers a NaN to a number; the
+  // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
+  // the inputs is a NaN and the other is a valid number, the MIPS
+  // instruction will return the number; Java wants the NaN value
+  // returned. This is why there is extra logic preceding the use of
+  // the MIPS min.fmt/max.fmt instructions. If either a or b holds a
+  // NaN, return the NaN, otherwise return the min/max.
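+  // As in the MIPS32 code above, CMP.UN.fmt writes all ones to FTMP on an
+  // unordered result and BC1EQZ skips the NaN handling when bit 0 of FTMP
+  // is clear.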
+  if (type == DataType::Type::kFloat64) {
+    __ CmpUnD(FTMP, a, b);
+    __ Bc1eqz(FTMP, &noNaNs);
+
+    // One of the inputs is a NaN.
+    __ CmpEqD(ftmp, a, a);
+    // If a == a then b is the NaN, otherwise a is the NaN.
+    __ SelD(ftmp, a, b);
+
+    if (ftmp != out) {
+      __ MovD(out, ftmp);
+    }
+
+    __ Bc(&done);
+
+    __ Bind(&noNaNs);
+
+    if (is_min) {
+      __ MinD(out, a, b);
+    } else {
+      __ MaxD(out, a, b);
+    }
+  } else {
+    DCHECK_EQ(type, DataType::Type::kFloat32);
+    __ CmpUnS(FTMP, a, b);
+    __ Bc1eqz(FTMP, &noNaNs);
+
+    // One of the inputs is a NaN.
+    __ CmpEqS(ftmp, a, a);
+    // If a == a then b is the NaN, otherwise a is the NaN.
+    __ SelS(ftmp, a, b);
+
+    if (ftmp != out) {
+      __ MovS(out, ftmp);
+    }
+
+    __ Bc(&done);
+
+    __ Bind(&noNaNs);
+
+    if (is_min) {
+      __ MinS(out, a, b);
+    } else {
+      __ MaxS(out, a, b);
+    }
+  }
+
+  __ Bind(&done);
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderMIPS64::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderMIPS64::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
 void LocationsBuilderMIPS64::VisitAbs(HAbs* abs) {
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
   switch (abs->GetResultType()) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index ddeb3eb..d1da1ce 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -242,6 +242,10 @@
                       bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
+  void GenerateMinMaxInt(LocationSummary* locations, bool is_min);
+  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
+
   // Generate a heap reference load using one register `out`:
   //
   //   out <- *(out + offset)
@@ -586,6 +590,8 @@
     DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
   };
 
+  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
+                                              const PcRelativePatchInfo* info_high = nullptr);
   PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
                                                const PcRelativePatchInfo* info_high = nullptr);
   PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
@@ -655,7 +661,8 @@
   // Deduplication map for 64-bit literals, used for non-patchable method address or method code
   // address.
   Uint64ToLiteralMap uint64_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative.
+  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
+  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
@@ -663,7 +670,7 @@
   ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
   // PC-relative type patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
-  // PC-relative String patch info; type depends on configuration (intern table or boot image PIC).
+  // PC-relative String patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
-  // PC-relative type patch info for kBssEntry.
+  // PC-relative String patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 51b96be..4818084 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -51,6 +51,9 @@
 
 static constexpr int kFakeReturnRegister = Register(8);
 
+static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
+static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
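+// These are the bit patterns of Java's canonical quiet NaNs (Double.NaN
+// and Float.NaN), loaded by GenerateMinMaxFP below when an input is a NaN.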
+
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<X86Assembler*>(codegen->GetAssembler())->  // NOLINT
 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, x).Int32Value()
@@ -3802,6 +3805,211 @@
   }
 }
 
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      // Register used to perform a long subtract to set the condition codes.
+      locations->AddTemp(Location::RequiresRegister());
+      break;
+    case DataType::Type::kFloat32:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      locations->AddTemp(Location::RequiresRegister());
+      break;
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorX86::GenerateMinMaxInt(LocationSummary* locations,
+                                                    bool is_min,
+                                                    DataType::Type type) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+
+  // Shortcut for same input locations.
+  if (op1_loc.Equals(op2_loc)) {
+    // Can return immediately, as op1_loc == out_loc.
+    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
+    //       a copy here.
+    DCHECK(locations->Out().Equals(op1_loc));
+    return;
+  }
+
+  if (type == DataType::Type::kInt64) {
+    // Need to perform a subtract to get the sign right.
+    // op1 is already in the same location as the output.
+    Location output = locations->Out();
+    Register output_lo = output.AsRegisterPairLow<Register>();
+    Register output_hi = output.AsRegisterPairHigh<Register>();
+
+    Register op2_lo = op2_loc.AsRegisterPairLow<Register>();
+    Register op2_hi = op2_loc.AsRegisterPairHigh<Register>();
+
+    // The comparison is performed by subtracting the second operand from
+    // the first operand and then setting the status flags in the same
+    // manner as the SUB instruction.
+    __ cmpl(output_lo, op2_lo);
+
+    // Now use a temp and the borrow to finish the subtraction of op2_hi.
+    Register temp = locations->GetTemp(0).AsRegister<Register>();
+    __ movl(temp, output_hi);
+    __ sbbl(temp, op2_hi);
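+    // temp itself is discarded; only the flags of the full 64-bit
+    // subtract, completed via the borrow from the low-word CMP, matter.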
+
+    // Now the condition code is correct.
+    Condition cond = is_min ? Condition::kGreaterEqual : Condition::kLess;
+    __ cmovl(cond, output_lo, op2_lo);
+    __ cmovl(cond, output_hi, op2_hi);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kInt32);
+    Register out = locations->Out().AsRegister<Register>();
+    Register op2 = op2_loc.AsRegister<Register>();
+
+    //  (out := op1)
+    //  out <=? op2
+    //  if out is min jmp done
+    //  out := op2
+    // done:
+
+    __ cmpl(out, op2);
+    Condition cond = is_min ? Condition::kGreater : Condition::kLess;
+    __ cmovl(cond, out, op2);
+  }
+}
+
+void InstructionCodeGeneratorX86::GenerateMinMaxFP(LocationSummary* locations,
+                                                   bool is_min,
+                                                   DataType::Type type) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
+
+  // Shortcut for same input locations.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));
+    return;
+  }
+
+  //  (out := op1)
+  //  out <=? op2
+  //  if NaN jmp NaN_label
+  //  if out is min jmp done
+  //  if op2 is min jmp op2_label
+  //  handle +0.0/-0.0
+  //  jmp done
+  // NaN_label:
+  //  out := NaN
+  // op2_label:
+  //  out := op2
+  // done:
+  //
+  // This removes one jmp, but needs to copy one input (op1) to out.
+  //
+  // TODO: This is straight from Quick (except literal pool). Make NaN an out-of-line slowpath?
+
+  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
+
+  NearLabel nan, done, op2_label;
+  if (type == DataType::Type::kFloat64) {
+    __ ucomisd(out, op2);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kFloat32);
+    __ ucomiss(out, op2);
+  }
+
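+  // UCOMISS/UCOMISD set PF on an unordered result (at least one operand
+  // is a NaN) and otherwise encode the ordering like an unsigned compare:
+  // kAbove means out > op2, kBelow means out < op2. Falling through both
+  // branches below means out == op2, the +0.0/-0.0 case.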
+  __ j(Condition::kParityEven, &nan);
+
+  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
+  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);
+
+  // Handle +0.0/-0.0.
+  if (is_min) {
+    if (type == DataType::Type::kFloat64) {
+      __ orpd(out, op2);
+    } else {
+      __ orps(out, op2);
+    }
+  } else {
+    if (type == DataType::Type::kFloat64) {
+      __ andpd(out, op2);
+    } else {
+      __ andps(out, op2);
+    }
+  }
+  __ jmp(&done);
+
+  // NaN handling.
+  __ Bind(&nan);
+  if (type == DataType::Type::kFloat64) {
+    // TODO: Use a constant from the constant table (requires extra input).
+    __ LoadLongConstant(out, kDoubleNaN);
+  } else {
+    Register constant = locations->GetTemp(0).AsRegister<Register>();
+    __ movl(constant, Immediate(kFloatNaN));
+    __ movd(out, constant);
+  }
+  __ jmp(&done);
+
+  // out := op2;
+  __ Bind(&op2_label);
+  if (type == DataType::Type::kFloat64) {
+    __ movsd(out, op2);
+  } else {
+    __ movss(out, op2);
+  }
+
+  // Done.
+  __ Bind(&done);
+}
+
+void InstructionCodeGeneratorX86::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min, type);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderX86::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorX86::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderX86::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorX86::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
 void LocationsBuilderX86::VisitAbs(HAbs* abs) {
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
   switch (abs->GetResultType()) {
@@ -4624,6 +4832,15 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       __ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
+                                                                temp.AsRegister<Register>());
+      __ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
+      RecordBootImageRelRoPatch(
+          invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress(),
+          GetBootImageOffset(invoke));
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
                                                                 temp.AsRegister<Register>());
@@ -4685,6 +4902,13 @@
   RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
 }
 
+void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
+                                                 uint32_t boot_image_offset) {
+  boot_image_method_patches_.emplace_back(
+      method_address, /* target_dex_file */ nullptr, boot_image_offset);
+  __ Bind(&boot_image_method_patches_.back().label);
+}
+
 void CodeGeneratorX86::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
   DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
   HX86ComputeBaseMethodAddress* method_address =
@@ -4754,6 +4978,14 @@
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -4772,11 +5004,10 @@
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -6145,7 +6376,7 @@
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -6183,7 +6414,7 @@
 
   if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
       load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
-      load_kind == HLoadClass::LoadKind::kBootImageClassTable ||
+      load_kind == HLoadClass::LoadKind::kBootImageRelRo ||
       load_kind == HLoadClass::LoadKind::kBssEntry) {
     locations->SetInAt(0, Location::RequiresRegister());
   }
@@ -6259,17 +6490,12 @@
       __ movl(out, Immediate(address));
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       Register method_address = locations->InAt(0).AsRegister<Register>();
       __ movl(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
-      codegen_->RecordBootImageTypePatch(cls);
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(cls->GetDexFile().StringByTypeIdx(cls->GetTypeIndex())));
-      if (masked_hash != 0) {
-        __ subl(out, Immediate(masked_hash));
-      }
+      codegen_->RecordBootImageRelRoPatch(cls->InputAt(0)->AsX86ComputeBaseMethodAddress(),
+                                          codegen_->GetBootImageOffset(cls));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -6349,7 +6575,7 @@
     HLoadString::LoadKind desired_string_load_kind) {
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -6368,7 +6594,7 @@
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
   HLoadString::LoadKind load_kind = load->GetLoadKind();
   if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
-      load_kind == HLoadString::LoadKind::kBootImageInternTable ||
+      load_kind == HLoadString::LoadKind::kBootImageRelRo ||
       load_kind == HLoadString::LoadKind::kBssEntry) {
     locations->SetInAt(0, Location::RequiresRegister());
   }
@@ -6422,11 +6648,12 @@
       __ movl(out, Immediate(address));
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       Register method_address = locations->InAt(0).AsRegister<Register>();
       __ movl(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
-      codegen_->RecordBootImageStringPatch(load);
+      codegen_->RecordBootImageRelRoPatch(load->InputAt(0)->AsX86ComputeBaseMethodAddress(),
+                                          codegen_->GetBootImageOffset(load));
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 51e5bca..9c537a7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -225,6 +225,9 @@
   void GenerateShlLong(const Location& loc, int shift);
   void GenerateShrLong(const Location& loc, int shift);
   void GenerateUShrLong(const Location& loc, int shift);
+  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
 
   void HandleFieldSet(HInstruction* instruction,
                       const FieldInfo& field_info,
@@ -414,6 +417,8 @@
   void GenerateVirtualCall(
       HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
 
+  void RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
+                                 uint32_t boot_image_offset);
   void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
   void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
   void RecordBootImageTypePatch(HLoadClass* load_class);
@@ -631,17 +636,18 @@
   X86Assembler assembler_;
   const X86InstructionSetFeatures& isa_features_;
 
-  // PC-relative method patch info for kBootImageLinkTimePcRelative.
+  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
+  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
   ArenaDeque<X86PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<X86PcRelativePatchInfo> method_bss_entry_patches_;
   // PC-relative type patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<X86PcRelativePatchInfo> boot_image_type_patches_;
-  // Type patch locations for kBssEntry.
+  // PC-relative type patch info for kBssEntry.
   ArenaDeque<X86PcRelativePatchInfo> type_bss_entry_patches_;
-  // String patch locations; type depends on configuration (intern table or boot image PIC).
+  // PC-relative String patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<X86PcRelativePatchInfo> boot_image_string_patches_;
-  // String patch locations for kBssEntry.
+  // PC-relative String patch info for kBssEntry.
   ArenaDeque<X86PcRelativePatchInfo> string_bss_entry_patches_;
 
   // Patches for string root accesses in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0bb56a2..c378c5b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -998,6 +998,13 @@
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
       break;
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
+      // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+      __ movl(temp.AsRegister<CpuRegister>(),
+              Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+      RecordBootImageRelRoPatch(GetBootImageOffset(invoke));
+      break;
+    }
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
       __ movq(temp.AsRegister<CpuRegister>(),
               Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
@@ -1059,6 +1066,11 @@
   RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
 }
 
+void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
+  boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
+  __ Bind(&boot_image_method_patches_.back().label);
+}
+
 void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
   boot_image_method_patches_.emplace_back(
       invoke->GetTargetMethod().dex_file, invoke->GetTargetMethod().index);
@@ -1110,6 +1122,14 @@
   }
 }
 
+linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
+                                              const DexFile* target_dex_file,
+                                              uint32_t pc_insn_offset,
+                                              uint32_t boot_image_offset) {
+  DCHECK(target_dex_file == nullptr);  // Unused for DataBimgRelRoPatch(), should be null.
+  return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+}
+
 void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
   DCHECK(linker_patches->empty());
   size_t size =
@@ -1128,11 +1148,10 @@
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
   } else {
-    DCHECK(boot_image_method_patches_.empty());
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeClassTablePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringInternTablePatch>(
-        boot_image_string_patches_, linker_patches);
+    EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_type_patches_.empty());
+    DCHECK(boot_image_string_patches_.empty());
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -3821,6 +3840,177 @@
   }
 }
 
+static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
+  LocationSummary* locations = new (allocator) LocationSummary(minmax);
+  switch (minmax->GetResultType()) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RequiresRegister());
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      // The following is sub-optimal, but it is all we can do for now. It would also be fine
+      // to accept the second input as the output (we could simply swap the inputs).
+      locations->SetOut(Location::SameAsFirstInput());
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
+  }
+}
+
+void InstructionCodeGeneratorX86_64::GenerateMinMaxInt(LocationSummary* locations,
+                                                       bool is_min,
+                                                       DataType::Type type) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+
+  // Shortcut for same input locations.
+  if (op1_loc.Equals(op2_loc)) {
+    // Can return immediately, as op1_loc == out_loc.
+    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
+    //       a copy here.
+    DCHECK(locations->Out().Equals(op1_loc));
+    return;
+  }
+
+  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+  CpuRegister op2 = op2_loc.AsRegister<CpuRegister>();
+
+  //  (out := op1)
+  //  out <=? op2
+  //  if out is min jmp done
+  //  out := op2
+  // done:
+
+  if (type == DataType::Type::kInt64) {
+    __ cmpq(out, op2);
+    __ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, /*is64bit*/ true);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kInt32);
+    __ cmpl(out, op2);
+    __ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, /*is64bit*/ false);
+  }
+}
+
+void InstructionCodeGeneratorX86_64::GenerateMinMaxFP(LocationSummary* locations,
+                                                      bool is_min,
+                                                      DataType::Type type) {
+  Location op1_loc = locations->InAt(0);
+  Location op2_loc = locations->InAt(1);
+  Location out_loc = locations->Out();
+  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
+
+  // Shortcut for same input locations.
+  if (op1_loc.Equals(op2_loc)) {
+    DCHECK(out_loc.Equals(op1_loc));
+    return;
+  }
+
+  //  (out := op1)
+  //  out <=? op2
+  //  if NaN jmp NaN_label
+  //  if out is min jmp done
+  //  if op2 is min jmp op2_label
+  //  handle +0.0/-0.0
+  //  jmp done
+  // NaN_label:
+  //  out := NaN
+  // op2_label:
+  //  out := op2
+  // done:
+  //
+  // This removes one jmp, but needs to copy one input (op1) to out.
+  //
+  // TODO: This is straight from Quick. Make NaN an out-of-line slowpath?
+
+  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
+
+  NearLabel nan, done, op2_label;
+  if (type == DataType::Type::kFloat64) {
+    __ ucomisd(out, op2);
+  } else {
+    DCHECK_EQ(type, DataType::Type::kFloat32);
+    __ ucomiss(out, op2);
+  }
+
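+  // UCOMISS/UCOMISD set PF on an unordered result (at least one operand
+  // is a NaN) and otherwise encode the ordering like an unsigned compare:
+  // kAbove means out > op2, kBelow means out < op2. Falling through both
+  // branches below means out == op2, the +0.0/-0.0 case.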
+  __ j(Condition::kParityEven, &nan);
+
+  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
+  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);
+
+  // Handle +0.0/-0.0.
+  if (is_min) {
+    if (type == DataType::Type::kFloat64) {
+      __ orpd(out, op2);
+    } else {
+      __ orps(out, op2);
+    }
+  } else {
+    if (type == DataType::Type::kFloat64) {
+      __ andpd(out, op2);
+    } else {
+      __ andps(out, op2);
+    }
+  }
+  __ jmp(&done);
+
+  // NaN handling.
+  __ Bind(&nan);
+  if (type == DataType::Type::kFloat64) {
+    __ movsd(out, codegen_->LiteralInt64Address(INT64_C(0x7FF8000000000000)));
+  } else {
+    __ movss(out, codegen_->LiteralInt32Address(INT32_C(0x7FC00000)));
+  }
+  __ jmp(&done);
+
+  // out := op2;
+  __ Bind(&op2_label);
+  if (type == DataType::Type::kFloat64) {
+    __ movsd(out, op2);
+  } else {
+    __ movss(out, op2);
+  }
+
+  // Done.
+  __ Bind(&done);
+}
+
+void InstructionCodeGeneratorX86_64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
+  DataType::Type type = minmax->GetResultType();
+  switch (type) {
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      GenerateMinMaxInt(minmax->GetLocations(), is_min, type);
+      break;
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected type for HMinMax " << type;
+  }
+}
+
+void LocationsBuilderX86_64::VisitMin(HMin* min) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+}
+
+void InstructionCodeGeneratorX86_64::VisitMin(HMin* min) {
+  GenerateMinMax(min, /*is_min*/ true);
+}
+
+void LocationsBuilderX86_64::VisitMax(HMax* max) {
+  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+}
+
+void InstructionCodeGeneratorX86_64::VisitMax(HMax* max) {
+  GenerateMinMax(max, /*is_min*/ false);
+}
+
 void LocationsBuilderX86_64::VisitAbs(HAbs* abs) {
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
   switch (abs->GetResultType()) {
@@ -5535,7 +5725,7 @@
     case HLoadClass::LoadKind::kReferrersClass:
       break;
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageClassTable:
+    case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5643,16 +5833,10 @@
       __ movl(out, Immediate(static_cast<int32_t>(address)));  // Zero-extended.
       break;
     }
-    case HLoadClass::LoadKind::kBootImageClassTable: {
+    case HLoadClass::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
-      codegen_->RecordBootImageTypePatch(cls);
-      // Extract the reference from the slot data, i.e. clear the hash bits.
-      int32_t masked_hash = ClassTable::TableSlot::MaskHash(
-          ComputeModifiedUtf8Hash(cls->GetDexFile().StringByTypeIdx(cls->GetTypeIndex())));
-      if (masked_hash != 0) {
-        __ subl(out, Immediate(masked_hash));
-      }
+      codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
       break;
     }
     case HLoadClass::LoadKind::kBssEntry: {
@@ -5717,7 +5901,7 @@
     HLoadString::LoadKind desired_string_load_kind) {
   switch (desired_string_load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageInternTable:
+    case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
       DCHECK(!Runtime::Current()->UseJitCompilation());
       break;
@@ -5783,10 +5967,10 @@
       __ movl(out, Immediate(static_cast<int32_t>(address)));  // Zero-extended.
       return;
     }
-    case HLoadString::LoadKind::kBootImageInternTable: {
+    case HLoadString::LoadKind::kBootImageRelRo: {
       DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
       __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
-      codegen_->RecordBootImageStringPatch(load);
+      codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
       return;
     }
     case HLoadString::LoadKind::kBssEntry: {
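
Both kBootImageRelRo cases above follow the same record-and-patch idiom: emit the load with a dummy 32-bit displacement, remember where it sits together with the boot image offset, and fix the displacement up once the final .data.bimg.rel.ro placement is known. A minimal standalone sketch of that idiom under made-up names (Patch, kFakeRelRoBase) — not ART's patching machinery:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Patch { size_t code_offset; uint32_t boot_image_offset; };

int main() {
  std::vector<uint8_t> code(16, 0x90);          // stand-in code buffer
  std::vector<Patch> patches;
  const uint32_t kPlaceholder = 0;              // dummy displacement while emitting
  std::memcpy(&code[4], &kPlaceholder, 4);      // "movl out, [disp32]" with dummy disp
  patches.push_back({4, 0x1234u});              // RecordBootImageRelRoPatch(boot_image_offset)
  const uint32_t kFakeRelRoBase = 0x40000000u;  // known only at link time
  for (const Patch& p : patches) {              // link time: resolve for real
    uint32_t resolved = kFakeRelRoBase + p.boot_image_offset;
    std::memcpy(&code[p.code_offset], &resolved, 4);
  }
  uint32_t v;
  std::memcpy(&v, &code[4], 4);
  assert(v == 0x40001234u);
  return 0;
}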
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 1079e94..e8d1efe 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -222,6 +222,10 @@
                       bool value_can_be_null);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
 
+  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
+  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
+
   // Generate a heap reference load using one register `out`:
   //
   //   out <- *(out + offset)
@@ -410,6 +414,7 @@
   void GenerateVirtualCall(
       HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
 
+  void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
   void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
   void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
   void RecordBootImageTypePatch(HLoadClass* load_class);
@@ -604,17 +609,18 @@
   // Used for fixups to the constant area.
   int constant_area_start_;
 
-  // PC-relative method patch info for kBootImageLinkTimePcRelative.
+  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
+  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
   ArenaDeque<PatchInfo<Label>> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PatchInfo<Label>> method_bss_entry_patches_;
   // PC-relative type patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PatchInfo<Label>> boot_image_type_patches_;
-  // Type patch locations for kBssEntry.
+  // PC-relative type patch info for kBssEntry.
   ArenaDeque<PatchInfo<Label>> type_bss_entry_patches_;
-  // String patch locations; type depends on configuration (intern table or boot image PIC).
+  // PC-relative String patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PatchInfo<Label>> boot_image_string_patches_;
-  // String patch locations for kBssEntry.
+  // PC-relative String patch info for kBssEntry.
   ArenaDeque<PatchInfo<Label>> string_bss_entry_patches_;
 
   // Patches for string literals in JIT compiled code.
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index d699d01..0a310ca 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -78,16 +78,10 @@
   DCHECK(instruction != nullptr);
   if (instruction->IsArrayLength()) {
     return true;
-  } else if (instruction->IsInvokeStaticOrDirect()) {
-    switch (instruction->AsInvoke()->GetIntrinsic()) {
-      case Intrinsics::kMathMinIntInt:
-      case Intrinsics::kMathMinLongLong:
-        // Instruction MIN(>=0, >=0) is >= 0.
-        return IsGEZero(instruction->InputAt(0)) &&
-               IsGEZero(instruction->InputAt(1));
-      default:
-        break;
-    }
+  } else if (instruction->IsMin()) {
+    // Instruction MIN(>=0, >=0) is >= 0.
+    return IsGEZero(instruction->InputAt(0)) &&
+           IsGEZero(instruction->InputAt(1));
   } else if (instruction->IsAbs()) {
     // Instruction ABS(>=0) is >= 0.
     // NOTE: ABS(minint) = minint prevents assuming
@@ -101,21 +95,14 @@
 /** Hunts "under the hood" for a suitable instruction at the hint. */
 static bool IsMaxAtHint(
     HInstruction* instruction, HInstruction* hint, /*out*/HInstruction** suitable) {
-  if (instruction->IsInvokeStaticOrDirect()) {
-    switch (instruction->AsInvoke()->GetIntrinsic()) {
-      case Intrinsics::kMathMinIntInt:
-      case Intrinsics::kMathMinLongLong:
-        // For MIN(x, y), return most suitable x or y as maximum.
-        return IsMaxAtHint(instruction->InputAt(0), hint, suitable) ||
-               IsMaxAtHint(instruction->InputAt(1), hint, suitable);
-      default:
-        break;
-    }
+  if (instruction->IsMin()) {
+    // For MIN(x, y), return most suitable x or y as maximum.
+    return IsMaxAtHint(instruction->InputAt(0), hint, suitable) ||
+           IsMaxAtHint(instruction->InputAt(1), hint, suitable);
   } else {
     *suitable = instruction;
     return HuntForDeclaration(instruction) == hint;
   }
-  return false;
 }
 
 /** Post-analysis simplification of a minimum value that makes the bound more useful to clients. */
@@ -364,11 +351,11 @@
   }
 }
 
-bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* tc) const {
+bool InductionVarRange::IsFinite(HLoopInformation* loop, /*out*/ int64_t* trip_count) const {
   HInductionVarAnalysis::InductionInfo *trip =
       induction_analysis_->LookupInfo(loop, GetLoopControl(loop));
   if (trip != nullptr && !IsUnsafeTripCount(trip)) {
-    IsConstant(trip->op_a, kExact, tc);
+    IsConstant(trip->op_a, kExact, trip_count);
     return true;
   }
   return false;
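
The simplified checks above rest on two order facts about min: min(x, y) >= 0 whenever both operands are, and min(x, y) never exceeds either operand, so either one is a safe maximum for range purposes. A throwaway standalone check of exactly those facts (plain C++, no ART types):

#include <algorithm>
#include <cassert>

int main() {
  for (int x = 0; x <= 3; ++x) {
    for (int y = 0; y <= 3; ++y) {
      assert(std::min(x, y) >= 0);                          // MIN(>=0, >=0) is >= 0
      assert(std::min(x, y) <= x && std::min(x, y) <= y);   // either operand bounds it
    }
  }
  return 0;
}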
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index eac8d2f..3483770 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -120,6 +120,8 @@
   void SimplifyReturnThis(HInvoke* invoke);
   void SimplifyAllocationIntrinsic(HInvoke* invoke);
   void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind);
+  void SimplifyMin(HInvoke* invoke, DataType::Type type);
+  void SimplifyMax(HInvoke* invoke, DataType::Type type);
   void SimplifyAbs(HInvoke* invoke, DataType::Type type);
 
   CodeGenerator* codegen_;
@@ -2407,6 +2409,20 @@
   invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier);
 }
 
+void InstructionSimplifierVisitor::SimplifyMin(HInvoke* invoke, DataType::Type type) {
+  DCHECK(invoke->IsInvokeStaticOrDirect());
+  HMin* min = new (GetGraph()->GetAllocator())
+      HMin(type, invoke->InputAt(0), invoke->InputAt(1), invoke->GetDexPc());
+  invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, min);
+}
+
+void InstructionSimplifierVisitor::SimplifyMax(HInvoke* invoke, DataType::Type type) {
+  DCHECK(invoke->IsInvokeStaticOrDirect());
+  HMax* max = new (GetGraph()->GetAllocator())
+      HMax(type, invoke->InputAt(0), invoke->InputAt(1), invoke->GetDexPc());
+  invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, max);
+}
+
 void InstructionSimplifierVisitor::SimplifyAbs(HInvoke* invoke, DataType::Type type) {
   DCHECK(invoke->IsInvokeStaticOrDirect());
   HAbs* abs = new (GetGraph()->GetAllocator())
@@ -2497,6 +2513,30 @@
     case Intrinsics::kVarHandleStoreStoreFence:
       SimplifyMemBarrier(instruction, MemBarrierKind::kStoreStore);
       break;
+    case Intrinsics::kMathMinIntInt:
+      SimplifyMin(instruction, DataType::Type::kInt32);
+      break;
+    case Intrinsics::kMathMinLongLong:
+      SimplifyMin(instruction, DataType::Type::kInt64);
+      break;
+    case Intrinsics::kMathMinFloatFloat:
+      SimplifyMin(instruction, DataType::Type::kFloat32);
+      break;
+    case Intrinsics::kMathMinDoubleDouble:
+      SimplifyMin(instruction, DataType::Type::kFloat64);
+      break;
+    case Intrinsics::kMathMaxIntInt:
+      SimplifyMax(instruction, DataType::Type::kInt32);
+      break;
+    case Intrinsics::kMathMaxLongLong:
+      SimplifyMax(instruction, DataType::Type::kInt64);
+      break;
+    case Intrinsics::kMathMaxFloatFloat:
+      SimplifyMax(instruction, DataType::Type::kFloat32);
+      break;
+    case Intrinsics::kMathMaxDoubleDouble:
+      SimplifyMax(instruction, DataType::Type::kFloat64);
+      break;
     case Intrinsics::kMathAbsInt:
       SimplifyAbs(instruction, DataType::Type::kInt32);
       break;
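
The dispatch added above folds eight intrinsic variants into one generic operation tagged with its data type, so later passes see a single HMin/HMax node instead of per-signature cases. A hypothetical standalone sketch of that lowering pattern — the names below are illustrative, not ART's API:

#include <cstdio>
#include <cstdlib>

enum class Intrinsic { kMathMinIntInt, kMathMinDoubleDouble, kMathMaxLongLong };
enum class Op { kMin, kMax };
enum class Type { kInt32, kInt64, kFloat64 };
struct Lowered { Op op; Type type; };

static Lowered Lower(Intrinsic i) {
  switch (i) {
    case Intrinsic::kMathMinIntInt:       return {Op::kMin, Type::kInt32};
    case Intrinsic::kMathMinDoubleDouble: return {Op::kMin, Type::kFloat64};
    case Intrinsic::kMathMaxLongLong:     return {Op::kMax, Type::kInt64};
    default: std::abort();  // unknown intrinsic: a bug, mirroring LOG(FATAL)
  }
}

int main() {
  Lowered l = Lower(Intrinsic::kMathMinDoubleDouble);
  std::printf("op=%d type=%d\n", static_cast<int>(l.op), static_cast<int>(l.type));
  return 0;
}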
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 24aff22..1035cbc 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -266,6 +266,14 @@
              << " should have been converted to HIR";                    \
 }
 #define UNREACHABLE_INTRINSICS(Arch)                            \
+UNREACHABLE_INTRINSIC(Arch, MathMinIntInt)                      \
+UNREACHABLE_INTRINSIC(Arch, MathMinLongLong)                    \
+UNREACHABLE_INTRINSIC(Arch, MathMinFloatFloat)                  \
+UNREACHABLE_INTRINSIC(Arch, MathMinDoubleDouble)                \
+UNREACHABLE_INTRINSIC(Arch, MathMaxIntInt)                      \
+UNREACHABLE_INTRINSIC(Arch, MathMaxLongLong)                    \
+UNREACHABLE_INTRINSIC(Arch, MathMaxFloatFloat)                  \
+UNREACHABLE_INTRINSIC(Arch, MathMaxDoubleDouble)                \
 UNREACHABLE_INTRINSIC(Arch, MathAbsInt)                         \
 UNREACHABLE_INTRINSIC(Arch, MathAbsLong)                        \
 UNREACHABLE_INTRINSIC(Arch, MathAbsFloat)                       \
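
The guard being extended above exists so that, once the simplifier has rewritten these intrinsics into HIR, any arch-specific intrinsic visitor that still receives one fails loudly instead of silently generating code. A toy version of the pattern (macro and names illustrative, not ART's):

#include <cstdio>
#include <cstdlib>

#define UNREACHABLE_INTRINSIC_DEMO(Name)                                  \
  void Visit##Name() {                                                    \
    std::fprintf(stderr, #Name " should have been converted to HIR\n");   \
    std::abort();                                                         \
  }

UNREACHABLE_INTRINSIC_DEMO(MathMinIntInt)

int main() {
  // VisitMathMinIntInt();  // would abort: the simplifier must run before codegen.
  return 0;
}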
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 0b04fff..81c0b50 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -344,14 +344,6 @@
   GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetVIXLAssembler());
 }
 
-static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
 static void GenNumberOfLeadingZeros(LocationSummary* locations,
                                     DataType::Type type,
                                     MacroAssembler* masm) {
@@ -529,113 +521,6 @@
   GenLowestOneBit(invoke, DataType::Type::kInt64, GetVIXLAssembler());
 }
 
-static void GenMinMaxFP(LocationSummary* locations,
-                        bool is_min,
-                        bool is_double,
-                        MacroAssembler* masm) {
-  Location op1 = locations->InAt(0);
-  Location op2 = locations->InAt(1);
-  Location out = locations->Out();
-
-  FPRegister op1_reg = is_double ? DRegisterFrom(op1) : SRegisterFrom(op1);
-  FPRegister op2_reg = is_double ? DRegisterFrom(op2) : SRegisterFrom(op2);
-  FPRegister out_reg = is_double ? DRegisterFrom(out) : SRegisterFrom(out);
-  if (is_min) {
-    __ Fmin(out_reg, op1_reg, op2_reg);
-  } else {
-    __ Fmax(out_reg, op1_reg, op2_reg);
-  }
-}
-
-static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetInAt(1, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ true, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, /* is_double */ false, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, /* is_double */ true, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(
-      invoke->GetLocations(), /* is_min */ false, /* is_double */ false, GetVIXLAssembler());
-}
-
-static void GenMinMax(LocationSummary* locations,
-                      bool is_min,
-                      bool is_long,
-                      MacroAssembler* masm) {
-  Location op1 = locations->InAt(0);
-  Location op2 = locations->InAt(1);
-  Location out = locations->Out();
-
-  Register op1_reg = is_long ? XRegisterFrom(op1) : WRegisterFrom(op1);
-  Register op2_reg = is_long ? XRegisterFrom(op2) : WRegisterFrom(op2);
-  Register out_reg = is_long ? XRegisterFrom(out) : WRegisterFrom(out);
-
-  __ Cmp(op1_reg, op2_reg);
-  __ Csel(out_reg, op1_reg, op2_reg, is_min ? lt : gt);
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMinIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMinIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, /* is_long */ false, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMinLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMinLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, /* is_long */ true, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMaxIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMaxIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ false, GetVIXLAssembler());
-}
-
-void IntrinsicLocationsBuilderARM64::VisitMathMaxLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARM64::VisitMathMaxLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetVIXLAssembler());
-}
-
 static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
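
The ARM64 min/max intrinsics could be deleted wholesale above because AArch64 Fmin/Fmax already match Java's contract in hardware: they return a NaN when either input is one and order -0.0 before +0.0. Note that C's std::fmin behaves differently, which is why a scalar library call is no substitute — a standalone check of that difference:

#include <cassert>
#include <cmath>

int main() {
  // C's fmin prefers the numeric operand when one input is NaN...
  assert(!std::isnan(std::fmin(1.0, std::nan(""))));
  // ...whereas Java's Math.min must return the NaN, which is what AArch64
  // FMIN does in hardware, so the deleted block needed no extra fix-up code.
  return 0;
}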
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index e351fcc..e61a0b0 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -432,264 +432,6 @@
   GenNumberOfTrailingZeros(invoke, DataType::Type::kInt64, codegen_);
 }
 
-static void GenMinMaxFloat(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* codegen) {
-  ArmVIXLAssembler* assembler = codegen->GetAssembler();
-  Location op1_loc = invoke->GetLocations()->InAt(0);
-  Location op2_loc = invoke->GetLocations()->InAt(1);
-  Location out_loc = invoke->GetLocations()->Out();
-
-  // Optimization: don't generate any code if inputs are the same.
-  if (op1_loc.Equals(op2_loc)) {
-    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
-    return;
-  }
-
-  vixl32::SRegister op1 = SRegisterFrom(op1_loc);
-  vixl32::SRegister op2 = SRegisterFrom(op2_loc);
-  vixl32::SRegister out = OutputSRegister(invoke);
-  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
-  const vixl32::Register temp1 = temps.Acquire();
-  vixl32::Register temp2 = RegisterFrom(invoke->GetLocations()->GetTemp(0));
-  vixl32::Label nan, done;
-  vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &done);
-
-  DCHECK(op1.Is(out));
-
-  __ Vcmp(op1, op2);
-  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
-  __ B(vs, &nan, /* far_target */ false);  // if un-ordered, go to NaN handling.
-
-  // op1 <> op2
-  vixl32::ConditionType cond = is_min ? gt : lt;
-  {
-    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
-                                2 * kMaxInstructionSizeInBytes,
-                                CodeBufferCheckScope::kMaximumSize);
-    __ it(cond);
-    __ vmov(cond, F32, out, op2);
-  }
-  // for <>(not equal), we've done min/max calculation.
-  __ B(ne, final_label, /* far_target */ false);
-
-  // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
-  __ Vmov(temp1, op1);
-  __ Vmov(temp2, op2);
-  if (is_min) {
-    __ Orr(temp1, temp1, temp2);
-  } else {
-    __ And(temp1, temp1, temp2);
-  }
-  __ Vmov(out, temp1);
-  __ B(final_label);
-
-  // handle NaN input.
-  __ Bind(&nan);
-  __ Movt(temp1, High16Bits(kNanFloat));  // 0x7FC0xxxx is a NaN.
-  __ Vmov(out, temp1);
-
-  if (done.IsReferenced()) {
-    __ Bind(&done);
-  }
-}
-
-static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetInAt(1, Location::RequiresFpuRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-  invoke->GetLocations()->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFloat(invoke, /* is_min */ true, codegen_);
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-  invoke->GetLocations()->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFloat(invoke, /* is_min */ false, codegen_);
-}
-
-static void GenMinMaxDouble(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* codegen) {
-  ArmVIXLAssembler* assembler = codegen->GetAssembler();
-  Location op1_loc = invoke->GetLocations()->InAt(0);
-  Location op2_loc = invoke->GetLocations()->InAt(1);
-  Location out_loc = invoke->GetLocations()->Out();
-
-  // Optimization: don't generate any code if inputs are the same.
-  if (op1_loc.Equals(op2_loc)) {
-    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in.
-    return;
-  }
-
-  vixl32::DRegister op1 = DRegisterFrom(op1_loc);
-  vixl32::DRegister op2 = DRegisterFrom(op2_loc);
-  vixl32::DRegister out = OutputDRegister(invoke);
-  vixl32::Label handle_nan_eq, done;
-  vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &done);
-
-  DCHECK(op1.Is(out));
-
-  __ Vcmp(op1, op2);
-  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
-  __ B(vs, &handle_nan_eq, /* far_target */ false);  // if un-ordered, go to NaN handling.
-
-  // op1 <> op2
-  vixl32::ConditionType cond = is_min ? gt : lt;
-  {
-    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
-                                2 * kMaxInstructionSizeInBytes,
-                                CodeBufferCheckScope::kMaximumSize);
-    __ it(cond);
-    __ vmov(cond, F64, out, op2);
-  }
-  // for <>(not equal), we've done min/max calculation.
-  __ B(ne, final_label, /* far_target */ false);
-
-  // handle op1 == op2, max(+0.0,-0.0).
-  if (!is_min) {
-    __ Vand(F64, out, op1, op2);
-    __ B(final_label);
-  }
-
-  // handle op1 == op2, min(+0.0,-0.0), NaN input.
-  __ Bind(&handle_nan_eq);
-  __ Vorr(F64, out, op1, op2);  // assemble op1/-0.0/NaN.
-
-  if (done.IsReferenced()) {
-    __ Bind(&done);
-  }
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxDouble(invoke, /* is_min */ true , codegen_);
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxDouble(invoke, /* is_min */ false, codegen_);
-}
-
-static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
-  Location op1_loc = invoke->GetLocations()->InAt(0);
-  Location op2_loc = invoke->GetLocations()->InAt(1);
-  Location out_loc = invoke->GetLocations()->Out();
-
-  // Optimization: don't generate any code if inputs are the same.
-  if (op1_loc.Equals(op2_loc)) {
-    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
-    return;
-  }
-
-  vixl32::Register op1_lo = LowRegisterFrom(op1_loc);
-  vixl32::Register op1_hi = HighRegisterFrom(op1_loc);
-  vixl32::Register op2_lo = LowRegisterFrom(op2_loc);
-  vixl32::Register op2_hi = HighRegisterFrom(op2_loc);
-  vixl32::Register out_lo = LowRegisterFrom(out_loc);
-  vixl32::Register out_hi = HighRegisterFrom(out_loc);
-  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
-  const vixl32::Register temp = temps.Acquire();
-
-  DCHECK(op1_lo.Is(out_lo));
-  DCHECK(op1_hi.Is(out_hi));
-
-  // Compare op1 >= op2, or op1 < op2.
-  __ Cmp(out_lo, op2_lo);
-  __ Sbcs(temp, out_hi, op2_hi);
-
-  // Now GE/LT condition code is correct for the long comparison.
-  {
-    vixl32::ConditionType cond = is_min ? ge : lt;
-    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
-                                3 * kMaxInstructionSizeInBytes,
-                                CodeBufferCheckScope::kMaximumSize);
-    __ itt(cond);
-    __ mov(cond, out_lo, op2_lo);
-    __ mov(cond, out_hi, op2_hi);
-  }
-}
-
-static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
-  CreateLongLongToLongLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
-  GenMinMaxLong(invoke, /* is_min */ true, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
-  CreateLongLongToLongLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
-  GenMinMaxLong(invoke, /* is_min */ false, GetAssembler());
-}
-
-static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
-  vixl32::Register op1 = InputRegisterAt(invoke, 0);
-  vixl32::Register op2 = InputRegisterAt(invoke, 1);
-  vixl32::Register out = OutputRegister(invoke);
-
-  __ Cmp(op1, op2);
-
-  {
-    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
-                           3 * kMaxInstructionSizeInBytes,
-                           CodeBufferCheckScope::kMaximumSize);
-
-    __ ite(is_min ? lt : gt);
-    __ mov(is_min ? lt : gt, out, op1);
-    __ mov(is_min ? ge : le, out, op2);
-  }
-}
-
-static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
-  GenMinMax(invoke, /* is_min */ true, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
-  GenMinMax(invoke, /* is_min */ false, GetAssembler());
-}
-
 void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) {
   CreateFPToFPLocations(allocator_, invoke);
 }
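
The deleted GenMinMaxLong above compares two 64-bit values held in 32-bit register pairs with a single Cmp on the low words followed by Sbcs on the high words. A standalone rendering of that borrow trick (hypothetical helper, assuming only <cstdint>): the low-word compare produces the borrow, the high-word subtract-with-borrow is done in wider precision, and its sign is the LT condition the flags would carry.

#include <cassert>
#include <cstdint>

static bool SignedLess64(uint32_t a_lo, int32_t a_hi, uint32_t b_lo, int32_t b_hi) {
  uint32_t borrow = (a_lo < b_lo) ? 1u : 0u;                     // Cmp a_lo, b_lo
  int64_t hi_diff = static_cast<int64_t>(a_hi) - b_hi - borrow;  // Sbcs on high words
  return hi_diff < 0;                                            // LT condition
}

int main() {
  assert(SignedLess64(0xFFFFFFFFu, 0, 0u, 1));    // 2^32 - 1 < 2^32
  assert(!SignedLess64(0u, 1, 0xFFFFFFFFu, 0));   // 2^32 > 2^32 - 1
  assert(SignedLess64(0xFFFFFFFFu, -1, 0u, 0));   // -1 < 0
  return 0;
}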
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 6d6ff75..bc1292b 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -58,6 +58,10 @@
   return codegen_->GetInstructionSetFeatures().Is32BitFloatingPoint();
 }
 
+inline bool IntrinsicCodeGeneratorMIPS::HasMsa() const {
+  return codegen_->GetInstructionSetFeatures().HasMsa();
+}
+
 #define __ codegen->GetAssembler()->
 
 static void MoveFromReturnRegister(Location trg,
@@ -612,6 +616,7 @@
 static void GenBitCount(LocationSummary* locations,
                         DataType::Type type,
                         bool isR6,
+                        bool hasMsa,
                         MipsAssembler* assembler) {
   Register out = locations->Out().AsRegister<Register>();
 
@@ -637,85 +642,102 @@
   // instructions compared to a loop-based algorithm which required 47
   // instructions.
 
-  if (type == DataType::Type::kInt32) {
-    Register in = locations->InAt(0).AsRegister<Register>();
-
-    __ Srl(TMP, in, 1);
-    __ LoadConst32(AT, 0x55555555);
-    __ And(TMP, TMP, AT);
-    __ Subu(TMP, in, TMP);
-    __ LoadConst32(AT, 0x33333333);
-    __ And(out, TMP, AT);
-    __ Srl(TMP, TMP, 2);
-    __ And(TMP, TMP, AT);
-    __ Addu(TMP, out, TMP);
-    __ Srl(out, TMP, 4);
-    __ Addu(out, out, TMP);
-    __ LoadConst32(AT, 0x0F0F0F0F);
-    __ And(out, out, AT);
-    __ LoadConst32(TMP, 0x01010101);
-    if (isR6) {
-      __ MulR6(out, out, TMP);
+  if (hasMsa) {
+    if (type == DataType::Type::kInt32) {
+      Register in = locations->InAt(0).AsRegister<Register>();
+      __ Mtc1(in, FTMP);
+      __ PcntW(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
+      __ Mfc1(out, FTMP);
     } else {
-      __ MulR2(out, out, TMP);
+      DCHECK_EQ(type, DataType::Type::kInt64);
+      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+      __ Mtc1(in_lo, FTMP);
+      __ Mthc1(in_hi, FTMP);
+      __ PcntD(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
+      __ Mfc1(out, FTMP);
     }
-    __ Srl(out, out, 24);
   } else {
-    DCHECK_EQ(type, DataType::Type::kInt64);
-    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-    Register tmp_hi = locations->GetTemp(0).AsRegister<Register>();
-    Register out_hi = locations->GetTemp(1).AsRegister<Register>();
-    Register tmp_lo = TMP;
-    Register out_lo = out;
+    if (type == DataType::Type::kInt32) {
+      Register in = locations->InAt(0).AsRegister<Register>();
 
-    __ Srl(tmp_lo, in_lo, 1);
-    __ Srl(tmp_hi, in_hi, 1);
-
-    __ LoadConst32(AT, 0x55555555);
-
-    __ And(tmp_lo, tmp_lo, AT);
-    __ Subu(tmp_lo, in_lo, tmp_lo);
-
-    __ And(tmp_hi, tmp_hi, AT);
-    __ Subu(tmp_hi, in_hi, tmp_hi);
-
-    __ LoadConst32(AT, 0x33333333);
-
-    __ And(out_lo, tmp_lo, AT);
-    __ Srl(tmp_lo, tmp_lo, 2);
-    __ And(tmp_lo, tmp_lo, AT);
-    __ Addu(tmp_lo, out_lo, tmp_lo);
-
-    __ And(out_hi, tmp_hi, AT);
-    __ Srl(tmp_hi, tmp_hi, 2);
-    __ And(tmp_hi, tmp_hi, AT);
-    __ Addu(tmp_hi, out_hi, tmp_hi);
-
-    // Here we deviate from the original algorithm a bit. We've reached
-    // the stage where the bitfields holding the subtotals are large
-    // enough to hold the combined subtotals for both the low word, and
-    // the high word. This means that we can add the subtotals for the
-    // the high, and low words into a single word, and compute the final
-    // result for both the high, and low words using fewer instructions.
-    __ LoadConst32(AT, 0x0F0F0F0F);
-
-    __ Addu(TMP, tmp_hi, tmp_lo);
-
-    __ Srl(out, TMP, 4);
-    __ And(out, out, AT);
-    __ And(TMP, TMP, AT);
-    __ Addu(out, out, TMP);
-
-    __ LoadConst32(AT, 0x01010101);
-
-    if (isR6) {
-      __ MulR6(out, out, AT);
+      __ Srl(TMP, in, 1);
+      __ LoadConst32(AT, 0x55555555);
+      __ And(TMP, TMP, AT);
+      __ Subu(TMP, in, TMP);
+      __ LoadConst32(AT, 0x33333333);
+      __ And(out, TMP, AT);
+      __ Srl(TMP, TMP, 2);
+      __ And(TMP, TMP, AT);
+      __ Addu(TMP, out, TMP);
+      __ Srl(out, TMP, 4);
+      __ Addu(out, out, TMP);
+      __ LoadConst32(AT, 0x0F0F0F0F);
+      __ And(out, out, AT);
+      __ LoadConst32(TMP, 0x01010101);
+      if (isR6) {
+        __ MulR6(out, out, TMP);
+      } else {
+        __ MulR2(out, out, TMP);
+      }
+      __ Srl(out, out, 24);
     } else {
-      __ MulR2(out, out, AT);
-    }
+      DCHECK_EQ(type, DataType::Type::kInt64);
+      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
+      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
+      Register tmp_hi = locations->GetTemp(0).AsRegister<Register>();
+      Register out_hi = locations->GetTemp(1).AsRegister<Register>();
+      Register tmp_lo = TMP;
+      Register out_lo = out;
 
-    __ Srl(out, out, 24);
+      __ Srl(tmp_lo, in_lo, 1);
+      __ Srl(tmp_hi, in_hi, 1);
+
+      __ LoadConst32(AT, 0x55555555);
+
+      __ And(tmp_lo, tmp_lo, AT);
+      __ Subu(tmp_lo, in_lo, tmp_lo);
+
+      __ And(tmp_hi, tmp_hi, AT);
+      __ Subu(tmp_hi, in_hi, tmp_hi);
+
+      __ LoadConst32(AT, 0x33333333);
+
+      __ And(out_lo, tmp_lo, AT);
+      __ Srl(tmp_lo, tmp_lo, 2);
+      __ And(tmp_lo, tmp_lo, AT);
+      __ Addu(tmp_lo, out_lo, tmp_lo);
+
+      __ And(out_hi, tmp_hi, AT);
+      __ Srl(tmp_hi, tmp_hi, 2);
+      __ And(tmp_hi, tmp_hi, AT);
+      __ Addu(tmp_hi, out_hi, tmp_hi);
+
+      // Here we deviate from the original algorithm a bit. We've reached
+      // the stage where the bitfields holding the subtotals are large
+      // enough to hold the combined subtotals for both the low word, and
+      // the high word. This means that we can add the subtotals for
+      // the high, and low words into a single word, and compute the final
+      // result for both the high, and low words using fewer instructions.
+      __ LoadConst32(AT, 0x0F0F0F0F);
+
+      __ Addu(TMP, tmp_hi, tmp_lo);
+
+      __ Srl(out, TMP, 4);
+      __ And(out, out, AT);
+      __ And(TMP, TMP, AT);
+      __ Addu(out, out, TMP);
+
+      __ LoadConst32(AT, 0x01010101);
+
+      if (isR6) {
+        __ MulR6(out, out, AT);
+      } else {
+        __ MulR2(out, out, AT);
+      }
+
+      __ Srl(out, out, 24);
+    }
   }
 }
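
For reference, the non-MSA sequence above is the classic SWAR population count — the 0x55/0x33/0x0F masks build 2-bit, 4-bit, then byte subtotals, and the 0x01010101 multiply sums the bytes into the top byte. Written as plain C++, mirroring the emitted Srl/And/Subu/Addu/Mul instructions:

#include <cassert>
#include <cstdint>

static uint32_t PopCount32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);                  // pairwise bit sums
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);  // nibble sums
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;                  // byte sums
  return (x * 0x01010101u) >> 24;                    // add all bytes into the top one
}

int main() {
  assert(PopCount32(0u) == 0u);
  assert(PopCount32(0xFFFFFFFFu) == 32u);
  assert(PopCount32(0x80000001u) == 2u);
  return 0;
}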
 
@@ -725,7 +747,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler());
+  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), HasMsa(), GetAssembler());
 }
 
 // int java.lang.Long.bitCount(int)
@@ -739,459 +761,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler());
-}
-
-static void GenMinMaxFP(LocationSummary* locations,
-                        bool is_min,
-                        DataType::Type type,
-                        bool is_R6,
-                        MipsAssembler* assembler) {
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-  FRegister a = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister b = locations->InAt(1).AsFpuRegister<FRegister>();
-
-  if (is_R6) {
-    MipsLabel noNaNs;
-    MipsLabel done;
-    FRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
-
-    // When Java computes min/max it prefers a NaN to a number; the
-    // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
-    // the inputs is a NaN and the other is a valid number, the MIPS
-    // instruction will return the number; Java wants the NaN value
-    // returned. This is why there is extra logic preceding the use of
-    // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a
-    // NaN, return the NaN, otherwise return the min/max.
-    if (type == DataType::Type::kFloat64) {
-      __ CmpUnD(FTMP, a, b);
-      __ Bc1eqz(FTMP, &noNaNs);
-
-      // One of the inputs is a NaN
-      __ CmpEqD(ftmp, a, a);
-      // If a == a then b is the NaN, otherwise a is the NaN.
-      __ SelD(ftmp, a, b);
-
-      if (ftmp != out) {
-        __ MovD(out, ftmp);
-      }
-
-      __ B(&done);
-
-      __ Bind(&noNaNs);
-
-      if (is_min) {
-        __ MinD(out, a, b);
-      } else {
-        __ MaxD(out, a, b);
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat32);
-      __ CmpUnS(FTMP, a, b);
-      __ Bc1eqz(FTMP, &noNaNs);
-
-      // One of the inputs is a NaN
-      __ CmpEqS(ftmp, a, a);
-      // If a == a then b is the NaN, otherwise a is the NaN.
-      __ SelS(ftmp, a, b);
-
-      if (ftmp != out) {
-        __ MovS(out, ftmp);
-      }
-
-      __ B(&done);
-
-      __ Bind(&noNaNs);
-
-      if (is_min) {
-        __ MinS(out, a, b);
-      } else {
-        __ MaxS(out, a, b);
-      }
-    }
-
-    __ Bind(&done);
-  } else {
-    MipsLabel ordered;
-    MipsLabel compare;
-    MipsLabel select;
-    MipsLabel done;
-
-    if (type == DataType::Type::kFloat64) {
-      __ CunD(a, b);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat32);
-      __ CunS(a, b);
-    }
-    __ Bc1f(&ordered);
-
-    // a or b (or both) is a NaN. Return one, which is a NaN.
-    if (type == DataType::Type::kFloat64) {
-      __ CeqD(b, b);
-    } else {
-      __ CeqS(b, b);
-    }
-    __ B(&select);
-
-    __ Bind(&ordered);
-
-    // Neither is a NaN.
-    // a == b? (-0.0 compares equal with +0.0)
-    // If equal, handle zeroes, else compare further.
-    if (type == DataType::Type::kFloat64) {
-      __ CeqD(a, b);
-    } else {
-      __ CeqS(a, b);
-    }
-    __ Bc1f(&compare);
-
-    // a == b either bit for bit or one is -0.0 and the other is +0.0.
-    if (type == DataType::Type::kFloat64) {
-      __ MoveFromFpuHigh(TMP, a);
-      __ MoveFromFpuHigh(AT, b);
-    } else {
-      __ Mfc1(TMP, a);
-      __ Mfc1(AT, b);
-    }
-
-    if (is_min) {
-      // -0.0 prevails over +0.0.
-      __ Or(TMP, TMP, AT);
-    } else {
-      // +0.0 prevails over -0.0.
-      __ And(TMP, TMP, AT);
-    }
-
-    if (type == DataType::Type::kFloat64) {
-      __ Mfc1(AT, a);
-      __ Mtc1(AT, out);
-      __ MoveToFpuHigh(TMP, out);
-    } else {
-      __ Mtc1(TMP, out);
-    }
-    __ B(&done);
-
-    __ Bind(&compare);
-
-    if (type == DataType::Type::kFloat64) {
-      if (is_min) {
-        // return (a <= b) ? a : b;
-        __ ColeD(a, b);
-      } else {
-        // return (a >= b) ? a : b;
-        __ ColeD(b, a);  // b <= a
-      }
-    } else {
-      if (is_min) {
-        // return (a <= b) ? a : b;
-        __ ColeS(a, b);
-      } else {
-        // return (a >= b) ? a : b;
-        __ ColeS(b, a);  // b <= a
-      }
-    }
-
-    __ Bind(&select);
-
-    if (type == DataType::Type::kFloat64) {
-      __ MovtD(out, a);
-      __ MovfD(out, b);
-    } else {
-      __ MovtS(out, a);
-      __ MovfS(out, b);
-    }
-
-    __ Bind(&done);
-  }
-}
-
-static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetInAt(1, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
-}
-
-// double java.lang.Math.min(double, double)
-void IntrinsicLocationsBuilderMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(),
-              /* is_min */ true,
-              DataType::Type::kFloat64,
-              IsR6(),
-              GetAssembler());
-}
-
-// float java.lang.Math.min(float, float)
-void IntrinsicLocationsBuilderMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(),
-              /* is_min */ true,
-              DataType::Type::kFloat32,
-              IsR6(),
-              GetAssembler());
-}
-
-// double java.lang.Math.max(double, double)
-void IntrinsicLocationsBuilderMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(),
-              /* is_min */ false,
-              DataType::Type::kFloat64,
-              IsR6(),
-              GetAssembler());
-}
-
-// float java.lang.Math.max(float, float)
-void IntrinsicLocationsBuilderMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(),
-              /* is_min */ false,
-              DataType::Type::kFloat32,
-              IsR6(),
-              GetAssembler());
-}
-
-static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-static void GenMinMax(LocationSummary* locations,
-                      bool is_min,
-                      DataType::Type type,
-                      bool is_R6,
-                      MipsAssembler* assembler) {
-  if (is_R6) {
-    // Some architectures, such as ARM and MIPS (prior to r6), have a
-    // conditional move instruction which only changes the target
-    // (output) register if the condition is true (MIPS prior to r6 had
-    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions
-    // always change the target (output) register.  If the condition is
-    // true the output register gets the contents of the "rs" register;
-    // otherwise, the output register is set to zero. One consequence
-    // of this is that to implement something like "rd = c==0 ? rs : rt"
-    // MIPS64r6 needs to use a pair of SELEQZ/SELNEZ instructions.
-    // After executing this pair of instructions one of the output
-    // registers from the pair will necessarily contain zero. Then the
-    // code ORs the output registers from the SELEQZ/SELNEZ instructions
-    // to get the final result.
-    //
-    // The initial test to see if the output register is same as the
-    // first input register is needed to make sure that value in the
-    // first input register isn't clobbered before we've finished
-    // computing the output value. The logic in the corresponding else
-    // clause performs the same task but makes sure the second input
-    // register isn't clobbered in the event that it's the same register
-    // as the output register; the else clause also handles the case
-    // where the output register is distinct from both the first, and the
-    // second input registers.
-    if (type == DataType::Type::kInt64) {
-      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
-      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-      MipsLabel compare_done;
-
-      if (a_lo == b_lo) {
-        if (out_lo != a_lo) {
-          __ Move(out_lo, a_lo);
-          __ Move(out_hi, a_hi);
-        }
-      } else {
-        __ Slt(TMP, b_hi, a_hi);
-        __ Bne(b_hi, a_hi, &compare_done);
-
-        __ Sltu(TMP, b_lo, a_lo);
-
-        __ Bind(&compare_done);
-
-        if (is_min) {
-          __ Seleqz(AT, a_lo, TMP);
-          __ Selnez(out_lo, b_lo, TMP);  // Safe even if out_lo == a_lo/b_lo
-                                         // because at this point we're
-                                         // done using a_lo/b_lo.
-        } else {
-          __ Selnez(AT, a_lo, TMP);
-          __ Seleqz(out_lo, b_lo, TMP);  // ditto
-        }
-        __ Or(out_lo, out_lo, AT);
-        if (is_min) {
-          __ Seleqz(AT, a_hi, TMP);
-          __ Selnez(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
-        } else {
-          __ Selnez(AT, a_hi, TMP);
-          __ Seleqz(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
-        }
-        __ Or(out_hi, out_hi, AT);
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt32);
-      Register a = locations->InAt(0).AsRegister<Register>();
-      Register b = locations->InAt(1).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-
-      if (a == b) {
-        if (out != a) {
-          __ Move(out, a);
-        }
-      } else {
-        __ Slt(AT, b, a);
-        if (is_min) {
-          __ Seleqz(TMP, a, AT);
-          __ Selnez(AT, b, AT);
-        } else {
-          __ Selnez(TMP, a, AT);
-          __ Seleqz(AT, b, AT);
-        }
-        __ Or(out, TMP, AT);
-      }
-    }
-  } else {
-    if (type == DataType::Type::kInt64) {
-      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
-      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-      MipsLabel compare_done;
-
-      if (a_lo == b_lo) {
-        if (out_lo != a_lo) {
-          __ Move(out_lo, a_lo);
-          __ Move(out_hi, a_hi);
-        }
-      } else {
-        __ Slt(TMP, a_hi, b_hi);
-        __ Bne(a_hi, b_hi, &compare_done);
-
-        __ Sltu(TMP, a_lo, b_lo);
-
-        __ Bind(&compare_done);
-
-        if (is_min) {
-          if (out_lo != a_lo) {
-            __ Movn(out_hi, a_hi, TMP);
-            __ Movn(out_lo, a_lo, TMP);
-          }
-          if (out_lo != b_lo) {
-            __ Movz(out_hi, b_hi, TMP);
-            __ Movz(out_lo, b_lo, TMP);
-          }
-        } else {
-          if (out_lo != a_lo) {
-            __ Movz(out_hi, a_hi, TMP);
-            __ Movz(out_lo, a_lo, TMP);
-          }
-          if (out_lo != b_lo) {
-            __ Movn(out_hi, b_hi, TMP);
-            __ Movn(out_lo, b_lo, TMP);
-          }
-        }
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt32);
-      Register a = locations->InAt(0).AsRegister<Register>();
-      Register b = locations->InAt(1).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-
-      if (a == b) {
-        if (out != a) {
-          __ Move(out, a);
-        }
-      } else {
-        __ Slt(AT, a, b);
-        if (is_min) {
-          if (out != a) {
-            __ Movn(out, a, AT);
-          }
-          if (out != b) {
-            __ Movz(out, b, AT);
-          }
-        } else {
-          if (out != a) {
-            __ Movz(out, a, AT);
-          }
-          if (out != b) {
-            __ Movn(out, b, AT);
-          }
-        }
-      }
-    }
-  }
-}
-
-// int java.lang.Math.min(int, int)
-void IntrinsicLocationsBuilderMIPS::VisitMathMinIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMinIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(),
-            /* is_min */ true,
-            DataType::Type::kInt32,
-            IsR6(),
-            GetAssembler());
-}
-
-// long java.lang.Math.min(long, long)
-void IntrinsicLocationsBuilderMIPS::VisitMathMinLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMinLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(),
-            /* is_min */ true,
-            DataType::Type::kInt64,
-            IsR6(),
-            GetAssembler());
-}
-
-// int java.lang.Math.max(int, int)
-void IntrinsicLocationsBuilderMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMaxIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(),
-            /* is_min */ false,
-            DataType::Type::kInt32,
-            IsR6(),
-            GetAssembler());
-}
-
-// long java.lang.Math.max(long, long)
-void IntrinsicLocationsBuilderMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathMaxLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(),
-            /* is_min */ false,
-            DataType::Type::kInt64,
-            IsR6(),
-            GetAssembler());
+  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), HasMsa(), GetAssembler());
 }
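
The deleted R6 float paths above isolate the NaN operand with CmpEqD(ftmp, a, a) followed by SelD(ftmp, a, b). Distilled to scalar C++ (a sketch valid only under the guard the code establishes first with CmpUnD, namely that at least one input is a NaN):

#include <cassert>
#include <cmath>

static double PickTheNaN(double a, double b) {
  // a == a holds only when a is a number, and then b must be the NaN;
  // otherwise a itself is the NaN.
  return (a == a) ? b : a;
}

int main() {
  assert(std::isnan(PickTheNaN(1.0, std::nan(""))));
  assert(std::isnan(PickTheNaN(std::nan(""), 1.0)));
  return 0;
}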
 
 // double java.lang.Math.sqrt(double)
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 13397f1..1c1ba40 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -71,6 +71,7 @@
   bool IsR2OrNewer() const;
   bool IsR6() const;
   bool Is32BitFPU() const;
+  bool HasMsa() const;
 
  private:
   MipsAssembler* GetAssembler();
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 5debd26..f429afd 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -46,6 +46,10 @@
   return codegen_->GetGraph()->GetAllocator();
 }
 
+inline bool IntrinsicCodeGeneratorMIPS64::HasMsa() const {
+  return codegen_->GetInstructionSetFeatures().HasMsa();
+}
+
 #define __ codegen->GetAssembler()->
 
 static void MoveFromReturnRegister(Location trg,
@@ -386,6 +390,7 @@
 
 static void GenBitCount(LocationSummary* locations,
                         const DataType::Type type,
+                        const bool hasMsa,
                         Mips64Assembler* assembler) {
   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
   GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
@@ -414,41 +419,52 @@
   // bits are set but the algorithm here attempts to minimize the total
   // number of instructions executed even when a large number of bits
   // are set.
-
-  if (type == DataType::Type::kInt32) {
-    __ Srl(TMP, in, 1);
-    __ LoadConst32(AT, 0x55555555);
-    __ And(TMP, TMP, AT);
-    __ Subu(TMP, in, TMP);
-    __ LoadConst32(AT, 0x33333333);
-    __ And(out, TMP, AT);
-    __ Srl(TMP, TMP, 2);
-    __ And(TMP, TMP, AT);
-    __ Addu(TMP, out, TMP);
-    __ Srl(out, TMP, 4);
-    __ Addu(out, out, TMP);
-    __ LoadConst32(AT, 0x0F0F0F0F);
-    __ And(out, out, AT);
-    __ LoadConst32(TMP, 0x01010101);
-    __ MulR6(out, out, TMP);
-    __ Srl(out, out, 24);
-  } else if (type == DataType::Type::kInt64) {
-    __ Dsrl(TMP, in, 1);
-    __ LoadConst64(AT, 0x5555555555555555L);
-    __ And(TMP, TMP, AT);
-    __ Dsubu(TMP, in, TMP);
-    __ LoadConst64(AT, 0x3333333333333333L);
-    __ And(out, TMP, AT);
-    __ Dsrl(TMP, TMP, 2);
-    __ And(TMP, TMP, AT);
-    __ Daddu(TMP, out, TMP);
-    __ Dsrl(out, TMP, 4);
-    __ Daddu(out, out, TMP);
-    __ LoadConst64(AT, 0x0F0F0F0F0F0F0F0FL);
-    __ And(out, out, AT);
-    __ LoadConst64(TMP, 0x0101010101010101L);
-    __ Dmul(out, out, TMP);
-    __ Dsrl32(out, out, 24);
+  if (hasMsa) {
+    if (type == DataType::Type::kInt32) {
+      __ Mtc1(in, FTMP);
+      __ PcntW(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
+      __ Mfc1(out, FTMP);
+    } else {
+      __ Dmtc1(in, FTMP);
+      __ PcntD(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
+      __ Dmfc1(out, FTMP);
+    }
+  } else {
+    if (type == DataType::Type::kInt32) {
+      __ Srl(TMP, in, 1);
+      __ LoadConst32(AT, 0x55555555);
+      __ And(TMP, TMP, AT);
+      __ Subu(TMP, in, TMP);
+      __ LoadConst32(AT, 0x33333333);
+      __ And(out, TMP, AT);
+      __ Srl(TMP, TMP, 2);
+      __ And(TMP, TMP, AT);
+      __ Addu(TMP, out, TMP);
+      __ Srl(out, TMP, 4);
+      __ Addu(out, out, TMP);
+      __ LoadConst32(AT, 0x0F0F0F0F);
+      __ And(out, out, AT);
+      __ LoadConst32(TMP, 0x01010101);
+      __ MulR6(out, out, TMP);
+      __ Srl(out, out, 24);
+    } else {
+      __ Dsrl(TMP, in, 1);
+      __ LoadConst64(AT, 0x5555555555555555L);
+      __ And(TMP, TMP, AT);
+      __ Dsubu(TMP, in, TMP);
+      __ LoadConst64(AT, 0x3333333333333333L);
+      __ And(out, TMP, AT);
+      __ Dsrl(TMP, TMP, 2);
+      __ And(TMP, TMP, AT);
+      __ Daddu(TMP, out, TMP);
+      __ Dsrl(out, TMP, 4);
+      __ Daddu(out, out, TMP);
+      __ LoadConst64(AT, 0x0F0F0F0F0F0F0F0FL);
+      __ And(out, out, AT);
+      __ LoadConst64(TMP, 0x0101010101010101L);
+      __ Dmul(out, out, TMP);
+      __ Dsrl32(out, out, 24);
+    }
   }
 }
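
The MSA fast path above replaces the whole mask-and-multiply sequence with one pcnt.w/pcnt.d plus moves between general and vector registers. A portable stand-in for that hardware path (assuming a GCC/Clang builtin, purely for illustration):

#include <cassert>
#include <cstdint>

static uint64_t PopCount64(uint64_t x) {
  return static_cast<uint64_t>(__builtin_popcountll(x));  // one instruction where supported
}

int main() {
  assert(PopCount64(0x8000000000000001ull) == 2u);
  return 0;
}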
 
@@ -458,7 +474,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler());
+  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, HasMsa(), GetAssembler());
 }
 
 // int java.lang.Long.bitCount(long)
@@ -467,222 +483,7 @@
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
-}
-
-static void GenMinMaxFP(LocationSummary* locations,
-                        bool is_min,
-                        DataType::Type type,
-                        Mips64Assembler* assembler) {
-  FpuRegister a = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister b = locations->InAt(1).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  Mips64Label noNaNs;
-  Mips64Label done;
-  FpuRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
-
-  // When Java computes min/max it prefers a NaN to a number; the
-  // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
-  // the inputs is a NaN and the other is a valid number, the MIPS
-  // instruction will return the number; Java wants the NaN value
-  // returned. This is why there is extra logic preceding the use of
-  // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a
-  // NaN, return the NaN, otherwise return the min/max.
-  if (type == DataType::Type::kFloat64) {
-    __ CmpUnD(FTMP, a, b);
-    __ Bc1eqz(FTMP, &noNaNs);
-
-    // One of the inputs is a NaN
-    __ CmpEqD(ftmp, a, a);
-    // If a == a then b is the NaN, otherwise a is the NaN.
-    __ SelD(ftmp, a, b);
-
-    if (ftmp != out) {
-      __ MovD(out, ftmp);
-    }
-
-    __ Bc(&done);
-
-    __ Bind(&noNaNs);
-
-    if (is_min) {
-      __ MinD(out, a, b);
-    } else {
-      __ MaxD(out, a, b);
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat32);
-    __ CmpUnS(FTMP, a, b);
-    __ Bc1eqz(FTMP, &noNaNs);
-
-    // One of the inputs is a NaN
-    __ CmpEqS(ftmp, a, a);
-    // If a == a then b is the NaN, otherwise a is the NaN.
-    __ SelS(ftmp, a, b);
-
-    if (ftmp != out) {
-      __ MovS(out, ftmp);
-    }
-
-    __ Bc(&done);
-
-    __ Bind(&noNaNs);
-
-    if (is_min) {
-      __ MinS(out, a, b);
-    } else {
-      __ MaxS(out, a, b);
-    }
-  }
-
-  __ Bind(&done);
-}
-
-static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetInAt(1, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-}
-
-// double java.lang.Math.min(double, double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, DataType::Type::kFloat64, GetAssembler());
-}
-
-// float java.lang.Math.min(float, float)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ true, DataType::Type::kFloat32, GetAssembler());
-}
-
-// double java.lang.Math.max(double, double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, DataType::Type::kFloat64, GetAssembler());
-}
-
-// float java.lang.Math.max(float, float)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke->GetLocations(), /* is_min */ false, DataType::Type::kFloat32, GetAssembler());
-}
-
-static void GenMinMax(LocationSummary* locations,
-                      bool is_min,
-                      Mips64Assembler* assembler) {
-  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (lhs == rhs) {
-    if (out != lhs) {
-      __ Move(out, lhs);
-    }
-  } else {
-    // Some architectures, such as ARM and MIPS (prior to r6), have a
-    // conditional move instruction which only changes the target
-    // (output) register if the condition is true (MIPS prior to r6 had
-    // MOVF, MOVT, and MOVZ). The SELEQZ and SELNEZ instructions always
-    // change the target (output) register.  If the condition is true the
-    // output register gets the contents of the "rs" register; otherwise,
-    // the output register is set to zero. One consequence of this is
-    // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
-    // needs to use a pair of SELEQZ/SELNEZ instructions.  After
-    // executing this pair of instructions one of the output registers
-    // from the pair will necessarily contain zero. Then the code ORs the
-    // output registers from the SELEQZ/SELNEZ instructions to get the
-    // final result.
-    //
-    // The initial test to see if the output register is same as the
-    // first input register is needed to make sure that value in the
-    // first input register isn't clobbered before we've finished
-    // computing the output value. The logic in the corresponding else
-    // clause performs the same task but makes sure the second input
-    // register isn't clobbered in the event that it's the same register
-    // as the output register; the else clause also handles the case
-    // where the output register is distinct from both the first, and the
-    // second input registers.
-    if (out == lhs) {
-      __ Slt(AT, rhs, lhs);
-      if (is_min) {
-        __ Seleqz(out, lhs, AT);
-        __ Selnez(AT, rhs, AT);
-      } else {
-        __ Selnez(out, lhs, AT);
-        __ Seleqz(AT, rhs, AT);
-      }
-    } else {
-      __ Slt(AT, lhs, rhs);
-      if (is_min) {
-        __ Seleqz(out, rhs, AT);
-        __ Selnez(AT, lhs, AT);
-      } else {
-        __ Selnez(out, rhs, AT);
-        __ Seleqz(AT, lhs, AT);
-      }
-    }
-    __ Or(out, out, AT);
-  }
-}
-
-static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-// int java.lang.Math.min(int, int)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMinIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, GetAssembler());
-}
-
-// long java.lang.Math.min(long, long)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMinLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, GetAssembler());
-}
-
-// int java.lang.Math.max(int, int)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMaxIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, GetAssembler());
-}
-
-// long java.lang.Math.max(long, long)
-void IntrinsicLocationsBuilderMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathMaxLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, GetAssembler());
+  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, HasMsa(), GetAssembler());
 }
 
 // double java.lang.Math.sqrt(double)
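
For reference, the SELEQZ/SELNEZ idiom that the removed comment above describes comes down
to four instructions for an integer min when 'out' is distinct from both inputs (AT is the
scratch register); a sketch of the removed emitter calls:

    __ Slt(AT, lhs, rhs);     // AT = (lhs < rhs) ? 1 : 0
    __ Seleqz(out, rhs, AT);  // out = (AT == 0) ? rhs : 0
    __ Selnez(AT, lhs, AT);   // AT  = (AT != 0) ? lhs : 0
    __ Or(out, out, AT);      // one register is zero, so OR yields the selected value

For max, the Seleqz/Selnez roles are swapped, as the removed GenMinMax shows.
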
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 6f40d90..748b0b0 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -68,6 +68,8 @@
 #undef INTRINSICS_LIST
 #undef OPTIMIZING_INTRINSICS
 
+  bool HasMsa() const;
+
  private:
   Mips64Assembler* GetAssembler();
 
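
The matching definition of HasMsa() is in intrinsics_mips64.cc (outside this hunk); it
presumably just forwards to the codegen's instruction set features, along these lines:

    bool IntrinsicCodeGeneratorMIPS64::HasMsa() const {
      return codegen_->GetInstructionSetFeatures().HasMsa();
    }
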
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 0edc061..c4f322b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -40,11 +40,6 @@
 
 namespace x86 {
 
-static constexpr int kDoubleNaNHigh = 0x7FF80000;
-static constexpr int kDoubleNaNLow = 0x00000000;
-static constexpr int64_t kDoubleNaN = INT64_C(0x7FF8000000000000);
-static constexpr int32_t kFloatNaN = INT32_C(0x7FC00000);
-
 IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
   : allocator_(codegen->GetGraph()->GetAllocator()),
     codegen_(codegen) {
@@ -333,278 +328,6 @@
   GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
 }
 
-static void GenMinMaxFP(HInvoke* invoke,
-                        bool is_min,
-                        bool is_double,
-                        X86Assembler* assembler,
-                        CodeGeneratorX86* codegen) {
-  LocationSummary* locations = invoke->GetLocations();
-  Location op1_loc = locations->InAt(0);
-  Location op2_loc = locations->InAt(1);
-  Location out_loc = locations->Out();
-  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
-
-  // Shortcut for same input locations.
-  if (op1_loc.Equals(op2_loc)) {
-    DCHECK(out_loc.Equals(op1_loc));
-    return;
-  }
-
-  //  (out := op1)
-  //  out <=? op2
-  //  if Nan jmp Nan_label
-  //  if out is min jmp done
-  //  if op2 is min jmp op2_label
-  //  handle -0/+0
-  //  jmp done
-  // Nan_label:
-  //  out := NaN
-  // op2_label:
-  //  out := op2
-  // done:
-  //
-  // This removes one jmp, but needs to copy one input (op1) to out.
-  //
-  // TODO: This is straight from Quick (except literal pool). Make NaN an out-of-line slowpath?
-
-  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
-
-  NearLabel nan, done, op2_label;
-  if (is_double) {
-    __ ucomisd(out, op2);
-  } else {
-    __ ucomiss(out, op2);
-  }
-
-  __ j(Condition::kParityEven, &nan);
-
-  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
-  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);
-
-  // Handle 0.0/-0.0.
-  if (is_min) {
-    if (is_double) {
-      __ orpd(out, op2);
-    } else {
-      __ orps(out, op2);
-    }
-  } else {
-    if (is_double) {
-      __ andpd(out, op2);
-    } else {
-      __ andps(out, op2);
-    }
-  }
-  __ jmp(&done);
-
-  // NaN handling.
-  __ Bind(&nan);
-  // Do we have a constant area pointer?
-  if (locations->GetInputCount() == 3 && locations->InAt(2).IsValid()) {
-    HX86ComputeBaseMethodAddress* method_address =
-        invoke->InputAt(2)->AsX86ComputeBaseMethodAddress();
-    DCHECK(locations->InAt(2).IsRegister());
-    Register constant_area = locations->InAt(2).AsRegister<Register>();
-    if (is_double) {
-      __ movsd(out, codegen->LiteralInt64Address(kDoubleNaN, method_address, constant_area));
-    } else {
-      __ movss(out, codegen->LiteralInt32Address(kFloatNaN, method_address, constant_area));
-    }
-  } else {
-    if (is_double) {
-      __ pushl(Immediate(kDoubleNaNHigh));
-      __ pushl(Immediate(kDoubleNaNLow));
-      __ movsd(out, Address(ESP, 0));
-      __ addl(ESP, Immediate(8));
-    } else {
-      __ pushl(Immediate(kFloatNaN));
-      __ movss(out, Address(ESP, 0));
-      __ addl(ESP, Immediate(4));
-    }
-  }
-  __ jmp(&done);
-
-  // out := op2;
-  __ Bind(&op2_label);
-  if (is_double) {
-    __ movsd(out, op2);
-  } else {
-    __ movss(out, op2);
-  }
-
-  // Done.
-  __ Bind(&done);
-}
-
-static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetInAt(1, Location::RequiresFpuRegister());
-  // The following is sub-optimal, but all we can do for now. It would be fine to also accept
-  // the second input to be the output (we can simply swap inputs).
-  locations->SetOut(Location::SameAsFirstInput());
-  HInvokeStaticOrDirect* static_or_direct = invoke->AsInvokeStaticOrDirect();
-  DCHECK(static_or_direct != nullptr);
-  if (static_or_direct->HasSpecialInput() &&
-      invoke->InputAt(static_or_direct->GetSpecialInputIndex())->IsX86ComputeBaseMethodAddress()) {
-    locations->SetInAt(2, Location::RequiresRegister());
-  }
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke,
-              /* is_min */ true,
-              /* is_double */ true,
-              GetAssembler(),
-              codegen_);
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke,
-              /* is_min */ true,
-              /* is_double */ false,
-              GetAssembler(),
-              codegen_);
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(invoke,
-              /* is_min */ false,
-              /* is_double */ true,
-              GetAssembler(),
-              codegen_);
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(invoke,
-              /* is_min */ false,
-              /* is_double */ false,
-              GetAssembler(),
-              codegen_);
-}
-
-static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
-                      X86Assembler* assembler) {
-  Location op1_loc = locations->InAt(0);
-  Location op2_loc = locations->InAt(1);
-
-  // Shortcut for same input locations.
-  if (op1_loc.Equals(op2_loc)) {
-    // Can return immediately, as op1_loc == out_loc.
-    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
-    //       a copy here.
-    DCHECK(locations->Out().Equals(op1_loc));
-    return;
-  }
-
-  if (is_long) {
-    // Need to perform a subtract to get the sign right.
-    // op1 is already in the same location as the output.
-    Location output = locations->Out();
-    Register output_lo = output.AsRegisterPairLow<Register>();
-    Register output_hi = output.AsRegisterPairHigh<Register>();
-
-    Register op2_lo = op2_loc.AsRegisterPairLow<Register>();
-    Register op2_hi = op2_loc.AsRegisterPairHigh<Register>();
-
-    // Spare register to compute the subtraction to set condition code.
-    Register temp = locations->GetTemp(0).AsRegister<Register>();
-
-    // Subtract off op2_lo.
-    __ movl(temp, output_lo);
-    __ subl(temp, op2_lo);
-
-    // Now use the same temp and the borrow to finish the subtraction of op2_hi.
-    __ movl(temp, output_hi);
-    __ sbbl(temp, op2_hi);
-
-    // Now the condition code is correct.
-    Condition cond = is_min ? Condition::kGreaterEqual : Condition::kLess;
-    __ cmovl(cond, output_lo, op2_lo);
-    __ cmovl(cond, output_hi, op2_hi);
-  } else {
-    Register out = locations->Out().AsRegister<Register>();
-    Register op2 = op2_loc.AsRegister<Register>();
-
-    //  (out := op1)
-    //  out <=? op2
-    //  if out is min jmp done
-    //  out := op2
-    // done:
-
-    __ cmpl(out, op2);
-    Condition cond = is_min ? Condition::kGreater : Condition::kLess;
-    __ cmovl(cond, out, op2);
-  }
-}
-
-static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-}
-
-static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-  // Register to use to perform a long subtract to set cc.
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, /* is_long */ false, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
-  CreateLongLongToLongLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, /* is_long */ true, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ false, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
-  CreateLongLongToLongLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
-}
-
 static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
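
A note on the equal-operands handling in the FP path removed above: +0.0 and -0.0 compare
equal under ucomiss/ucomisd, yet Java requires min(+0.0, -0.0) == -0.0 and
max(+0.0, -0.0) == +0.0. The two zeros differ only in the sign bit, so OR-ing the raw bit
patterns (orps/orpd) keeps the sign bit for min and AND-ing (andps/andpd) clears it for max.
A standalone scalar sketch of the OR half:

    #include <cstdint>
    #include <cstring>

    // Bitwise OR of two doubles, as orpd does on the low lane.
    static double OrBits(double x, double y) {
      uint64_t a;
      uint64_t b;
      std::memcpy(&a, &x, sizeof(a));
      std::memcpy(&b, &y, sizeof(b));
      const uint64_t r = a | b;
      double out;
      std::memcpy(&out, &r, sizeof(out));
      return out;  // OrBits(+0.0, -0.0) == -0.0
    }
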
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 9d378d6..437bc3d 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -236,208 +236,6 @@
   GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
 }
 
-static void GenMinMaxFP(LocationSummary* locations,
-                        bool is_min,
-                        bool is_double,
-                        X86_64Assembler* assembler,
-                        CodeGeneratorX86_64* codegen) {
-  Location op1_loc = locations->InAt(0);
-  Location op2_loc = locations->InAt(1);
-  Location out_loc = locations->Out();
-  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
-
-  // Shortcut for same input locations.
-  if (op1_loc.Equals(op2_loc)) {
-    DCHECK(out_loc.Equals(op1_loc));
-    return;
-  }
-
-  //  (out := op1)
-  //  out <=? op2
-  //  if Nan jmp Nan_label
-  //  if out is min jmp done
-  //  if op2 is min jmp op2_label
-  //  handle -0/+0
-  //  jmp done
-  // Nan_label:
-  //  out := NaN
-  // op2_label:
-  //  out := op2
-  // done:
-  //
-  // This removes one jmp, but needs to copy one input (op1) to out.
-  //
-  // TODO: This is straight from Quick. Make NaN an out-of-line slowpath?
-
-  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
-
-  NearLabel nan, done, op2_label;
-  if (is_double) {
-    __ ucomisd(out, op2);
-  } else {
-    __ ucomiss(out, op2);
-  }
-
-  __ j(Condition::kParityEven, &nan);
-
-  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
-  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);
-
-  // Handle 0.0/-0.0.
-  if (is_min) {
-    if (is_double) {
-      __ orpd(out, op2);
-    } else {
-      __ orps(out, op2);
-    }
-  } else {
-    if (is_double) {
-      __ andpd(out, op2);
-    } else {
-      __ andps(out, op2);
-    }
-  }
-  __ jmp(&done);
-
-  // NaN handling.
-  __ Bind(&nan);
-  if (is_double) {
-    __ movsd(out, codegen->LiteralInt64Address(INT64_C(0x7FF8000000000000)));
-  } else {
-    __ movss(out, codegen->LiteralInt32Address(INT32_C(0x7FC00000)));
-  }
-  __ jmp(&done);
-
-  // out := op2;
-  __ Bind(&op2_label);
-  if (is_double) {
-    __ movsd(out, op2);
-  } else {
-    __ movss(out, op2);
-  }
-
-  // Done.
-  __ Bind(&done);
-}
-
-static void CreateFPFPToFP(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetInAt(1, Location::RequiresFpuRegister());
-  // The following is sub-optimal, but all we can do for now. It would be fine to also accept
-  // the second input to be the output (we can simply swap inputs).
-  locations->SetOut(Location::SameAsFirstInput());
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFP(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMinDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(
-      invoke->GetLocations(), /* is_min */ true, /* is_double */ true, GetAssembler(), codegen_);
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFP(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMinFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(
-      invoke->GetLocations(), /* is_min */ true, /* is_double */ false, GetAssembler(), codegen_);
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  CreateFPFPToFP(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMaxDoubleDouble(HInvoke* invoke) {
-  GenMinMaxFP(
-      invoke->GetLocations(), /* is_min */ false, /* is_double */ true, GetAssembler(), codegen_);
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  CreateFPFPToFP(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMaxFloatFloat(HInvoke* invoke) {
-  GenMinMaxFP(
-      invoke->GetLocations(), /* is_min */ false, /* is_double */ false, GetAssembler(), codegen_);
-}
-
-static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
-                      X86_64Assembler* assembler) {
-  Location op1_loc = locations->InAt(0);
-  Location op2_loc = locations->InAt(1);
-
-  // Shortcut for same input locations.
-  if (op1_loc.Equals(op2_loc)) {
-    // Can return immediately, as op1_loc == out_loc.
-    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
-    //       a copy here.
-    DCHECK(locations->Out().Equals(op1_loc));
-    return;
-  }
-
-  CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-  CpuRegister op2 = op2_loc.AsRegister<CpuRegister>();
-
-  //  (out := op1)
-  //  out <=? op2
-  //  if out is min jmp done
-  //  out := op2
-  // done:
-
-  if (is_long) {
-    __ cmpq(out, op2);
-  } else {
-    __ cmpl(out, op2);
-  }
-
-  __ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long);
-}
-
-static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::SameAsFirstInput());
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMinIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMinIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, /* is_long */ false, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMinLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMinLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ true, /* is_long */ true, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMaxIntInt(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ false, GetAssembler());
-}
-
-void IntrinsicLocationsBuilderX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
-  CreateIntIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorX86_64::VisitMathMaxLongLong(HInvoke* invoke) {
-  GenMinMax(invoke->GetLocations(), /* is_min */ false, /* is_long */ true, GetAssembler());
-}
-
 static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
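
The integer min/max removed from both x86 back ends reduces to a compare (or a sub/sbb pair
for long on 32-bit x86) plus conditional moves; the new shared HMin/HMax code generation
presumably emits a similar sequence. For x86-64, with op1 already in 'out'
(SameAsFirstInput), the removed code was:

    if (is_long) {
      __ cmpq(out, op2);
    } else {
      __ cmpl(out, op2);
    }
    // For min, replace 'out' with op2 when out > op2; for max, when out < op2.
    __ cmov(is_min ? Condition::kGreater : Condition::kLess, out, op2, is_long);
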
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 5a483e2..d3b081e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -334,29 +334,14 @@
 // Detect reductions of the following forms,
 //   x = x_phi + ..
 //   x = x_phi - ..
-//   x = max(x_phi, ..)
 //   x = min(x_phi, ..)
+//   x = max(x_phi, ..)
 static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
-  if (reduction->IsAdd()) {
+  if (reduction->IsAdd() || reduction->IsMin() || reduction->IsMax()) {
     return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
            (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
   } else if (reduction->IsSub()) {
     return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi);
-  } else if (reduction->IsInvokeStaticOrDirect()) {
-    switch (reduction->AsInvokeStaticOrDirect()->GetIntrinsic()) {
-      case Intrinsics::kMathMinIntInt:
-      case Intrinsics::kMathMinLongLong:
-      case Intrinsics::kMathMinFloatFloat:
-      case Intrinsics::kMathMinDoubleDouble:
-      case Intrinsics::kMathMaxIntInt:
-      case Intrinsics::kMathMaxLongLong:
-      case Intrinsics::kMathMaxFloatFloat:
-      case Intrinsics::kMathMaxDoubleDouble:
-        return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
-               (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
-      default:
-        return false;
-    }
   }
   return false;
 }
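
For example, a loop like the following (a C-like sketch; in the HIR, 'x' becomes the loop
phi and the HMin produced by the instruction simplifier consumes it) now matches the min
reduction form:

    #include <algorithm>
    #include <climits>

    int MinOfArray(const int* a, int n) {
      int x = INT_MAX;          // initial value of the reduction phi
      for (int i = 0; i < n; ++i) {
        x = std::min(x, a[i]);  // x = min(x_phi, ..)
      }
      return x;
    }
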
@@ -1322,50 +1307,34 @@
       }
       return true;
     }
-  } else if (instruction->IsInvokeStaticOrDirect()) {
-    // Accept particular intrinsics.
-    HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
-    switch (invoke->GetIntrinsic()) {
-      case Intrinsics::kMathMinIntInt:
-      case Intrinsics::kMathMinLongLong:
-      case Intrinsics::kMathMinFloatFloat:
-      case Intrinsics::kMathMinDoubleDouble:
-      case Intrinsics::kMathMaxIntInt:
-      case Intrinsics::kMathMaxLongLong:
-      case Intrinsics::kMathMaxFloatFloat:
-      case Intrinsics::kMathMaxDoubleDouble: {
-        // Deal with vector restrictions.
-        HInstruction* opa = instruction->InputAt(0);
-        HInstruction* opb = instruction->InputAt(1);
-        HInstruction* r = opa;
-        HInstruction* s = opb;
-        bool is_unsigned = false;
-        if (HasVectorRestrictions(restrictions, kNoMinMax)) {
-          return false;
-        } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
-                   !IsNarrowerOperands(opa, opb, type, &r, &s, &is_unsigned)) {
-          return false;  // reject, unless all operands are same-extension narrower
-        }
-        // Accept MIN/MAX(x, y) for vectorizable operands.
-        DCHECK(r != nullptr);
-        DCHECK(s != nullptr);
-        if (generate_code && vector_mode_ != kVector) {  // de-idiom
-          r = opa;
-          s = opb;
-        }
-        if (VectorizeUse(node, r, generate_code, type, restrictions) &&
-            VectorizeUse(node, s, generate_code, type, restrictions)) {
-          if (generate_code) {
-            GenerateVecOp(
-                instruction, vector_map_->Get(r), vector_map_->Get(s), type, is_unsigned);
-          }
-          return true;
-        }
-        return false;
+  } else if (instruction->IsMin() || instruction->IsMax()) {
+    // Deal with vector restrictions.
+    HInstruction* opa = instruction->InputAt(0);
+    HInstruction* opb = instruction->InputAt(1);
+    HInstruction* r = opa;
+    HInstruction* s = opb;
+    bool is_unsigned = false;
+    if (HasVectorRestrictions(restrictions, kNoMinMax)) {
+      return false;
+    } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
+               !IsNarrowerOperands(opa, opb, type, &r, &s, &is_unsigned)) {
+      return false;  // reject, unless all operands are same-extension narrower
+    }
+    // Accept MIN/MAX(x, y) for vectorizable operands.
+    DCHECK(r != nullptr);
+    DCHECK(s != nullptr);
+    if (generate_code && vector_mode_ != kVector) {  // de-idiom
+      r = opa;
+      s = opb;
+    }
+    if (VectorizeUse(node, r, generate_code, type, restrictions) &&
+        VectorizeUse(node, s, generate_code, type, restrictions)) {
+      if (generate_code) {
+        GenerateVecOp(
+            instruction, vector_map_->Get(r), vector_map_->Get(s), type, is_unsigned);
       }
-      default:
-        return false;
-    }  // switch
+      return true;
+    }
   }
   return false;
 }
@@ -1806,80 +1775,29 @@
       GENERATE_VEC(
         new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
         new (global_allocator_) HUShr(org_type, opa, opb, dex_pc));
+    case HInstruction::kMin:
+      GENERATE_VEC(
+        new (global_allocator_) HVecMin(global_allocator_,
+                                        opa,
+                                        opb,
+                                        HVecOperation::ToProperType(type, is_unsigned),
+                                        vector_length_,
+                                        dex_pc),
+        new (global_allocator_) HMin(org_type, opa, opb, dex_pc));
+    case HInstruction::kMax:
+      GENERATE_VEC(
+        new (global_allocator_) HVecMax(global_allocator_,
+                                        opa,
+                                        opb,
+                                        HVecOperation::ToProperType(type, is_unsigned),
+                                        vector_length_,
+                                        dex_pc),
+        new (global_allocator_) HMax(org_type, opa, opb, dex_pc));
     case HInstruction::kAbs:
       DCHECK(opb == nullptr);
       GENERATE_VEC(
         new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_, dex_pc),
         new (global_allocator_) HAbs(org_type, opa, dex_pc));
-    case HInstruction::kInvokeStaticOrDirect: {
-      HInvokeStaticOrDirect* invoke = org->AsInvokeStaticOrDirect();
-      if (vector_mode_ == kVector) {
-        switch (invoke->GetIntrinsic()) {
-          case Intrinsics::kMathMinIntInt:
-          case Intrinsics::kMathMinLongLong:
-          case Intrinsics::kMathMinFloatFloat:
-          case Intrinsics::kMathMinDoubleDouble: {
-            vector = new (global_allocator_)
-                HVecMin(global_allocator_,
-                        opa,
-                        opb,
-                        HVecOperation::ToProperType(type, is_unsigned),
-                        vector_length_,
-                        dex_pc);
-            break;
-          }
-          case Intrinsics::kMathMaxIntInt:
-          case Intrinsics::kMathMaxLongLong:
-          case Intrinsics::kMathMaxFloatFloat:
-          case Intrinsics::kMathMaxDoubleDouble: {
-            vector = new (global_allocator_)
-                HVecMax(global_allocator_,
-                        opa,
-                        opb,
-                        HVecOperation::ToProperType(type, is_unsigned),
-                        vector_length_,
-                        dex_pc);
-            break;
-          }
-          default:
-            LOG(FATAL) << "Unsupported SIMD intrinsic " << org->GetId();
-            UNREACHABLE();
-        }  // switch invoke
-      } else {
-        // In scalar code, simply clone the method invoke, and replace its operands with the
-        // corresponding new scalar instructions in the loop. The instruction will get an
-        // environment while being inserted from the instruction map in original program order.
-        DCHECK(vector_mode_ == kSequential);
-        size_t num_args = invoke->GetNumberOfArguments();
-        HInvokeStaticOrDirect* new_invoke = new (global_allocator_) HInvokeStaticOrDirect(
-            global_allocator_,
-            num_args,
-            invoke->GetType(),
-            invoke->GetDexPc(),
-            invoke->GetDexMethodIndex(),
-            invoke->GetResolvedMethod(),
-            invoke->GetDispatchInfo(),
-            invoke->GetInvokeType(),
-            invoke->GetTargetMethod(),
-            invoke->GetClinitCheckRequirement());
-        HInputsRef inputs = invoke->GetInputs();
-        size_t num_inputs = inputs.size();
-        DCHECK_LE(num_args, num_inputs);
-        DCHECK_EQ(num_inputs, new_invoke->GetInputs().size());  // both invokes agree
-        for (size_t index = 0; index < num_inputs; ++index) {
-          HInstruction* new_input = index < num_args
-              ? vector_map_->Get(inputs[index])
-              : inputs[index];  // beyond arguments: just pass through
-          new_invoke->SetArgumentAt(index, new_input);
-        }
-        new_invoke->SetIntrinsic(invoke->GetIntrinsic(),
-                                 kNeedsEnvironmentOrCache,
-                                 kNoSideEffects,
-                                 kNoThrow);
-        vector = new_invoke;
-      }
-      break;
-    }
     default:
       break;
   }  // switch
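
GENERATE_VEC, defined earlier in this file, picks one of its two argument expressions based
on the current vectorization mode; conceptually, each of the new cases expands to roughly:

    if (vector_mode_ == kVector) {
      vector = new (global_allocator_) HVecMin(/* ... */);  // SIMD form
    } else {
      DCHECK(vector_mode_ == kSequential);
      vector = new (global_allocator_) HMin(org_type, opa, opb, dex_pc);  // scalar clone
    }
    break;
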
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index db83689..c21bd65 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -227,11 +227,14 @@
   graph_->ClearDominanceInformation();
   graph_->BuildDominatorTree();
 
+  // BuildDominatorTree inserts a block between the loop header and the entry block.
+  EXPECT_EQ(header->GetPredecessors()[0]->GetSinglePredecessor(), entry_block_);
+
   // Check that after optimizations in BuildDominatorTree()/SimplifyCFG() phi inputs
   // are still mapped correctly to the block predecessors.
   for (size_t i = 0, e = phi->InputCount(); i < e; i++) {
     HInstruction* input = phi->InputAt(i);
-    ASSERT_TRUE(input->GetBlock()->Dominates(header->GetPredecessors()[i]));
+    EXPECT_TRUE(input->GetBlock()->Dominates(header->GetPredecessors()[i]));
   }
 }
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f6ba19f..d3212cb 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2891,6 +2891,8 @@
       return os << "BootImageLinkTimePcRelative";
     case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
       return os << "DirectAddress";
+    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo:
+      return os << "BootImageRelRo";
     case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry:
       return os << "BssEntry";
     case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
@@ -2925,7 +2927,7 @@
   }
   switch (GetLoadKind()) {
     case LoadKind::kBootImageAddress:
-    case LoadKind::kBootImageClassTable:
+    case LoadKind::kBootImageRelRo:
     case LoadKind::kJitTableAddress: {
       ScopedObjectAccess soa(Thread::Current());
       return GetClass().Get() == other_load_class->GetClass().Get();
@@ -2944,8 +2946,8 @@
       return os << "BootImageLinkTimePcRelative";
     case HLoadClass::LoadKind::kBootImageAddress:
       return os << "BootImageAddress";
-    case HLoadClass::LoadKind::kBootImageClassTable:
-      return os << "BootImageClassTable";
+    case HLoadClass::LoadKind::kBootImageRelRo:
+      return os << "BootImageRelRo";
     case HLoadClass::LoadKind::kBssEntry:
       return os << "BssEntry";
     case HLoadClass::LoadKind::kJitTableAddress:
@@ -2968,7 +2970,7 @@
   }
   switch (GetLoadKind()) {
     case LoadKind::kBootImageAddress:
-    case LoadKind::kBootImageInternTable:
+    case LoadKind::kBootImageRelRo:
     case LoadKind::kJitTableAddress: {
       ScopedObjectAccess soa(Thread::Current());
       return GetString().Get() == other_load_string->GetString().Get();
@@ -2984,8 +2986,8 @@
       return os << "BootImageLinkTimePcRelative";
     case HLoadString::LoadKind::kBootImageAddress:
       return os << "BootImageAddress";
-    case HLoadString::LoadKind::kBootImageInternTable:
-      return os << "BootImageInternTable";
+    case HLoadString::LoadKind::kBootImageRelRo:
+      return os << "BootImageRelRo";
     case HLoadString::LoadKind::kBssEntry:
       return os << "BssEntry";
     case HLoadString::LoadKind::kJitTableAddress:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 62550be..a8364e0 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1384,7 +1384,9 @@
   M(LoadException, Instruction)                                         \
   M(LoadString, Instruction)                                            \
   M(LongConstant, Constant)                                             \
+  M(Max, Instruction)                                                   \
   M(MemoryBarrier, Instruction)                                         \
+  M(Min, BinaryOperation)                                               \
   M(MonitorOperation, Instruction)                                      \
   M(Mul, BinaryOperation)                                               \
   M(NativeDebugInfo, Instruction)                                       \
@@ -4429,6 +4431,10 @@
     // Used for app->boot calls with non-relocatable image and for JIT-compiled calls.
     kDirectAddress,
 
+    // Load from an entry in the .data.bimg.rel.ro section using a PC-relative load.
+    // Used for app->boot calls with relocatable image.
+    kBootImageRelRo,
+
     // Load from an entry in the .bss section using a PC-relative load.
     // Used for classes outside boot image when .bss is accessible with a PC-relative load.
     kBssEntry,
@@ -4561,6 +4567,7 @@
   bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
   bool HasPcRelativeMethodLoadKind() const {
     return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
+           GetMethodLoadKind() == MethodLoadKind::kBootImageRelRo ||
            GetMethodLoadKind() == MethodLoadKind::kBssEntry;
   }
   bool HasCurrentMethodInput() const {
@@ -5017,6 +5024,76 @@
   DEFAULT_COPY_CONSTRUCTOR(Rem);
 };
 
+class HMin FINAL : public HBinaryOperation {
+ public:
+  HMin(DataType::Type result_type,
+       HInstruction* left,
+       HInstruction* right,
+       uint32_t dex_pc)
+      : HBinaryOperation(kMin, result_type, left, right, SideEffects::None(), dex_pc) {}
+
+  bool IsCommutative() const OVERRIDE { return true; }
+
+  // Evaluation for integral values.
+  template <typename T> static T ComputeIntegral(T x, T y) {
+    return (x <= y) ? x : y;
+  }
+
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetIntConstant(
+        ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetLongConstant(
+        ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  // TODO: Evaluation for floating-point values.
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+
+  DECLARE_INSTRUCTION(Min);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(Min);
+};
+
+class HMax FINAL : public HBinaryOperation {
+ public:
+  HMax(DataType::Type result_type,
+       HInstruction* left,
+       HInstruction* right,
+       uint32_t dex_pc)
+      : HBinaryOperation(kMax, result_type, left, right, SideEffects::None(), dex_pc) {}
+
+  bool IsCommutative() const OVERRIDE { return true; }
+
+  // Evaluation for integral values.
+  template <typename T> static T ComputeIntegral(T x, T y) {
+    return (x >= y) ? x : y;
+  }
+
+  HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetIntConstant(
+        ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+    return GetBlock()->GetGraph()->GetLongConstant(
+        ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
+  }
+  // TODO: Evaluation for floating-point values.
+  HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+                      HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+  HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+                      HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { return nullptr; }
+
+  DECLARE_INSTRUCTION(Max);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(Max);
+};
+
 class HAbs FINAL : public HUnaryOperation {
  public:
   HAbs(DataType::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc)
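
The floating-point Evaluate() overloads above return nullptr because Java's min/max cannot
be folded with a plain comparison: NaN operands must produce NaN, and -0.0 must order below
+0.0. A hypothetical folding helper honoring those rules (not part of this change) would
look like:

    #include <cmath>
    #include <limits>

    static double FoldMinDouble(double x, double y) {
      if (std::isnan(x) || std::isnan(y)) {
        return std::numeric_limits<double>::quiet_NaN();  // min(NaN, y) is NaN
      }
      if (x == 0.0 && y == 0.0) {
        return std::signbit(x) ? x : y;  // -0.0 is the smaller zero
      }
      return (x < y) ? x : y;
    }
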
@@ -6067,12 +6144,12 @@
     kBootImageLinkTimePcRelative,
 
     // Use a known boot image Class* address, embedded in the code by the codegen.
-    // Used for boot image classes referenced by apps in AOT- and JIT-compiled code.
+    // Used for boot image classes referenced by apps in JIT- and AOT-compiled code (non-PIC).
     kBootImageAddress,
 
-    // Use a PC-relative load from a boot image ClassTable mmapped into the .bss
-    // of the oat file.
-    kBootImageClassTable,
+    // Load from an entry in the .data.bimg.rel.ro section using a PC-relative load.
+    // Used for boot image classes referenced by apps in AOT-compiled code (PIC).
+    kBootImageRelRo,
 
     // Load from an entry in the .bss section using a PC-relative load.
     // Used for classes outside boot image when .bss is accessible with a PC-relative load.
@@ -6120,6 +6197,12 @@
     return GetPackedField<LoadKindField>();
   }
 
+  bool HasPcRelativeLoadKind() const {
+    return GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
+           GetLoadKind() == LoadKind::kBootImageRelRo ||
+           GetLoadKind() == LoadKind::kBssEntry;
+  }
+
   bool CanBeMoved() const OVERRIDE { return true; }
 
   bool InstructionDataEquals(const HInstruction* other) const;
@@ -6224,7 +6307,6 @@
   static bool HasTypeReference(LoadKind load_kind) {
     return load_kind == LoadKind::kReferrersClass ||
         load_kind == LoadKind::kBootImageLinkTimePcRelative ||
-        load_kind == LoadKind::kBootImageClassTable ||
         load_kind == LoadKind::kBssEntry ||
         load_kind == LoadKind::kRuntimeCall;
   }
@@ -6270,7 +6352,7 @@
   // including literal pool loads, which are PC-relative too.
   DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
          GetLoadKind() == LoadKind::kBootImageAddress ||
-         GetLoadKind() == LoadKind::kBootImageClassTable ||
+         GetLoadKind() == LoadKind::kBootImageRelRo ||
          GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
   DCHECK(special_input_.GetInstruction() == nullptr);
   special_input_ = HUserRecord<HInstruction*>(special_input);
@@ -6286,12 +6368,12 @@
     kBootImageLinkTimePcRelative,
 
     // Use a known boot image String* address, embedded in the code by the codegen.
-    // Used for boot image strings referenced by apps in AOT- and JIT-compiled code.
+    // Used for boot image strings referenced by apps in JIT- and AOT-compiled code (non-PIC).
     kBootImageAddress,
 
-    // Use a PC-relative load from a boot image InternTable mmapped into the .bss
-    // of the oat file.
-    kBootImageInternTable,
+    // Load from an entry in the .data.bimg.rel.ro section using a PC-relative load.
+    // Used for boot image strings referenced by apps in AOT-compiled code (PIC).
+    kBootImageRelRo,
 
     // Load from an entry in the .bss section using a PC-relative load.
     // Used for strings outside boot image when .bss is accessible with a PC-relative load.
@@ -6326,6 +6408,12 @@
     return GetPackedField<LoadKindField>();
   }
 
+  bool HasPcRelativeLoadKind() const {
+    return GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
+           GetLoadKind() == LoadKind::kBootImageRelRo ||
+           GetLoadKind() == LoadKind::kBssEntry;
+  }
+
   const DexFile& GetDexFile() const {
     return dex_file_;
   }
@@ -6354,7 +6442,7 @@
     LoadKind load_kind = GetLoadKind();
     if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
         load_kind == LoadKind::kBootImageAddress ||
-        load_kind == LoadKind::kBootImageInternTable ||
+        load_kind == LoadKind::kBootImageRelRo ||
         load_kind == LoadKind::kJitTableAddress) {
       return false;
     }
@@ -6432,7 +6520,7 @@
   // including literal pool loads, which are PC-relative too.
   DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
          GetLoadKind() == LoadKind::kBootImageAddress ||
-         GetLoadKind() == LoadKind::kBootImageInternTable ||
+         GetLoadKind() == LoadKind::kBootImageRelRo ||
          GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
   // HLoadString::GetInputRecords() returns an empty array at this point,
   // so use the GetInputRecords() from the base class to set the input record.
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index 9d53585..0102254 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -75,7 +75,7 @@
     switch (load_kind) {
       case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
       case HLoadClass::LoadKind::kBootImageAddress:
-      case HLoadClass::LoadKind::kBootImageClassTable:
+      case HLoadClass::LoadKind::kBootImageRelRo:
       case HLoadClass::LoadKind::kBssEntry:
         // Add a base register for PC-relative literals on R2.
         InitializePCRelativeBasePointer();
@@ -91,7 +91,7 @@
     switch (load_kind) {
       case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
       case HLoadString::LoadKind::kBootImageAddress:
-      case HLoadString::LoadKind::kBootImageInternTable:
+      case HLoadString::LoadKind::kBootImageRelRo:
       case HLoadString::LoadKind::kBssEntry:
         // Add a base register for PC-relative literals on R2.
         InitializePCRelativeBasePointer();
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index a3ca631..647336b 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -81,20 +81,14 @@
   }
 
   void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
-    HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
-    if (load_kind == HLoadClass::LoadKind::kBootImageLinkTimePcRelative ||
-        load_kind == HLoadClass::LoadKind::kBootImageClassTable ||
-        load_kind == HLoadClass::LoadKind::kBssEntry) {
+    if (load_class->HasPcRelativeLoadKind()) {
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_class);
       load_class->AddSpecialInput(method_address);
     }
   }
 
   void VisitLoadString(HLoadString* load_string) OVERRIDE {
-    HLoadString::LoadKind load_kind = load_string->GetLoadKind();
-    if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
-        load_kind == HLoadString::LoadKind::kBootImageInternTable ||
-        load_kind == HLoadString::LoadKind::kBssEntry) {
+    if (load_string->HasPcRelativeLoadKind()) {
       HX86ComputeBaseMethodAddress* method_address = GetPCRelativeBasePointer(load_string);
       load_string->AddSpecialInput(method_address);
     }
@@ -234,12 +228,13 @@
     switch (invoke->GetIntrinsic()) {
       case Intrinsics::kMathAbsDouble:
       case Intrinsics::kMathAbsFloat:
-        LOG(FATAL) << "Unreachable abs";
-        UNREACHABLE();
       case Intrinsics::kMathMaxDoubleDouble:
       case Intrinsics::kMathMaxFloatFloat:
       case Intrinsics::kMathMinDoubleDouble:
       case Intrinsics::kMathMinFloatFloat:
+        LOG(FATAL) << "Unreachable min/max/abs: intrinsics should have been lowered "
+                      "to IR nodes by the instruction simplifier";
+        UNREACHABLE();
       case Intrinsics::kMathRoundFloat:
         if (!base_added) {
           DCHECK(invoke_static_or_direct != nullptr);
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index cdbe483..bca538f 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -679,6 +679,8 @@
            instruction->IsCompare() ||
            instruction->IsCondition() ||
            instruction->IsDiv() ||
+           instruction->IsMin() ||
+           instruction->IsMax() ||
            instruction->IsMul() ||
            instruction->IsOr() ||
            instruction->IsRem() ||
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 1e49411..7dffb2a 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -125,8 +125,12 @@
              BootImageAOTCanEmbedMethod(callee, compiler_driver)) {
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative;
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+  } else if (IsInBootImage(callee)) {
+    // Use PC-relative access to the .data.bimg.rel.ro methods array.
+    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
+    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else {
-    // Use PC-relative access to the .bss methods arrays.
+    // Use PC-relative access to the .bss methods array.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBssEntry;
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   }
@@ -207,7 +211,7 @@
       } else if (is_in_boot_image) {
         // AOT app compilation, boot image class.
         if (codegen->GetCompilerOptions().GetCompilePic()) {
-          desired_load_kind = HLoadClass::LoadKind::kBootImageClassTable;
+          desired_load_kind = HLoadClass::LoadKind::kBootImageRelRo;
         } else {
           desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
         }
@@ -288,7 +292,7 @@
       string = class_linker->LookupString(string_index, dex_cache.Get());
       if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
         if (codegen->GetCompilerOptions().GetCompilePic()) {
-          desired_load_kind = HLoadString::LoadKind::kBootImageInternTable;
+          desired_load_kind = HLoadString::LoadKind::kBootImageRelRo;
         } else {
           desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
         }
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index a7c23be..04942f9 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -28,6 +28,11 @@
 using HBasicBlockSet = SuperblockCloner::HBasicBlockSet;
 using HEdgeSet = SuperblockCloner::HEdgeSet;
 
+// When doing peeling we can choose whether to keep the original loop (made of the original
+// basic blocks) and form the peeled iteration from the copy blocks (preserving the header), or
+// to transfer the original loop blocks to the peeled iteration and create a new loop from the
+// copy blocks. The same choice applies to unrolling.
+static const bool kPeelUnrollPreserveHeader = true;
+
 void HEdge::Dump(std::ostream& stream) const {
   stream << "(" << from_ << "->" << to_ << ")";
 }
@@ -70,20 +75,18 @@
   return true;
 }
 
-// Returns a common predecessor of loop1 and loop2 in the loop tree or nullptr if it is the whole
-// graph.
-static HLoopInformation* FindCommonLoop(HLoopInformation* loop1, HLoopInformation* loop2) {
-  if (loop1 != nullptr || loop2 != nullptr) {
-    return nullptr;
+// Returns whether two edge sets are equal (ArenaHashSet doesn't have an "Equal" method).
+static bool EdgeHashSetsEqual(const HEdgeSet* set1, const HEdgeSet* set2) {
+  if (set1->Size() != set2->Size()) {
+    return false;
   }
 
-  if (loop1->IsIn(*loop2)) {
-    return loop2;
-  } else if (loop2->IsIn(*loop1)) {
-    return loop1;
+  for (auto e : *set1) {
+    if (set2->Find(e) == set2->end()) {
+      return false;
+    }
   }
-  HBasicBlock* block = CommonDominator::ForPair(loop1->GetHeader(), loop2->GetHeader());
-  return block->GetLoopInformation();
+  return true;
 }
 
 // Calls HGraph::OrderLoopHeaderPredecessors for each loop in the graph.
@@ -95,6 +98,21 @@
   }
 }
 
+// Performs DFS on the subgraph (specified by 'bb_set') starting from the specified block; while
+// traversing, removes basic blocks from 'bb_set' (instead of the traditional DFS 'marking'), so
+// whatever is left in 'bb_set' after the traversal is not reachable from the start block.
+static void TraverseSubgraphForConnectivity(HBasicBlock* block, HBasicBlockSet* bb_set) {
+  DCHECK(bb_set->IsBitSet(block->GetBlockId()));
+  bb_set->ClearBit(block->GetBlockId());
+
+  for (HBasicBlock* succ : block->GetSuccessors()) {
+    if (bb_set->IsBitSet(succ->GetBlockId())) {
+      TraverseSubgraphForConnectivity(succ, bb_set);
+    }
+  }
+}
+
 //
 // Helpers for CloneBasicBlock.
 //
@@ -268,7 +286,6 @@
 }
 
 void SuperblockCloner::RecalculateBackEdgesInfo(ArenaBitVector* outer_loop_bb_set) {
-  // TODO: DCHECK that after the transformation the graph is connected.
   HBasicBlock* block_entry = nullptr;
 
   if (outer_loop_ == nullptr) {
@@ -424,6 +441,11 @@
       outer_loop_ = nullptr;
       break;
     }
+    if (outer_loop_ == nullptr) {
+      // We should not use the initial outer_loop_ value 'nullptr' when finding the outermost
+      // common loop.
+      outer_loop_ = loop_exit_loop_info;
+    }
     outer_loop_ = FindCommonLoop(outer_loop_, loop_exit_loop_info);
   }
 
@@ -507,6 +529,34 @@
 // Debug and logging methods.
 //
 
+// Debug function to dump the graph's basic blocks info.
+void DumpBB(HGraph* graph) {
+  for (HBasicBlock* bb : graph->GetBlocks()) {
+    if (bb == nullptr) {
+      continue;
+    }
+    std::cout << bb->GetBlockId();
+    std::cout << " <- ";
+    for (HBasicBlock* pred : bb->GetPredecessors()) {
+      std::cout << pred->GetBlockId() << " ";
+    }
+    std::cout << " -> ";
+    for (HBasicBlock* succ : bb->GetSuccessors()) {
+      std::cout << succ->GetBlockId() << " ";
+    }
+
+    if (bb->GetDominator()) {
+      std::cout << " dom " << bb->GetDominator()->GetBlockId();
+    }
+
+    if (bb->GetLoopInformation()) {
+      std::cout << "\tloop: " << bb->GetLoopInformation()->GetHeader()->GetBlockId();
+    }
+
+    std::cout << std::endl;
+  }
+}
+
 void SuperblockCloner::CheckInstructionInputsRemapping(HInstruction* orig_instr) {
   DCHECK(!orig_instr->IsPhi());
   HInstruction* copy_instr = GetInstrCopy(orig_instr);
@@ -542,6 +592,82 @@
   }
 }
 
+bool SuperblockCloner::CheckRemappingInfoIsValid() {
+  for (HEdge edge : *remap_orig_internal_) {
+    if (!IsEdgeValid(edge, graph_) ||
+        !IsInOrigBBSet(edge.GetFrom()) ||
+        !IsInOrigBBSet(edge.GetTo())) {
+      return false;
+    }
+  }
+
+  for (auto edge : *remap_copy_internal_) {
+    if (!IsEdgeValid(edge, graph_) ||
+        !IsInOrigBBSet(edge.GetFrom()) ||
+        !IsInOrigBBSet(edge.GetTo())) {
+      return false;
+    }
+  }
+
+  for (auto edge : *remap_incoming_) {
+    if (!IsEdgeValid(edge, graph_) ||
+        IsInOrigBBSet(edge.GetFrom()) ||
+        !IsInOrigBBSet(edge.GetTo())) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+void SuperblockCloner::VerifyGraph() {
+  for (auto it : *hir_map_) {
+    HInstruction* orig_instr = it.first;
+    HInstruction* copy_instr = it.second;
+    if (!orig_instr->IsPhi() && !orig_instr->IsSuspendCheck()) {
+      DCHECK(orig_instr->GetBlock() != nullptr);
+    }
+    if (!copy_instr->IsPhi() && !copy_instr->IsSuspendCheck()) {
+      DCHECK(copy_instr->GetBlock() != nullptr);
+    }
+  }
+
+  GraphChecker checker(graph_);
+  checker.Run();
+  if (!checker.IsValid()) {
+    for (const std::string& error : checker.GetErrors()) {
+      std::cout << error << std::endl;
+    }
+    LOG(FATAL) << "GraphChecker failed: superblock cloner\n";
+  }
+}
+
+void DumpBBSet(const ArenaBitVector* set) {
+  for (uint32_t idx : set->Indexes()) {
+    std::cout << idx << "\n";
+  }
+}
+
+void SuperblockCloner::DumpInputSets() {
+  std::cout << graph_->PrettyMethod() << "\n";
+  std::cout << "orig_bb_set:\n";
+  for (uint32_t idx : orig_bb_set_.Indexes()) {
+    std::cout << idx << "\n";
+  }
+  std::cout << "remap_orig_internal:\n";
+  for (HEdge e : *remap_orig_internal_) {
+    std::cout << e << "\n";
+  }
+  std::cout << "remap_copy_internal:\n";
+  for (auto e : *remap_copy_internal_) {
+    std::cout << e << "\n";
+  }
+  std::cout << "remap_incoming:\n";
+  for (auto e : *remap_incoming_) {
+    std::cout << e << "\n";
+  }
+}
+
 //
 // Public methods.
 //
@@ -569,6 +695,7 @@
   remap_orig_internal_ = remap_orig_internal;
   remap_copy_internal_ = remap_copy_internal;
   remap_incoming_ = remap_incoming;
+  DCHECK(CheckRemappingInfoIsValid());
 }
 
 bool SuperblockCloner::IsSubgraphClonable() const {
@@ -602,6 +729,63 @@
   return true;
 }
 
+bool SuperblockCloner::IsFastCase() const {
+  // Check that loop peeling/unrolling is being conducted:
+  // all the basic blocks must belong to the same loop.
+  bool found_first_block = false;
+  HLoopInformation* common_loop_info = nullptr;
+  for (uint32_t idx : orig_bb_set_.Indexes()) {
+    HBasicBlock* block = GetBlockById(idx);
+    HLoopInformation* block_loop_info = block->GetLoopInformation();
+    if (!found_first_block) {
+      // Remember the loop of the first block; all the other blocks must belong to it.
+      common_loop_info = block_loop_info;
+      found_first_block = true;
+    } else if (block_loop_info != common_loop_info) {
+      return false;
+    }
+  }
+
+  // Check that orig_bb_set_ corresponds to loop peeling/unrolling.
+  if (common_loop_info == nullptr || !orig_bb_set_.SameBitsSet(&common_loop_info->GetBlocks())) {
+    return false;
+  }
+
+  bool peeling_or_unrolling = false;
+  HEdgeSet remap_orig_internal(graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HEdgeSet remap_copy_internal(graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HEdgeSet remap_incoming(graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+
+  // Check whether remapping info corresponds to loop unrolling.
+  CollectRemappingInfoForPeelUnroll(/* to_unroll */ true,
+                                    common_loop_info,
+                                    &remap_orig_internal,
+                                    &remap_copy_internal,
+                                    &remap_incoming);
+
+  peeling_or_unrolling |= EdgeHashSetsEqual(&remap_orig_internal, remap_orig_internal_) &&
+                          EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
+                          EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
+
+  remap_orig_internal.Clear();
+  remap_copy_internal.Clear();
+  remap_incoming.Clear();
+
+  // Check whether remapping info corresponds to loop peeling.
+  CollectRemappingInfoForPeelUnroll(/* to_unroll */ false,
+                                    common_loop_info,
+                                    &remap_orig_internal,
+                                    &remap_copy_internal,
+                                    &remap_incoming);
+
+  peeling_or_unrolling |= EdgeHashSetsEqual(&remap_orig_internal, remap_orig_internal_) &&
+                          EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
+                          EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
+
+  return peeling_or_unrolling;
+}
+
 void SuperblockCloner::Run() {
   DCHECK(bb_map_ != nullptr);
   DCHECK(hir_map_ != nullptr);
@@ -609,6 +793,11 @@
          remap_copy_internal_ != nullptr &&
          remap_incoming_ != nullptr);
   DCHECK(IsSubgraphClonable());
+  DCHECK(IsFastCase());
+
+  if (kSuperblockClonerLogging) {
+    DumpInputSets();
+  }
 
   // Find an area in the graph for which control flow information should be adjusted.
   FindAndSetLocalAreaForAdjustments();
@@ -618,6 +807,19 @@
   // Connect the blocks together/remap successors and fix phis which are directly affected by the
   // remapping.
   RemapEdgesSuccessors();
+
+  // Check that the subgraph is connected.
+  if (kIsDebugBuild) {
+    HBasicBlockSet work_set(arena_, orig_bb_set_.GetSizeOf(), true, kArenaAllocSuperblockCloner);
+
+    // Add original and copy blocks of the subgraph to the work set.
+    for (auto iter : *bb_map_) {
+      work_set.SetBit(iter.first->GetBlockId());   // Original block.
+      work_set.SetBit(iter.second->GetBlockId());  // Copy block.
+    }
+    CHECK(IsSubgraphConnected(&work_set, graph_));
+  }
+
   // Recalculate dominance and backedge information which is required by the next stage.
   AdjustControlFlowInfo();
   // Fix data flow of the graph.
@@ -650,6 +852,10 @@
       }
     }
   }
+
+  if (kSuperblockClonerVerify) {
+    VerifyGraph();
+  }
 }
 
 HBasicBlock* SuperblockCloner::CloneBasicBlock(const HBasicBlock* orig_block) {
@@ -701,4 +907,125 @@
   }
 }
 
+//
+// Stand-alone methods.
+//
+
+void CollectRemappingInfoForPeelUnroll(bool to_unroll,
+                                       HLoopInformation* loop_info,
+                                       HEdgeSet* remap_orig_internal,
+                                       HEdgeSet* remap_copy_internal,
+                                       HEdgeSet* remap_incoming) {
+  DCHECK(loop_info != nullptr);
+  HBasicBlock* loop_header = loop_info->GetHeader();
+  // Set up the remap_orig_internal and remap_copy_internal edge sets from the loop back
+  // edges; which set(s) each back edge goes into depends on the transformation kind.
+  for (HBasicBlock* back_edge_block : loop_info->GetBackEdges()) {
+    HEdge e = HEdge(back_edge_block, loop_header);
+    if (to_unroll) {
+      remap_orig_internal->Insert(e);
+      remap_copy_internal->Insert(e);
+    } else {
+      if (kPeelUnrollPreserveHeader) {
+        remap_copy_internal->Insert(e);
+      } else {
+        remap_orig_internal->Insert(e);
+      }
+    }
+  }
+
+  // Set up remap_incoming edges set.
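+  // Peeling with a preserved header (and, symmetrically, unrolling without one)
+  // redirects the pre-header edge to the header copy; otherwise it stays intact.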
+  if (to_unroll != kPeelUnrollPreserveHeader) {
+    remap_incoming->Insert(HEdge(loop_info->GetPreHeader(), loop_header));
+  }
+}
+
+bool IsSubgraphConnected(SuperblockCloner::HBasicBlockSet* work_set, HGraph* graph) {
+  ArenaVector<HBasicBlock*> entry_blocks(
+      graph->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+
+  // Find subgraph entry blocks.
+  for (uint32_t orig_block_id : work_set->Indexes()) {
+    HBasicBlock* block = graph->GetBlocks()[orig_block_id];
+    for (HBasicBlock* pred : block->GetPredecessors()) {
+      if (!work_set->IsBitSet(pred->GetBlockId())) {
+        entry_blocks.push_back(block);
+        break;
+      }
+    }
+  }
+
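+  // The traversal clears the work set bit of every block it reaches.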
+  for (HBasicBlock* entry_block : entry_blocks) {
+    if (work_set->IsBitSet(entry_block->GetBlockId())) {
+      TraverseSubgraphForConnectivity(entry_block, work_set);
+    }
+  }
+
+  // Return whether every block has been visited, i.e. whether no unreachable blocks remain.
+  return work_set->NumSetBits() == 0;
+}
+
+HLoopInformation* FindCommonLoop(HLoopInformation* loop1, HLoopInformation* loop2) {
+  if (loop1 == nullptr || loop2 == nullptr) {
+    return nullptr;
+  }
+
+  if (loop1->IsIn(*loop2)) {
+    return loop2;
+  }
+
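+  // Otherwise walk up the loop hierarchy from loop1 (via pre-headers) until a loop
+  // enclosing loop2 is found.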
+  HLoopInformation* current = loop1;
+  while (current != nullptr && !loop2->IsIn(*current)) {
+    current = current->GetPreHeader()->GetLoopInformation();
+  }
+
+  return current;
+}
+
+bool PeelUnrollHelper::IsLoopClonable(HLoopInformation* loop_info) {
+  PeelUnrollHelper helper(loop_info, nullptr, nullptr);
+  return helper.IsLoopClonable();
+}
+
+HBasicBlock* PeelUnrollHelper::DoPeelUnrollImpl(bool to_unroll) {
+  // For now do peeling/unrolling only for natural loops.
+  DCHECK(!loop_info_->IsIrreducible());
+
+  HBasicBlock* loop_header = loop_info_->GetHeader();
+  HGraph* graph = loop_header->GetGraph();
+  ArenaAllocator allocator(graph->GetAllocator()->GetArenaPool());
+
+  HEdgeSet remap_orig_internal(graph->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HEdgeSet remap_copy_internal(graph->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HEdgeSet remap_incoming(graph->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+
+  CollectRemappingInfoForPeelUnroll(to_unroll,
+                                    loop_info_,
+                                    &remap_orig_internal,
+                                    &remap_copy_internal,
+                                    &remap_incoming);
+
+  cloner_.SetSuccessorRemappingInfo(&remap_orig_internal, &remap_copy_internal, &remap_incoming);
+  cloner_.Run();
+  cloner_.CleanUp();
+
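+  // With kPeelUnrollPreserveHeader the original header remains the loop header;
+  // otherwise its copy becomes the new one.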
+  return kPeelUnrollPreserveHeader ? loop_header : cloner_.GetBlockCopy(loop_header);
+}
+
+PeelUnrollSimpleHelper::PeelUnrollSimpleHelper(HLoopInformation* info)
+  : bb_map_(std::less<HBasicBlock*>(),
+            info->GetHeader()->GetGraph()->GetAllocator()->Adapter(kArenaAllocSuperblockCloner)),
+    hir_map_(std::less<HInstruction*>(),
+             info->GetHeader()->GetGraph()->GetAllocator()->Adapter(kArenaAllocSuperblockCloner)),
+    helper_(info, &bb_map_, &hir_map_) {}
+
 }  // namespace art
+
+namespace std {
+
+ostream& operator<<(ostream& os, const art::HEdge& e) {
+  e.Dump(os);
+  return os;
+}
+
+}  // namespace std
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index 23de692..19c9dd4 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -152,6 +152,15 @@
   // TODO: Start from small range of graph patterns then extend it.
   bool IsSubgraphClonable() const;
 
+  // Returns whether the selected subgraph satisfies the criteria for fast data flow
+  // resolution: an iterative DF algorithm is not required and dominators/instruction
+  // inputs can be trivially adjusted.
+  //
+  // TODO: formally describe the criteria.
+  //
+  // Loop peeling and unrolling satisfy the criteria.
+  bool IsFastCase() const;
+
   // Runs the copy algorithm according to the description.
   void Run();
 
@@ -202,11 +211,17 @@
     return IsInOrigBBSet(block->GetBlockId());
   }
 
+  // Returns the area (the outermost loop) in the graph for which control flow (back edges,
+  // loops, dominators) needs to be adjusted.
+  HLoopInformation* GetRegionToBeAdjusted() const {
+    return outer_loop_;
+  }
+
  private:
   // Fills the 'exits' vector with the subgraph exits.
   void SearchForSubgraphExits(ArenaVector<HBasicBlock*>* exits);
 
-  // Finds and records information about the area in the graph for which control-flow (back edges,
+  // Finds and records information about the area in the graph for which control flow (back edges,
   // loops, dominators) needs to be adjusted.
   void FindAndSetLocalAreaForAdjustments();
 
@@ -217,7 +232,7 @@
   // phis' nor instructions' inputs values are resolved.
   void RemapEdgesSuccessors();
 
-  // Adjusts control-flow (back edges, loops, dominators) for the local area defined by
+  // Adjusts control flow (back edges, loops, dominators) for the local area defined by
   // FindAndSetLocalAreaForAdjustments.
   void AdjustControlFlowInfo();
 
@@ -272,6 +287,9 @@
   // Debug and logging methods.
   //
   void CheckInstructionInputsRemapping(HInstruction* orig_instr);
+  bool CheckRemappingInfoIsValid();
+  void VerifyGraph();
+  void DumpInputSets();
 
   HBasicBlock* GetBlockById(uint32_t block_id) const {
     DCHECK(block_id < graph_->GetBlocks().size());
@@ -295,15 +313,94 @@
   HBasicBlockMap* bb_map_;
   // Correspondence map for instructions: (original HInstruction, copy HInstruction).
   HInstructionMap* hir_map_;
-  // Area in the graph for which control-flow (back edges, loops, dominators) needs to be adjusted.
+  // Area in the graph for which control flow (back edges, loops, dominators) needs to be adjusted.
   HLoopInformation* outer_loop_;
   HBasicBlockSet outer_loop_bb_set_;
 
   ART_FRIEND_TEST(SuperblockClonerTest, AdjustControlFlowInfo);
+  ART_FRIEND_TEST(SuperblockClonerTest, IsGraphConnected);
 
   DISALLOW_COPY_AND_ASSIGN(SuperblockCloner);
 };
 
+// Helper class to perform loop peeling/unrolling.
+//
+// This helper should be used when correspondence maps between original and copied
+// basic blocks/instructions are needed.
+class PeelUnrollHelper : public ValueObject {
+ public:
+  explicit PeelUnrollHelper(HLoopInformation* info,
+                            SuperblockCloner::HBasicBlockMap* bb_map,
+                            SuperblockCloner::HInstructionMap* hir_map) :
+      loop_info_(info),
+      cloner_(info->GetHeader()->GetGraph(), &info->GetBlocks(), bb_map, hir_map) {
+    // For now do peeling/unrolling only for natural loops.
+    DCHECK(!info->IsIrreducible());
+  }
+
+  // Returns whether the loop can be peeled/unrolled (static function).
+  static bool IsLoopClonable(HLoopInformation* loop_info);
+
+  // Returns whether the loop can be peeled/unrolled.
+  bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
+
+  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
+  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+  HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
+
+ protected:
+  // Applies loop peeling/unrolling for the loop specified by 'loop_info'.
+  //
+  // Depending on 'to_unroll' either unrolls the loop by 2 or peels one iteration from it.
+  HBasicBlock* DoPeelUnrollImpl(bool to_unroll);
+
+ private:
+  HLoopInformation* loop_info_;
+  SuperblockCloner cloner_;
+
+  DISALLOW_COPY_AND_ASSIGN(PeelUnrollHelper);
+};
+
+// Helper class to perform loop peeling/unrolling.
+//
+// This helper should be used when there is no need to get correspondence information between
+// original and copied basic blocks/instructions.
+class PeelUnrollSimpleHelper : public ValueObject {
+ public:
+  explicit PeelUnrollSimpleHelper(HLoopInformation* info);
+  bool IsLoopClonable() const { return helper_.IsLoopClonable(); }
+  HBasicBlock* DoPeeling() { return helper_.DoPeeling(); }
+  HBasicBlock* DoUnrolling() { return helper_.DoUnrolling(); }
+  HLoopInformation* GetRegionToBeAdjusted() const { return helper_.GetRegionToBeAdjusted(); }
+
+ private:
+  SuperblockCloner::HBasicBlockMap bb_map_;
+  SuperblockCloner::HInstructionMap hir_map_;
+  PeelUnrollHelper helper_;
+
+  DISALLOW_COPY_AND_ASSIGN(PeelUnrollSimpleHelper);
+};
+
+// Collects edge remapping info for loop peeling/unrolling for the loop specified by loop info.
+void CollectRemappingInfoForPeelUnroll(bool to_unroll,
+                                       HLoopInformation* loop_info,
+                                       SuperblockCloner::HEdgeSet* remap_orig_internal,
+                                       SuperblockCloner::HEdgeSet* remap_copy_internal,
+                                       SuperblockCloner::HEdgeSet* remap_incoming);
+
+// Returns whether blocks from 'work_set' are reachable from the rest of the graph.
+//
+// More precisely, returns whether there exists a set 'outer_entries' of basic blocks
+// such that:
+// - no block from 'outer_entries' is in 'work_set';
+// - every block from 'work_set' is reachable from at least one block in 'outer_entries'.
+//
+// After the function returns, 'work_set' contains only those blocks from the original
+// 'work_set' which are unreachable from the rest of the graph.
+bool IsSubgraphConnected(SuperblockCloner::HBasicBlockSet* work_set, HGraph* graph);
+
+// Returns the innermost common ancestor of 'loop1' and 'loop2' in the loop tree, or
+// nullptr if their only common ancestor is the whole graph.
+HLoopInformation* FindCommonLoop(HLoopInformation* loop1, HLoopInformation* loop2);
 }  // namespace art
 
 namespace std {
@@ -312,11 +409,12 @@
 struct hash<art::HEdge> {
   size_t operator()(art::HEdge const& x) const noexcept  {
     // Use Cantor pairing function as the hash function.
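+    // Compute in size_t so that the intermediate sum and product are evaluated in
+    // the native word size.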
-    uint32_t a = x.GetFrom();
-    uint32_t b = x.GetTo();
+    size_t a = x.GetFrom();
+    size_t b = x.GetTo();
     return (a + b) * (a + b + 1) / 2 + b;
   }
 };
+ostream& operator<<(ostream& os, const art::HEdge& e);
 
 }  // namespace std
 
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index f1b7bff..df2e517 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -25,54 +25,67 @@
 
 using HBasicBlockMap = SuperblockCloner::HBasicBlockMap;
 using HInstructionMap = SuperblockCloner::HInstructionMap;
+using HBasicBlockSet = SuperblockCloner::HBasicBlockSet;
+using HEdgeSet = SuperblockCloner::HEdgeSet;
 
 // This class provides methods and helpers for testing various cloning and copying routines:
 // individual instruction cloning and cloning of the more coarse-grain structures.
 class SuperblockClonerTest : public OptimizingUnitTest {
  public:
-  SuperblockClonerTest()
-      : graph_(CreateGraph()), entry_block_(nullptr), exit_block_(nullptr), parameter_(nullptr) {}
+  SuperblockClonerTest() : graph_(CreateGraph()),
+                           entry_block_(nullptr),
+                           return_block_(nullptr),
+                           exit_block_(nullptr),
+                           parameter_(nullptr) {}
 
-  void CreateBasicLoopControlFlow(/* out */ HBasicBlock** header_p,
-                                  /* out */ HBasicBlock** body_p) {
+  void InitGraph() {
     entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
     graph_->AddBlock(entry_block_);
     graph_->SetEntryBlock(entry_block_);
 
-    HBasicBlock* loop_preheader = new (GetAllocator()) HBasicBlock(graph_);
-    HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
-    HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
-    HBasicBlock* loop_exit = new (GetAllocator()) HBasicBlock(graph_);
-
-    graph_->AddBlock(loop_preheader);
-    graph_->AddBlock(loop_header);
-    graph_->AddBlock(loop_body);
-    graph_->AddBlock(loop_exit);
+    return_block_ = new (GetAllocator()) HBasicBlock(graph_);
+    graph_->AddBlock(return_block_);
 
     exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
     graph_->AddBlock(exit_block_);
     graph_->SetExitBlock(exit_block_);
 
-    entry_block_->AddSuccessor(loop_preheader);
-    loop_preheader->AddSuccessor(loop_header);
-    // Loop exit first to have a proper exit condition/target for HIf.
-    loop_header->AddSuccessor(loop_exit);
-    loop_header->AddSuccessor(loop_body);
-    loop_body->AddSuccessor(loop_header);
-    loop_exit->AddSuccessor(exit_block_);
-
-    *header_p = loop_header;
-    *body_p = loop_body;
+    entry_block_->AddSuccessor(return_block_);
+    return_block_->AddSuccessor(exit_block_);
 
     parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
                                                       dex::TypeIndex(0),
                                                       0,
                                                       DataType::Type::kInt32);
     entry_block_->AddInstruction(parameter_);
-    loop_exit->AddInstruction(new (GetAllocator()) HReturnVoid());
+    return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
     exit_block_->AddInstruction(new (GetAllocator()) HExit());
   }
 
+  void CreateBasicLoopControlFlow(HBasicBlock* position,
+                                  HBasicBlock* successor,
+                                  /* out */ HBasicBlock** header_p,
+                                  /* out */ HBasicBlock** body_p) {
+    HBasicBlock* loop_preheader = new (GetAllocator()) HBasicBlock(graph_);
+    HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
+    HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
+
+    graph_->AddBlock(loop_preheader);
+    graph_->AddBlock(loop_header);
+    graph_->AddBlock(loop_body);
+
+    position->ReplaceSuccessor(successor, loop_preheader);
+
+    loop_preheader->AddSuccessor(loop_header);
+    // Loop exit first to have a proper exit condition/target for HIf.
+    loop_header->AddSuccessor(successor);
+    loop_header->AddSuccessor(loop_body);
+    loop_body->AddSuccessor(loop_header);
+
+    *header_p = loop_header;
+    *body_p = loop_body;
+  }
+
   void CreateBasicLoopDataFlow(HBasicBlock* loop_header, HBasicBlock* loop_body) {
     uint32_t dex_pc = 0;
 
@@ -84,11 +97,12 @@
     // Header block.
     HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
     HInstruction* suspend_check = new (GetAllocator()) HSuspendCheck();
+    HInstruction* loop_check = new (GetAllocator()) HGreaterThanOrEqual(phi, const_128);
 
     loop_header->AddPhi(phi);
     loop_header->AddInstruction(suspend_check);
-    loop_header->AddInstruction(new (GetAllocator()) HGreaterThanOrEqual(phi, const_128));
-    loop_header->AddInstruction(new (GetAllocator()) HIf(parameter_));
+    loop_header->AddInstruction(loop_check);
+    loop_header->AddInstruction(new (GetAllocator()) HIf(loop_check));
 
     // Loop body block.
     HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter_, dex_pc);
@@ -97,8 +111,8 @@
     HInstruction* array_get =
         new (GetAllocator()) HArrayGet(null_check, bounds_check, DataType::Type::kInt32, dex_pc);
     HInstruction* add =  new (GetAllocator()) HAdd(DataType::Type::kInt32, array_get, const_1);
-    HInstruction* array_set =
-        new (GetAllocator()) HArraySet(null_check, bounds_check, add, DataType::Type::kInt32, dex_pc);
+    HInstruction* array_set = new (GetAllocator()) HArraySet(
+        null_check, bounds_check, add, DataType::Type::kInt32, dex_pc);
     HInstruction* induction_inc = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, const_1);
 
     loop_body->AddInstruction(null_check);
@@ -153,6 +167,7 @@
   HGraph* graph_;
 
   HBasicBlock* entry_block_;
+  HBasicBlock* return_block_;
   HBasicBlock* exit_block_;
 
   HInstruction* parameter_;
@@ -162,10 +177,11 @@
   HBasicBlock* header = nullptr;
   HBasicBlock* loop_body = nullptr;
 
-  CreateBasicLoopControlFlow(&header, &loop_body);
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
   CreateBasicLoopDataFlow(header, loop_body);
   graph_->BuildDominatorTree();
-  ASSERT_TRUE(CheckGraph());
+  EXPECT_TRUE(CheckGraph());
 
   HSuspendCheck* old_suspend_check = header->GetLoopInformation()->GetSuspendCheck();
   CloneAndReplaceInstructionVisitor visitor(graph_);
@@ -193,7 +209,8 @@
   HBasicBlock* loop_body = nullptr;
   ArenaAllocator* arena = graph_->GetAllocator();
 
-  CreateBasicLoopControlFlow(&header, &loop_body);
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
   CreateBasicLoopDataFlow(header, loop_body);
   graph_->BuildDominatorTree();
   ASSERT_TRUE(CheckGraph());
@@ -272,7 +289,8 @@
   HBasicBlock* loop_body = nullptr;
   ArenaAllocator* arena = graph_->GetAllocator();
 
-  CreateBasicLoopControlFlow(&header, &loop_body);
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
   CreateBasicLoopDataFlow(header, loop_body);
   graph_->BuildDominatorTree();
   ASSERT_TRUE(CheckGraph());
@@ -303,4 +321,487 @@
   EXPECT_TRUE(loop_info->IsBackEdge(*loop_body));
 }
 
+// Tests the IsSubgraphConnected function for the negative case.
+TEST_F(SuperblockClonerTest, IsGraphConnected) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+  ArenaAllocator* arena = graph_->GetAllocator();
+
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* unreachable_block = new (GetAllocator()) HBasicBlock(graph_);
+  graph_->AddBlock(unreachable_block);
+
+  HBasicBlockSet bb_set(
+      arena, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+  bb_set.SetBit(header->GetBlockId());
+  bb_set.SetBit(loop_body->GetBlockId());
+  bb_set.SetBit(unreachable_block->GetBlockId());
+
+  EXPECT_FALSE(IsSubgraphConnected(&bb_set, graph_));
+  EXPECT_EQ(bb_set.NumSetBits(), 1u);
+  EXPECT_TRUE(bb_set.IsBitSet(unreachable_block->GetBlockId()));
+}
+
+// Tests SuperblockCloner for loop peeling case.
+//
+// Control Flow of the example (ignoring critical edges splitting).
+//
+//       Before                    After
+//
+//         |B|                      |B|
+//          |                        |
+//          v                        v
+//         |1|                      |1|
+//          |                        |
+//          v                        v
+//         |2|<-\              (6) |2A|
+//         / \  /                   / \
+//        v   v/                   /   v
+//       |4|  |3|                 /   |3A| (7)
+//        |                      /     /
+//        v                     |     v
+//       |E|                     \   |2|<-\
+//                                \ / \   /
+//                                 v   v /
+//                                |4|  |3|
+//                                 |
+//                                 v
+//                                |E|
+TEST_F(SuperblockClonerTest, LoopPeeling) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  HBasicBlockMap bb_map(
+      std::less<HBasicBlock*>(), graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HInstructionMap hir_map(
+      std::less<HInstruction*>(), graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+
+  HLoopInformation* loop_info = header->GetLoopInformation();
+  PeelUnrollHelper helper(loop_info, &bb_map, &hir_map);
+  EXPECT_TRUE(helper.IsLoopClonable());
+  HBasicBlock* new_header = helper.DoPeeling();
+  HLoopInformation* new_loop_info = new_header->GetLoopInformation();
+
+  EXPECT_TRUE(CheckGraph());
+
+  // Check loop body successors.
+  EXPECT_EQ(loop_body->GetSingleSuccessor(), header);
+  EXPECT_EQ(bb_map.Get(loop_body)->GetSingleSuccessor(), header);
+
+  // Check loop structure.
+  EXPECT_EQ(header, new_header);
+  EXPECT_EQ(new_loop_info->GetHeader(), header);
+  EXPECT_EQ(new_loop_info->GetBackEdges().size(), 1u);
+  EXPECT_EQ(new_loop_info->GetBackEdges()[0], loop_body);
+}
+
+// Tests SuperblockCloner for loop unrolling case.
+//
+// Control Flow of the example (ignoring critical edges splitting).
+//
+//       Before                    After
+//
+//         |B|                      |B|
+//          |                        |
+//          v                        v
+//         |1|                      |1|
+//          |                        |
+//          v                        v
+//         |2|<-\               (6) |2A|<-\
+//         / \  /                   / \    \
+//        v   v/                   /   v    \
+//       |4|  |3|                 /(7)|3A|   |
+//        |                      /     /    /
+//        v                     |     v    /
+//       |E|                     \   |2|  /
+//                                \ / \  /
+//                                 v   v/
+//                                |4| |3|
+//                                 |
+//                                 v
+//                                |E|
+TEST_F(SuperblockClonerTest, LoopUnrolling) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  HBasicBlockMap bb_map(
+      std::less<HBasicBlock*>(), graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HInstructionMap hir_map(
+      std::less<HInstruction*>(), graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+
+  HLoopInformation* loop_info = header->GetLoopInformation();
+  PeelUnrollHelper helper(loop_info, &bb_map, &hir_map);
+  EXPECT_TRUE(helper.IsLoopClonable());
+  HBasicBlock* new_header = helper.DoUnrolling();
+
+  EXPECT_TRUE(CheckGraph());
+
+  // Check loop body successors.
+  EXPECT_EQ(loop_body->GetSingleSuccessor(), bb_map.Get(header));
+  EXPECT_EQ(bb_map.Get(loop_body)->GetSingleSuccessor(), header);
+
+  // Check loop structure.
+  EXPECT_EQ(header, new_header);
+  EXPECT_EQ(loop_info, new_header->GetLoopInformation());
+  EXPECT_EQ(loop_info->GetHeader(), new_header);
+  EXPECT_EQ(loop_info->GetBackEdges().size(), 1u);
+  EXPECT_EQ(loop_info->GetBackEdges()[0], bb_map.Get(loop_body));
+}
+
+// Checks that loop peeling works fine for a loop with multiple back edges. Tests that after
+// the transformation the loop has a single preheader.
+TEST_F(SuperblockClonerTest, LoopPeelingMultipleBackEdges) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+
+  // Transform a basic loop to have multiple back edges.
+  HBasicBlock* latch = header->GetSuccessors()[1];
+  HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph_);
+  HBasicBlock* temp1 = new (GetAllocator()) HBasicBlock(graph_);
+  graph_->AddBlock(if_block);
+  graph_->AddBlock(temp1);
+  header->ReplaceSuccessor(latch, if_block);
+  if_block->AddSuccessor(latch);
+  if_block->AddSuccessor(temp1);
+  temp1->AddSuccessor(header);
+
+  if_block->AddInstruction(new (GetAllocator()) HIf(parameter_));
+
+  HInstructionIterator it(header->GetPhis());
+  DCHECK(!it.Done());
+  HPhi* loop_phi = it.Current()->AsPhi();
+  HInstruction* temp_add = new (GetAllocator()) HAdd(DataType::Type::kInt32,
+                                                     loop_phi,
+                                                     graph_->GetIntConstant(2));
+  temp1->AddInstruction(temp_add);
+  temp1->AddInstruction(new (GetAllocator()) HGoto());
+  loop_phi->AddInput(temp_add);
+
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  HLoopInformation* loop_info = header->GetLoopInformation();
+  PeelUnrollSimpleHelper helper(loop_info);
+  HBasicBlock* new_header = helper.DoPeeling();
+  EXPECT_EQ(header, new_header);
+
+  EXPECT_TRUE(CheckGraph());
+  EXPECT_EQ(header->GetPredecessors().size(), 3u);
+}
+
+static void CheckLoopStructureForLoopPeelingNested(HBasicBlock* loop1_header,
+                                                   HBasicBlock* loop2_header,
+                                                   HBasicBlock* loop3_header) {
+  EXPECT_EQ(loop1_header->GetLoopInformation()->GetHeader(), loop1_header);
+  EXPECT_EQ(loop2_header->GetLoopInformation()->GetHeader(), loop2_header);
+  EXPECT_EQ(loop3_header->GetLoopInformation()->GetHeader(), loop3_header);
+  EXPECT_EQ(loop1_header->GetLoopInformation()->GetPreHeader()->GetLoopInformation(), nullptr);
+  EXPECT_EQ(loop2_header->GetLoopInformation()->GetPreHeader()->GetLoopInformation(), nullptr);
+  EXPECT_EQ(loop3_header->GetLoopInformation()->GetPreHeader()->GetLoopInformation()->GetHeader(),
+            loop2_header);
+}
+
+TEST_F(SuperblockClonerTest, LoopPeelingNested) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+
+  // Create the following nested structure of loops
+  //   Headers:  1    2 3
+  //             [ ], [ [ ] ]
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop1_header = header;
+
+  CreateBasicLoopControlFlow(header, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop2_header = header;
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop3_header = header;
+
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  HLoopInformation* loop2_info_before = loop2_header->GetLoopInformation();
+  HLoopInformation* loop3_info_before = loop3_header->GetLoopInformation();
+
+  // Check nested loops structure.
+  CheckLoopStructureForLoopPeelingNested(loop1_header, loop2_header, loop3_header);
+  PeelUnrollSimpleHelper helper(loop1_header->GetLoopInformation());
+  helper.DoPeeling();
+  // Check that nested loops structure has not changed after the transformation.
+  CheckLoopStructureForLoopPeelingNested(loop1_header, loop2_header, loop3_header);
+
+  // Test that the loop info is preserved.
+  EXPECT_EQ(loop2_info_before, loop2_header->GetLoopInformation());
+  EXPECT_EQ(loop3_info_before, loop3_header->GetLoopInformation());
+
+  EXPECT_EQ(loop3_info_before->GetPreHeader()->GetLoopInformation(), loop2_info_before);
+  EXPECT_EQ(loop2_info_before->GetPreHeader()->GetLoopInformation(), nullptr);
+
+  EXPECT_EQ(helper.GetRegionToBeAdjusted(), nullptr);
+
+  EXPECT_TRUE(CheckGraph());
+}
+
+// Checks that the loop population is correctly propagated after an inner loop is peeled.
+TEST_F(SuperblockClonerTest, OuterLoopPopulationAfterInnerPeeled) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+
+  // Create the following nested structure of loops
+  //   Headers:  1 2 3        4
+  //             [ [ [ ] ] ], [ ]
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop1_header = header;
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop2_header = header;
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop3_header = header;
+
+  CreateBasicLoopControlFlow(loop1_header, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop4_header = header;
+
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  PeelUnrollSimpleHelper helper(loop3_header->GetLoopInformation());
+  helper.DoPeeling();
+  HLoopInformation* loop1 = loop1_header->GetLoopInformation();
+  HLoopInformation* loop2 = loop2_header->GetLoopInformation();
+  HLoopInformation* loop3 = loop3_header->GetLoopInformation();
+  HLoopInformation* loop4 = loop4_header->GetLoopInformation();
+
+  EXPECT_TRUE(loop1->Contains(*loop2_header));
+  EXPECT_TRUE(loop1->Contains(*loop3_header));
+  EXPECT_TRUE(loop1->Contains(*loop3_header->GetLoopInformation()->GetPreHeader()));
+
+  // Check that loop4 info has not been touched after local run of AnalyzeLoops.
+  EXPECT_EQ(loop4, loop4_header->GetLoopInformation());
+
+  EXPECT_TRUE(loop1->IsIn(*loop1));
+  EXPECT_TRUE(loop2->IsIn(*loop1));
+  EXPECT_TRUE(loop3->IsIn(*loop1));
+  EXPECT_TRUE(loop3->IsIn(*loop2));
+  EXPECT_TRUE(!loop4->IsIn(*loop1));
+
+  EXPECT_EQ(loop4->GetPreHeader()->GetLoopInformation(), nullptr);
+
+  EXPECT_EQ(helper.GetRegionToBeAdjusted(), loop2);
+
+  EXPECT_TRUE(CheckGraph());
+}
+
+// Checks the case when an inner loop has an exit not to its immediate outer loop but to some
+// other loop in the hierarchy. Loop population information must be valid after loop peeling.
+TEST_F(SuperblockClonerTest, NestedCaseExitToOutermost) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+
+  // Create the following nested structure of loops then peel loop3.
+  //   Headers:  1 2 3
+  //             [ [ [ ] ] ]
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop1_header = header;
+  HBasicBlock* loop_body1 = loop_body;
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop3_header = header;
+  HBasicBlock* loop_body3 = loop_body;
+
+  // Change the loop3 - insert an exit which leads to loop1.
+  HBasicBlock* loop3_extra_if_block = new (GetAllocator()) HBasicBlock(graph_);
+  graph_->AddBlock(loop3_extra_if_block);
+  loop3_extra_if_block->AddInstruction(new (GetAllocator()) HIf(parameter_));
+
+  loop3_header->ReplaceSuccessor(loop_body3, loop3_extra_if_block);
+  loop3_extra_if_block->AddSuccessor(loop_body1);  // Long exit.
+  loop3_extra_if_block->AddSuccessor(loop_body3);
+
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  HBasicBlock* loop3_long_exit = loop3_extra_if_block->GetSuccessors()[0];
+  EXPECT_TRUE(loop1_header->GetLoopInformation()->Contains(*loop3_long_exit));
+
+  PeelUnrollSimpleHelper helper(loop3_header->GetLoopInformation());
+  helper.DoPeeling();
+
+  HLoopInformation* loop1 = loop1_header->GetLoopInformation();
+  // Check that after the transformation the local area for CF adjustments has been chosen
+  // correctly and loop population has been updated.
+  loop3_long_exit = loop3_extra_if_block->GetSuccessors()[0];
+  EXPECT_TRUE(loop1->Contains(*loop3_long_exit));
+
+  EXPECT_EQ(helper.GetRegionToBeAdjusted(), loop1);
+
+  EXPECT_TRUE(loop1->Contains(*loop3_header));
+  EXPECT_TRUE(loop1->Contains(*loop3_header->GetLoopInformation()->GetPreHeader()));
+
+  EXPECT_TRUE(CheckGraph());
+}
+
+TEST_F(SuperblockClonerTest, FastCaseCheck) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+  ArenaAllocator* arena = graph_->GetAllocator();
+
+  InitGraph();
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  graph_->BuildDominatorTree();
+
+  HLoopInformation* loop_info = header->GetLoopInformation();
+
+  ArenaBitVector orig_bb_set(
+      arena, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+  orig_bb_set.Union(&loop_info->GetBlocks());
+
+  HEdgeSet remap_orig_internal(graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HEdgeSet remap_copy_internal(graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+  HEdgeSet remap_incoming(graph_->GetAllocator()->Adapter(kArenaAllocSuperblockCloner));
+
+  CollectRemappingInfoForPeelUnroll(true,
+                                    loop_info,
+                                    &remap_orig_internal,
+                                    &remap_copy_internal,
+                                    &remap_incoming);
+
+  // Extend the subgraph with the pre-header so that it no longer matches the
+  // peeling/unrolling shape.
+  HBasicBlock* preheader = loop_info->GetPreHeader();
+  orig_bb_set.SetBit(preheader->GetBlockId());
+
+  // Adjust incoming edges.
+  remap_incoming.Clear();
+  remap_incoming.Insert(HEdge(preheader->GetSinglePredecessor(), preheader));
+
+  HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
+  HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
+
+  SuperblockCloner cloner(graph_,
+                          &orig_bb_set,
+                          &bb_map,
+                          &hir_map);
+  cloner.SetSuccessorRemappingInfo(&remap_orig_internal, &remap_copy_internal, &remap_incoming);
+
+  EXPECT_FALSE(cloner.IsFastCase());
+}
+
+// Helper for FindCommonLoop which also checks that FindCommonLoop is symmetric.
+static HLoopInformation* FindCommonLoopCheck(HLoopInformation* loop1, HLoopInformation* loop2) {
+  HLoopInformation* common_loop12 = FindCommonLoop(loop1, loop2);
+  HLoopInformation* common_loop21 = FindCommonLoop(loop2, loop1);
+  EXPECT_EQ(common_loop21, common_loop12);
+  return common_loop12;
+}
+
+// Tests FindCommonLoop function on a loop nest.
+TEST_F(SuperblockClonerTest, FindCommonLoop) {
+  HBasicBlock* header = nullptr;
+  HBasicBlock* loop_body = nullptr;
+
+  InitGraph();
+
+  // Create the following nested structure of loops
+  //   Headers:  1 2 3      4      5
+  //             [ [ [ ] ], [ ] ], [ ]
+  CreateBasicLoopControlFlow(entry_block_, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop1_header = header;
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop2_header = header;
+
+  CreateBasicLoopControlFlow(header, header->GetSuccessors()[1], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop3_header = header;
+
+  CreateBasicLoopControlFlow(loop2_header, loop2_header->GetSuccessors()[0], &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop4_header = header;
+
+  CreateBasicLoopControlFlow(loop1_header, return_block_, &header, &loop_body);
+  CreateBasicLoopDataFlow(header, loop_body);
+  HBasicBlock* loop5_header = header;
+
+  graph_->BuildDominatorTree();
+  EXPECT_TRUE(CheckGraph());
+
+  HLoopInformation* loop1 = loop1_header->GetLoopInformation();
+  HLoopInformation* loop2 = loop2_header->GetLoopInformation();
+  HLoopInformation* loop3 = loop3_header->GetLoopInformation();
+  HLoopInformation* loop4 = loop4_header->GetLoopInformation();
+  HLoopInformation* loop5 = loop5_header->GetLoopInformation();
+
+  EXPECT_TRUE(loop1->IsIn(*loop1));
+  EXPECT_TRUE(loop2->IsIn(*loop1));
+  EXPECT_TRUE(loop3->IsIn(*loop1));
+  EXPECT_TRUE(loop3->IsIn(*loop2));
+  EXPECT_TRUE(loop4->IsIn(*loop1));
+
+  EXPECT_FALSE(loop5->IsIn(*loop1));
+  EXPECT_FALSE(loop4->IsIn(*loop2));
+  EXPECT_FALSE(loop4->IsIn(*loop3));
+
+  EXPECT_EQ(loop1->GetPreHeader()->GetLoopInformation(), nullptr);
+  EXPECT_EQ(loop4->GetPreHeader()->GetLoopInformation(), loop1);
+
+  EXPECT_EQ(FindCommonLoopCheck(nullptr, nullptr), nullptr);
+  EXPECT_EQ(FindCommonLoopCheck(loop2, nullptr), nullptr);
+
+  EXPECT_EQ(FindCommonLoopCheck(loop1, loop1), loop1);
+  EXPECT_EQ(FindCommonLoopCheck(loop1, loop2), loop1);
+  EXPECT_EQ(FindCommonLoopCheck(loop1, loop3), loop1);
+  EXPECT_EQ(FindCommonLoopCheck(loop1, loop4), loop1);
+  EXPECT_EQ(FindCommonLoopCheck(loop1, loop5), nullptr);
+
+  EXPECT_EQ(FindCommonLoopCheck(loop2, loop3), loop2);
+  EXPECT_EQ(FindCommonLoopCheck(loop2, loop4), loop1);
+  EXPECT_EQ(FindCommonLoopCheck(loop2, loop5), nullptr);
+
+  EXPECT_EQ(FindCommonLoopCheck(loop3, loop4), loop1);
+  EXPECT_EQ(FindCommonLoopCheck(loop3, loop5), nullptr);
+
+  EXPECT_EQ(FindCommonLoopCheck(loop4, loop5), nullptr);
+
+  EXPECT_EQ(FindCommonLoopCheck(loop5, loop5), loop5);
+}
+
 }  // namespace art
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 2218ef9..b2ad490 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -2793,6 +2793,26 @@
   DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
 }
 
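+// Vector population count (MSA pcnt.df): each element of 'wd' receives the number of
+// set bits in the corresponding element of 'ws'.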
+void MipsAssembler::PcntB(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  DsFsmInstr(EmitMsa2R(0xc1, 0x0, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
+}
+
+void MipsAssembler::PcntH(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  DsFsmInstr(EmitMsa2R(0xc1, 0x1, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
+}
+
+void MipsAssembler::PcntW(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  DsFsmInstr(EmitMsa2R(0xc1, 0x2, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
+}
+
+void MipsAssembler::PcntD(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  DsFsmInstr(EmitMsa2R(0xc1, 0x3, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
+}
+
 void MipsAssembler::ReplicateFPToVectorRegister(VectorRegister dst,
                                                 FRegister src,
                                                 bool is_double) {
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 7de8e2e..c6ce62b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -756,6 +756,11 @@
   void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
   void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
 
+  void PcntB(VectorRegister wd, VectorRegister ws);
+  void PcntH(VectorRegister wd, VectorRegister ws);
+  void PcntW(VectorRegister wd, VectorRegister ws);
+  void PcntD(VectorRegister wd, VectorRegister ws);
+
   // Helper for replicating floating point value in all destination elements.
   void ReplicateFPToVectorRegister(VectorRegister dst, FRegister src, bool is_double);
 
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 937ee25..691c33f 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -2277,6 +2277,22 @@
   DriverStr(RepeatVR(&mips::MipsAssembler::FillW, "fill.w ${reg1}, ${reg2}"), "fill.w");
 }
 
+TEST_F(AssemblerMIPS32r6Test, PcntB) {
+  DriverStr(RepeatVV(&mips::MipsAssembler::PcntB, "pcnt.b ${reg1}, ${reg2}"), "pcnt.b");
+}
+
+TEST_F(AssemblerMIPS32r6Test, PcntH) {
+  DriverStr(RepeatVV(&mips::MipsAssembler::PcntH, "pcnt.h ${reg1}, ${reg2}"), "pcnt.h");
+}
+
+TEST_F(AssemblerMIPS32r6Test, PcntW) {
+  DriverStr(RepeatVV(&mips::MipsAssembler::PcntW, "pcnt.w ${reg1}, ${reg2}"), "pcnt.w");
+}
+
+TEST_F(AssemblerMIPS32r6Test, PcntD) {
+  DriverStr(RepeatVV(&mips::MipsAssembler::PcntD, "pcnt.d ${reg1}, ${reg2}"), "pcnt.d");
+}
+
 TEST_F(AssemblerMIPS32r6Test, LdiB) {
   DriverStr(RepeatVIb(&mips::MipsAssembler::LdiB, -8, "ldi.b ${reg}, {imm}"), "ldi.b");
 }
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index e1b0e75..5a817fa 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -2279,6 +2279,26 @@
   EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15);
 }
 
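+// Vector population count (MSA pcnt.df): each element of 'wd' receives the number of
+// set bits in the corresponding element of 'ws'.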
+void Mips64Assembler::PcntB(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  EmitMsa2R(0xc1, 0x0, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::PcntH(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  EmitMsa2R(0xc1, 0x1, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::PcntW(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  EmitMsa2R(0xc1, 0x2, ws, wd, 0x1e);
+}
+
+void Mips64Assembler::PcntD(VectorRegister wd, VectorRegister ws) {
+  CHECK(HasMsa());
+  EmitMsa2R(0xc1, 0x3, ws, wd, 0x1e);
+}
+
 void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
                                                   FpuRegister src,
                                                   bool is_double) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 7a61f39..542dbaf 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -863,6 +863,11 @@
   void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
   void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
 
+  void PcntB(VectorRegister wd, VectorRegister ws);
+  void PcntH(VectorRegister wd, VectorRegister ws);
+  void PcntW(VectorRegister wd, VectorRegister ws);
+  void PcntD(VectorRegister wd, VectorRegister ws);
+
   // Helper for replicating floating point value in all destination elements.
   void ReplicateFPToVectorRegister(VectorRegister dst, FpuRegister src, bool is_double);
 
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index b0e1d91..fb5f12b 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -3529,6 +3529,22 @@
   DriverStr(RepeatVR(&mips64::Mips64Assembler::FillD, "fill.d ${reg1}, ${reg2}"), "fill.d");
 }
 
+TEST_F(AssemblerMIPS64Test, PcntB) {
+  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntB, "pcnt.b ${reg1}, ${reg2}"), "pcnt.b");
+}
+
+TEST_F(AssemblerMIPS64Test, PcntH) {
+  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntH, "pcnt.h ${reg1}, ${reg2}"), "pcnt.h");
+}
+
+TEST_F(AssemblerMIPS64Test, PcntW) {
+  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntW, "pcnt.w ${reg1}, ${reg2}"), "pcnt.w");
+}
+
+TEST_F(AssemblerMIPS64Test, PcntD) {
+  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntD, "pcnt.d ${reg1}, ${reg2}"), "pcnt.d");
+}
+
 TEST_F(AssemblerMIPS64Test, LdiB) {
   DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiB, -8, "ldi.b ${reg}, {imm}"), "ldi.b");
 }
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index ea160c8..42c2541 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -913,6 +913,78 @@
 }
 
 
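+// Packed add/subtract with saturation: 'us' variants saturate as unsigned, 's'
+// variants as signed, on byte ('b') or word ('w') elements (SSE2).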
+void X86Assembler::paddusb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xDC);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::paddsb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xEC);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::paddusw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xDD);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::paddsw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xED);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubusb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xD8);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubsb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xE8);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubusw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xD9);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::psubsw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0xE9);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
 void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a085677..22eaedc 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -449,6 +449,15 @@
   void paddq(XmmRegister dst, XmmRegister src);
   void psubq(XmmRegister dst, XmmRegister src);
 
+  void paddusb(XmmRegister dst, XmmRegister src);
+  void paddsb(XmmRegister dst, XmmRegister src);
+  void paddusw(XmmRegister dst, XmmRegister src);
+  void paddsw(XmmRegister dst, XmmRegister src);
+  void psubusb(XmmRegister dst, XmmRegister src);
+  void psubsb(XmmRegister dst, XmmRegister src);
+  void psubusw(XmmRegister dst, XmmRegister src);
+  void psubsw(XmmRegister dst, XmmRegister src);
+
   void cvtsi2ss(XmmRegister dst, Register src);
   void cvtsi2sd(XmmRegister dst, Register src);
 
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 2fd1b27..8f72db7 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -600,6 +600,38 @@
   DriverStr(RepeatFF(&x86::X86Assembler::psubq, "psubq %{reg2}, %{reg1}"), "psubq");
 }
 
+TEST_F(AssemblerX86Test, PAddUSB) {
+  DriverStr(RepeatFF(&x86::X86Assembler::paddusb, "paddusb %{reg2}, %{reg1}"), "paddusb");
+}
+
+TEST_F(AssemblerX86Test, PAddSB) {
+  DriverStr(RepeatFF(&x86::X86Assembler::paddsb, "paddsb %{reg2}, %{reg1}"), "paddsb");
+}
+
+TEST_F(AssemblerX86Test, PAddUSW) {
+  DriverStr(RepeatFF(&x86::X86Assembler::paddusw, "paddusw %{reg2}, %{reg1}"), "paddusw");
+}
+
+TEST_F(AssemblerX86Test, PAddSW) {
+  DriverStr(RepeatFF(&x86::X86Assembler::paddsw, "paddsw %{reg2}, %{reg1}"), "paddsw");
+}
+
+TEST_F(AssemblerX86Test, PSubUSB) {
+  DriverStr(RepeatFF(&x86::X86Assembler::psubusb, "psubusb %{reg2}, %{reg1}"), "psubusb");
+}
+
+TEST_F(AssemblerX86Test, PSubSB) {
+  DriverStr(RepeatFF(&x86::X86Assembler::psubsb, "psubsb %{reg2}, %{reg1}"), "psubsb");
+}
+
+TEST_F(AssemblerX86Test, PSubUSW) {
+  DriverStr(RepeatFF(&x86::X86Assembler::psubusw, "psubusw %{reg2}, %{reg1}"), "psubusw");
+}
+
+TEST_F(AssemblerX86Test, PSubSW) {
+  DriverStr(RepeatFF(&x86::X86Assembler::psubsw, "psubsw %{reg2}, %{reg1}"), "psubsw");
+}
+
 TEST_F(AssemblerX86Test, XorPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::xorpd, "xorpd %{reg2}, %{reg1}"), "xorpd");
 }
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ff5a357..c6e16e7 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1011,6 +1011,86 @@
 }
 
 
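+// Packed add/subtract with saturation, as in the x86 assembler; an optional REX
+// prefix is emitted for the extended registers xmm8-xmm15.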
+void X86_64Assembler::paddusb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xDC);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::paddsb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xEC);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::paddusw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xDD);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::paddsw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xED);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::psubusb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xD8);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::psubsb(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xE8);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::psubusw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xD9);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
+void X86_64Assembler::psubsw(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitOptionalRex32(dst, src);
+  EmitUint8(0x0F);
+  EmitUint8(0xE9);
+  EmitXmmRegisterOperand(dst.LowBits(), src);
+}
+
+
 void X86_64Assembler::cvtsi2ss(XmmRegister dst, CpuRegister src) {
   cvtsi2ss(dst, src, false);
 }
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 7a5fdb5..ab761fb 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -485,6 +485,15 @@
   void paddq(XmmRegister dst, XmmRegister src);
   void psubq(XmmRegister dst, XmmRegister src);
 
+  void paddusb(XmmRegister dst, XmmRegister src);
+  void paddsb(XmmRegister dst, XmmRegister src);
+  void paddusw(XmmRegister dst, XmmRegister src);
+  void paddsw(XmmRegister dst, XmmRegister src);
+  void psubusb(XmmRegister dst, XmmRegister src);
+  void psubsb(XmmRegister dst, XmmRegister src);
+  void psubusw(XmmRegister dst, XmmRegister src);
+  void psubsw(XmmRegister dst, XmmRegister src);
+
   void cvtsi2ss(XmmRegister dst, CpuRegister src);  // Note: this is the r/m32 version.
   void cvtsi2ss(XmmRegister dst, CpuRegister src, bool is64bit);
   void cvtsi2ss(XmmRegister dst, const Address& src, bool is64bit);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 6b1e53c..104e215 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1282,6 +1282,38 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubq, "psubq %{reg2}, %{reg1}"), "psubq");
 }
 
+TEST_F(AssemblerX86_64Test, Paddusb) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddusb, "paddusb %{reg2}, %{reg1}"), "paddusb");
+}
+
+TEST_F(AssemblerX86_64Test, Paddsb) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddsb, "paddsb %{reg2}, %{reg1}"), "paddsb");
+}
+
+TEST_F(AssemblerX86_64Test, Paddusw) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddusw, "paddusw %{reg2}, %{reg1}"), "paddusw");
+}
+
+TEST_F(AssemblerX86_64Test, Paddsw) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddsw, "paddsw %{reg2}, %{reg1}"), "paddsw");
+}
+
+TEST_F(AssemblerX86_64Test, Psubusb) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubusb, "psubusb %{reg2}, %{reg1}"), "psubusb");
+}
+
+TEST_F(AssemblerX86_64Test, Psubsb) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubsb, "psubsb %{reg2}, %{reg1}"), "psubsb");
+}
+
+TEST_F(AssemblerX86_64Test, Psubusw) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubusw, "psubusw %{reg2}, %{reg1}"), "psubusw");
+}
+
+TEST_F(AssemblerX86_64Test, Psubsw) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubsw, "psubsw %{reg2}, %{reg1}"), "psubsw");
+}
+
 TEST_F(AssemblerX86_64Test, Cvtsi2ss) {
   DriverStr(RepeatFr(&x86_64::X86_64Assembler::cvtsi2ss, "cvtsi2ss %{reg2}, %{reg1}"), "cvtsi2ss");
 }
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index b67898d..b158231 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -139,7 +139,14 @@
             "-Wno-frame-larger-than=",
             "-DART_PGO_INSTRUMENTATION",
         ],
-    }
+    },
+    target: {
+        android: {
+            lto: {
+                 thin: true,
+            },
+        },
+    },
 }
 
 art_cc_binary {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 73afbad..6eeec4e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2069,11 +2069,9 @@
         std::unique_ptr<linker::OatWriter>& oat_writer = oat_writers_[i];
 
         oat_writer->PrepareLayout(&patcher);
-
-        size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
-        size_t text_size = oat_writer->GetOatSize() - rodata_size;
-        elf_writer->PrepareDynamicSection(rodata_size,
-                                          text_size,
+        elf_writer->PrepareDynamicSection(oat_writer->GetOatHeader().GetExecutableOffset(),
+                                          oat_writer->GetCodeSize(),
+                                          oat_writer->GetDataBimgRelRoSize(),
                                           oat_writer->GetBssSize(),
                                           oat_writer->GetBssMethodsOffset(),
                                           oat_writer->GetBssRootsOffset(),
@@ -2123,6 +2121,16 @@
         }
         elf_writer->EndText(text);
 
+        if (oat_writer->GetDataBimgRelRoSize() != 0u) {
+          linker::OutputStream* data_bimg_rel_ro = elf_writer->StartDataBimgRelRo();
+          if (!oat_writer->WriteDataBimgRelRo(data_bimg_rel_ro)) {
+            LOG(ERROR) << "Failed to write .data.bimg.rel.ro section to the ELF file "
+                << oat_file->GetPath();
+            return false;
+          }
+          elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
+        }
+
         if (!oat_writer->WriteHeader(elf_writer->GetStream(),
                                      image_file_location_oat_checksum_,
                                      image_file_location_oat_data_begin_,
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 49b84bb..d895282 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -129,12 +129,15 @@
     std::string art_file = scratch.GetFilename() + ".art";
     std::string oat_file = scratch.GetFilename() + ".oat";
     std::string vdex_file = scratch.GetFilename() + ".vdex";
-    ret.art_size = OS::GetFileSizeBytes(art_file.c_str());
-    ret.oat_size = OS::GetFileSizeBytes(oat_file.c_str());
-    ret.vdex_size = OS::GetFileSizeBytes(vdex_file.c_str());
-    CHECK_GT(ret.art_size, 0u) << art_file;
-    CHECK_GT(ret.oat_size, 0u) << oat_file;
-    CHECK_GT(ret.vdex_size, 0u) << vdex_file;
+    int64_t art_size = OS::GetFileSizeBytes(art_file.c_str());
+    int64_t oat_size = OS::GetFileSizeBytes(oat_file.c_str());
+    int64_t vdex_size = OS::GetFileSizeBytes(vdex_file.c_str());
+    CHECK_GT(art_size, 0u) << art_file;
+    CHECK_GT(oat_size, 0u) << oat_file;
+    CHECK_GT(vdex_size, 0u) << vdex_file;
+    ret.art_size = art_size;
+    ret.oat_size = oat_size;
+    ret.vdex_size = vdex_size;
     scratch.Close();
     // Clear image files since we compile the image multiple times and don't want to leave any
     // artifacts behind.
diff --git a/dex2oat/linker/elf_writer.h b/dex2oat/linker/elf_writer.h
index bcf2cd7..cd8cf4c 100644
--- a/dex2oat/linker/elf_writer.h
+++ b/dex2oat/linker/elf_writer.h
@@ -63,6 +63,7 @@
   // This method must be called before calling GetLoadedSize().
   virtual void PrepareDynamicSection(size_t rodata_size,
                                      size_t text_size,
+                                     size_t data_bimg_rel_ro_size,
                                      size_t bss_size,
                                      size_t bss_methods_offset,
                                      size_t bss_roots_offset,
@@ -72,6 +73,8 @@
   virtual void EndRoData(OutputStream* rodata) = 0;
   virtual OutputStream* StartText() = 0;
   virtual void EndText(OutputStream* text) = 0;
+  virtual OutputStream* StartDataBimgRelRo() = 0;
+  virtual void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) = 0;
   virtual void WriteDynamicSection() = 0;
   virtual void WriteDebugInfo(const debug::DebugInfo& debug_info) = 0;
   virtual bool End() = 0;
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 07b02f1..4ab2012 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -105,6 +105,7 @@
   void Start() OVERRIDE;
   void PrepareDynamicSection(size_t rodata_size,
                              size_t text_size,
+                             size_t data_bimg_rel_ro_size,
                              size_t bss_size,
                              size_t bss_methods_offset,
                              size_t bss_roots_offset,
@@ -114,6 +115,8 @@
   void EndRoData(OutputStream* rodata) OVERRIDE;
   OutputStream* StartText() OVERRIDE;
   void EndText(OutputStream* text) OVERRIDE;
+  OutputStream* StartDataBimgRelRo() OVERRIDE;
+  void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) OVERRIDE;
   void WriteDynamicSection() OVERRIDE;
   void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
   bool End() OVERRIDE;
@@ -131,6 +134,7 @@
   File* const elf_file_;
   size_t rodata_size_;
   size_t text_size_;
+  size_t data_bimg_rel_ro_size_;
   size_t bss_size_;
   size_t dex_section_size_;
   std::unique_ptr<BufferedOutputStream> output_stream_;
@@ -171,6 +175,7 @@
       elf_file_(elf_file),
       rodata_size_(0u),
       text_size_(0u),
+      data_bimg_rel_ro_size_(0u),
       bss_size_(0u),
       dex_section_size_(0u),
       output_stream_(
@@ -192,6 +197,7 @@
 template <typename ElfTypes>
 void ElfWriterQuick<ElfTypes>::PrepareDynamicSection(size_t rodata_size,
                                                      size_t text_size,
+                                                     size_t data_bimg_rel_ro_size,
                                                      size_t bss_size,
                                                      size_t bss_methods_offset,
                                                      size_t bss_roots_offset,
@@ -200,6 +206,8 @@
   rodata_size_ = rodata_size;
   DCHECK_EQ(text_size_, 0u);
   text_size_ = text_size;
+  DCHECK_EQ(data_bimg_rel_ro_size_, 0u);
+  data_bimg_rel_ro_size_ = data_bimg_rel_ro_size;
   DCHECK_EQ(bss_size_, 0u);
   bss_size_ = bss_size;
   DCHECK_EQ(dex_section_size_, 0u);
@@ -207,6 +215,7 @@
   builder_->PrepareDynamicSection(elf_file_->GetPath(),
                                   rodata_size_,
                                   text_size_,
+                                  data_bimg_rel_ro_size_,
                                   bss_size_,
                                   bss_methods_offset,
                                   bss_roots_offset,
@@ -240,6 +249,19 @@
 }
 
 template <typename ElfTypes>
+OutputStream* ElfWriterQuick<ElfTypes>::StartDataBimgRelRo() {
+  auto* data_bimg_rel_ro = builder_->GetDataBimgRelRo();
+  data_bimg_rel_ro->Start();
+  return data_bimg_rel_ro;
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) {
+  CHECK_EQ(builder_->GetDataBimgRelRo(), data_bimg_rel_ro);
+  builder_->GetDataBimgRelRo()->End();
+}
+
+template <typename ElfTypes>
 void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
   if (builder_->GetIsa() == InstructionSet::kMips ||
       builder_->GetIsa() == InstructionSet::kMips64) {
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 319c5fb..7449191 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -313,10 +313,9 @@
         oat_writer->WriteChecksumsAndVdexHeader(vdex_out.get());
 
         oat_writer->PrepareLayout(&patcher);
-        size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
-        size_t text_size = oat_writer->GetOatSize() - rodata_size;
-        elf_writer->PrepareDynamicSection(rodata_size,
-                                          text_size,
+        elf_writer->PrepareDynamicSection(oat_writer->GetOatHeader().GetExecutableOffset(),
+                                          oat_writer->GetCodeSize(),
+                                          oat_writer->GetDataBimgRelRoSize(),
                                           oat_writer->GetBssSize(),
                                           oat_writer->GetBssMethodsOffset(),
                                           oat_writer->GetBssRootsOffset(),
@@ -336,6 +335,13 @@
         ASSERT_TRUE(text_ok);
         elf_writer->EndText(text);
 
+        if (oat_writer->GetDataBimgRelRoSize() != 0u) {
+          OutputStream* data_bimg_rel_ro = elf_writer->StartDataBimgRelRo();
+          bool data_bimg_rel_ro_ok = oat_writer->WriteDataBimgRelRo(data_bimg_rel_ro);
+          ASSERT_TRUE(data_bimg_rel_ro_ok);
+          elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
+        }
+
         bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
         ASSERT_TRUE(header_ok);
 
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index a2ba816..6530ead 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -692,7 +692,7 @@
   for (ImageInfo& image_info : image_infos_) {
     ImageSection unused_sections[ImageHeader::kSectionCount];
     const size_t length = RoundUp(
-        image_info.CreateImageSections(unused_sections, compile_app_image_), kPageSize);
+        image_info.CreateImageSections(unused_sections), kPageSize);
 
     std::string error_msg;
     image_info.image_.reset(MemMap::MapAnonymous("image writer image",
@@ -1842,7 +1842,7 @@
     image_info.image_offset_ = image_offset;
     ImageSection unused_sections[ImageHeader::kSectionCount];
     image_info.image_size_ =
-        RoundUp(image_info.CreateImageSections(unused_sections, compile_app_image_), kPageSize);
+        RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
     // There should be no gaps until the next image.
     image_offset += image_info.image_size_;
   }
@@ -1873,8 +1873,7 @@
   }
 }
 
-size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections,
-                                                   bool app_image) const {
+size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
   DCHECK(out_sections != nullptr);
 
   // Do not round up any sections here that are represented by the bins since it will break
@@ -1912,13 +1911,8 @@
   ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
   *dex_cache_arrays_section = ImageSection(GetBinSlotOffset(Bin::kDexCacheArray),
                                            GetBinSlotSize(Bin::kDexCacheArray));
-  // For boot image, round up to the page boundary to separate the interned strings and
-  // class table from the modifiable data. We shall mprotect() these pages read-only when
-  // we load the boot image. This is more than sufficient for the string table alignment,
-  // namely sizeof(uint64_t). See HashSet::WriteToMemory.
-  static_assert(IsAligned<sizeof(uint64_t)>(kPageSize), "String table alignment check.");
-  size_t cur_pos =
-      RoundUp(dex_cache_arrays_section->End(), app_image ? sizeof(uint64_t) : kPageSize);
+  // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
+  size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
   // Calculate the size of the interned strings.
   ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
   *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
@@ -1941,7 +1935,7 @@
 
   // Create the image sections.
   ImageSection sections[ImageHeader::kSectionCount];
-  const size_t image_end = image_info.CreateImageSections(sections, compile_app_image_);
+  const size_t image_end = image_info.CreateImageSections(sections);
 
   // Finally bitmap section.
   const size_t bitmap_bytes = image_info.image_bitmap_->Size();
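
For reference, ART's RoundUp is the usual power-of-two rounding, so the change above
swaps a page-sized round-up for an 8-byte one; a quick worked sketch:

    // RoundUp(x, n) for power-of-two n is (x + n - 1) & ~(n - 1).
    // RoundUp(0x1234, 8)      == 0x1238   (new: string table alignment)
    // RoundUp(0x1238, 8)      == 0x1238   (already aligned: no padding)
    // RoundUp(0x1234, 0x1000) == 0x2000   (old: boot image page alignment)
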
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 36bbb47..c67835b 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -267,7 +267,7 @@
 
     // Create the image sections into the out sections variable, returns the size of the image
     // excluding the bitmap.
-    size_t CreateImageSections(ImageSection* out_sections, bool app_image) const;
+    size_t CreateImageSections(ImageSection* out_sections) const;
 
     size_t GetStubOffset(StubType stub_type) const {
       DCHECK_LT(static_cast<size_t>(stub_type), kNumberOfStubTypes);
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index c72beea..31f1f1e 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -375,16 +375,19 @@
     vdex_dex_shared_data_offset_(0u),
     vdex_verifier_deps_offset_(0u),
     vdex_quickening_info_offset_(0u),
+    code_size_(0u),
     oat_size_(0u),
+    data_bimg_rel_ro_start_(0u),
+    data_bimg_rel_ro_size_(0u),
     bss_start_(0u),
     bss_size_(0u),
     bss_methods_offset_(0u),
     bss_roots_offset_(0u),
+    data_bimg_rel_ro_entries_(),
     bss_method_entry_references_(),
     bss_method_entries_(),
     bss_type_entries_(),
     bss_string_entries_(),
-    map_boot_image_tables_to_bss_(false),
     oat_data_offset_(0u),
     oat_header_(nullptr),
     size_vdex_header_(0),
@@ -409,6 +412,8 @@
     size_method_header_(0),
     size_code_(0),
     size_code_alignment_(0),
+    size_data_bimg_rel_ro_(0),
+    size_data_bimg_rel_ro_alignment_(0),
     size_relative_call_thunks_(0),
     size_misc_thunks_(0),
     size_vmap_table_(0),
@@ -737,8 +742,13 @@
   {
     TimingLogger::ScopedTiming split("InitOatCodeDexFiles", timings_);
     offset = InitOatCodeDexFiles(offset);
+    code_size_ = offset - GetOatHeader().GetExecutableOffset();
   }
-  oat_size_ = offset;
+  {
+    TimingLogger::ScopedTiming split("InitDataBimgRelRoLayout", timings_);
+    offset = InitDataBimgRelRoLayout(offset);
+  }
+  oat_size_ = offset;  // .bss does not count towards oat_size_.
   bss_start_ = (bss_size_ != 0u) ? RoundUp(oat_size_, kPageSize) : 0u;
 
   CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
@@ -845,7 +855,10 @@
         MethodReference(dex_file_, it.GetMemberIndex()));
     if (HasCompiledCode(compiled_method)) {
       for (const LinkerPatch& patch : compiled_method->GetPatches()) {
-        if (patch.GetType() == LinkerPatch::Type::kMethodBssEntry) {
+        if (patch.GetType() == LinkerPatch::Type::kDataBimgRelRo) {
+          writer_->data_bimg_rel_ro_entries_.Overwrite(patch.BootImageOffset(),
+                                                       /* placeholder */ 0u);
+        } else if (patch.GetType() == LinkerPatch::Type::kMethodBssEntry) {
           MethodReference target_method = patch.TargetMethod();
           AddBssReference(target_method,
                           target_method.dex_file->NumMethodIds(),
@@ -863,9 +876,6 @@
                           target_string.dex_file->NumStringIds(),
                           &writer_->bss_string_entry_references_);
           writer_->bss_string_entries_.Overwrite(target_string, /* placeholder */ 0u);
-        } else if (patch.GetType() == LinkerPatch::Type::kStringInternTable ||
-                   patch.GetType() == LinkerPatch::Type::kTypeClassTable) {
-          writer_->map_boot_image_tables_to_bss_ = true;
         }
       }
     } else {
@@ -1776,6 +1786,16 @@
         for (const LinkerPatch& patch : compiled_method->GetPatches()) {
           uint32_t literal_offset = patch.LiteralOffset();
           switch (patch.GetType()) {
+            case LinkerPatch::Type::kDataBimgRelRo: {
+              uint32_t target_offset =
+                  writer_->data_bimg_rel_ro_start_ +
+                  writer_->data_bimg_rel_ro_entries_.Get(patch.BootImageOffset());
+              writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+                                                                   patch,
+                                                                   offset_ + literal_offset,
+                                                                   target_offset);
+              break;
+            }
             case LinkerPatch::Type::kMethodBssEntry: {
               uint32_t target_offset =
                   writer_->bss_start_ + writer_->bss_method_entries_.Get(patch.TargetMethod());
@@ -1802,14 +1822,6 @@
                                                                    target_offset);
               break;
             }
-            case LinkerPatch::Type::kStringInternTable: {
-              uint32_t target_offset = GetInternTableEntryOffset(patch);
-              writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
-                                                                   patch,
-                                                                   offset_ + literal_offset,
-                                                                   target_offset);
-              break;
-            }
             case LinkerPatch::Type::kStringBssEntry: {
               StringReference ref(patch.TargetStringDexFile(), patch.TargetStringIndex());
               uint32_t target_offset =
@@ -1828,14 +1840,6 @@
                                                                    target_offset);
               break;
             }
-            case LinkerPatch::Type::kTypeClassTable: {
-              uint32_t target_offset = GetClassTableEntryOffset(patch);
-              writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
-                                                                   patch,
-                                                                   offset_ + literal_offset,
-                                                                   target_offset);
-              break;
-            }
             case LinkerPatch::Type::kTypeBssEntry: {
               TypeReference ref(patch.TargetTypeDexFile(), patch.TargetTypeIndex());
               uint32_t target_offset = writer_->bss_start_ + writer_->bss_type_entries_.Get(ref);
@@ -2037,42 +2041,6 @@
     data[2] = (address >> 16) & 0xffu;
     data[3] = (address >> 24) & 0xffu;
   }
-
-  // Calculate the offset of the InternTable slot (GcRoot<String>) when mmapped to the .bss.
-  uint32_t GetInternTableEntryOffset(const LinkerPatch& patch)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(!writer_->HasBootImage());
-    const uint8_t* string_root = writer_->LookupBootImageInternTableSlot(
-        *patch.TargetStringDexFile(), patch.TargetStringIndex());
-    DCHECK(string_root != nullptr);
-    return GetBootImageTableEntryOffset(string_root);
-  }
-
-  // Calculate the offset of the ClassTable::TableSlot when mmapped to the .bss.
-  uint32_t GetClassTableEntryOffset(const LinkerPatch& patch)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(!writer_->HasBootImage());
-    const uint8_t* table_slot =
-        writer_->LookupBootImageClassTableSlot(*patch.TargetTypeDexFile(), patch.TargetTypeIndex());
-    DCHECK(table_slot != nullptr);
-    return GetBootImageTableEntryOffset(table_slot);
-  }
-
-  uint32_t GetBootImageTableEntryOffset(const uint8_t* raw_root) {
-    uint32_t base_offset = writer_->bss_start_;
-    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
-      const uint8_t* const_tables_begin =
-          space->Begin() + space->GetImageHeader().GetBootImageConstantTablesOffset();
-      size_t offset = static_cast<size_t>(raw_root - const_tables_begin);
-      if (offset < space->GetImageHeader().GetBootImageConstantTablesSize()) {
-        DCHECK_LE(base_offset + offset, writer_->bss_start_ + writer_->bss_methods_offset_);
-        return base_offset + offset;
-      }
-      base_offset += space->GetImageHeader().GetBootImageConstantTablesSize();
-    }
-    LOG(FATAL) << "Didn't find boot image string in boot image intern tables!";
-    UNREACHABLE();
-  }
 };
 
 class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
@@ -2510,6 +2478,25 @@
   return offset;
 }
 
+size_t OatWriter::InitDataBimgRelRoLayout(size_t offset) {
+  DCHECK_EQ(data_bimg_rel_ro_size_, 0u);
+  if (data_bimg_rel_ro_entries_.empty()) {
+    // Nothing to put to the .data.bimg.rel.ro section.
+    return offset;
+  }
+
+  data_bimg_rel_ro_start_ = RoundUp(offset, kPageSize);
+
+  for (auto& entry : data_bimg_rel_ro_entries_) {
+    size_t& entry_offset = entry.second;
+    entry_offset = data_bimg_rel_ro_size_;
+    data_bimg_rel_ro_size_ += sizeof(uint32_t);
+  }
+
+  offset = data_bimg_rel_ro_start_ + data_bimg_rel_ro_size_;
+  return offset;
+}
+
 void OatWriter::InitBssLayout(InstructionSet instruction_set) {
   {
     InitBssLayoutMethodVisitor visitor(this);
@@ -2519,25 +2506,16 @@
 
   DCHECK_EQ(bss_size_, 0u);
   if (HasBootImage()) {
-    DCHECK(!map_boot_image_tables_to_bss_);
     DCHECK(bss_string_entries_.empty());
   }
-  if (!map_boot_image_tables_to_bss_ &&
-      bss_method_entries_.empty() &&
+  if (bss_method_entries_.empty() &&
       bss_type_entries_.empty() &&
       bss_string_entries_.empty()) {
     // Nothing to put to the .bss section.
     return;
   }
 
-  // Allocate space for boot image tables in the .bss section.
   PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set);
-  if (map_boot_image_tables_to_bss_) {
-    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
-      bss_size_ += space->GetImageHeader().GetBootImageConstantTablesSize();
-    }
-  }
-
   bss_methods_offset_ = bss_size_;
 
   // Prepare offsets for .bss ArtMethod entries.
@@ -2905,6 +2883,49 @@
     return false;
   }
 
+  if (data_bimg_rel_ro_size_ != 0u) {
+    write_state_ = WriteState::kWriteDataBimgRelRo;
+  } else {
+    if (!CheckOatSize(out, file_offset, relative_offset)) {
+      return false;
+    }
+    write_state_ = WriteState::kWriteHeader;
+  }
+  return true;
+}
+
+bool OatWriter::WriteDataBimgRelRo(OutputStream* out) {
+  CHECK(write_state_ == WriteState::kWriteDataBimgRelRo);
+
+  // Wrap out to update checksum with each write.
+  ChecksumUpdatingOutputStream checksum_updating_out(out, oat_header_.get());
+  out = &checksum_updating_out;
+
+  const size_t file_offset = oat_data_offset_;
+  size_t relative_offset = data_bimg_rel_ro_start_;
+
+  // Record the padding before the .data.bimg.rel.ro section.
+  // Do not write anything; this zero-filled part was skipped (Seek()) when starting the section.
+  size_t code_end = GetOatHeader().GetExecutableOffset() + code_size_;
+  DCHECK_EQ(RoundUp(code_end, kPageSize), relative_offset);
+  size_t padding_size = relative_offset - code_end;
+  DCHECK_EQ(size_data_bimg_rel_ro_alignment_, 0u);
+  size_data_bimg_rel_ro_alignment_ = padding_size;
+
+  relative_offset = WriteDataBimgRelRo(out, file_offset, relative_offset);
+  if (relative_offset == 0) {
+    LOG(ERROR) << "Failed to write boot image relocations to " << out->GetLocation();
+    return false;
+  }
+
+  if (!CheckOatSize(out, file_offset, relative_offset)) {
+    return false;
+  }
+  write_state_ = WriteState::kWriteHeader;
+  return true;
+}
+
+bool OatWriter::CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset) {
   const off_t oat_end_file_offset = out->Seek(0, kSeekCurrent);
   if (oat_end_file_offset == static_cast<off_t>(-1)) {
     LOG(ERROR) << "Failed to get oat end file offset in " << out->GetLocation();
@@ -2939,6 +2960,8 @@
     DO_STAT(size_method_header_);
     DO_STAT(size_code_);
     DO_STAT(size_code_alignment_);
+    DO_STAT(size_data_bimg_rel_ro_);
+    DO_STAT(size_data_bimg_rel_ro_alignment_);
     DO_STAT(size_relative_call_thunks_);
     DO_STAT(size_misc_thunks_);
     DO_STAT(size_vmap_table_);
@@ -3316,6 +3339,32 @@
   return relative_offset;
 }
 
+size_t OatWriter::WriteDataBimgRelRo(OutputStream* out,
+                                     size_t file_offset,
+                                     size_t relative_offset) {
+  if (data_bimg_rel_ro_entries_.empty()) {
+    return relative_offset;
+  }
+
+  // Write the entire .data.bimg.rel.ro with a single WriteFully().
+  std::vector<uint32_t> data;
+  data.reserve(data_bimg_rel_ro_entries_.size());
+  for (const auto& entry : data_bimg_rel_ro_entries_) {
+    uint32_t boot_image_offset = entry.first;
+    data.push_back(boot_image_offset);
+  }
+  DCHECK_EQ(data.size(), data_bimg_rel_ro_entries_.size());
+  DCHECK_OFFSET();
+  if (!out->WriteFully(data.data(), data.size() * sizeof(data[0]))) {
+    PLOG(ERROR) << "Failed to write .data.bimg.rel.ro in " << out->GetLocation();
+    return 0u;
+  }
+  DCHECK_EQ(size_data_bimg_rel_ro_, 0u);
+  size_data_bimg_rel_ro_ = data.size() * sizeof(data[0]);
+  relative_offset += size_data_bimg_rel_ro_;
+  return relative_offset;
+}
+
 bool OatWriter::RecordOatDataOffset(OutputStream* out) {
   // Get the elf file offset of the oat file.
   const off_t raw_file_offset = out->Seek(0, kSeekCurrent);
@@ -4356,42 +4405,6 @@
   return true;
 }
 
-const uint8_t* OatWriter::LookupBootImageInternTableSlot(const DexFile& dex_file,
-                                                         dex::StringIndex string_idx)
-    NO_THREAD_SAFETY_ANALYSIS {  // Single-threaded OatWriter can avoid locking.
-  uint32_t utf16_length;
-  const char* utf8_data = dex_file.StringDataAndUtf16LengthByIdx(string_idx, &utf16_length);
-  DCHECK_EQ(utf16_length, CountModifiedUtf8Chars(utf8_data));
-  InternTable::Utf8String string(utf16_length,
-                                 utf8_data,
-                                 ComputeUtf16HashFromModifiedUtf8(utf8_data, utf16_length));
-  const InternTable* intern_table = Runtime::Current()->GetClassLinker()->intern_table_;
-  for (const InternTable::Table::UnorderedSet& table : intern_table->strong_interns_.tables_) {
-    auto it = table.Find(string);
-    if (it != table.end()) {
-      return reinterpret_cast<const uint8_t*>(std::addressof(*it));
-    }
-  }
-  LOG(FATAL) << "Did not find boot image string " << utf8_data;
-  UNREACHABLE();
-}
-
-const uint8_t* OatWriter::LookupBootImageClassTableSlot(const DexFile& dex_file,
-                                                        dex::TypeIndex type_idx)
-    NO_THREAD_SAFETY_ANALYSIS {  // Single-threaded OatWriter can avoid locking.
-  const char* descriptor = dex_file.StringByTypeIdx(type_idx);
-  ClassTable::DescriptorHashPair pair(descriptor, ComputeModifiedUtf8Hash(descriptor));
-  ClassTable* table = Runtime::Current()->GetClassLinker()->boot_class_table_.get();
-  for (const ClassTable::ClassSet& class_set : table->classes_) {
-    auto it = class_set.Find(pair);
-    if (it != class_set.end()) {
-      return reinterpret_cast<const uint8_t*>(std::addressof(*it));
-    }
-  }
-  LOG(FATAL) << "Did not find boot image class " << descriptor;
-  UNREACHABLE();
-}
-
 debug::DebugInfo OatWriter::GetDebugInfo() const {
   debug::DebugInfo debug_info{};
   debug_info.compiled_methods = ArrayRef<const debug::MethodDebugInfo>(method_info_);
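
Condensed, the .data.bimg.rel.ro machinery added above is: deduplicate boot image
offsets in a sorted map, assign each one a consecutive uint32_t slot during layout,
then emit the keys in order during the write pass. A standalone sketch of that flow,
with std::map standing in for SafeMap:

    #include <cstdint>
    #include <map>
    #include <vector>

    // Boot image offset -> assigned offset within .data.bimg.rel.ro.
    std::map<uint32_t, size_t> entries;

    size_t LayOutDataBimgRelRo() {
      size_t size = 0u;
      for (auto& entry : entries) {
        entry.second = size;          // slot recorded for later patching
        size += sizeof(uint32_t);     // one 32-bit relocation per entry
      }
      return size;
    }

    std::vector<uint32_t> EmitDataBimgRelRo() {
      std::vector<uint32_t> data;
      data.reserve(entries.size());
      for (const auto& entry : entries) {
        data.push_back(entry.first);  // the payload is the boot image offset
      }
      return data;
    }
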
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 0cb0ef2..120ea56 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -137,6 +137,7 @@
   //   - PrepareLayout(),
   //   - WriteRodata(),
   //   - WriteCode(),
+  //   - WriteDataBimgRelRo() iff GetDataBimgRelRoSize() != 0,
   //   - WriteHeader().
 
   // Add dex file source(s) from a file, either a plain dex file or
@@ -197,6 +198,10 @@
   bool WriteRodata(OutputStream* out);
   // Write the code to the .text section.
   bool WriteCode(OutputStream* out);
+  // Write the boot image relocation data to the .data.bimg.rel.ro section.
+  bool WriteDataBimgRelRo(OutputStream* out);
+  // Check the size of the written oat file.
+  bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
   // Write the oat header. This finalizes the oat file.
   bool WriteHeader(OutputStream* out,
                    uint32_t image_file_location_oat_checksum,
@@ -218,10 +223,18 @@
     return *oat_header_;
   }
 
+  size_t GetCodeSize() const {
+    return code_size_;
+  }
+
   size_t GetOatSize() const {
     return oat_size_;
   }
 
+  size_t GetDataBimgRelRoSize() const {
+    return data_bimg_rel_ro_size_;
+  }
+
   size_t GetBssSize() const {
     return bss_size_;
   }
@@ -323,6 +336,7 @@
   size_t InitOatDexFiles(size_t offset);
   size_t InitOatCode(size_t offset);
   size_t InitOatCodeDexFiles(size_t offset);
+  size_t InitDataBimgRelRoLayout(size_t offset);
   void InitBssLayout(InstructionSet instruction_set);
 
   size_t WriteClassOffsets(OutputStream* out, size_t file_offset, size_t relative_offset);
@@ -332,6 +346,7 @@
   size_t WriteOatDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset);
   size_t WriteCode(OutputStream* out, size_t file_offset, size_t relative_offset);
   size_t WriteCodeDexFiles(OutputStream* out, size_t file_offset, size_t relative_offset);
+  size_t WriteDataBimgRelRo(OutputStream* out, size_t file_offset, size_t relative_offset);
 
   bool RecordOatDataOffset(OutputStream* out);
   bool WriteTypeLookupTables(OutputStream* oat_rodata,
@@ -349,17 +364,12 @@
     return dex_files_ != nullptr && extract_dex_files_into_vdex_;
   }
 
-  // Find the address of the GcRoot<String> in the InternTable for a boot image string.
-  const uint8_t* LookupBootImageInternTableSlot(const DexFile& dex_file,
-                                                dex::StringIndex string_idx);
-  // Find the address of the ClassTable::TableSlot for a boot image class.
-  const uint8_t* LookupBootImageClassTableSlot(const DexFile& dex_file, dex::TypeIndex type_idx);
-
   enum class WriteState {
     kAddingDexFileSources,
     kPrepareLayout,
     kWriteRoData,
     kWriteText,
+    kWriteDataBimgRelRo,
     kWriteHeader,
     kDone
   };
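
The new state slots in between text and header; the transitions, sketched informally
from the WriteCode()/WriteDataBimgRelRo() tails above:

    // kWriteText          --WriteCode()----------->  kWriteDataBimgRelRo  (rel.ro size != 0)
    // kWriteText          --WriteCode()----------->  kWriteHeader         (rel.ro size == 0)
    // kWriteDataBimgRelRo --WriteDataBimgRelRo()-->  kWriteHeader
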
@@ -401,9 +411,18 @@
   // Offset of section holding quickening info inside Vdex.
   size_t vdex_quickening_info_offset_;
 
+  // Size of the .text section.
+  size_t code_size_;
+
   // Size required for Oat data structures.
   size_t oat_size_;
 
+  // The start of the required .data.bimg.rel.ro section.
+  size_t data_bimg_rel_ro_start_;
+
+  // The size of the required .data.bimg.rel.ro section holding the boot image relocations.
+  size_t data_bimg_rel_ro_size_;
+
   // The start of the required .bss section.
   size_t bss_start_;
 
@@ -416,6 +435,10 @@
   // The offset of the GC roots in .bss section.
   size_t bss_roots_offset_;
 
+  // Map for allocating .data.bimg.rel.ro entries. Indexed by the boot image offset of the
+  // relocation. The value is the assigned offset within the .data.bimg.rel.ro section.
+  SafeMap<uint32_t, size_t> data_bimg_rel_ro_entries_;
+
   // Map for recording references to ArtMethod entries in .bss.
   SafeMap<const DexFile*, BitVector> bss_method_entry_references_;
 
@@ -440,10 +463,6 @@
   // is the target offset for patching, starting at `bss_start_ + bss_roots_offset_`.
   SafeMap<StringReference, size_t, StringReferenceValueComparator> bss_string_entries_;
 
-  // Whether boot image tables should be mapped to the .bss. This is needed for compiled
-  // code that reads from these tables with PC-relative instructions.
-  bool map_boot_image_tables_to_bss_;
-
   // Offset of the oat data from the start of the mmapped region of the elf file.
   size_t oat_data_offset_;
 
@@ -484,6 +503,8 @@
   uint32_t size_method_header_;
   uint32_t size_code_;
   uint32_t size_code_alignment_;
+  uint32_t size_data_bimg_rel_ro_;
+  uint32_t size_data_bimg_rel_ro_alignment_;
   uint32_t size_relative_call_thunks_;
   uint32_t size_misc_thunks_;
   uint32_t size_vmap_table_;
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 00b9abe..06d264e 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -216,10 +216,9 @@
                                     instruction_set_features_.get());
     oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
     oat_writer.PrepareLayout(&patcher);
-    size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
-    size_t text_size = oat_writer.GetOatSize() - rodata_size;
-    elf_writer->PrepareDynamicSection(rodata_size,
-                                      text_size,
+    elf_writer->PrepareDynamicSection(oat_writer.GetOatHeader().GetExecutableOffset(),
+                                      oat_writer.GetCodeSize(),
+                                      oat_writer.GetDataBimgRelRoSize(),
                                       oat_writer.GetBssSize(),
                                       oat_writer.GetBssMethodsOffset(),
                                       oat_writer.GetBssRootsOffset(),
@@ -248,6 +247,14 @@
     }
     elf_writer->EndText(text);
 
+    if (oat_writer.GetDataBimgRelRoSize() != 0u) {
+      OutputStream* data_bimg_rel_ro = elf_writer->StartDataBimgRelRo();
+      if (!oat_writer.WriteDataBimgRelRo(data_bimg_rel_ro)) {
+        return false;
+      }
+      elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
+    }
+
     if (!oat_writer.WriteHeader(elf_writer->GetStream(), 42U, 4096U, 0)) {
       return false;
     }
@@ -407,7 +414,7 @@
     compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
   }
 
-  ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
+  ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
   key_value_store.Put(OatHeader::kImageLocationKey, "lue.art");
   bool success = WriteElf(tmp_vdex.GetFile(),
@@ -544,10 +551,14 @@
   compiler_driver_->SetDexFilesForOatFile(dex_files);
   compiler_driver_->CompileAll(class_loader, dex_files, &timings);
 
-  ScratchFile tmp_oat, tmp_vdex(tmp_oat, ".vdex");
+  ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
   key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
-  bool success = WriteElf(tmp_vdex.GetFile(), tmp_oat.GetFile(), dex_files, key_value_store, false);
+  bool success = WriteElf(tmp_vdex.GetFile(),
+                          tmp_oat.GetFile(),
+                          dex_files,
+                          key_value_store,
+                          /* verify */ false);
   ASSERT_TRUE(success);
 
   std::unique_ptr<OatFile> oat_file(OatFile::Open(tmp_oat.GetFilename(),
@@ -606,13 +617,13 @@
   ASSERT_TRUE(success);
   input_filenames.push_back(dex_file2.GetFilename().c_str());
 
-  ScratchFile oat_file, vdex_file(oat_file, ".vdex");
+  ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
   key_value_store.Put(OatHeader::kImageLocationKey, "test.art");
   std::unique_ptr<ProfileCompilationInfo>
       profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
-  success = WriteElf(vdex_file.GetFile(),
-                     oat_file.GetFile(),
+  success = WriteElf(tmp_vdex.GetFile(),
+                     tmp_oat.GetFile(),
                      input_filenames,
                      key_value_store,
                      verify,
@@ -627,19 +638,19 @@
   ASSERT_TRUE(success);
 
   std::string error_msg;
-  std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(oat_file.GetFilename(),
-                                                         oat_file.GetFilename(),
+  std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(tmp_oat.GetFilename(),
+                                                         tmp_oat.GetFilename(),
                                                          nullptr,
                                                          nullptr,
                                                          false,
                                                          low_4gb,
                                                          nullptr,
                                                          &error_msg));
+  ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
   if (low_4gb) {
     uintptr_t begin = reinterpret_cast<uintptr_t>(opened_oat_file->Begin());
     EXPECT_EQ(begin, static_cast<uint32_t>(begin));
   }
-  ASSERT_TRUE(opened_oat_file != nullptr);
   ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
   std::unique_ptr<const DexFile> opened_dex_file1 =
       opened_oat_file->GetOatDexFiles()[0]->OpenDexFile(&error_msg);
@@ -670,7 +681,7 @@
     ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
   }
 
-  int64_t actual_vdex_size = vdex_file.GetFile()->GetLength();
+  int64_t actual_vdex_size = tmp_vdex.GetFile()->GetLength();
   ASSERT_GE(actual_vdex_size, 0);
   ASSERT_EQ((uint64_t) actual_vdex_size, vdex_header.GetComputedFileSize());
 }
@@ -742,9 +753,13 @@
     // Test using the AddDexFileSource() interface with the zip file.
     std::vector<const char*> input_filenames = { zip_file.GetFilename().c_str() };
 
-    ScratchFile oat_file, vdex_file(oat_file, ".vdex");
-    success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
-                       key_value_store, verify, /*profile_compilation_info*/nullptr);
+    ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
+    success = WriteElf(tmp_vdex.GetFile(),
+                       tmp_oat.GetFile(),
+                       input_filenames,
+                       key_value_store,
+                       verify,
+                       /* profile_compilation_info */ nullptr);
 
     if (verify) {
       ASSERT_FALSE(success);
@@ -752,15 +767,15 @@
       ASSERT_TRUE(success);
 
       std::string error_msg;
-      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(oat_file.GetFilename(),
-                                                             oat_file.GetFilename(),
+      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(tmp_oat.GetFilename(),
+                                                             tmp_oat.GetFilename(),
                                                              nullptr,
                                                              nullptr,
                                                              false,
                                                              /*low_4gb*/false,
                                                              nullptr,
                                                              &error_msg));
-      ASSERT_TRUE(opened_oat_file != nullptr);
+      ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
       std::unique_ptr<const DexFile> opened_dex_file1 =
           opened_oat_file->GetOatDexFiles()[0]->OpenDexFile(&error_msg);
@@ -788,9 +803,9 @@
     File zip_fd(dup(zip_file.GetFd()), /* check_usage */ false);
     ASSERT_NE(-1, zip_fd.Fd());
 
-    ScratchFile oat_file, vdex_file(oat_file, ".vdex");
-    success = WriteElf(vdex_file.GetFile(),
-                       oat_file.GetFile(),
+    ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
+    success = WriteElf(tmp_vdex.GetFile(),
+                       tmp_oat.GetFile(),
                        std::move(zip_fd),
                        zip_file.GetFilename().c_str(),
                        key_value_store,
@@ -801,15 +816,15 @@
       ASSERT_TRUE(success);
 
       std::string error_msg;
-      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(oat_file.GetFilename(),
-                                                             oat_file.GetFilename(),
+      std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(tmp_oat.GetFilename(),
+                                                             tmp_oat.GetFilename(),
                                                              nullptr,
                                                              nullptr,
                                                              false,
                                                              /*low_4gb*/false,
                                                              nullptr,
                                                              &error_msg));
-      ASSERT_TRUE(opened_oat_file != nullptr);
+      ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
       std::unique_ptr<const DexFile> opened_dex_file1 =
           opened_oat_file->GetOatDexFiles()[0]->OpenDexFile(&error_msg);
@@ -854,8 +869,12 @@
   std::vector<const char*> input_filenames = { zip_file.GetFilename().c_str() };
   ScratchFile oat_file, vdex_file(oat_file, ".vdex");
   std::unique_ptr<ProfileCompilationInfo> profile_compilation_info(new ProfileCompilationInfo());
-  success = WriteElf(vdex_file.GetFile(), oat_file.GetFile(), input_filenames,
-                     key_value_store, /*verify*/false, profile_compilation_info.get());
+  success = WriteElf(vdex_file.GetFile(),
+                     oat_file.GetFile(),
+                     input_filenames,
+                     key_value_store,
+                     /* verify */ false,
+                     profile_compilation_info.get());
   ASSERT_FALSE(success);
 }
 
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index eca0844..c63d6c3 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -46,7 +46,6 @@
     device_supported: false,
     static_libs: [
         "libdexfile",
-        "libbase",
     ] + art_static_dependencies,
     target: {
         darwin: {
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index bea61d0..facda11 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -44,7 +44,14 @@
         instrumentation: true,
         profile_file: "art/dex2oat.profdata",
         benchmarks: ["dex2oat"],
-    }
+    },
+    target: {
+        android: {
+            lto: {
+                 thin: true,
+            },
+        },
+    },
 }
 
 art_cc_library {
@@ -106,6 +113,7 @@
     shared_libs: [
         "libart",
         "libart-dexlayout",
+        "libbase",
     ],
     target: {
         android: {
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index b5f5d6f..eaf11be 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -487,6 +487,7 @@
   { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x19, "copy_u", "yX" },
   { kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x19, "insert", "YD" },
   { kMsaMask | (0xff << 18), kMsa | (0xc0 << 18) | 0x1e, "fill", "vkD" },
+  { kMsaMask | (0xff << 18), kMsa | (0xc1 << 18) | 0x1e, "pcnt", "vkm" },
   { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x7, "ldi", "kx" },
   { kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" },
   { kMsaSpecialMask | (0xf << 2), kMsa | (0x9 << 2), "st", "kw" },
diff --git a/libartbase/base/os_linux.cc b/libartbase/base/os_linux.cc
index 6b5a604..cb228bd 100644
--- a/libartbase/base/os_linux.cc
+++ b/libartbase/base/os_linux.cc
@@ -89,9 +89,11 @@
 int64_t OS::GetFileSizeBytes(const char* name) {
   struct stat st;
   if (stat(name, &st) == 0) {
-    return -1;  // TODO: Deal with symlinks?
+    return st.st_size;  // TODO: Deal with symlinks? According to the documentation,
+                        // the st_size for a symlink is "the length of the pathname
+                        // it contains, without a terminating null byte."
   } else {
-    return st.st_size;
+    return -1;
   }
 }
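
One caveat on the TODO comment above: stat() follows symlinks, so the value returned
here is the size of the link's target; the quoted "length of the pathname" behaviour
is what lstat() reports for the link itself. A minimal sketch of the distinction
(not part of the change):

    #include <sys/stat.h>
    #include <cstdint>

    int64_t SizeFollowingLinks(const char* name) {
      struct stat st;
      return (stat(name, &st) == 0) ? st.st_size : -1;   // size of the target
    }

    int64_t SizeOfLinkItself(const char* name) {
      struct stat st;
      return (lstat(name, &st) == 0) ? st.st_size : -1;  // strlen of stored path
    }
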
 
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index f9da178..b2881b8 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -31,7 +31,7 @@
 #else
 #include <algorithm>
 #include "base/stl_util.h"
-#include "globals.h"
+#include "base/globals.h"
 #endif
 
 namespace unix_file {
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index ae4ded5..988ee03 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -40,7 +40,6 @@
             static_libs: [
                 "libziparchive",
                 "libz",
-                "libbase",
             ],
             shared_libs: [
                 "libutils",
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 8069408..dbd90cc 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -172,6 +172,7 @@
     builder_->PrepareDynamicSection(elf_file->GetPath(),
                                     rodata_size,
                                     text_size,
+                                    oat_file_->DataBimgRelRoSize(),
                                     oat_file_->BssSize(),
                                     oat_file_->BssMethodsOffset(),
                                     oat_file_->BssRootsOffset(),
@@ -2179,7 +2180,7 @@
 
     // Intern table is 8-byte aligned.
     uint32_t end_caches = dex_cache_arrays_section.Offset() + dex_cache_arrays_section.Size();
-    CHECK_ALIGNED(intern_section.Offset(), sizeof(uint64_t));
+    CHECK_EQ(RoundUp(end_caches, 8U), intern_section.Offset());
     stats_.alignment_bytes += intern_section.Offset() - end_caches;
 
     // Add space between intern table and class table.
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 8b40a7e..07b1529 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -1004,6 +1004,27 @@
   return false;
 }
 
+void EventHandler::SetupFramePopTraceListener(bool enable) {
+  if (enable) {
+    frame_pop_enabled = true;
+    SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
+  } else {
+    // Remove the listener if we have no outstanding frame-pop requests.
+    {
+      art::ReaderMutexLock mu(art::Thread::Current(), envs_lock_);
+      for (ArtJvmTiEnv* env : envs) {
+        art::ReaderMutexLock event_mu(art::Thread::Current(), env->event_info_mutex_);
+        if (!env->notify_frames.empty()) {
+          // Leaving FramePop listener since there are unsent FramePop events.
+          return;
+        }
+      }
+      frame_pop_enabled = false;
+    }
+    SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
+  }
+}
+
 // Handle special work for the given event type, if necessary.
 void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
   switch (event) {
@@ -1018,14 +1039,14 @@
     case ArtJvmtiEvent::kGarbageCollectionFinish:
       SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
       return;
-    // FramePop can never be disabled once it's been turned on since we would either need to deal
-    // with dangling pointers or have missed events.
-    // TODO We really need to make this not the case anymore.
+    // FramePop cannot be fully disabled while there are outstanding pop-events, since we would
+    // either need to deal with dangling pointers or miss events.
     case ArtJvmtiEvent::kFramePop:
-      if (!enable || (enable && frame_pop_enabled)) {
+      if (enable && frame_pop_enabled) {
+        // The listener was kept alive by pending pop-events; nothing more to do.
         break;
       } else {
-        SetupTraceListener(method_trace_listener_.get(), event, enable);
+        SetupFramePopTraceListener(enable);
         break;
       }
     case ArtJvmtiEvent::kMethodEntry:
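
The helper's control flow reduces to a small rule; a simplified sketch, where
Attach(), Detach() and AnyEnvHasPendingNotifyFrames() are placeholders for the
listener plumbing above:

    bool frame_pop_enabled = false;
    void Attach();                        // placeholder: install trace listener
    void Detach();                        // placeholder: remove trace listener
    bool AnyEnvHasPendingNotifyFrames();  // placeholder: scan env->notify_frames

    void SetFramePop(bool enable) {
      if (enable) {
        frame_pop_enabled = true;
        Attach();
        return;
      }
      if (AnyEnvHasPendingNotifyFrames()) {
        return;  // Keep the listener; pending FramePop events still need it.
      }
      frame_pop_enabled = false;
      Detach();
    }
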
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index 8141eff..bf12cb1 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -247,6 +247,9 @@
  private:
   void SetupTraceListener(JvmtiMethodTraceListener* listener, ArtJvmtiEvent event, bool enable);
 
+  // Specifically handle the FramePop event, which might not always be possible to turn off.
+  void SetupFramePopTraceListener(bool enable);
+
   template <ArtJvmtiEvent kEvent, typename ...Args>
   ALWAYS_INLINE
   inline std::vector<impl::EventHandlerFunc<kEvent>> CollectEvents(art::Thread* thread,
diff --git a/runtime/Android.bp b/runtime/Android.bp
index daab232..590a399 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -351,7 +351,6 @@
                 // ZipArchive support, the order matters here to get all symbols.
                 "libziparchive",
                 "libz",
-                "libbase",
             ],
         },
         android_arm: {
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 737d2a8..aa77187 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -601,7 +601,10 @@
      */
 ENTRY art_quick_osr_stub
     SPILL_ALL_CALLEE_SAVE_GPRS             @ Spill regs (9)
+    SAVE_SIZE=9*4
     mov    r11, sp                         @ Save the stack pointer
+    .cfi_def_cfa r11, SAVE_SIZE            @ CFA = r11 + SAVE_SIZE
+    .cfi_remember_state
     mov    r10, r1                         @ Save size of stack
     ldr    r9, [r11, #40]                  @ Move managed thread pointer into r9
     REFRESH_MARKING_REGISTER
@@ -614,14 +617,18 @@
     str    r3, [sp, #8]                    @ Save JValue* result
     mov    ip, #0
     str    ip, [sp]                        @ Store null for ArtMethod* at bottom of frame
-    sub    sp, sp, r1                      @ Reserve space for callee stack
-    mov    r2, r1
-    mov    r1, r0
-    mov    r0, sp
-    bl     memcpy                          @ memcpy (dest r0, src r1, bytes r2)
+    // r11 isn't properly spilled in the osr method, so we need to use a DWARF expression.
+    // NB: the CFI must come before the call since this is the address gdb will look up.
+    // NB: gdb expects that cfa_expression returns the CFA value (not the address of it).
+    .cfi_escape                            /* CFA = [sp + 4] + SAVE_SIZE */ \
+      0x0f, 6,                             /* DW_CFA_def_cfa_expression(len) */ \
+      0x92, 13, 4,                         /* DW_OP_bregx(reg,offset) */ \
+      0x06,                                /* DW_OP_deref */ \
+      0x23, SAVE_SIZE                      /* DW_OP_plus_uconst(val) */
     bl     .Losr_entry                     @ Call the method
     ldr    r10, [sp, #8]                   @ Restore JValue* result
     ldr    sp, [sp, #4]                    @ Restore saved stack pointer
+    .cfi_def_cfa sp, SAVE_SIZE             @ CFA = sp + SAVE_SIZE
     ldr    r4, [sp, #36]                   @ load shorty
     ldrb   r4, [r4, #0]                    @ load return type
     cmp    r4, #68                         @ Test if result type char == 'D'.
@@ -635,8 +642,15 @@
 .Losr_exit:
     pop    {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 .Losr_entry:
+    .cfi_restore_state
+    .cfi_def_cfa r11, SAVE_SIZE            @ CFA = r11 + SAVE_SIZE
+    sub sp, sp, r10                        @ Reserve space for callee stack
     sub r10, r10, #4
-    str lr, [sp, r10]                     @ Store link register per the compiler ABI
+    str lr, [sp, r10]                      @ Store link register per the compiler ABI
+    mov r2, r10
+    mov r1, r0
+    mov r0, sp
+    bl  memcpy                             @ memcpy (dest r0, src r1, bytes r2)
     bx r6
 END art_quick_osr_stub
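
Decoding the hand-assembled escape above byte by byte (standard DWARF opcodes;
register 13 is sp in ARM's DWARF numbering):

    0x0f, 6           DW_CFA_def_cfa_expression, expression block of 6 bytes
    0x92, 13, 4       DW_OP_bregx: value of register 13 (sp) plus 4
    0x06              DW_OP_deref: load the saved stack pointer from [sp + 4]
    0x23, SAVE_SIZE   DW_OP_plus_uconst: add SAVE_SIZE, yielding the CFA

The x86 variant later in this patch is the same expression with register 4 (esp)
and offset 12.
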
 
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 09fc2c2..375b050 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -613,56 +613,18 @@
 
 
 .macro INVOKE_STUB_CREATE_FRAME
+SAVE_SIZE=6*8   // x4, x5, x19, x20, FP, LR saved.
+    SAVE_TWO_REGS_INCREASE_FRAME x4, x5, SAVE_SIZE
+    SAVE_TWO_REGS x19, x20, 16
+    SAVE_TWO_REGS xFP, xLR, 32
 
-SAVE_SIZE=15*8   // x4, x5, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, SP, LR, FP saved.
-SAVE_SIZE_AND_METHOD=SAVE_SIZE+8
+    mov xFP, sp                            // Use xFP for frame pointer, as it's callee-saved.
+    .cfi_def_cfa_register xFP
 
+    add x10, x2, #(__SIZEOF_POINTER__ + 0xf) // Reserve space for ArtMethod*, arguments and
+    and x10, x10, # ~0xf                   // round up for 16-byte stack alignment.
+    sub sp, sp, x10                        // Adjust SP for ArtMethod*, args and alignment padding.
 
-    mov x9, sp                             // Save stack pointer.
-    .cfi_register sp,x9
-
-    add x10, x2, # SAVE_SIZE_AND_METHOD    // calculate size of frame.
-    sub x10, sp, x10                       // Calculate SP position - saves + ArtMethod* + args
-    and x10, x10, # ~0xf                   // Enforce 16 byte stack alignment.
-    mov sp, x10                            // Set new SP.
-
-    sub x10, x9, #SAVE_SIZE                // Calculate new FP (later). Done here as we must move SP
-    .cfi_def_cfa_register x10              // before this.
-    .cfi_adjust_cfa_offset SAVE_SIZE
-
-    str x28, [x10, #112]
-    .cfi_rel_offset x28, 112
-
-    stp x26, x27, [x10, #96]
-    .cfi_rel_offset x26, 96
-    .cfi_rel_offset x27, 104
-
-    stp x24, x25, [x10, #80]
-    .cfi_rel_offset x24, 80
-    .cfi_rel_offset x25, 88
-
-    stp x22, x23, [x10, #64]
-    .cfi_rel_offset x22, 64
-    .cfi_rel_offset x23, 72
-
-    stp x20, x21, [x10, #48]
-    .cfi_rel_offset x20, 48
-    .cfi_rel_offset x21, 56
-
-    stp x9, x19, [x10, #32]                // Save old stack pointer and x19.
-    .cfi_rel_offset sp, 32
-    .cfi_rel_offset x19, 40
-
-    stp x4, x5, [x10, #16]                 // Save result and shorty addresses.
-    .cfi_rel_offset x4, 16
-    .cfi_rel_offset x5, 24
-
-    stp xFP, xLR, [x10]                    // Store LR & FP.
-    .cfi_rel_offset x29, 0
-    .cfi_rel_offset x30, 8
-
-    mov xFP, x10                           // Use xFP now, as it's callee-saved.
-    .cfi_def_cfa_register x29
     mov xSELF, x3                          // Move thread pointer into SELF register.
 
     // Copy arguments into stack frame.
@@ -677,12 +639,10 @@
     // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
     // does not have unique-id variables.
 1:
-    cmp w2, #0
-    beq 2f
+    cbz w2, 2f
     sub w2, w2, #4      // Need 65536 bytes of range.
     ldr w10, [x1, x2]
     str w10, [x9, x2]
-
     b 1b
 
 2:
@@ -699,29 +659,14 @@
     // Branch to method.
     blr x9
 
-    // Restore return value address and shorty address.
-    ldp x4, x5, [xFP, #16]
-    .cfi_restore x4
-    .cfi_restore x5
+    // Pop the ArtMethod* (null), arguments and alignment padding from the stack.
+    mov sp, xFP
+    .cfi_def_cfa_register sp
 
-    ldr x28, [xFP, #112]
-    .cfi_restore x28
-
-    ldp x26, x27, [xFP, #96]
-    .cfi_restore x26
-    .cfi_restore x27
-
-    ldp x24, x25, [xFP, #80]
-    .cfi_restore x24
-    .cfi_restore x25
-
-    ldp x22, x23, [xFP, #64]
-    .cfi_restore x22
-    .cfi_restore x23
-
-    ldp x20, x21, [xFP, #48]
-    .cfi_restore x20
-    .cfi_restore x21
+    // Restore saved registers, including the result and shorty addresses.
+    RESTORE_TWO_REGS x19, x20, 16
+    RESTORE_TWO_REGS xFP, xLR, 32
+    RESTORE_TWO_REGS_DECREASE_FRAME x4, x5, SAVE_SIZE
 
     // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
     ldrb w10, [x5]
@@ -731,33 +676,28 @@
 
     // Don't set anything for a void type.
     cmp w10, #'V'
-    beq 3f
+    beq 1f
 
     // Is it a double?
     cmp w10, #'D'
-    bne 1f
-    str d0, [x4]
-    b 3f
+    beq 2f
 
-1:  // Is it a float?
+    // Is it a float?
     cmp w10, #'F'
-    bne 2f
-    str s0, [x4]
-    b 3f
+    beq 3f
 
-2:  // Just store x0. Doesn't matter if it is 64 or 32 bits.
+    // Just store x0. Doesn't matter if it is 64 or 32 bits.
     str x0, [x4]
 
-3:  // Finish up.
-    ldp x2, x19, [xFP, #32]   // Restore stack pointer and x19.
-    .cfi_restore x19
-    mov sp, x2
-    .cfi_restore sp
+1:  // Finish up.
+    ret
 
-    ldp xFP, xLR, [xFP]    // Restore old frame pointer and link register.
-    .cfi_restore x29
-    .cfi_restore x30
+2:  // Store double.
+    str d0, [x4]
+    ret
 
+3:  // Store float.
+    str s0, [x4]
     ret
 
 .endm
@@ -1056,7 +996,7 @@
 
 /*  extern"C" void art_quick_osr_stub(void** stack,                x0
  *                                    size_t stack_size_in_bytes,  x1
- *                                    const uin8_t* native_pc,     x2
+ *                                    const uint8_t* native_pc,    x2
  *                                    JValue *result,              x3
  *                                    char   *shorty,              x4
  *                                    Thread *self)                x5
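
For reference, the compact frame the rewritten INVOKE_STUB_CREATE_FRAME builds,
as read from the SAVE_TWO_REGS* macros above:

    [sp +  0]  x4, x5     result JValue* and shorty pointer
    [sp + 16]  x19, x20   callee-saved registers used by the stub
    [sp + 32]  xFP, xLR   caller frame pointer and return address
               total SAVE_SIZE = 6 * 8 = 48 bytes, 16-byte aligned
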
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 14b01c5..c9514f5 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -79,6 +79,7 @@
     #define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
     #define CFI_RESTORE_STATE .cfi_restore_state
     #define CFI_REMEMBER_STATE .cfi_remember_state
+    #define CFI_ESCAPE(...) .cfi_escape __VA_ARGS__
 #else
     // Mac OS doesn't like cfi_* directives.
     #define CFI_STARTPROC
@@ -90,6 +91,7 @@
     #define CFI_REL_OFFSET(reg,size)
     #define CFI_RESTORE_STATE
     #define CFI_REMEMBER_STATE
+    #define CFI_ESCAPE(...)
 #endif
 
     // Symbols. On a Mac, we need a leading underscore.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 5a28120..9251161 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -2369,20 +2369,28 @@
     PUSH ebx
     PUSH esi
     PUSH edi
+    SAVE_SIZE=20                   // 4 registers and the return address
     mov 4+16(%esp), %esi           // ESI = argument array
     mov 8+16(%esp), %ecx           // ECX = size of args
     mov 12+16(%esp), %ebx          // EBX = pc to call
     mov %esp, %ebp                 // Save stack pointer
+    CFI_DEF_CFA(ebp, SAVE_SIZE)    // CFA = ebp + SAVE_SIZE
+    CFI_REMEMBER_STATE
     andl LITERAL(0xFFFFFFF0), %esp // Align stack
-    PUSH ebp                       // Save old stack pointer
+    pushl %ebp                     // Save old stack pointer
     subl LITERAL(12), %esp         // Align stack
     movl LITERAL(0), (%esp)        // Store null for ArtMethod* slot
+    // ebp isn't properly spilled in the osr method, so we need to use a DWARF expression.
+    // NB: the CFI must come before the call since this is the address gdb will look up.
+    // NB: gdb expects that cfa_expression returns the CFA value (not the address of it).
+    CFI_ESCAPE(                    /* CFA = [sp + 12] + SAVE_SIZE */ \
+      0x0f, 6,                     /* DW_CFA_def_cfa_expression(len) */ \
+      0x92, 4, 12,                 /* DW_OP_bregx(reg,offset) */ \
+      0x06,                        /* DW_OP_deref */ \
+      0x23, SAVE_SIZE)             /* DW_OP_plus_uconst(val) */
     call .Losr_entry
-
-    // Restore stack pointer.
-    addl LITERAL(12), %esp
-    POP ebp
-    mov %ebp, %esp
+    mov 12(%esp), %esp             // Restore stack pointer.
+    CFI_DEF_CFA(esp, SAVE_SIZE)    // CFA = esp + SAVE_SIZE
 
     // Restore callee saves.
     POP edi
@@ -2405,6 +2413,8 @@
     movss %xmm0, (%ecx)           // Store the floating point result
     ret
 .Losr_entry:
+    CFI_RESTORE_STATE
+    CFI_DEF_CFA(ebp, SAVE_SIZE)   // CFA = ebp + SAVE_SIZE
     subl LITERAL(4), %ecx         // Given stack size contains pushed frame pointer, subtract it.
     subl %ecx, %esp
     mov %esp, %edi                // EDI = beginning of stack
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 5d9b729..579e554 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -176,8 +176,9 @@
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
+  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   bool IsIntrinsic() {
-    return (GetAccessFlags() & kAccIntrinsic) != 0;
+    return (GetAccessFlags<kReadBarrierOption>() & kAccIntrinsic) != 0;
   }
 
   ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -315,12 +316,13 @@
     return (GetAccessFlags() & kAccPreviouslyWarm) != 0;
   }
 
+  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   void SetPreviouslyWarm() {
-    if (IsIntrinsic()) {
+    if (IsIntrinsic<kReadBarrierOption>()) {
       // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
       return;
     }
-    AddAccessFlags(kAccPreviouslyWarm);
+    AddAccessFlags<kReadBarrierOption>(kAccPreviouslyWarm);
   }
 
   // Should this method be run in the interpreter and count locks (e.g., failed structured-
@@ -839,8 +841,11 @@
   }
 
   // This setter guarantees atomicity.
+  template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   void AddAccessFlags(uint32_t flag) {
-    DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
+    DCHECK(!IsIntrinsic<kReadBarrierOption>() ||
+           !OverlapsIntrinsicBits(flag) ||
+           IsValidIntrinsicUpdate(flag));
     uint32_t old_access_flags;
     uint32_t new_access_flags;
     do {
diff --git a/runtime/base/file_utils.cc b/runtime/base/file_utils.cc
index f9d0d12..1cb3b9c 100644
--- a/runtime/base/file_utils.cc
+++ b/runtime/base/file_utils.cc
@@ -306,8 +306,8 @@
 }
 
 std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension) {
-  const size_t last_ext = filename.find_last_of('.');
-  if (last_ext == std::string::npos) {
+  const size_t last_ext = filename.find_last_of("./");
+  if (last_ext == std::string::npos || filename[last_ext] != '.') {
     return filename + "." + new_extension;
   } else {
     return filename.substr(0, last_ext + 1) + new_extension;
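
Reviewer note: a standalone sketch of the fixed logic, for reference. find_last_of("./") stops at whichever of the last '.' or the last '/' comes later, so a dot inside a directory component no longer masquerades as an extension separator. The test hunk below exercises exactly these corner cases.

```cpp
#include <string>

// Mirrors the fixed ReplaceFileExtension(): only a '.' that appears after
// the final '/' counts as an extension separator.
std::string ReplaceFileExtensionSketch(const std::string& filename,
                                       const std::string& new_extension) {
  const size_t last_ext = filename.find_last_of("./");
  if (last_ext == std::string::npos || filename[last_ext] != '.') {
    return filename + "." + new_extension;  // No extension: append one.
  }
  return filename.substr(0, last_ext + 1) + new_extension;
}
```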
diff --git a/runtime/base/file_utils_test.cc b/runtime/base/file_utils_test.cc
index cf6e34d..e74dfe5 100644
--- a/runtime/base/file_utils_test.cc
+++ b/runtime/base/file_utils_test.cc
@@ -94,4 +94,11 @@
   ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), 1 /* overwrite */));
 }
 
+TEST_F(FileUtilsTest, ReplaceFileExtension) {
+  EXPECT_EQ("/directory/file.vdex", ReplaceFileExtension("/directory/file.oat", "vdex"));
+  EXPECT_EQ("/.directory/file.vdex", ReplaceFileExtension("/.directory/file.oat", "vdex"));
+  EXPECT_EQ("/directory/file.vdex", ReplaceFileExtension("/directory/file", "vdex"));
+  EXPECT_EQ("/.directory/file.vdex", ReplaceFileExtension("/.directory/file", "vdex"));
+}
+
 }  // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1d72875..bf0d3ad 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1807,21 +1807,6 @@
     header.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
   }
 
-  if (!app_image) {
-    // Make the string intern table and class table immutable for boot image.
-    // PIC app oat files may mmap a read-only copy into their own .bss section,
-    // so enforce that the data in the boot image tables remains unchanged.
-    //
-    // We cannot do that for app image even after the fixup as some interned
-    // String references may actually end up pointing to moveable Strings.
-    uint8_t* const_section_begin = space->Begin() + header.GetBootImageConstantTablesOffset();
-    CheckedCall(mprotect,
-                "protect constant tables",
-                const_section_begin,
-                header.GetBootImageConstantTablesSize(),
-                PROT_READ);
-  }
-
   ClassTable* class_table = nullptr;
   {
     WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -3361,9 +3346,10 @@
   CHECK_EQ(dex_cache_location, dex_file_suffix);
   const OatFile* oat_file =
       (dex_file.GetOatDexFile() != nullptr) ? dex_file.GetOatDexFile()->GetOatFile() : nullptr;
-  // Clean up pass to remove null dex caches. Also check if we need to initialize OatFile .bss.
-  // Null dex caches can occur due to class unloading and we are lazily removing null entries.
-  bool initialize_oat_file_bss = (oat_file != nullptr);
+  // Clean up pass to remove null dex caches; null dex caches can occur due to class unloading
+  // and we are lazily removing null entries. Also check if we need to initialize OatFile data
+  // (.data.bimg.rel.ro and .bss sections) needed for code execution.
+  bool initialize_oat_file_data = (oat_file != nullptr) && oat_file->IsExecutable();
   JavaVMExt* const vm = self->GetJniEnv()->GetVm();
   for (auto it = dex_caches_.begin(); it != dex_caches_.end(); ) {
     DexCacheData data = *it;
@@ -3371,15 +3357,36 @@
       vm->DeleteWeakGlobalRef(self, data.weak_root);
       it = dex_caches_.erase(it);
     } else {
-      if (initialize_oat_file_bss &&
+      if (initialize_oat_file_data &&
           it->dex_file->GetOatDexFile() != nullptr &&
           it->dex_file->GetOatDexFile()->GetOatFile() == oat_file) {
-        initialize_oat_file_bss = false;  // Already initialized.
+        initialize_oat_file_data = false;  // Already initialized.
       }
       ++it;
     }
   }
-  if (initialize_oat_file_bss) {
+  if (initialize_oat_file_data) {
+    // Initialize the .data.bimg.rel.ro section.
+    if (!oat_file->GetBootImageRelocations().empty()) {
+      uint8_t* reloc_begin = const_cast<uint8_t*>(oat_file->DataBimgRelRoBegin());
+      CheckedCall(mprotect,
+                  "un-protect boot image relocations",
+                  reloc_begin,
+                  oat_file->DataBimgRelRoSize(),
+                  PROT_READ | PROT_WRITE);
+      uint32_t boot_image_begin = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(
+          Runtime::Current()->GetHeap()->GetBootImageSpaces().front()->Begin()));
+      for (const uint32_t& relocation : oat_file->GetBootImageRelocations()) {
+        const_cast<uint32_t&>(relocation) += boot_image_begin;
+      }
+      CheckedCall(mprotect,
+                  "protect boot image relocations",
+                  reloc_begin,
+                  oat_file->DataBimgRelRoSize(),
+                  PROT_READ);
+    }
+
+    // Initialize the .bss section.
     // TODO: Pre-initialize from boot/app image?
     ArtMethod* resolution_method = Runtime::Current()->GetResolutionMethod();
     for (ArtMethod*& entry : oat_file->GetBssMethods()) {
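
Reviewer note: the new .data.bimg.rel.ro initialization is a classic RELRO-style fixup: briefly make the section writable, add the runtime boot image base to each 32-bit boot-image-relative entry, then re-protect the pages read-only. A minimal sketch of the pattern, with plain pointers standing in for the OatFile accessors and the CheckedCall error handling omitted:

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

// Assumes section_begin is page-aligned (the section is laid out with
// kPageSize alignment) and holds packed uint32_t entries.
void RelocateBootImageReferences(uint8_t* section_begin,
                                 size_t section_size,
                                 uint32_t boot_image_begin) {
  mprotect(section_begin, section_size, PROT_READ | PROT_WRITE);
  uint32_t* const begin = reinterpret_cast<uint32_t*>(section_begin);
  uint32_t* const end = begin + section_size / sizeof(uint32_t);
  for (uint32_t* entry = begin; entry != end; ++entry) {
    *entry += boot_image_begin;  // Boot-image-relative offset -> address.
  }
  mprotect(section_begin, section_size, PROT_READ);  // Read-only again.
}
```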
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 712e3ae..8d6b3d2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1316,7 +1316,6 @@
   friend class ImageDumper;  // for DexLock
   friend struct linker::CompilationHelper;  // For Compile in ImageTest.
   friend class linker::ImageWriter;  // for GetClassRoots
-  friend class linker::OatWriter;  // for boot image string/class table slot address lookup.
   friend class JniCompilerTest;  // for GetRuntimeQuickGenericJniStub
   friend class JniInternalTest;  // for GetRuntimeQuickGenericJniStub
   friend class VMClassLoader;  // for LookupClass and FindClassInBaseDexClassLoader.
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 48129b1..52e9f82 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -295,7 +295,6 @@
   std::vector<const OatFile*> oat_files_ GUARDED_BY(lock_);
 
   friend class linker::ImageWriter;  // for InsertWithoutLocks.
-  friend class linker::OatWriter;  // for boot class TableSlot address lookup.
 };
 
 }  // namespace art
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 8f65c66..7484dd9 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -884,8 +884,8 @@
 
 // WrongMethodTypeException
 
-void ThrowWrongMethodTypeException(mirror::MethodType* expected_type,
-                                   mirror::MethodType* actual_type) {
+void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> expected_type,
+                                   ObjPtr<mirror::MethodType> actual_type) {
   ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
                  nullptr,
                  StringPrintf("Expected %s but was %s",
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index e9baa4f..29a056e 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -270,8 +270,8 @@
 
 // WrongMethodTypeException
 
-void ThrowWrongMethodTypeException(mirror::MethodType* callee_type,
-                                   mirror::MethodType* callsite_type)
+void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> callee_type,
+                                   ObjPtr<mirror::MethodType> callsite_type)
     REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
 
 }  // namespace art
diff --git a/runtime/image.cc b/runtime/image.cc
index 0955c3a..56fee9d 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '5', '\0' };  // Bitstring type check off.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '6', '\0' };  // No image tables in .bss.
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index 159a308..fe544cc 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -19,7 +19,6 @@
 
 #include <string.h>
 
-#include "base/bit_utils.h"
 #include "base/enums.h"
 #include "globals.h"
 #include "mirror/object.h"
@@ -327,22 +326,6 @@
     return boot_image_size_ != 0u;
   }
 
-  uint32_t GetBootImageConstantTablesOffset() const {
-    // Interned strings table and class table for boot image are mmapped read only.
-    DCHECK(!IsAppImage());
-    const ImageSection& interned_strings = GetInternedStringsSection();
-    DCHECK_ALIGNED(interned_strings.Offset(), kPageSize);
-    return interned_strings.Offset();
-  }
-
-  uint32_t GetBootImageConstantTablesSize() const {
-    uint32_t start_offset = GetBootImageConstantTablesOffset();
-    const ImageSection& class_table = GetClassTableSection();
-    DCHECK_LE(start_offset, class_table.Offset());
-    size_t tables_size = class_table.Offset() + class_table.Size() - start_offset;
-    return RoundUp(tables_size, kPageSize);
-  }
-
   // Visit mirror::Objects in the section starting at base.
   // TODO: Delete base parameter if it is always equal to GetImageBegin.
   void VisitObjects(ObjectVisitor* visitor,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 7ddd173..84a148f 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -209,9 +209,7 @@
         : StackVisitor(thread_in, context, kInstrumentationStackWalk),
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
           instrumentation_exit_pc_(instrumentation_exit_pc),
-          reached_existing_instrumentation_frames_(false),
-          should_be_at_top_(false),
-          instrumentation_stack_depth_(0),
+          reached_existing_instrumentation_frames_(false), instrumentation_stack_depth_(0),
           last_return_pc_(0) {
     }
 
@@ -235,20 +233,6 @@
         return true;  // Continue.
       }
       uintptr_t return_pc = GetReturnPc();
-      if (UNLIKELY(should_be_at_top_)) {
-        std::string thread_name;
-        GetThread()->GetThreadName(thread_name);
-        uint32_t dex_pc = dex::kDexNoIndex;
-        if (last_return_pc_ != 0 &&
-            GetCurrentOatQuickMethodHeader() != nullptr) {
-          dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
-        }
-        LOG(FATAL) << "While walking " << thread_name << " Reached unexpected frame above what "
-                   << "should have been top. Method is " << GetMethod()->PrettyMethod()
-                   << " return_pc: " << std::hex << return_pc
-                   << " dex pc: " << dex_pc;
-        UNREACHABLE();
-      }
       if (kVerboseInstrumentation) {
         LOG(INFO) << "  Installing exit stub in " << DescribeLocation();
       }
@@ -272,7 +256,7 @@
         }
 
         // We've reached a frame which has already been installed with instrumentation exit stub.
-        // We should have already installed instrumentation on previous frames.
+        // We should have already installed instrumentation on previous frames, or they should be
+        // interpreter frames.
         reached_existing_instrumentation_frames_ = true;
 
         const InstrumentationStackFrame& frame =
@@ -283,21 +267,25 @@
         if (kVerboseInstrumentation) {
           LOG(INFO) << "Ignoring already instrumented " << frame.Dump();
         }
-      } else if (UNLIKELY(reached_existing_instrumentation_frames_)) {
-        // If tracing was enabled we might have had all methods have the instrumentation frame
-        // except the runtime transition method at the very top of the stack. This isn't really a
-        // problem since the transition method just goes back into the runtime and never leaves it
-        // so it can be ignored.
-        should_be_at_top_ = true;
-        DCHECK(m->IsRuntimeMethod()) << "Expected method to be runtime method at start of thread "
-                                     << "but was " << m->PrettyMethod();
-        if (kVerboseInstrumentation) {
-          LOG(INFO) << "reached expected top frame " << m->PrettyMethod();
-        }
-        // Don't bother continuing on the upcalls on non-debug builds.
-        return kIsDebugBuild ? true : false;
       } else {
         CHECK_NE(return_pc, 0U);
+        if (UNLIKELY(reached_existing_instrumentation_frames_ && !m->IsRuntimeMethod())) {
+          // We already saw an existing instrumentation frame, so this should be a runtime method
+          // inserted by the interpreter or the runtime.
+          std::string thread_name;
+          GetThread()->GetThreadName(thread_name);
+          uint32_t dex_pc = dex::kDexNoIndex;
+          if (last_return_pc_ != 0 &&
+              GetCurrentOatQuickMethodHeader() != nullptr) {
+            dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+          }
+          LOG(FATAL) << "While walking " << thread_name << " found unexpected non-runtime method"
+                     << " without instrumentation exit return or interpreter frame."
+                     << " method is " << GetMethod()->PrettyMethod()
+                     << " return_pc is " << std::hex << return_pc
+                     << " dex pc: " << dex_pc;
+          UNREACHABLE();
+        }
         InstrumentationStackFrame instrumentation_frame(
             m->IsRuntimeMethod() ? nullptr : GetThisObject(),
             m,
@@ -335,7 +323,6 @@
     std::vector<uint32_t> dex_pcs_;
     const uintptr_t instrumentation_exit_pc_;
     bool reached_existing_instrumentation_frames_;
-    bool should_be_at_top_;
     size_t instrumentation_stack_depth_;
     uintptr_t last_return_pc_;
   };
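
Reviewer note: the restructured walk enforces a simple invariant: once one already-instrumented frame has been seen, any later-visited frame that is not itself instrumented must be a runtime method (inserted by the interpreter or the runtime). A hedged sketch of that invariant, with an illustrative `Frame` type rather than ART's stack visitor:

```cpp
#include <vector>

// Illustrative only: `Frame` stands in for what the visitor sees at each
// step, in visitation order.
struct Frame {
  bool already_instrumented;  // Return PC is the instrumentation exit stub.
  bool is_runtime_method;
};

bool WalkIsConsistent(const std::vector<Frame>& frames_in_visit_order) {
  bool reached_existing = false;
  for (const Frame& frame : frames_in_visit_order) {
    if (frame.already_instrumented) {
      reached_existing = true;
    } else if (reached_existing && !frame.is_runtime_method) {
      return false;  // The real visitor LOG(FATAL)s here.
    }
  }
  return true;
}
```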
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index cb97691..c9127d6 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -227,7 +227,6 @@
     // modifying the zygote intern table. The back of table is modified when strings are interned.
     std::vector<UnorderedSet> tables_;
 
-    friend class linker::OatWriter;  // for boot image string table slot address lookup.
     ART_FRIEND_TEST(InternTableTest, CrossHash);
   };
 
@@ -287,7 +286,6 @@
   // Weak root state, used for concurrent system weak processing and more.
   gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
 
-  friend class linker::OatWriter;  // for boot image string table slot address lookup.
   friend class Transaction;
   ART_FRIEND_TEST(InternTableTest, CrossHash);
   DISALLOW_COPY_AND_ASSIGN(InternTable);
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 380a981..8a85ee4 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -900,171 +900,489 @@
   }
 }
 
+static JValue ConvertScalarBootstrapArgument(jvalue value) {
+  // value either contains a primitive scalar value if it corresponds
+  // to a primitive type, or it contains an integer value if it
+  // corresponds to an object instance reference id (e.g. a string id).
+  return JValue::FromPrimitive(value.j);
+}
+
+static ObjPtr<mirror::Class> GetClassForBootstrapArgument(EncodedArrayValueIterator::ValueType type)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  switch (type) {
+    case EncodedArrayValueIterator::ValueType::kBoolean:
+    case EncodedArrayValueIterator::ValueType::kByte:
+    case EncodedArrayValueIterator::ValueType::kChar:
+    case EncodedArrayValueIterator::ValueType::kShort:
+      // These types are disallowed by the JVMS. Treat them as integers. This
+      // will result in CCEs being raised if the BSM has one of these
+      // types.
+    case EncodedArrayValueIterator::ValueType::kInt:
+      return class_linker->FindPrimitiveClass('I');
+    case EncodedArrayValueIterator::ValueType::kLong:
+      return class_linker->FindPrimitiveClass('J');
+    case EncodedArrayValueIterator::ValueType::kFloat:
+      return class_linker->FindPrimitiveClass('F');
+    case EncodedArrayValueIterator::ValueType::kDouble:
+      return class_linker->FindPrimitiveClass('D');
+    case EncodedArrayValueIterator::ValueType::kMethodType:
+      return mirror::MethodType::StaticClass();
+    case EncodedArrayValueIterator::ValueType::kMethodHandle:
+      return mirror::MethodHandle::StaticClass();
+    case EncodedArrayValueIterator::ValueType::kString:
+      return mirror::String::GetJavaLangString();
+    case EncodedArrayValueIterator::ValueType::kType:
+      return mirror::Class::GetJavaLangClass();
+    case EncodedArrayValueIterator::ValueType::kField:
+    case EncodedArrayValueIterator::ValueType::kMethod:
+    case EncodedArrayValueIterator::ValueType::kEnum:
+    case EncodedArrayValueIterator::ValueType::kArray:
+    case EncodedArrayValueIterator::ValueType::kAnnotation:
+    case EncodedArrayValueIterator::ValueType::kNull:
+      return nullptr;
+  }
+}
+
+static bool GetArgumentForBootstrapMethod(Thread* self,
+                                          ArtMethod* referrer,
+                                          EncodedArrayValueIterator::ValueType type,
+                                          const JValue* encoded_value,
+                                          JValue* decoded_value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // The encoded_value contains either a scalar value (IJDF) or a
+  // scalar DEX file index to a reference type to be materialized.
+  switch (type) {
+    case EncodedArrayValueIterator::ValueType::kInt:
+    case EncodedArrayValueIterator::ValueType::kFloat:
+      decoded_value->SetI(encoded_value->GetI());
+      return true;
+    case EncodedArrayValueIterator::ValueType::kLong:
+    case EncodedArrayValueIterator::ValueType::kDouble:
+      decoded_value->SetJ(encoded_value->GetJ());
+      return true;
+    case EncodedArrayValueIterator::ValueType::kMethodType: {
+      StackHandleScope<2> hs(self);
+      Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
+      Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+      uint32_t index = static_cast<uint32_t>(encoded_value->GetI());
+      ClassLinker* cl = Runtime::Current()->GetClassLinker();
+      ObjPtr<mirror::MethodType> o = cl->ResolveMethodType(self, index, dex_cache, class_loader);
+      if (UNLIKELY(o.IsNull())) {
+        DCHECK(self->IsExceptionPending());
+        return false;
+      }
+      decoded_value->SetL(o);
+      return true;
+    }
+    case EncodedArrayValueIterator::ValueType::kMethodHandle: {
+      uint32_t index = static_cast<uint32_t>(encoded_value->GetI());
+      ClassLinker* cl = Runtime::Current()->GetClassLinker();
+      ObjPtr<mirror::MethodHandle> o = cl->ResolveMethodHandle(self, index, referrer);
+      if (UNLIKELY(o.IsNull())) {
+        DCHECK(self->IsExceptionPending());
+        return false;
+      }
+      decoded_value->SetL(o);
+      return true;
+    }
+    case EncodedArrayValueIterator::ValueType::kString: {
+      StackHandleScope<1> hs(self);
+      Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+      dex::StringIndex index(static_cast<uint32_t>(encoded_value->GetI()));
+      ClassLinker* cl = Runtime::Current()->GetClassLinker();
+      ObjPtr<mirror::String> o = cl->ResolveString(index, dex_cache);
+      if (UNLIKELY(o.IsNull())) {
+        DCHECK(self->IsExceptionPending());
+        return false;
+      }
+      decoded_value->SetL(o);
+      return true;
+    }
+    case EncodedArrayValueIterator::ValueType::kType: {
+      StackHandleScope<2> hs(self);
+      Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
+      Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
+      dex::TypeIndex index(static_cast<uint32_t>(encoded_value->GetI()));
+      ClassLinker* cl = Runtime::Current()->GetClassLinker();
+      ObjPtr<mirror::Class> o = cl->ResolveType(index, dex_cache, class_loader);
+      if (UNLIKELY(o.IsNull())) {
+        DCHECK(self->IsExceptionPending());
+        return false;
+      }
+      decoded_value->SetL(o);
+      return true;
+    }
+    case EncodedArrayValueIterator::ValueType::kBoolean:
+    case EncodedArrayValueIterator::ValueType::kByte:
+    case EncodedArrayValueIterator::ValueType::kChar:
+    case EncodedArrayValueIterator::ValueType::kShort:
+    case EncodedArrayValueIterator::ValueType::kField:
+    case EncodedArrayValueIterator::ValueType::kMethod:
+    case EncodedArrayValueIterator::ValueType::kEnum:
+    case EncodedArrayValueIterator::ValueType::kArray:
+    case EncodedArrayValueIterator::ValueType::kAnnotation:
+    case EncodedArrayValueIterator::ValueType::kNull:
+      // Unreachable - unsupported types that have been checked when
+      // determining the effective call site type based on the bootstrap
+      // argument types.
+      UNREACHABLE();
+  }
+}
+
+static bool PackArgumentForBootstrapMethod(Thread* self,
+                                           ArtMethod* referrer,
+                                           CallSiteArrayValueIterator* it,
+                                           ShadowFrameSetter* setter)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  auto type = it->GetValueType();
+  const JValue encoded_value = ConvertScalarBootstrapArgument(it->GetJavaValue());
+  JValue decoded_value;
+  if (!GetArgumentForBootstrapMethod(self, referrer, type, &encoded_value, &decoded_value)) {
+    return false;
+  }
+  switch (it->GetValueType()) {
+    case EncodedArrayValueIterator::ValueType::kInt:
+    case EncodedArrayValueIterator::ValueType::kFloat:
+      setter->Set(static_cast<uint32_t>(decoded_value.GetI()));
+      return true;
+    case EncodedArrayValueIterator::ValueType::kLong:
+    case EncodedArrayValueIterator::ValueType::kDouble:
+      setter->SetLong(decoded_value.GetJ());
+      return true;
+    case EncodedArrayValueIterator::ValueType::kMethodType:
+    case EncodedArrayValueIterator::ValueType::kMethodHandle:
+    case EncodedArrayValueIterator::ValueType::kString:
+    case EncodedArrayValueIterator::ValueType::kType:
+      setter->SetReference(decoded_value.GetL());
+      return true;
+    case EncodedArrayValueIterator::ValueType::kBoolean:
+    case EncodedArrayValueIterator::ValueType::kByte:
+    case EncodedArrayValueIterator::ValueType::kChar:
+    case EncodedArrayValueIterator::ValueType::kShort:
+    case EncodedArrayValueIterator::ValueType::kField:
+    case EncodedArrayValueIterator::ValueType::kMethod:
+    case EncodedArrayValueIterator::ValueType::kEnum:
+    case EncodedArrayValueIterator::ValueType::kArray:
+    case EncodedArrayValueIterator::ValueType::kAnnotation:
+    case EncodedArrayValueIterator::ValueType::kNull:
+      // Unreachable - unsupported types that have been checked when
+      // determining the effective call site type based on the bootstrap
+      // argument types.
+      UNREACHABLE();
+  }
+}
+
+static bool PackCollectorArrayForBootstrapMethod(Thread* self,
+                                                 ArtMethod* referrer,
+                                                 ObjPtr<mirror::Class> array_type,
+                                                 int32_t array_length,
+                                                 CallSiteArrayValueIterator* it,
+                                                 ShadowFrameSetter* setter)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  StackHandleScope<1> hs(self);
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  JValue decoded_value;
+
+#define COLLECT_PRIMITIVE_ARRAY(Descriptor, Type)                       \
+  Handle<mirror::Type ## Array> array =                                 \
+      hs.NewHandle(mirror::Type ## Array::Alloc(self, array_length));   \
+  if (array.IsNull()) {                                                 \
+    return false;                                                       \
+  }                                                                     \
+  for (int32_t i = 0; it->HasNext(); it->Next(), ++i) {                 \
+    auto type = it->GetValueType();                                     \
+    DCHECK_EQ(type, EncodedArrayValueIterator::ValueType::k ## Type);   \
+    const JValue encoded_value =                                        \
+        ConvertScalarBootstrapArgument(it->GetJavaValue());             \
+    GetArgumentForBootstrapMethod(self,                                 \
+                                  referrer,                             \
+                                  type,                                 \
+                                  &encoded_value,                       \
+                                  &decoded_value);                      \
+    array->Set(i, decoded_value.Get ## Descriptor());                   \
+  }                                                                     \
+  setter->SetReference(array.Get());                                    \
+  return true;
+
+#define COLLECT_REFERENCE_ARRAY(T, Type)                                \
+  Handle<mirror::ObjectArray<T>> array =                                \
+      hs.NewHandle(mirror::ObjectArray<T>::Alloc(self,                  \
+                                                 array_type,            \
+                                                 array_length));        \
+  if (array.IsNull()) {                                                 \
+    return false;                                                       \
+  }                                                                     \
+  for (int32_t i = 0; it->HasNext(); it->Next(), ++i) {                 \
+    auto type = it->GetValueType();                                     \
+    DCHECK_EQ(type, EncodedArrayValueIterator::ValueType::k ## Type);   \
+    const JValue encoded_value =                                        \
+        ConvertScalarBootstrapArgument(it->GetJavaValue());             \
+    if (!GetArgumentForBootstrapMethod(self,                            \
+                                       referrer,                        \
+                                       type,                            \
+                                       &encoded_value,                  \
+                                       &decoded_value)) {               \
+      return false;                                                     \
+    }                                                                   \
+    ObjPtr<mirror::Object> o = decoded_value.GetL();                    \
+    if (Runtime::Current()->IsActiveTransaction()) {                    \
+      array->Set<true>(i, ObjPtr<T>::DownCast(o));                      \
+    } else {                                                            \
+      array->Set<false>(i, ObjPtr<T>::DownCast(o));                     \
+    }                                                                   \
+  }                                                                     \
+  setter->SetReference(array.Get());                                    \
+  return true;
+
+  if (array_type->GetComponentType() == class_linker->FindPrimitiveClass('I')) {
+    COLLECT_PRIMITIVE_ARRAY(I, Int);
+  } else if (array_type->GetComponentType() == class_linker->FindPrimitiveClass('J')) {
+    COLLECT_PRIMITIVE_ARRAY(J, Long);
+  } else if (array_type->GetComponentType() == class_linker->FindPrimitiveClass('F')) {
+    COLLECT_PRIMITIVE_ARRAY(F, Float);
+  } else if (array_type->GetComponentType() == class_linker->FindPrimitiveClass('D')) {
+    COLLECT_PRIMITIVE_ARRAY(D, Double);
+  } else if (array_type->GetComponentType() == mirror::MethodType::StaticClass()) {
+    COLLECT_REFERENCE_ARRAY(mirror::MethodType, MethodType);
+  } else if (array_type->GetComponentType() == mirror::MethodHandle::StaticClass()) {
+    COLLECT_REFERENCE_ARRAY(mirror::MethodHandle, MethodHandle);
+  } else if (array_type->GetComponentType() == mirror::String::GetJavaLangString()) {
+    COLLECT_REFERENCE_ARRAY(mirror::String, String);
+  } else if (array_type->GetComponentType() == mirror::Class::GetJavaLangClass()) {
+    COLLECT_REFERENCE_ARRAY(mirror::Class, Type);
+  } else {
+    UNREACHABLE();
+  }
+  #undef COLLECT_PRIMITIVE_ARRAY
+  #undef COLLECT_REFERENCE_ARRAY
+}
+
+static ObjPtr<mirror::MethodType> BuildCallSiteForBootstrapMethod(Thread* self,
+                                                                  const DexFile* dex_file,
+                                                                  uint32_t call_site_idx)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const DexFile::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
+  CallSiteArrayValueIterator it(*dex_file, csi);
+  DCHECK_GE(it.Size(), 1u);
+
+  StackHandleScope<2> hs(self);
+  // Create array for parameter types.
+  ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
+  mirror::Class* class_array_type =
+      Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_type);
+  Handle<mirror::ObjectArray<mirror::Class>> ptypes = hs.NewHandle(
+      mirror::ObjectArray<mirror::Class>::Alloc(self,
+                                                class_array_type,
+                                                static_cast<int>(it.Size())));
+  if (ptypes.IsNull()) {
+    DCHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+
+  // Populate the first argument with an instance of j.l.i.MethodHandles.Lookup
+  // that the runtime will construct.
+  ptypes->Set(0, mirror::MethodHandlesLookup::StaticClass());
+  it.Next();
+
+  // The remaining parameter types are derived from the types of
+  // arguments present in the DEX file.
+  int index = 1;
+  while (it.HasNext()) {
+    ObjPtr<mirror::Class> ptype = GetClassForBootstrapArgument(it.GetValueType());
+    if (ptype.IsNull()) {
+      ThrowClassCastException("Unsupported bootstrap argument type");
+      return nullptr;
+    }
+    ptypes->Set(index, ptype);
+    index++;
+    it.Next();
+  }
+  DCHECK_EQ(static_cast<size_t>(index), it.Size());
+
+  // By definition, the return type is always a j.l.i.CallSite.
+  Handle<mirror::Class> rtype = hs.NewHandle(mirror::CallSite::StaticClass());
+  return mirror::MethodType::Create(self, rtype, ptypes);
+}
+
 static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
                                                       ShadowFrame& shadow_frame,
                                                       uint32_t call_site_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  StackHandleScope<7> hs(self);
+  // There are three mandatory arguments expected from the call site
+  // value array in the DEX file: the bootstrap method handle, the
+  // method name to pass to the bootstrap method, and the method type
+  // to pass to the bootstrap method.
+  static constexpr size_t kMandatoryArgumentsCount = 3;
   ArtMethod* referrer = shadow_frame.GetMethod();
   const DexFile* dex_file = referrer->GetDexFile();
   const DexFile::CallSiteIdItem& csi = dex_file->GetCallSiteId(call_site_idx);
-
-  StackHandleScope<10> hs(self);
-  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
-
   CallSiteArrayValueIterator it(*dex_file, csi);
-  uint32_t method_handle_idx = static_cast<uint32_t>(it.GetJavaValue().i);
+  if (it.Size() < kMandatoryArgumentsCount) {
+    ThrowBootstrapMethodError("Truncated bootstrap arguments (%zu < %zu)",
+                              it.Size(), kMandatoryArgumentsCount);
+    return nullptr;
+  }
+
+  if (it.GetValueType() != EncodedArrayValueIterator::ValueType::kMethodHandle) {
+    ThrowBootstrapMethodError("First bootstrap argument is not a method handle");
+    return nullptr;
+  }
+
+  uint32_t bsm_index = static_cast<uint32_t>(it.GetJavaValue().i);
+  it.Next();
+
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  Handle<mirror::MethodHandle>
-      bootstrap(hs.NewHandle(class_linker->ResolveMethodHandle(self, method_handle_idx, referrer)));
-  if (bootstrap.IsNull()) {
+  Handle<mirror::MethodHandle> bsm =
+      hs.NewHandle(class_linker->ResolveMethodHandle(self, bsm_index, referrer));
+  if (bsm.IsNull()) {
     DCHECK(self->IsExceptionPending());
     return nullptr;
   }
-  Handle<mirror::MethodType> bootstrap_method_type = hs.NewHandle(bootstrap->GetMethodType());
-  it.Next();
 
-  DCHECK_EQ(static_cast<size_t>(bootstrap->GetMethodType()->GetPTypes()->GetLength()), it.Size());
-  const size_t num_bootstrap_vregs = bootstrap->GetMethodType()->NumberOfVRegs();
+  if (bsm->GetHandleKind() != mirror::MethodHandle::Kind::kInvokeStatic) {
+    // The JLS suggests also accepting constructors. This is currently
+    // hard as constructor invocations happen via transformers in ART
+    // today. The constructor would need to belong to a class derived from java.lang.invoke.CallSite.
+    ThrowBootstrapMethodError("Unsupported bootstrap method invocation kind");
+    return nullptr;
+  }
+
+  // Construct the local call site type information based on the 3
+  // mandatory arguments provided by the runtime and the static arguments
+  // in the DEX file. We will use these arguments to build a shadow frame.
+  MutableHandle<mirror::MethodType> call_site_type =
+      hs.NewHandle(BuildCallSiteForBootstrapMethod(self, dex_file, call_site_idx));
+  if (call_site_type.IsNull()) {
+    DCHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+
+  // Check if this BSM is targeting a variable arity method. If so,
+  // we'll need to collect the trailing arguments into an array.
+  Handle<mirror::Array> collector_arguments;
+  int32_t collector_arguments_length;
+  if (bsm->GetTargetMethod()->IsVarargs()) {
+    int number_of_bsm_parameters = bsm->GetMethodType()->GetNumberOfPTypes();
+    if (number_of_bsm_parameters == 0) {
+      ThrowBootstrapMethodError("Variable arity BSM does not have any arguments");
+      return nullptr;
+    }
+    Handle<mirror::Class> collector_array_class =
+        hs.NewHandle(bsm->GetMethodType()->GetPTypes()->Get(number_of_bsm_parameters - 1));
+    if (!collector_array_class->IsArrayClass()) {
+      ThrowBootstrapMethodError("Variable arity BSM does not have array as final argument");
+      return nullptr;
+    }
+    // The call site may include no arguments to be collected. In this
+    // case the number of arguments must be at least the number of BSM
+    // parameters less the collector array.
+    if (call_site_type->GetNumberOfPTypes() < number_of_bsm_parameters - 1) {
+      ThrowWrongMethodTypeException(bsm->GetMethodType(), call_site_type.Get());
+      return nullptr;
+    }
+    // Check all the arguments to be collected match the collector array component type.
+    for (int i = number_of_bsm_parameters - 1; i < call_site_type->GetNumberOfPTypes(); ++i) {
+      if (call_site_type->GetPTypes()->Get(i) != collector_array_class->GetComponentType()) {
+        ThrowClassCastException(collector_array_class->GetComponentType(),
+                                call_site_type->GetPTypes()->Get(i));
+        return nullptr;
+      }
+    }
+    // Update the call site method type so it now includes the collector array.
+    int32_t collector_arguments_start = number_of_bsm_parameters - 1;
+    collector_arguments_length = call_site_type->GetNumberOfPTypes() - number_of_bsm_parameters + 1;
+    call_site_type.Assign(
+        mirror::MethodType::CollectTrailingArguments(self,
+                                                     call_site_type.Get(),
+                                                     collector_array_class.Get(),
+                                                     collector_arguments_start));
+    if (call_site_type.IsNull()) {
+      DCHECK(self->IsExceptionPending());
+      return nullptr;
+    }
+  } else {
+    collector_arguments_length = 0;
+  }
+
+  if (call_site_type->GetNumberOfPTypes() != bsm->GetMethodType()->GetNumberOfPTypes()) {
+    ThrowWrongMethodTypeException(bsm->GetMethodType(), call_site_type.Get());
+    return nullptr;
+  }
+
+  // BSM invocation has a different set of exceptions than
+  // j.l.i.MethodHandle.invoke(). Scan arguments looking for CCE
+  // "opportunities". Unfortunately we cannot just leave this to the
+  // method handle invocation as this might generate a WMTE.
+  for (int32_t i = 0; i < call_site_type->GetNumberOfPTypes(); ++i) {
+    ObjPtr<mirror::Class> from = call_site_type->GetPTypes()->Get(i);
+    ObjPtr<mirror::Class> to = bsm->GetMethodType()->GetPTypes()->Get(i);
+    if (!IsParameterTypeConvertible(from, to)) {
+      ThrowClassCastException(from, to);
+      return nullptr;
+    }
+  }
+  if (!IsReturnTypeConvertible(call_site_type->GetRType(), bsm->GetMethodType()->GetRType())) {
+    ThrowClassCastException(bsm->GetMethodType()->GetRType(), call_site_type->GetRType());
+    return nullptr;
+  }
 
   // Set-up a shadow frame for invoking the bootstrap method handle.
   ShadowFrameAllocaUniquePtr bootstrap_frame =
-      CREATE_SHADOW_FRAME(num_bootstrap_vregs, nullptr, referrer, shadow_frame.GetDexPC());
+      CREATE_SHADOW_FRAME(call_site_type->NumberOfVRegs(),
+                          nullptr,
+                          referrer,
+                          shadow_frame.GetDexPC());
   ScopedStackedShadowFramePusher pusher(
       self, bootstrap_frame.get(), StackedShadowFrameType::kShadowFrameUnderConstruction);
-  size_t vreg = 0;
+  ShadowFrameSetter setter(bootstrap_frame.get(), 0u);
 
   // The first parameter is a MethodHandles lookup instance.
-  {
-    Handle<mirror::Class> lookup_class =
-        hs.NewHandle(shadow_frame.GetMethod()->GetDeclaringClass());
-    ObjPtr<mirror::MethodHandlesLookup> lookup =
-        mirror::MethodHandlesLookup::Create(self, lookup_class);
-    if (lookup.IsNull()) {
-      DCHECK(self->IsExceptionPending());
-      return nullptr;
-    }
-    bootstrap_frame->SetVRegReference(vreg++, lookup.Ptr());
-  }
-
-  // The second parameter is the name to lookup.
-  {
-    dex::StringIndex name_idx(static_cast<uint32_t>(it.GetJavaValue().i));
-    ObjPtr<mirror::String> name = class_linker->ResolveString(name_idx, dex_cache);
-    if (name.IsNull()) {
-      DCHECK(self->IsExceptionPending());
-      return nullptr;
-    }
-    bootstrap_frame->SetVRegReference(vreg++, name.Ptr());
-  }
-  it.Next();
-
-  // The third parameter is the method type associated with the name.
-  uint32_t method_type_idx = static_cast<uint32_t>(it.GetJavaValue().i);
-  Handle<mirror::MethodType> method_type(hs.NewHandle(
-      class_linker->ResolveMethodType(self, method_type_idx, dex_cache, class_loader)));
-  if (method_type.IsNull()) {
+  Handle<mirror::Class> lookup_class =
+      hs.NewHandle(shadow_frame.GetMethod()->GetDeclaringClass());
+  ObjPtr<mirror::MethodHandlesLookup> lookup =
+      mirror::MethodHandlesLookup::Create(self, lookup_class);
+  if (lookup.IsNull()) {
     DCHECK(self->IsExceptionPending());
     return nullptr;
   }
-  bootstrap_frame->SetVRegReference(vreg++, method_type.Get());
-  it.Next();
+  setter.SetReference(lookup);
 
-  // Append remaining arguments (if any).
-  while (it.HasNext()) {
-    const jvalue& jvalue = it.GetJavaValue();
-    switch (it.GetValueType()) {
-      case EncodedArrayValueIterator::ValueType::kBoolean:
-      case EncodedArrayValueIterator::ValueType::kByte:
-      case EncodedArrayValueIterator::ValueType::kChar:
-      case EncodedArrayValueIterator::ValueType::kShort:
-      case EncodedArrayValueIterator::ValueType::kInt:
-        bootstrap_frame->SetVReg(vreg, jvalue.i);
-        vreg += 1;
-        break;
-      case EncodedArrayValueIterator::ValueType::kLong:
-        bootstrap_frame->SetVRegLong(vreg, jvalue.j);
-        vreg += 2;
-        break;
-      case EncodedArrayValueIterator::ValueType::kFloat:
-        bootstrap_frame->SetVRegFloat(vreg, jvalue.f);
-        vreg += 1;
-        break;
-      case EncodedArrayValueIterator::ValueType::kDouble:
-        bootstrap_frame->SetVRegDouble(vreg, jvalue.d);
-        vreg += 2;
-        break;
-      case EncodedArrayValueIterator::ValueType::kMethodType: {
-        uint32_t idx = static_cast<uint32_t>(jvalue.i);
-        ObjPtr<mirror::MethodType> ref =
-            class_linker->ResolveMethodType(self, idx, dex_cache, class_loader);
-        if (ref.IsNull()) {
-          DCHECK(self->IsExceptionPending());
-          return nullptr;
-        }
-        bootstrap_frame->SetVRegReference(vreg, ref.Ptr());
-        vreg += 1;
-        break;
+  // Pack the remaining arguments into the frame.
+  int number_of_arguments = call_site_type->GetNumberOfPTypes();
+  int argument_index;
+  for (argument_index = 1; argument_index < number_of_arguments; ++argument_index) {
+    if (argument_index == number_of_arguments - 1 &&
+        call_site_type->GetPTypes()->Get(argument_index)->IsArrayClass()) {
+      ObjPtr<mirror::Class> array_type = call_site_type->GetPTypes()->Get(argument_index);
+      if (!PackCollectorArrayForBootstrapMethod(self,
+                                                referrer,
+                                                array_type,
+                                                collector_arguments_length,
+                                                &it,
+                                                &setter)) {
+        DCHECK(self->IsExceptionPending());
+        return nullptr;
       }
-      case EncodedArrayValueIterator::ValueType::kMethodHandle: {
-        uint32_t idx = static_cast<uint32_t>(jvalue.i);
-        ObjPtr<mirror::MethodHandle> ref =
-            class_linker->ResolveMethodHandle(self, idx, referrer);
-        if (ref.IsNull()) {
-          DCHECK(self->IsExceptionPending());
-          return nullptr;
-        }
-        bootstrap_frame->SetVRegReference(vreg, ref.Ptr());
-        vreg += 1;
-        break;
-      }
-      case EncodedArrayValueIterator::ValueType::kString: {
-        dex::StringIndex idx(static_cast<uint32_t>(jvalue.i));
-        ObjPtr<mirror::String> ref = class_linker->ResolveString(idx, dex_cache);
-        if (ref.IsNull()) {
-          DCHECK(self->IsExceptionPending());
-          return nullptr;
-        }
-        bootstrap_frame->SetVRegReference(vreg, ref.Ptr());
-        vreg += 1;
-        break;
-      }
-      case EncodedArrayValueIterator::ValueType::kType: {
-        dex::TypeIndex idx(static_cast<uint32_t>(jvalue.i));
-        ObjPtr<mirror::Class> ref = class_linker->ResolveType(idx, dex_cache, class_loader);
-        if (ref.IsNull()) {
-          DCHECK(self->IsExceptionPending());
-          return nullptr;
-        }
-        bootstrap_frame->SetVRegReference(vreg, ref.Ptr());
-        vreg += 1;
-        break;
-      }
-      case EncodedArrayValueIterator::ValueType::kNull:
-        bootstrap_frame->SetVRegReference(vreg, nullptr);
-        vreg += 1;
-        break;
-      case EncodedArrayValueIterator::ValueType::kField:
-      case EncodedArrayValueIterator::ValueType::kMethod:
-      case EncodedArrayValueIterator::ValueType::kEnum:
-      case EncodedArrayValueIterator::ValueType::kArray:
-      case EncodedArrayValueIterator::ValueType::kAnnotation:
-        // Unreachable based on current EncodedArrayValueIterator::Next().
-        UNREACHABLE();
+    } else if (!PackArgumentForBootstrapMethod(self, referrer, &it, &setter)) {
+      DCHECK(self->IsExceptionPending());
+      return nullptr;
     }
-
     it.Next();
   }
+  DCHECK(!it.HasNext());
+  DCHECK(setter.Done());
 
   // Invoke the bootstrap method handle.
   JValue result;
-  RangeInstructionOperands operands(0, vreg);
-  bool invoke_success = MethodHandleInvokeExact(self,
-                                                *bootstrap_frame,
-                                                bootstrap,
-                                                bootstrap_method_type,
-                                                &operands,
-                                                &result);
+  RangeInstructionOperands operands(0, bootstrap_frame->NumberOfVRegs());
+  bool invoke_success = MethodHandleInvoke(self,
+                                           *bootstrap_frame,
+                                           bsm,
+                                           call_site_type,
+                                           &operands,
+                                           &result);
   if (!invoke_success) {
     DCHECK(self->IsExceptionPending());
     return nullptr;
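
Reviewer note: the variable-arity handling in this hunk boils down to a little arithmetic. With a varargs BSM of N parameters (the last being the collector array) and a call site carrying P static argument types, the trailing P - (N - 1) arguments are folded into one array, leaving exactly N parameters. A sketch of that computation, with illustrative names:

```cpp
#include <cstdint>

struct CollectorShape {
  int32_t start_index;  // First call-site ptype folded into the array.
  int32_t length;       // Number of folded arguments (may be zero).
};

CollectorShape ComputeCollectorShape(int32_t bsm_parameter_count /* N */,
                                     int32_t call_site_ptype_count /* P */) {
  CollectorShape shape;
  shape.start_index = bsm_parameter_count - 1;
  shape.length = call_site_ptype_count - bsm_parameter_count + 1;
  return shape;
}
```

For example, N = 3 and P = 5 give start_index = 2 and length = 3, so call-site ptypes 2 through 4 are collected into the array.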
@@ -1077,31 +1395,20 @@
     return nullptr;
   }
 
-  // Check the result type is a subclass of CallSite.
+  // Check the result type is a subclass of j.l.i.CallSite.
   if (UNLIKELY(!object->InstanceOf(mirror::CallSite::StaticClass()))) {
     ThrowClassCastException(object->GetClass(), mirror::CallSite::StaticClass());
     return nullptr;
   }
 
+  // Check the call site target is not null as we're going to invoke it.
   Handle<mirror::CallSite> call_site =
       hs.NewHandle(ObjPtr<mirror::CallSite>::DownCast(ObjPtr<mirror::Object>(result.GetL())));
-  // Check the call site target is not null as we're going to invoke it.
   Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
   if (UNLIKELY(target.IsNull())) {
-    ThrowClassCastException("Bootstrap method did not return a callsite");
+    ThrowClassCastException("Bootstrap method returned a CallSite with a null target");
     return nullptr;
   }
-
-  // Check the target method type matches the method type requested modulo the receiver
-  // needs to be compatible rather than exact.
-  Handle<mirror::MethodType> target_method_type = hs.NewHandle(target->GetMethodType());
-  if (UNLIKELY(!target_method_type->IsExactMatch(method_type.Get()) &&
-               !IsParameterTypeConvertible(target_method_type->GetPTypes()->GetWithoutChecks(0),
-                                           method_type->GetPTypes()->GetWithoutChecks(0)))) {
-    ThrowWrongMethodTypeException(target_method_type.Get(), method_type.Get());
-    return nullptr;
-  }
-
   return call_site.Get();
 }
 
@@ -1129,8 +1436,11 @@
     call_site.Assign(InvokeBootstrapMethod(self, shadow_frame, call_site_idx));
     if (UNLIKELY(call_site.IsNull())) {
       CHECK(self->IsExceptionPending());
-      ThrowWrappedBootstrapMethodError("Exception from call site #%u bootstrap method",
-                                       call_site_idx);
+      if (!self->GetException()->IsError()) {
+        // Use a BootstrapMethodError if the exception is not an instance of java.lang.Error.
+        ThrowWrappedBootstrapMethodError("Exception from call site #%u bootstrap method",
+                                         call_site_idx);
+      }
       result->SetJ(0);
       return false;
     }
@@ -1139,9 +1449,6 @@
     call_site.Assign(winning_call_site);
   }
 
-  // CallSite.java checks the re-assignment of the call site target
-  // when mutating call site targets. We only check the target is
-  // non-null and has the right type during bootstrap method execution.
   Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
   Handle<mirror::MethodType> target_method_type = hs.NewHandle(target->GetMethodType());
   DCHECK_EQ(static_cast<size_t>(inst->VRegA()), target_method_type->NumberOfVRegs());
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 51a63dd..b2d58da 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -328,33 +328,20 @@
 
 class ScopedCodeCacheWrite : ScopedTrace {
  public:
-  explicit ScopedCodeCacheWrite(MemMap* code_map, bool only_for_tlb_shootdown = false)
+  explicit ScopedCodeCacheWrite(MemMap* code_map)
       : ScopedTrace("ScopedCodeCacheWrite"),
-        code_map_(code_map),
-        only_for_tlb_shootdown_(only_for_tlb_shootdown) {
+        code_map_(code_map) {
     ScopedTrace trace("mprotect all");
-    CheckedCall(mprotect,
-                "make code writable",
-                code_map_->Begin(),
-                only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
-                kProtAll);
+    CheckedCall(mprotect, "make code writable", code_map_->Begin(), code_map_->Size(), kProtAll);
   }
   ~ScopedCodeCacheWrite() {
     ScopedTrace trace("mprotect code");
-    CheckedCall(mprotect,
-                "make code protected",
-                code_map_->Begin(),
-                only_for_tlb_shootdown_ ? kPageSize : code_map_->Size(),
-                kProtCode);
+    CheckedCall(mprotect, "make code protected", code_map_->Begin(), code_map_->Size(), kProtCode);
   }
 
  private:
   MemMap* const code_map_;
 
-  // If we're using ScopedCacheWrite only for TLB shootdown, we limit the scope of mprotect to
-  // one page.
-  const bool only_for_tlb_shootdown_;
-
   DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
 };
 
@@ -684,7 +671,11 @@
 
 static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
   if (was_warm) {
-    method->SetPreviouslyWarm();
+    // Don't do any read barrier, as the declaring class of `method` may
+    // be in the process of being GC'ed (the declaring class is only read
+    // when DCHECKing that it is resolved, which we already know it is
+    // at this point).
+    method->SetPreviouslyWarm<kWithoutReadBarrier>();
   }
   // We reset the counter to 1 so that the profile knows that the method was executed at least once.
   // This is required for layout purposes.
@@ -812,8 +803,6 @@
       FillRootTable(roots_data, roots);
       {
         // Flush data cache, as compiled code references literals in it.
-        // We also need a TLB shootdown to act as memory barrier across cores.
-        ScopedCodeCacheWrite ccw(code_map_.get(), /* only_for_tlb_shootdown */ true);
         FlushDataCache(reinterpret_cast<char*>(roots_data),
                        reinterpret_cast<char*>(roots_data + data_size));
       }
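
Reviewer note: with the TLB-shootdown special case removed, ScopedCodeCacheWrite reduces to a plain RAII mprotect guard over the whole code map. A minimal sketch of the shape (kProtAll/kProtCode expand to RWX/RX in the real code; Begin/size are stand-ins for the MemMap accessors, and the mprotect result checking is omitted):

```cpp
#include <sys/mman.h>
#include <cstddef>

class ScopedWritableRegion {
 public:
  ScopedWritableRegion(void* begin, size_t size) : begin_(begin), size_(size) {
    mprotect(begin_, size_, PROT_READ | PROT_WRITE | PROT_EXEC);  // kProtAll
  }
  ~ScopedWritableRegion() {
    mprotect(begin_, size_, PROT_READ | PROT_EXEC);  // kProtCode
  }

 private:
  void* const begin_;
  const size_t size_;
};
```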
diff --git a/runtime/method_handles.h b/runtime/method_handles.h
index 7e60a5c..fce3d06 100644
--- a/runtime/method_handles.h
+++ b/runtime/method_handles.h
@@ -174,19 +174,26 @@
       : shadow_frame_(shadow_frame), arg_index_(first_dst_reg) {}
 
   ALWAYS_INLINE void Set(uint32_t value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK_LT(arg_index_, shadow_frame_->NumberOfVRegs());
     shadow_frame_->SetVReg(arg_index_++, value);
   }
 
   ALWAYS_INLINE void SetReference(ObjPtr<mirror::Object> value)
       REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK_LT(arg_index_, shadow_frame_->NumberOfVRegs());
     shadow_frame_->SetVRegReference(arg_index_++, value.Ptr());
   }
 
   ALWAYS_INLINE void SetLong(int64_t value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK_LT(arg_index_, shadow_frame_->NumberOfVRegs());
     shadow_frame_->SetVRegLong(arg_index_, value);
     arg_index_ += 2;
   }
 
+  ALWAYS_INLINE bool Done() const {
+    return arg_index_ == shadow_frame_->NumberOfVRegs();
+  }
+
  private:
   ShadowFrame* shadow_frame_;
   size_t arg_index_;
diff --git a/runtime/mirror/method_type.cc b/runtime/mirror/method_type.cc
index 6ac5012..45f7a87 100644
--- a/runtime/mirror/method_type.cc
+++ b/runtime/mirror/method_type.cc
@@ -23,6 +23,18 @@
 namespace art {
 namespace mirror {
 
+namespace {
+
+ObjPtr<ObjectArray<Class>> AllocatePTypesArray(Thread* self, int count)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<Class> class_type = Class::GetJavaLangClass();
+  ObjPtr<Class> class_array_type =
+      Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_type);
+  return ObjectArray<Class>::Alloc(self, class_array_type, count);
+}
+
+}  // namespace
+
 GcRoot<Class> MethodType::static_class_;
 
 MethodType* MethodType::Create(Thread* const self,
@@ -47,18 +59,41 @@
 MethodType* MethodType::CloneWithoutLeadingParameter(Thread* const self,
                                                      ObjPtr<MethodType> method_type) {
   StackHandleScope<3> hs(self);
-  Handle<Class> rtype = hs.NewHandle(method_type->GetRType());
   Handle<ObjectArray<Class>> src_ptypes = hs.NewHandle(method_type->GetPTypes());
-  ObjPtr<Class> class_type = Class::GetJavaLangClass();
-  ObjPtr<Class> class_array_type =
-      Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_type);
-  const int32_t dst_ptypes_count = src_ptypes->GetLength() - 1;
-  Handle<ObjectArray<Class>> dst_ptypes = hs.NewHandle(
-      ObjectArray<Class>::Alloc(self, class_array_type, dst_ptypes_count));
+  Handle<Class> dst_rtype = hs.NewHandle(method_type->GetRType());
+  const int32_t dst_ptypes_count = method_type->GetNumberOfPTypes() - 1;
+  Handle<ObjectArray<Class>> dst_ptypes = hs.NewHandle(AllocatePTypesArray(self, dst_ptypes_count));
+  if (dst_ptypes.IsNull()) {
+    return nullptr;
+  }
   for (int32_t i = 0; i < dst_ptypes_count; ++i) {
     dst_ptypes->Set(i, src_ptypes->Get(i + 1));
   }
-  return Create(self, rtype, dst_ptypes);
+  return Create(self, dst_rtype, dst_ptypes);
+}
+
+MethodType* MethodType::CollectTrailingArguments(Thread* self,
+                                                 ObjPtr<MethodType> method_type,
+                                                 ObjPtr<Class> collector_array_class,
+                                                 int32_t start_index) {
+  int32_t ptypes_length = method_type->GetNumberOfPTypes();
+  if (start_index > ptypes_length) {
+    return method_type.Ptr();
+  }
+
+  StackHandleScope<4> hs(self);
+  Handle<Class> collector_class = hs.NewHandle(collector_array_class);
+  Handle<Class> dst_rtype = hs.NewHandle(method_type->GetRType());
+  Handle<ObjectArray<Class>> src_ptypes = hs.NewHandle(method_type->GetPTypes());
+  Handle<ObjectArray<Class>> dst_ptypes = hs.NewHandle(AllocatePTypesArray(self, start_index + 1));
+  if (dst_ptypes.IsNull()) {
+    return nullptr;
+  }
+  for (int32_t i = 0; i < start_index; ++i) {
+    dst_ptypes->Set(i, src_ptypes->Get(i));
+  }
+  dst_ptypes->Set(start_index, collector_class.Get());
+  return Create(self, dst_rtype, dst_ptypes);
 }
 
 size_t MethodType::NumberOfVRegs() REQUIRES_SHARED(Locks::mutator_lock_) {
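
Reviewer note: to make CollectTrailingArguments' shape change concrete, an illustrative sketch using plain strings for types: everything at start_index and beyond is replaced by the single collector array type.

```cpp
#include <cstdint>
#include <string>
#include <vector>

std::vector<std::string> CollectTrailing(std::vector<std::string> ptypes,
                                         const std::string& collector_array,
                                         int32_t start_index) {
  if (start_index > static_cast<int32_t>(ptypes.size())) {
    return ptypes;  // Mirrors the early return in the real code.
  }
  ptypes.resize(start_index);        // Drop the trailing argument types.
  ptypes.push_back(collector_array); // Replace them with the array type.
  return ptypes;
}

// CollectTrailing({"A", "B", "C", "D"}, "T[]", 2) == {"A", "B", "T[]"}
```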
diff --git a/runtime/mirror/method_type.h b/runtime/mirror/method_type.h
index 3627214..771162a 100644
--- a/runtime/mirror/method_type.h
+++ b/runtime/mirror/method_type.h
@@ -40,6 +40,14 @@
                                                   ObjPtr<MethodType> method_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Collects trailing parameter types into an array. Assumes the caller
+  // has checked that the trailing arguments are all of the same type.
+  static MethodType* CollectTrailingArguments(Thread* const self,
+                                              ObjPtr<MethodType> method_type,
+                                              ObjPtr<Class> collector_array_class,
+                                              int32_t start_index)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   static Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return static_class_.Read();
   }
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index b6173d4..7006973 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -75,6 +75,10 @@
   return !InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_RuntimeException));
 }
 
+bool Throwable::IsError() {
+  return InstanceOf(WellKnownClasses::ToClass(WellKnownClasses::java_lang_Error));
+}
+
 int32_t Throwable::GetStackDepth() {
   ObjPtr<Object> stack_state = GetStackState();
   if (stack_state == nullptr || !stack_state->IsObjectArray()) {
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index fb45228..b901ca2 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -44,6 +44,7 @@
   void SetCause(ObjPtr<Throwable> cause) REQUIRES_SHARED(Locks::mutator_lock_);
   void SetStackState(ObjPtr<Object> state) REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
+  bool IsError() REQUIRES_SHARED(Locks::mutator_lock_);
 
   static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!java_lang_Throwable_.IsNull());
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 396b09a..c26c26e 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -32,12 +32,14 @@
 #include <vector>
 
 #include <linux/unistd.h>
+#include <poll.h>
 #include <signal.h>
 #include <stdlib.h>
 #include <sys/time.h>
 #include <sys/types.h>
 
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 
 #include "arch/instruction_set.h"
 #include "base/aborting.h"
@@ -145,21 +147,14 @@
   bool prefix_written = false;
 
   for (;;) {
-    constexpr uint32_t kWaitTimeExpectedMicros = 500 * 1000;
-    constexpr uint32_t kWaitTimeUnexpectedMicros = 50 * 1000;
+    constexpr uint32_t kWaitTimeExpectedMilli = 500;
+    constexpr uint32_t kWaitTimeUnexpectedMilli = 50;
 
-    struct timeval tv;
-    tv.tv_sec = 0;
-    tv.tv_usec = expected > 0 ? kWaitTimeExpectedMicros : kWaitTimeUnexpectedMicros;
-
-    fd_set rfds;
-    FD_ZERO(&rfds);
-    FD_SET(in, &rfds);
-
-    int retval = TEMP_FAILURE_RETRY(select(in + 1, &rfds, nullptr, nullptr, &tv));
-
-    if (retval < 0) {
-      // Other side may have crashed or other errors.
+    int timeout = expected > 0 ? kWaitTimeExpectedMilli : kWaitTimeUnexpectedMilli;
+    struct pollfd read_fd{in, POLLIN, 0};
+    int retval = TEMP_FAILURE_RETRY(poll(&read_fd, 1, timeout));
+    if (retval == -1) {
+      // An error occurred.
       pipe->reset();
       return;
     }
@@ -169,19 +164,23 @@
       return;
     }
 
-    DCHECK_EQ(retval, 1);
+    if (!(read_fd.revents & POLLIN)) {
+      // addr2line call exited.
+      pipe->reset();
+      return;
+    }
 
     constexpr size_t kMaxBuffer = 128;  // Relatively small buffer. Should be OK as we're on an
     // alt stack, but just to be sure...
     char buffer[kMaxBuffer];
     memset(buffer, 0, kMaxBuffer);
     int bytes_read = TEMP_FAILURE_RETRY(read(in, buffer, kMaxBuffer - 1));
-
-    if (bytes_read < 0) {
+    if (bytes_read <= 0) {
       // This should not really happen...
       pipe->reset();
       return;
     }
+    buffer[bytes_read] = '\0';
 
     char* tmp = buffer;
     while (*tmp != 0) {
@@ -219,8 +218,10 @@
                       std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
   DCHECK(pipe != nullptr);
 
-  if (map_src == "[vdso]") {
-    // Special-case this, our setup has problems with this.
+  if (map_src == "[vdso]" || android::base::EndsWith(map_src, ".vdex")) {
+    // addr2line will not work on the vdso.
+    // vdex files are special frames injected for the interpreter,
+    // so they don't have any line number information available.
     return;
   }
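
The switch from select() to poll() above drops the fd_set/timeval boilerplate: poll() takes its timeout in milliseconds directly and reports peer hangup through revents instead of a zero-byte read. Here is a self-contained sketch of the same wait-then-read pattern, with the descriptor and timeout as placeholders; it is an illustration, not the ART function itself.

    #include <poll.h>
    #include <unistd.h>
    #include <cerrno>
    #include <cstdio>

    #ifndef TEMP_FAILURE_RETRY  // glibc/bionic retry-on-EINTR macro.
    #define TEMP_FAILURE_RETRY(exp) ({            \
      decltype(exp) _rc;                          \
      do {                                        \
        _rc = (exp);                              \
      } while (_rc == -1 && errno == EINTR);      \
      _rc; })
    #endif

    // Waits up to timeout_ms for fd to become readable, then reads one chunk.
    // Returns false on error, timeout, or hangup, mirroring the reset paths above.
    bool WaitAndRead(int fd, int timeout_ms) {
      struct pollfd read_fd{fd, POLLIN, 0};
      int retval = TEMP_FAILURE_RETRY(poll(&read_fd, 1, timeout_ms));
      if (retval == -1) {
        return false;  // An error occurred.
      }
      if (retval == 0) {
        return false;  // Timed out.
      }
      if (!(read_fd.revents & POLLIN)) {
        return false;  // POLLHUP/POLLERR: the other side went away.
      }
      char buffer[128];
      ssize_t bytes_read = TEMP_FAILURE_RETRY(read(fd, buffer, sizeof(buffer) - 1));
      if (bytes_read <= 0) {
        return false;
      }
      buffer[bytes_read] = '\0';  // read() does not NUL-terminate.
      fputs(buffer, stdout);
      return true;
    }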
 
diff --git a/runtime/oat.h b/runtime/oat.h
index 292c9d6..01d3914 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: Math.pow() intrinsic.
-  static constexpr uint8_t kOatVersion[] = { '1', '3', '8', '\0' };
+  // Last oat version changed reason: Retrieve Class* and String* from .data.bimg.rel.ro .
+  static constexpr uint8_t kOatVersion[] = { '1', '4', '0', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index b0e1de2..cfbcda3 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -343,6 +343,19 @@
   // Readjust to be non-inclusive upper bound.
   end_ += sizeof(uint32_t);
 
+  data_bimg_rel_ro_begin_ = FindDynamicSymbolAddress("oatdatabimgrelro", &symbol_error_msg);
+  if (data_bimg_rel_ro_begin_ != nullptr) {
+    data_bimg_rel_ro_end_ =
+        FindDynamicSymbolAddress("oatdatabimgrelrolastword", &symbol_error_msg);
+    if (data_bimg_rel_ro_end_ == nullptr) {
+      *error_msg =
+          StringPrintf("Failed to find oatdatabimgrelrolastword symbol in '%s'", file_path.c_str());
+      return false;
+    }
+    // Readjust to be non-inclusive upper bound.
+    data_bimg_rel_ro_end_ += sizeof(uint32_t);
+  }
+
   bss_begin_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbss", &symbol_error_msg));
   if (bss_begin_ == nullptr) {
     // No .bss section.
@@ -399,37 +412,6 @@
   return true;
 }
 
-static inline bool MapConstantTables(const gc::space::ImageSpace* space,
-                                     uint8_t* address) {
-  // If MREMAP_DUP is ever merged to Linux kernel, use it to avoid the unnecessary open()/close().
-  // Note: The current approach relies on the filename still referencing the same inode.
-
-  File file(space->GetImageFilename(), O_RDONLY, /* checkUsage */ false);
-  if (!file.IsOpened()) {
-    LOG(ERROR) << "Failed to open boot image file " << space->GetImageFilename();
-    return false;
-  }
-
-  uint32_t offset = space->GetImageHeader().GetBootImageConstantTablesOffset();
-  uint32_t size = space->GetImageHeader().GetBootImageConstantTablesSize();
-  std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapFileAtAddress(address,
-                                                           size,
-                                                           PROT_READ,
-                                                           MAP_PRIVATE,
-                                                           file.Fd(),
-                                                           offset,
-                                                           /* low_4gb */ false,
-                                                           /* reuse */ true,
-                                                           file.GetPath().c_str(),
-                                                           &error_msg));
-  if (mem_map == nullptr) {
-    LOG(ERROR) << "Failed to mmap boot image tables from file " << space->GetImageFilename();
-    return false;
-  }
-  return true;
-}
-
 static bool ReadIndexBssMapping(OatFile* oat_file,
                                 /*inout*/const uint8_t** oat,
                                 size_t dex_file_index,
@@ -536,6 +518,17 @@
   }
   const uint8_t* oat = Begin() + oat_dex_files_offset;  // Jump to the OatDexFile records.
 
+  if (!IsAligned<sizeof(uint32_t)>(data_bimg_rel_ro_begin_) ||
+      !IsAligned<sizeof(uint32_t)>(data_bimg_rel_ro_end_) ||
+      data_bimg_rel_ro_begin_ > data_bimg_rel_ro_end_) {
+    *error_msg = StringPrintf("In oat file '%s' found unaligned or unordered databimgrelro "
+                                  "symbol(s): begin = %p, end = %p",
+                              GetLocation().c_str(),
+                              data_bimg_rel_ro_begin_,
+                              data_bimg_rel_ro_end_);
+    return false;
+  }
+
   DCHECK_GE(static_cast<size_t>(pointer_size), alignof(GcRoot<mirror::Object>));
   if (!IsAligned<kPageSize>(bss_begin_) ||
       !IsAlignedParam(bss_methods_, static_cast<size_t>(pointer_size)) ||
@@ -564,12 +557,15 @@
     return false;
   }
 
-  uint8_t* after_tables =
-      (bss_methods_ != nullptr) ? bss_methods_ : bss_roots_;  // May be null.
-  uint8_t* boot_image_tables = (bss_begin_ == after_tables) ? nullptr : bss_begin_;
-  uint8_t* boot_image_tables_end =
-      (bss_begin_ == after_tables) ? nullptr : (after_tables != nullptr) ? after_tables : bss_end_;
-  DCHECK_EQ(boot_image_tables != nullptr, boot_image_tables_end != nullptr);
+  if (bss_methods_ != nullptr && bss_methods_ != bss_begin_) {
+    *error_msg = StringPrintf("In oat file '%s' found unexpected .bss gap before 'oatbssmethods': "
+                                  "begin = %p, methods = %p",
+                              GetLocation().c_str(),
+                              bss_begin_,
+                              bss_methods_);
+    return false;
+  }
+
   uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
   oat_dex_files_storage_.reserve(dex_file_count);
   for (size_t i = 0; i < dex_file_count; i++) {
@@ -849,39 +845,28 @@
     }
   }
 
-  if (boot_image_tables != nullptr) {
-    Runtime* runtime = Runtime::Current();
+  Runtime* runtime = Runtime::Current();
+
+  if (DataBimgRelRoBegin() != nullptr) {
+    // Make .data.bimg.rel.ro read only. ClassLinker shall make it writable for relocation.
+    uint8_t* reloc_begin = const_cast<uint8_t*>(DataBimgRelRoBegin());
+    CheckedCall(mprotect, "protect relocations", reloc_begin, DataBimgRelRoSize(), PROT_READ);
     if (UNLIKELY(runtime == nullptr)) {
-      // This must be oatdump without boot image. Make sure the .bss is inaccessible.
-      CheckedCall(mprotect, "protect bss", const_cast<uint8_t*>(BssBegin()), BssSize(), PROT_NONE);
+      // This must be oatdump without boot image.
     } else if (!IsExecutable()) {
-      // Do not try to mmap boot image tables into .bss if the oat file is not executable.
+      // Do not check whether we have a boot image if the oat file is not executable.
+    } else if (UNLIKELY(runtime->GetHeap()->GetBootImageSpaces().empty())) {
+      *error_msg = StringPrintf("Cannot load oat file '%s' with .data.bimg.rel.ro as executable "
+                                    "without boot image.",
+                                GetLocation().c_str());
+      return false;
     } else {
-      // Map boot image tables into the .bss. The reserved size must match size of the tables.
-      size_t reserved_size = static_cast<size_t>(boot_image_tables_end - boot_image_tables);
-      size_t tables_size = 0u;
-      for (gc::space::ImageSpace* space : runtime->GetHeap()->GetBootImageSpaces()) {
-        tables_size += space->GetImageHeader().GetBootImageConstantTablesSize();
-        DCHECK_ALIGNED(tables_size, kPageSize);
-      }
-      if (tables_size != reserved_size) {
-        *error_msg = StringPrintf("In oat file '%s' found unexpected boot image table sizes, "
-                                      " %zu bytes, should be %zu.",
-                                  GetLocation().c_str(),
-                                  reserved_size,
-                                  tables_size);
-        return false;
-      }
-      for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
-        uint32_t current_tables_size = space->GetImageHeader().GetBootImageConstantTablesSize();
-        if (current_tables_size != 0u && !MapConstantTables(space, boot_image_tables)) {
-          return false;
-        }
-        boot_image_tables += current_tables_size;
-      }
-      DCHECK(boot_image_tables == boot_image_tables_end);
+      // ClassLinker shall perform the relocation when we register a dex file from
+      // this oat file. We do not do the relocation here to avoid dirtying the pages
+      // if the code is never actually executed.
     }
   }
+
   return true;
 }
 
@@ -1513,6 +1498,8 @@
       vdex_(nullptr),
       begin_(nullptr),
       end_(nullptr),
+      data_bimg_rel_ro_begin_(nullptr),
+      data_bimg_rel_ro_end_(nullptr),
       bss_begin_(nullptr),
       bss_end_(nullptr),
       bss_methods_(nullptr),
@@ -1542,22 +1529,6 @@
   return end_;
 }
 
-const uint8_t* OatFile::BssBegin() const {
-  return bss_begin_;
-}
-
-const uint8_t* OatFile::BssEnd() const {
-  return bss_end_;
-}
-
-const uint8_t* OatFile::VdexBegin() const {
-  return vdex_begin_;
-}
-
-const uint8_t* OatFile::VdexEnd() const {
-  return vdex_end_;
-}
-
 const uint8_t* OatFile::DexBegin() const {
   return vdex_->Begin();
 }
@@ -1566,6 +1537,16 @@
   return vdex_->End();
 }
 
+ArrayRef<const uint32_t> OatFile::GetBootImageRelocations() const {
+  if (data_bimg_rel_ro_begin_ != nullptr) {
+    const uint32_t* relocations = reinterpret_cast<const uint32_t*>(data_bimg_rel_ro_begin_);
+    const uint32_t* relocations_end = reinterpret_cast<const uint32_t*>(data_bimg_rel_ro_end_);
+    return ArrayRef<const uint32_t>(relocations, relocations_end - relocations);
+  } else {
+    return ArrayRef<const uint32_t>();
+  }
+}
+
 ArrayRef<ArtMethod*> OatFile::GetBssMethods() const {
   if (bss_methods_ != nullptr) {
     ArtMethod** methods = reinterpret_cast<ArtMethod**>(bss_methods_);
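
The comments above describe a deferred-relocation scheme: .data.bimg.rel.ro starts out read-only, and ClassLinker later makes it writable, patches it, and reprotects it, so the pages are dirtied only if the code is actually used. Below is a sketch of that open-patch-seal pattern on a private anonymous page. Treating each 32-bit entry as an offset to which a base address is added is an assumption suggested by the naming and by GetBootImageRelocations() returning uint32_t values; ClassLinker's side is not part of this diff.

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical relocation pass over a page-aligned array of 32-bit entries.
    void RelocateAndSeal(uint32_t* relro, size_t count, uint32_t base) {
      size_t num_bytes = count * sizeof(uint32_t);
      mprotect(relro, num_bytes, PROT_READ | PROT_WRITE);  // Open for patching.
      for (size_t i = 0; i != count; ++i) {
        relro[i] += base;  // Dirties the pages only when we actually patch.
      }
      mprotect(relro, num_bytes, PROT_READ);  // Seal read-only again.
    }

    int main() {
      // Stand-in for the section: one anonymous page (assuming 4 KiB pages).
      void* mem = mmap(nullptr, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) {
        return 1;
      }
      uint32_t* relro = static_cast<uint32_t*>(mem);
      RelocateAndSeal(relro, 4096 / sizeof(uint32_t), 0x70000000u);
      printf("first entry: %#x\n", relro[0]);  // 0 + base.
      return 0;
    }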
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 3c2cd00..24868dd 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -275,6 +275,10 @@
     return p >= Begin() && p < End();
   }
 
+  size_t DataBimgRelRoSize() const {
+    return DataBimgRelRoEnd() - DataBimgRelRoBegin();
+  }
+
   size_t BssSize() const {
     return BssEnd() - BssBegin();
   }
@@ -300,15 +304,19 @@
   const uint8_t* Begin() const;
   const uint8_t* End() const;
 
-  const uint8_t* BssBegin() const;
-  const uint8_t* BssEnd() const;
+  const uint8_t* DataBimgRelRoBegin() const { return data_bimg_rel_ro_begin_; }
+  const uint8_t* DataBimgRelRoEnd() const { return data_bimg_rel_ro_end_; }
 
-  const uint8_t* VdexBegin() const;
-  const uint8_t* VdexEnd() const;
+  const uint8_t* BssBegin() const { return bss_begin_; }
+  const uint8_t* BssEnd() const { return bss_end_; }
+
+  const uint8_t* VdexBegin() const { return vdex_begin_; }
+  const uint8_t* VdexEnd() const { return vdex_end_; }
 
   const uint8_t* DexBegin() const;
   const uint8_t* DexEnd() const;
 
+  ArrayRef<const uint32_t> GetBootImageRelocations() const;
   ArrayRef<ArtMethod*> GetBssMethods() const;
   ArrayRef<GcRoot<mirror::Object>> GetBssGcRoots() const;
 
@@ -355,6 +363,12 @@
   // Pointer to end of oat region for bounds checking.
   const uint8_t* end_;
 
+  // Pointer to the .data.bimg.rel.ro section, if present, otherwise null.
+  const uint8_t* data_bimg_rel_ro_begin_;
+
+  // Pointer to the end of the .data.bimg.rel.ro section, if present, otherwise null.
+  const uint8_t* data_bimg_rel_ro_end_;
+
   // Pointer to the .bss section, if present, otherwise null.
   uint8_t* bss_begin_;
 
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 1d091e9..5888c37 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -1263,10 +1263,14 @@
 
   switch (Status()) {
     case kOatBootImageOutOfDate:
+      // OutOfDate may be either a mismatched image or a missing image.
       if (oat_file_assistant_->HasOriginalDexFiles()) {
-        // If there are original dex files, it is better to use them.
+        // If there are original dex files, it is better to use them (to avoid a potential
+        // quickening mismatch because the boot image changed).
         break;
       }
+      // If we do not accept the oat file, we may not have access to dex bytecode at all. Grudgingly
+      // go forward.
       FALLTHROUGH_INTENDED;
 
     case kOatRelocationOutOfDate:
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0ca646c..793d430 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1276,7 +1276,7 @@
   jdwp_provider_ = runtime_options.GetOrDefault(Opt::JdwpProvider);
   switch (jdwp_provider_) {
     case JdwpProvider::kNone: {
-      LOG(INFO) << "Disabling all JDWP support.";
+      VLOG(jdwp) << "Disabling all JDWP support.";
       if (!jdwp_options_.empty()) {
         bool has_transport = jdwp_options_.find("transport") != std::string::npos;
         const char* transport_internal = !has_transport ? "transport=dt_android_adb," : "";
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 74c2244..21a4ecc 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -4206,14 +4206,19 @@
   if (it.Size() < 3) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
                                       << " has too few arguments: "
-                                      << it.Size() << "< 3";
+                                      << it.Size() << " < 3";
     return false;
   }
 
   // Get and check the first argument: the method handle (index range
   // checked by the dex file verifier).
   uint32_t method_handle_idx = static_cast<uint32_t>(it.GetJavaValue().i);
-  it.Next();
+  if (method_handle_idx >= dex_file_->NumMethodHandles()) {
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site id #" << call_site_idx
+                                      << " method handle index invalid " << method_handle_idx
+                                      << " >= " << dex_file_->NumMethodHandles();
+    return false;
+  }
 
   const DexFile::MethodHandleItem& mh = dex_file_->GetMethodHandle(method_handle_idx);
   if (mh.method_handle_type_ != static_cast<uint16_t>(DexFile::MethodHandleType::kInvokeStatic)) {
@@ -4222,93 +4227,6 @@
                                       << mh.method_handle_type_;
     return false;
   }
-
-  // Skip the second argument, the name to resolve, as checked by the
-  // dex file verifier.
-  it.Next();
-
-  // Skip the third argument, the method type expected, as checked by
-  // the dex file verifier.
-  it.Next();
-
-  // Check the bootstrap method handle and remaining arguments.
-  const DexFile::MethodId& method_id = dex_file_->GetMethodId(mh.field_or_method_idx_);
-  uint32_t length;
-  const char* shorty = dex_file_->GetMethodShorty(method_id, &length);
-
-  if (it.Size() < length - 1) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
-                                      << " too few arguments for bootstrap method: "
-                                      << it.Size() << " < " << (length - 1);
-    return false;
-  }
-
-  // Check the return type and first 3 arguments are references
-  // (CallSite, Lookup, String, MethodType). If they are not of the
-  // expected types (or subtypes), it will trigger a
-  // WrongMethodTypeException during execution.
-  if (shorty[0] != 'L') {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
-                                      << " bootstrap return type is not a reference";
-    return false;
-  }
-
-  for (uint32_t i = 1; i < 4; ++i) {
-    if (shorty[i] != 'L') {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
-                                        << " bootstrap method argument " << (i - 1)
-                                        << " is not a reference";
-      return false;
-    }
-  }
-
-  // Check the optional arguments.
-  for (uint32_t i = 4; i < length; ++i, it.Next()) {
-    bool match = false;
-    switch (it.GetValueType()) {
-      case EncodedArrayValueIterator::ValueType::kBoolean:
-      case EncodedArrayValueIterator::ValueType::kByte:
-      case EncodedArrayValueIterator::ValueType::kShort:
-      case EncodedArrayValueIterator::ValueType::kChar:
-      case EncodedArrayValueIterator::ValueType::kInt:
-        // These all fit within one register and encoders do not seem
-        // too exacting on the encoding type they use (ie using
-        // integer for all of these).
-        match = (strchr("ZBCSI", shorty[i]) != nullptr);
-        break;
-      case EncodedArrayValueIterator::ValueType::kLong:
-        match = ('J' == shorty[i]);
-        break;
-      case EncodedArrayValueIterator::ValueType::kFloat:
-        match = ('F' == shorty[i]);
-        break;
-      case EncodedArrayValueIterator::ValueType::kDouble:
-        match = ('D' == shorty[i]);
-        break;
-      case EncodedArrayValueIterator::ValueType::kMethodType:
-      case EncodedArrayValueIterator::ValueType::kMethodHandle:
-      case EncodedArrayValueIterator::ValueType::kString:
-      case EncodedArrayValueIterator::ValueType::kType:
-      case EncodedArrayValueIterator::ValueType::kNull:
-        match = ('L' == shorty[i]);
-        break;
-      case EncodedArrayValueIterator::ValueType::kField:
-      case EncodedArrayValueIterator::ValueType::kMethod:
-      case EncodedArrayValueIterator::ValueType::kEnum:
-      case EncodedArrayValueIterator::ValueType::kArray:
-      case EncodedArrayValueIterator::ValueType::kAnnotation:
-        // Unreachable based on current EncodedArrayValueIterator::Next().
-        UNREACHABLE();
-    }
-
-    if (!match) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
-                                        << " bootstrap method argument " << (i - 1)
-                                        << " expected " << shorty[i]
-                                        << " got value type: " << it.GetValueType();
-      return false;
-    }
-  }
   return true;
 }
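
The bounds check added above rejects method_handle_idx >= NumMethodHandles(); the valid range for a table of N entries is [0, N), so a comparison using > would wrongly accept an index equal to N. A one-line illustration of the boundary (hypothetical helper, not verifier code):

    #include <cstdint>

    // Valid indexes into a table of `count` entries are 0 .. count - 1.
    inline bool IsValidIndex(uint32_t idx, uint32_t count) {
      return idx < count;  // Equivalent to rejecting idx >= count.
    }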
 
diff --git a/sigchainlib/sigchain_test.cc b/sigchainlib/sigchain_test.cc
index 9f338ad..1d1e54f 100644
--- a/sigchainlib/sigchain_test.cc
+++ b/sigchainlib/sigchain_test.cc
@@ -50,7 +50,7 @@
 
 static int RealSigprocmask(int how, const sigset64_t* new_sigset, sigset64_t* old_sigset) {
   // glibc's sigset_t is overly large, so sizeof(*new_sigset) doesn't work.
-  return syscall(__NR_rt_sigprocmask, how, new_sigset, old_sigset, 8);
+  return syscall(__NR_rt_sigprocmask, how, new_sigset, old_sigset, NSIG/8);
 }
 
 class SigchainTest : public ::testing::Test {
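
The NSIG/8 change above spells out why the literal 8 happened to work: rt_sigprocmask takes the size of the kernel's sigset in bytes (64 signal bits on most architectures, and glibc's NSIG of 65 yields 65/8 == 8 by integer division), whereas glibc's userspace sigset_t is 128 bytes and would be rejected with EINVAL. A standalone equivalent, assuming Linux/glibc and using sigset_t in place of bionic's sigset64_t:

    #include <csignal>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Direct rt_sigprocmask syscall. Passing sizeof(sigset_t) would fail with
    // EINVAL; the kernel expects the kernel sigset size, i.e. NSIG/8 bytes.
    static int RealSigprocmask(int how, const sigset_t* new_set, sigset_t* old_set) {
      return syscall(__NR_rt_sigprocmask, how, new_set, old_set, NSIG / 8);
    }

    int main() {
      sigset_t set;
      sigemptyset(&set);
      sigaddset(&set, SIGUSR1);
      return RealSigprocmask(SIG_BLOCK, &set, nullptr) == 0 ? 0 : 1;
    }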
diff --git a/test/445-checker-licm/src/Main.java b/test/445-checker-licm/src/Main.java
index bd5d9e2..517aacd 100644
--- a/test/445-checker-licm/src/Main.java
+++ b/test/445-checker-licm/src/Main.java
@@ -153,7 +153,6 @@
     return result;
   }
 
-
   /// CHECK-START: int Main.invariantBoundIntrinsic(int) instruction_simplifier (before)
   /// CHECK-DAG: InvokeStaticOrDirect loop:{{B\d+}}
   //
@@ -176,14 +175,17 @@
     return result;
   }
 
-  /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (before)
+  /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) instruction_simplifier (before)
   /// CHECK-DAG: InvokeStaticOrDirect loop:{{B\d+}}
 
-  /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (after)
-  /// CHECK-NOT: InvokeStaticOrDirect loop:{{B\d+}}
+  /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (before)
+  /// CHECK-DAG: Max loop:{{B\d+}}
 
   /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (after)
-  /// CHECK-DAG: InvokeStaticOrDirect loop:none
+  /// CHECK-NOT: Max loop:{{B\d+}}
+
+  /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (after)
+  /// CHECK-DAG: Max loop:none
 
   public static int invariantBodyIntrinsic(int x, int y) {
     int result = 0;
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 3173afd..17707e1 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -141,7 +141,7 @@
 
   /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$getBootImageString() builder (after)
   // Note: load kind depends on PIC/non-PIC
-  /// CHECK:                LoadString load_kind:{{BootImageAddress|BootImageInternTable}}
+  /// CHECK:                LoadString load_kind:{{BootImageAddress|BootImageRelRo}}
 
   public static String $noinline$getBootImageString() {
     // Prevent inlining to avoid the string comparison being optimized away.
@@ -169,7 +169,7 @@
 
   /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.Class Main.$noinline$getStringClass() builder (after)
   // Note: load kind depends on PIC/non-PIC
-  /// CHECK:                LoadClass load_kind:{{BootImageAddress|BootImageClassTable}} class_name:java.lang.String
+  /// CHECK:                LoadClass load_kind:{{BootImageAddress|BootImageRelRo}} class_name:java.lang.String
 
   public static Class<?> $noinline$getStringClass() {
     // Prevent inlining to avoid the string comparison being optimized away.
@@ -195,6 +195,32 @@
     return Other.class;
   }
 
+  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) builder (after)
+  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
+
+  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) sharpening (after)
+  // Note: load kind depends on PIC/non-PIC
+  /// CHECK:                InvokeStaticOrDirect method_load_kind:{{BootImageRelRo|DirectAddress}}
+  public static String $noinline$toHexString(int value) {
+    return Integer.toString(value, 16);
+  }
+
+  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) builder (after)
+  /// CHECK:                InvokeStaticOrDirect method_load_kind:RuntimeCall
+
+  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) sharpening (after)
+  /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
+
+  /// CHECK-START-X86: java.lang.String Main.$noinline$toHexStringIndirect(int) pc_relative_fixups_x86 (before)
+  /// CHECK-NOT:            X86ComputeBaseMethodAddress
+
+  /// CHECK-START-X86: java.lang.String Main.$noinline$toHexStringIndirect(int) pc_relative_fixups_x86 (after)
+  /// CHECK-DAG:            X86ComputeBaseMethodAddress
+  /// CHECK-DAG:            InvokeStaticOrDirect method_load_kind:BssEntry
+  public static String $noinline$toHexStringIndirect(int value) {
+    return $noinline$toHexString(value);
+  }
+
   public static void main(String[] args) {
     assertIntEquals(1, testSimple(1));
     assertIntEquals(1, testDiamond(false, 1));
@@ -208,6 +234,8 @@
     assertStringEquals("non-boot-image-string", $noinline$getNonBootImageString());
     assertClassEquals(String.class, $noinline$getStringClass());
     assertClassEquals(Other.class, $noinline$getOtherClass());
+    assertStringEquals("12345678", $noinline$toHexString(0x12345678));
+    assertStringEquals("76543210", $noinline$toHexStringIndirect(0x76543210));
   }
 }
 
diff --git a/test/562-checker-no-intermediate/src/Main.java b/test/562-checker-no-intermediate/src/Main.java
index d61a9b1..2b19918 100644
--- a/test/562-checker-no-intermediate/src/Main.java
+++ b/test/562-checker-no-intermediate/src/Main.java
@@ -18,7 +18,7 @@
 
   /**
    * Check that the intermediate address computation is not reordered or merged
-   * across the call to Math.abs().
+   * across a method call.
    */
 
   /// CHECK-START-ARM: void Main.main(java.lang.String[]) instruction_simplifier_arm (before)
diff --git a/test/651-checker-byte-simd-minmax/src/Main.java b/test/651-checker-byte-simd-minmax/src/Main.java
index 45949ae..4e667bb 100644
--- a/test/651-checker-byte-simd-minmax/src/Main.java
+++ b/test/651-checker-byte-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -54,7 +54,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -74,7 +74,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Max>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -105,7 +105,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Max>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -125,7 +125,7 @@
   /// CHECK-DAG: <<I100:i\d+>> IntConstant 100                     loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get>>,<<I100>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get>>,<<I100>>]              loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
diff --git a/test/651-checker-char-simd-minmax/src/Main.java b/test/651-checker-char-simd-minmax/src/Main.java
index 9b05609..520e10b 100644
--- a/test/651-checker-char-simd-minmax/src/Main.java
+++ b/test/651-checker-char-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -43,7 +43,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Max>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -63,7 +63,7 @@
   /// CHECK-DAG: <<I100:i\d+>> IntConstant 100                     loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get>>,<<I100>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get>>,<<I100>>]              loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
diff --git a/test/651-checker-double-simd-minmax/src/Main.java b/test/651-checker-double-simd-minmax/src/Main.java
index 6b12e7e..2eaf907 100644
--- a/test/651-checker-double-simd-minmax/src/Main.java
+++ b/test/651-checker-double-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:d\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:d\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:d\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinDoubleDouble loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:d\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>>      outer_loop:none
   //
   // TODO x86: 0.0 vs -0.0?
@@ -45,7 +45,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:d\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:d\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:d\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxDoubleDouble loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:d\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>>      outer_loop:none
   //
   // TODO x86: 0.0 vs -0.0?
diff --git a/test/651-checker-float-simd-minmax/src/Main.java b/test/651-checker-float-simd-minmax/src/Main.java
index 278a9c9..dc09dfc 100644
--- a/test/651-checker-float-simd-minmax/src/Main.java
+++ b/test/651-checker-float-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:f\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:f\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:f\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinFloatFloat loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:f\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>>      outer_loop:none
   //
   // TODO x86: 0.0 vs -0.0?
@@ -45,7 +45,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:f\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:f\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:f\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxFloatFloat loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:f\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>>      outer_loop:none
   //
   // TODO x86: 0.0 vs -0.0?
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index cfa0ae7..82fad84 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:i\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:i\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>>      outer_loop:none
   //
   /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
@@ -42,7 +42,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:i\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:i\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>>      outer_loop:none
   //
   /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
diff --git a/test/651-checker-long-simd-minmax/src/Main.java b/test/651-checker-long-simd-minmax/src/Main.java
index 458cb8b..f52686e 100644
--- a/test/651-checker-long-simd-minmax/src/Main.java
+++ b/test/651-checker-long-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:j\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:j\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:j\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinLongLong loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:j\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>>      outer_loop:none
   //
   // Not directly supported for longs.
@@ -48,7 +48,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:j\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:j\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:j\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxLongLong loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:j\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>>      outer_loop:none
   //
   // Not directly supported for longs.
diff --git a/test/651-checker-short-simd-minmax/src/Main.java b/test/651-checker-short-simd-minmax/src/Main.java
index 5f10ada..4300ca2 100644
--- a/test/651-checker-short-simd-minmax/src/Main.java
+++ b/test/651-checker-short-simd-minmax/src/Main.java
@@ -23,7 +23,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -54,7 +54,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -74,7 +74,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Max>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -105,7 +105,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Max:i\d+>>  InvokeStaticOrDirect [<<Get1>>,<<Get2>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Max:i\d+>>  Max [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Max>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
@@ -125,7 +125,7 @@
   /// CHECK-DAG: <<I100:i\d+>> IntConstant 100                     loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Min:i\d+>>  InvokeStaticOrDirect [<<Get>>,<<I100>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG: <<Min:i\d+>>  Min [<<Get>>,<<I100>>]              loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Min>>]            loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   //
diff --git a/test/661-checker-simd-reduc/src/Main.java b/test/661-checker-simd-reduc/src/Main.java
index 3a0a049..fcd50a6 100644
--- a/test/661-checker-simd-reduc/src/Main.java
+++ b/test/661-checker-simd-reduc/src/Main.java
@@ -378,7 +378,7 @@
   /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]      loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<ConsM>>,{{i\d+}}]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 InvokeStaticOrDirect [<<Phi2>>,<<Get>>] intrinsic:MathMinIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Min [<<Phi2>>,<<Get>>]        loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
@@ -438,7 +438,7 @@
   /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]      loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<ConsM>>,{{i\d+}}]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 InvokeStaticOrDirect [<<Phi2>>,<<Get>>] intrinsic:MathMaxIntInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Max [<<Phi2>>,<<Get>>]        loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
diff --git a/test/952-invoke-custom/expected.txt b/test/952-invoke-custom/expected.txt
index 767cc7e..7e8ffa6 100644
--- a/test/952-invoke-custom/expected.txt
+++ b/test/952-invoke-custom/expected.txt
@@ -18,3 +18,67 @@
 testInstanceFieldAccessors
 testInvokeVirtual => max(77, -3) = 77
 testConstructor => class TestInvocationKinds$Widget
+TestDynamicArguments
+bsm
+0, One, 3.141592653589793
+bsm
+1, Two, 2.718281828459045
+bsm
+2, Three, 0.0
+0, One, 3.141592653589793
+1, Two, 2.718281828459045
+2, Three, 0.0
+TestBadBootstrapArguments
+bsm(class TestBadBootstrapArguments, happy, ()void, -1, very)
+happy
+invokeWrongParameterTypes => class java.lang.NoSuchMethodError
+invokeMissingParameterTypes => class java.lang.NoSuchMethodError
+invokeExtraArguments => class java.lang.BootstrapMethodError => class java.lang.invoke.WrongMethodTypeException
+invokeWrongArguments => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+invokeWrongArguments => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+invokeWrongArgumentsAgain => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+invokeNarrowArguments => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+bsmDJ(..., 1.7976931348623157E308, 2147483647)
+wideningArguments
+bsmDoubleLong(..., 1.7976931348623157E308, 9223372036854775807)
+boxingArguments
+invokeWideningBoxingArguments => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+bsm returning void value.
+invokeVoidReturnType() => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+bsm returning Object value.
+invokeObjectReturnType() => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+bsm returning Integer value.
+invokeIntegerReturnType() => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+Hello!
+bsmWithStringArray(TestVariableArityLinkerMethod, methodA, ()void, [Aachen, Aalborg, Aalto]);
+methodA
+bsmWithStringArray(TestVariableArityLinkerMethod, methodB, ()void, [barium]);
+methodB
+bsmWithStringArray(TestVariableArityLinkerMethod, methodC, ()void, []);
+methodC
+methodA
+methodB
+methodC
+bsmWithIntAndStringArray(TestVariableArityLinkerMethod, methodD, ()void, 101, [zoo, zoogene, zoogenic]);
+methodD
+bsmWithIntAndStringArray(TestVariableArityLinkerMethod, methodE, ()void, 102, [zonic]);
+methodE
+bsmWithIntAndStringArray(TestVariableArityLinkerMethod, methodF, ()void, 103, []);
+methodF
+methodD
+methodE
+methodF
+bsmWithLongAndIntArray(TestVariableArityLinkerMethod, methodG, ()void, 81985529216486895, [1, -1, 2, -2]);
+methodG
+bsmWithFloatAndLongArray(TestVariableArityLinkerMethod, methodH, ()void, -2.7182817, [999999999999, -8888888888888]);
+methodH
+bsmWithClassAndFloatArray(TestVariableArityLinkerMethod, methodI, ()void, class java.lang.Throwable, [3.4028235E38, 1.4E-45, 3.1415927, -3.1415927]);
+methodI
+bsmWithDoubleArray(TestVariableArityLinkerMethod, methodJ, ()void, [1.7976931348623157E308, 4.9E-324, 2.718281828459045, -3.141592653589793]);
+methodJ
+bsmWithClassArray(TestVariableArityLinkerMethod, methodK, ()void, [class java.lang.Integer, class java.lang.invoke.MethodHandles, class java.util.Arrays]);
+methodK
+methodO => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+methodP => class java.lang.BootstrapMethodError => class java.lang.ClassCastException
+methodQ => class java.lang.BootstrapMethodError => class java.lang.invoke.WrongMethodTypeException
+methodR => class java.lang.BootstrapMethodError => class java.lang.invoke.WrongMethodTypeException
diff --git a/test/952-invoke-custom/src/Main.java b/test/952-invoke-custom/src/Main.java
index 0b1c1ff..d2250a9 100644
--- a/test/952-invoke-custom/src/Main.java
+++ b/test/952-invoke-custom/src/Main.java
@@ -74,18 +74,15 @@
                 TestLinkerMethodMinimalArguments.FAILURE_TYPE_NONE, 10, 13);
     }
 
-    private static void TestInvokeCustomWithConcurrentThreads() throws Throwable {
-        // This is a concurrency test that attempts to run invoke-custom on the same
-        // call site.
-        TestInvokeCustomWithConcurrentThreads.test();
-    }
-
     public static void main(String[] args) throws Throwable {
         TestUninitializedCallSite();
         TestLinkerMethodMinimalArguments();
         TestLinkerMethodMultipleArgumentTypes();
         TestLinkerUnrelatedBSM.test();
-        TestInvokeCustomWithConcurrentThreads();
+        TestInvokeCustomWithConcurrentThreads.test();
         TestInvocationKinds.test();
+        TestDynamicBootstrapArguments.test();
+        TestBadBootstrapArguments.test();
+        TestVariableArityLinkerMethod.test();
     }
 }
diff --git a/test/952-invoke-custom/src/TestBadBootstrapArguments.java b/test/952-invoke-custom/src/TestBadBootstrapArguments.java
new file mode 100644
index 0000000..25d8b59
--- /dev/null
+++ b/test/952-invoke-custom/src/TestBadBootstrapArguments.java
@@ -0,0 +1,583 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import annotations.BootstrapMethod;
+import annotations.CalledByIndy;
+import annotations.Constant;
+import java.lang.invoke.CallSite;
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+
+public class TestBadBootstrapArguments extends TestBase {
+    private static CallSite bsm(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            int extraInt,
+            String extraString)
+            throws Throwable {
+        System.out.print("bsm(");
+        System.out.print(lookup.lookupClass());
+        System.out.print(", ");
+        System.out.print(methodName);
+        System.out.print(", ");
+        System.out.print(methodType);
+        System.out.print(", ");
+        System.out.print(extraInt);
+        System.out.print(", ");
+        System.out.print(extraString);
+        System.out.println(")");
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String.class
+                    }
+                ),
+        fieldOrMethodName = "happy",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = -1),
+            @Constant(stringValue = "very")
+        }
+    )
+    private static void invokeHappy() {
+        assertNotReached();
+    }
+
+    private static void happy() {
+        System.out.println("happy");
+    }
+
+    // BootstrapMethod.parameterTypes != parameterTypesOf(constantArgumentsForBootstrapMethod)
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        double.class
+                    }
+                ),
+        fieldOrMethodName = "wrongParameterTypes",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = -1),
+            @Constant(stringValue = "very")
+        }
+    )
+    private static void invokeWrongParameterTypes() throws NoSuchMethodError {
+        assertNotReached();
+    }
+
+    private static void wrongParameterTypes() {
+        System.out.println("wrongParameterTypes");
+    }
+
+    // BootstrapMethod.parameterTypes != parameterTypesOf(constantArgumentsForBootstrapMethod)
+    // (missing constantArgumentTypes)
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        double.class
+                    }
+                ),
+        fieldOrMethodName = "missingParameterTypes",
+        constantArgumentsForBootstrapMethod = {}
+    )
+    private static void invokeMissingParameterTypes() throws NoSuchMethodError {
+        assertNotReached();
+    }
+
+    private static void missingParameterTypes() {
+        System.out.println("missingParameterTypes");
+    }
+
+    // BootstrapMethod.parameterTypes != parameterTypesOf(constantArgumentsForBootstrapMethod):
+    // extra constant present
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String.class
+                    }
+                ),
+        fieldOrMethodName = "extraArguments",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = 1),
+            @Constant(stringValue = "2"),
+            @Constant(intValue = 3)
+        }
+    )
+    private static void invokeExtraArguments() {
+        assertNotReached();
+    }
+
+    private static void extraArguments() {
+        System.out.println("extraArguments");
+    }
+
+    // constantArgumentTypes do not correspond to expected parameter types
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String.class
+                    }
+                ),
+        fieldOrMethodName = "wrongArguments",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(stringValue = "1"),
+            @Constant(doubleValue = Math.PI)
+        }
+    )
+    private static void invokeWrongArguments() {
+        assertNotReached();
+    }
+
+    private static void wrongArguments() {
+        System.out.println("wrongArguments");
+    }
+
+    // constantArgumentTypes do not correspond to expected parameter types
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String.class
+                    }
+                ),
+        fieldOrMethodName = "wrongArgumentsAgain",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(doubleValue = Math.PI),
+            @Constant(stringValue = "pie")
+        }
+    )
+    private static void invokeWrongArgumentsAgain() {
+        assertNotReached();
+    }
+
+    private static void wrongArgumentsAgain() {
+        System.out.println("wrongArgumentsAgain");
+    }
+
+    // Primitive argument types not supported {Z, B, C, S}.
+    private static CallSite bsmZBCS(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            boolean extraArg0,
+            byte extraArg1,
+            char extraArg2,
+            short extraArg3)
+            throws Throwable {
+        assertNotReached();
+        return null;
+    }
+
+    // Arguments are narrower than supported.
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmZBCS",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        boolean.class,
+                        byte.class,
+                        char.class,
+                        short.class
+                    }
+                ),
+        fieldOrMethodName = "narrowArguments",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(booleanValue = true),
+            @Constant(byteValue = Byte.MAX_VALUE),
+            @Constant(charValue = 'A'),
+            @Constant(shortValue = Short.MIN_VALUE)
+        }
+    )
+    private static void invokeNarrowArguments() {
+        assertNotReached();
+    }
+
+    private static void narrowArguments() {
+        assertNotReached();
+    }
+
+    private static CallSite bsmDJ(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            double extraArg0,
+            long extraArg1)
+            throws Throwable {
+        System.out.print("bsmDJ(..., ");
+        System.out.print(extraArg0);
+        System.out.print(", ");
+        System.out.print(extraArg1);
+        System.out.println(")");
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    // Arguments need widening to parameter types.
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmDJ",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        double.class,
+                        long.class
+                    }
+                ),
+        fieldOrMethodName = "wideningArguments",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(doubleValue = Double.MAX_VALUE),
+            @Constant(intValue = Integer.MAX_VALUE)
+        }
+    )
+    private static void invokeWideningArguments() {
+        assertNotReached();
+    }
+
+    private static void wideningArguments() {
+        System.out.println("wideningArguments");
+    }
+
+    private static CallSite bsmDoubleLong(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            Double extraArg0,
+            Long extraArg1)
+            throws Throwable {
+        System.out.print("bsmDoubleLong(..., ");
+        System.out.print(extraArg0);
+        System.out.print(", ");
+        System.out.print(extraArg1);
+        System.out.println(")");
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    // Arguments need boxing to parameter types
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmDoubleLong",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        Double.class,
+                        Long.class
+                    }
+                ),
+        fieldOrMethodName = "boxingArguments",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(doubleValue = Double.MAX_VALUE),
+            @Constant(longValue = Long.MAX_VALUE)
+        }
+    )
+    private static void invokeBoxingArguments() {
+        assertNotReached();
+    }
+
+    private static void boxingArguments() {
+        System.out.println("boxingArguments");
+    }
+
+    // Arguments need widening and boxing to parameter types
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmDoubleLong",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        Double.class,
+                        Long.class
+                    }
+                ),
+        fieldOrMethodName = "wideningBoxingArguments",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(floatValue = Float.MAX_VALUE),
+            @Constant(longValue = Integer.MAX_VALUE)
+        }
+    )
+    private static void invokeWideningBoxingArguments() {
+        assertNotReached();
+    }
+
+    private static void wideningBoxingArguments() {
+        System.out.println("wideningBoxingArguments");
+    }
+
+    static void bsmReturningVoid(MethodHandles.Lookup lookup, String name, MethodType type) {
+        System.out.println("bsm returning void value.");
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmReturningVoid",
+                    parameterTypes = {MethodHandles.Lookup.class, String.class, MethodType.class},
+                    returnType = void.class
+                ),
+        fieldOrMethodName = "voidReturnType"
+    )
+    private static void invokeVoidReturnType() {
+        assertNotReached();
+    }
+
+    private static void voidReturnType() {
+        assertNotReached();
+    }
+
+    static Object bsmReturningObject(MethodHandles.Lookup lookup, String name, MethodType type) {
+        System.out.println("bsm returning Object value.");
+        return new Object();
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmReturningObject",
+                    parameterTypes = {MethodHandles.Lookup.class, String.class, MethodType.class},
+                    returnType = Object.class
+                ),
+        fieldOrMethodName = "ObjectReturnType"
+    )
+    private static void invokeObjectReturnType() {
+        assertNotReached();
+    }
+
+    private static void objectReturnType() {
+        assertNotReached();
+    }
+
+    static Integer bsmReturningInteger(MethodHandles.Lookup lookup, String name, MethodType type) {
+        System.out.println("bsm returning Integer value.");
+        return Integer.valueOf(3);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmReturningInteger",
+                    parameterTypes = {MethodHandles.Lookup.class, String.class, MethodType.class},
+                    returnType = Integer.class
+                ),
+        fieldOrMethodName = "integerReturnType"
+    )
+    private static void invokeIntegerReturnType() {
+        assertNotReached();
+    }
+
+    private static void integerReturnType() {
+        assertNotReached();
+    }
+
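+    // A user-defined ConstantCallSite subclass is a valid bootstrap return type;
+    // this case should link successfully and invoke the target.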
+    static class TestersConstantCallSite extends ConstantCallSite {
+        public TestersConstantCallSite(MethodHandle mh) {
+            super(mh);
+        }
+    }
+
+    static TestersConstantCallSite bsmReturningTestersConstantCallsite(
+            MethodHandles.Lookup lookup, String name, MethodType type) throws Throwable {
+        return new TestersConstantCallSite(lookup.findStatic(lookup.lookupClass(), name, type));
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestBadBootstrapArguments.class,
+                    name = "bsmReturningTestersConstantCallsite",
+                    parameterTypes = {MethodHandles.Lookup.class, String.class, MethodType.class},
+                    returnType = TestersConstantCallSite.class
+                ),
+        fieldOrMethodName = "sayHello"
+    )
+    private static void invokeViaCustomCallSiteClass() {
+        assertNotReached();
+    }
+
+    private static void sayHello() {
+        System.out.println("Hello!");
+    }
+
+    static void test() {
+        System.out.println("TestBadBootstrapArguments");
+        invokeHappy();
+        try {
+            invokeWrongParameterTypes();
+            assertNotReached();
+        } catch (NoSuchMethodError expected) {
+            System.out.print("invokeWrongParameterTypes => ");
+            System.out.println(expected.getClass());
+        }
+        try {
+            invokeMissingParameterTypes();
+            assertNotReached();
+        } catch (NoSuchMethodError expected) {
+            System.out.print("invokeMissingParameterTypes => ");
+            System.out.println(expected.getClass());
+        }
+        try {
+            invokeExtraArguments();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            assertEquals(WrongMethodTypeException.class, expected.getCause().getClass());
+            System.out.print("invokeExtraArguments => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            invokeWrongArguments();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            assertEquals(ClassCastException.class, expected.getCause().getClass());
+            System.out.print("invokeWrongArguments => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
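+        // Invoke a second time: the test checks that a call site whose bootstrap
+        // failed raises the error again on re-execution.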
+        try {
+            invokeWrongArguments();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            assertEquals(ClassCastException.class, expected.getCause().getClass());
+            System.out.print("invokeWrongArguments => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            invokeWrongArgumentsAgain();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            assertEquals(ClassCastException.class, expected.getCause().getClass());
+            System.out.print("invokeWrongArgumentsAgain => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            invokeNarrowArguments();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            assertEquals(ClassCastException.class, expected.getCause().getClass());
+            System.out.print("invokeNarrowArguments => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
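+        // These link successfully: the constants are widened or boxed to match
+        // the bsm parameter types.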
+        invokeWideningArguments();
+        invokeBoxingArguments();
+        try {
+            invokeWideningBoxingArguments();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("invokeWideningBoxingArguments => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            invokeVoidReturnType();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("invokeVoidReturnType() => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            invokeObjectReturnType();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("invokeObjectReturnType() => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            invokeIntegerReturnType();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("invokeIntegerReturnType() => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        invokeViaCustomCallSiteClass();
+    }
+}
diff --git a/test/952-invoke-custom/src/TestDynamicBootstrapArguments.java b/test/952-invoke-custom/src/TestDynamicBootstrapArguments.java
new file mode 100644
index 0000000..782feca
--- /dev/null
+++ b/test/952-invoke-custom/src/TestDynamicBootstrapArguments.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import annotations.BootstrapMethod;
+import annotations.CalledByIndy;
+import annotations.Constant;
+import java.lang.invoke.CallSite;
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+class TestDynamicBootstrapArguments extends TestBase {
+    private static int bsmCalls = 0;
+
+    static CallSite bsm(
+            MethodHandles.Lookup lookup,
+            String name,
+            MethodType methodType,
+            String otherNameComponent,
+            long nameSuffix)
+            throws Throwable {
+        bsmCalls = bsmCalls + 1;
+        Class<?> definingClass = TestDynamicBootstrapArguments.class;
+        String methodName = name + otherNameComponent + nameSuffix;
+        MethodHandle mh = lookup.findStatic(definingClass, methodName, methodType);
+        System.out.println("bsm");
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestDynamicBootstrapArguments.class,
+                    name = "bsm",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        String.class,
+                        long.class
+                    }
+                ),
+        fieldOrMethodName = "target",
+        returnType = int.class,
+        parameterTypes = {int.class, String.class, double.class},
+        constantArgumentsForBootstrapMethod = {
+            @Constant(stringValue = "A"),
+            @Constant(longValue = 100000000L)
+        }
+    )
+    private static int testDynamic(int i, String s, Double d) {
+        assertNotReached();
+        return 0;
+    }
+
+    private static int targetA100000000(int i, String s, Double d) {
+        System.out.print(i);
+        System.out.print(", ");
+        System.out.print(s);
+        System.out.print(", ");
+        System.out.println(d);
+        return i;
+    }
+
+    static void testCallSites() {
+        assertEquals(0, testDynamic(0, "One", Math.PI));
+        assertEquals(1, testDynamic(1, "Two", Math.E));
+        assertEquals(2, testDynamic(2, "Three", 0.0));
+    }
+
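+    // The bootstrap method runs once per call site, so a second round of calls
+    // must leave bsmCalls unchanged at 3.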
+    static void test() {
+        System.out.println("TestDynamicArguments");
+        testCallSites();
+        assertEquals(3, bsmCalls);
+        testCallSites();
+        assertEquals(3, bsmCalls);
+    }
+}
diff --git a/test/952-invoke-custom/src/TestInvocationKinds.java b/test/952-invoke-custom/src/TestInvocationKinds.java
index 7b88c18..f743bef 100644
--- a/test/952-invoke-custom/src/TestInvocationKinds.java
+++ b/test/952-invoke-custom/src/TestInvocationKinds.java
@@ -173,6 +173,7 @@
 
     static class Widget {
         int value;
+
         public Widget(int value) {}
     }
 
diff --git a/test/952-invoke-custom/src/TestVariableArityLinkerMethod.java b/test/952-invoke-custom/src/TestVariableArityLinkerMethod.java
new file mode 100644
index 0000000..597273c
--- /dev/null
+++ b/test/952-invoke-custom/src/TestVariableArityLinkerMethod.java
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import annotations.BootstrapMethod;
+import annotations.CalledByIndy;
+import annotations.Constant;
+import java.lang.invoke.CallSite;
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.util.Arrays;
+
+public class TestVariableArityLinkerMethod extends TestBase {
+    private static void printBsmArgs(String method, Object... args) {
+        System.out.print(method);
+        System.out.print("(");
+        for (int i = 0; i < args.length; ++i) {
+            if (i != 0) {
+                System.out.print(", ");
+            }
+            if (args[i] != null && args[i].getClass().isArray()) {
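+                // Primitive arrays cannot be cast to Object[]; print them via
+                // the matching Arrays.toString overload.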
+                Object array = args[i];
+                if (array.getClass() == int[].class) {
+                    System.out.print(Arrays.toString((int[]) array));
+                } else if (array.getClass() == long[].class) {
+                    System.out.print(Arrays.toString((long[]) array));
+                } else if (array.getClass() == float[].class) {
+                    System.out.print(Arrays.toString((float[]) array));
+                } else if (array.getClass() == double[].class) {
+                    System.out.print(Arrays.toString((double[]) array));
+                } else {
+                    System.out.print(Arrays.toString((Object[]) array));
+                }
+            } else {
+                System.out.print(args[i]);
+            }
+        }
+        System.out.println(");");
+    }
+
+    private static CallSite bsmWithStringArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            String... arityArgs)
+            throws Throwable {
+        printBsmArgs("bsmWithStringArray", lookup, methodName, methodType, arityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodA",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(stringValue = "Aachen"),
+            @Constant(stringValue = "Aalborg"),
+            @Constant(stringValue = "Aalto")
+        }
+    )
+    private static void methodA() {
+        System.out.println("methodA");
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodB",
+        constantArgumentsForBootstrapMethod = {@Constant(stringValue = "barium")}
+    )
+    private static void methodB() {
+        System.out.println("methodB");
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodC"
+    )
+    private static void methodC() {
+        System.out.println("methodC");
+    }
+
+    private static CallSite bsmWithIntAndStringArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            int extraInt,
+            String... extraArityArgs)
+            throws Throwable {
+        printBsmArgs(
+                "bsmWithIntAndStringArray",
+                lookup,
+                methodName,
+                methodType,
+                extraInt,
+                extraArityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithIntAndStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodD",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = 101),
+            @Constant(stringValue = "zoo"),
+            @Constant(stringValue = "zoogene"),
+            @Constant(stringValue = "zoogenic")
+        }
+    )
+    private static void methodD() {
+        System.out.println("methodD");
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithIntAndStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodE",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = 102),
+            @Constant(stringValue = "zonic")
+        }
+    )
+    private static void methodE() {
+        System.out.println("methodE");
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithIntAndStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodF",
+        constantArgumentsForBootstrapMethod = {@Constant(intValue = 103)}
+    )
+    private static void methodF() {
+        System.out.println("methodF");
+    }
+
+    private static CallSite bsmWithLongAndIntArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            long extraArg,
+            int... arityArgs)
+            throws Throwable {
+        printBsmArgs("bsmWithLongAndIntArray", lookup, methodName, methodType, extraArg, arityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithLongAndIntArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        long.class,
+                        int[].class
+                    }
+                ),
+        fieldOrMethodName = "methodG",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(longValue = 0x123456789abcdefL),
+            @Constant(intValue = +1),
+            @Constant(intValue = -1),
+            @Constant(intValue = +2),
+            @Constant(intValue = -2)
+        }
+    )
+    private static void methodG() {
+        System.out.println("methodG");
+    }
+
+    private static CallSite bsmWithFloatAndLongArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            float extraArg,
+            long... arityArgs)
+            throws Throwable {
+        printBsmArgs(
+                "bsmWithFloatAndLongArray", lookup, methodName, methodType, extraArg, arityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithFloatAndLongArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        float.class,
+                        long[].class
+                    }
+                ),
+        fieldOrMethodName = "methodH",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(floatValue = (float) -Math.E),
+            @Constant(longValue = 999999999999L),
+            @Constant(longValue = -8888888888888L)
+        }
+    )
+    private static void methodH() {
+        System.out.println("methodH");
+    }
+
+    private static CallSite bsmWithClassAndFloatArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            Class<?> extraArg,
+            float... arityArgs)
+            throws Throwable {
+        printBsmArgs(
+                "bsmWithClassAndFloatArray", lookup, methodName, methodType, extraArg, arityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithClassAndFloatArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        Class.class,
+                        float[].class
+                    }
+                ),
+        fieldOrMethodName = "methodI",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(classValue = Throwable.class),
+            @Constant(floatValue = Float.MAX_VALUE),
+            @Constant(floatValue = Float.MIN_VALUE),
+            @Constant(floatValue = (float) Math.PI),
+            @Constant(floatValue = (float) -Math.PI)
+        }
+    )
+    private static void methodI() {
+        System.out.println("methodI");
+    }
+
+    private static CallSite bsmWithDoubleArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            double... arityArgs)
+            throws Throwable {
+        printBsmArgs("bsmWithDoubleArray", lookup, methodName, methodType, arityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithDoubleArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        double[].class
+                    }
+                ),
+        fieldOrMethodName = "methodJ",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(doubleValue = Double.MAX_VALUE),
+            @Constant(doubleValue = Double.MIN_VALUE),
+            @Constant(doubleValue = Math.E),
+            @Constant(doubleValue = -Math.PI)
+        }
+    )
+    private static void methodJ() {
+        System.out.println("methodJ");
+    }
+
+    private static CallSite bsmWithClassArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            Class<?>... arityArgs)
+            throws Throwable {
+        printBsmArgs("bsmWithClassArray", lookup, methodName, methodType, arityArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithClassArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        Class[].class
+                    }
+                ),
+        fieldOrMethodName = "methodK",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(classValue = Integer.class),
+            @Constant(classValue = MethodHandles.class),
+            @Constant(classValue = Arrays.class)
+        }
+    )
+    private static void methodK() {
+        System.out.println("methodK");
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithIntAndStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodO",
+        constantArgumentsForBootstrapMethod = {@Constant(intValue = 103), @Constant(intValue = 104)}
+    )
+    private static void methodO() {
+        // Arguments are not compatible
+        assertNotReached();
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithIntAndStringArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        String[].class
+                    }
+                ),
+        fieldOrMethodName = "methodP",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = 103),
+            @Constant(stringValue = "A"),
+            @Constant(stringValue = "B"),
+            @Constant(intValue = 42)
+        }
+    )
+    private static void methodP() {
+        // Arguments are not compatible - specifically, the third
+        // component of the potential collector array is an int
+        // argument (42).
+        assertNotReached();
+    }
+
+    private static CallSite bsmWithWiderArray(
+            MethodHandles.Lookup lookup, String methodName, MethodType methodType, long[] extraArgs)
+            throws Throwable {
+        printBsmArgs("bsmWithWiderArray", lookup, methodName, methodType, extraArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithWiderArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        long[].class
+                    }
+                ),
+        fieldOrMethodName = "methodQ",
+        constantArgumentsForBootstrapMethod = {@Constant(intValue = 103), @Constant(intValue = 42)}
+    )
+    private static void methodQ() {
+        assertNotReached();
+    }
+
+    private static CallSite bsmWithBoxedArray(
+            MethodHandles.Lookup lookup,
+            String methodName,
+            MethodType methodType,
+            Integer[] extraArgs)
+            throws Throwable {
+        printBsmArgs("bsmWithBoxedArray", lookup, methodName, methodType, extraArgs);
+        MethodHandle mh = lookup.findStatic(lookup.lookupClass(), methodName, methodType);
+        return new ConstantCallSite(mh);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestVariableArityLinkerMethod.class,
+                    name = "bsmWithBoxedArray",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        Integer[].class
+                    }
+                ),
+        fieldOrMethodName = "methodR",
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = 1030),
+            @Constant(intValue = 420)
+        }
+    )
+    private static void methodR() {
+        assertNotReached();
+    }
+
+    static void test() {
+        // Happy cases
+        for (int i = 0; i < 2; ++i) {
+            methodA();
+            methodB();
+            methodC();
+        }
+        for (int i = 0; i < 2; ++i) {
+            methodD();
+            methodE();
+            methodF();
+        }
+        methodG();
+        methodH();
+        methodI();
+        methodJ();
+        methodK();
+
+        // Broken cases
+        try {
+            // bsm has incompatible static arguments. The collector
+            // component type is String, but the corresponding static
+            // arguments are int values.
+            methodO();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("methodO => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            // bsm has a trailing String array for the collector array.
+            // There is an int value amongst the String values.
+            methodP();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("methodP => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            // bsm has a trailing long[] parameter for the collector array.
+            // The corresponding static bsm arguments are of type int.
+            methodQ();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("methodQ => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+        try {
+            // bsm has a trailing Integer[] parameter for the collector array.
+            // The corresponding static bsm arguments are of type int.
+            methodR();
+            assertNotReached();
+        } catch (BootstrapMethodError expected) {
+            System.out.print("methodR => ");
+            System.out.print(expected.getClass());
+            System.out.print(" => ");
+            System.out.println(expected.getCause().getClass());
+        }
+    }
+}
diff --git a/test/988-method-trace/check b/test/988-method-trace/check
deleted file mode 100644
index de64a3e..0000000
--- a/test/988-method-trace/check
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Building for libcore, this uses @hide API which gives it wrong method trace in the expected.txt
-# TODO: would be nice if we could build against core_current jars in the future to avoid this.
-if [[ "$NEED_DEX" == true ]]; then
-  patch -p0 expected.txt < expected_jack.diff >/dev/null
-fi
-
-./default-check "$@"
diff --git a/test/988-method-trace/expected.txt b/test/988-method-trace/expected.txt
index 574d5b0..7f64e23 100644
--- a/test/988-method-trace/expected.txt
+++ b/test/988-method-trace/expected.txt
@@ -107,8 +107,8 @@
 ......=> public static char[] java.util.Arrays.copyOf(char[],int)
 .......=> public static int java.lang.Math.min(int,int)
 .......<= public static int java.lang.Math.min(int,int) -> <class java.lang.Integer: 16>
-.......=> public static void java.lang.System.arraycopy(char[],int,char[],int,int)
-.......<= public static void java.lang.System.arraycopy(char[],int,char[],int,int) -> <null: null>
+.......=> public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int)
+.......<= public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int) -> <null: null>
 ......<= public static char[] java.util.Arrays.copyOf(char[],int) -> <class [C: [B, a, d,  , a, r, g, u, m, e, n, t, :,  , -, 1, 9,  , <,  , 0, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>]>
 .....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
 .....=> static void java.lang.Integer.getChars(int,int,char[])
@@ -208,8 +208,8 @@
 ......=> public static char[] java.util.Arrays.copyOf(char[],int)
 .......=> public static int java.lang.Math.min(int,int)
 .......<= public static int java.lang.Math.min(int,int) -> <class java.lang.Integer: 16>
-.......=> public static void java.lang.System.arraycopy(char[],int,char[],int,int)
-.......<= public static void java.lang.System.arraycopy(char[],int,char[],int,int) -> <null: null>
+.......=> public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int)
+.......<= public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int) -> <null: null>
 ......<= public static char[] java.util.Arrays.copyOf(char[],int) -> <class [C: [B, a, d,  , a, r, g, u, m, e, n, t, :,  , -, 1, 9,  , <,  , 0, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>, <control-0000>]>
 .....<= private void java.lang.AbstractStringBuilder.ensureCapacityInternal(int) -> <null: null>
 .....=> static void java.lang.Integer.getChars(int,int,char[])
diff --git a/test/988-method-trace/expected_jack.diff b/test/988-method-trace/expected_jack.diff
deleted file mode 100644
index 11364a0..0000000
--- a/test/988-method-trace/expected_jack.diff
+++ /dev/null
@@ -1,10 +0,0 @@
-450,453c450,453
-< .=> public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int)
-< .<= public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int) -> <null: null>
-< .=> public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int)
-< .<= public static void java.lang.System.arraycopy(java.lang.Object,int,java.lang.Object,int,int) -> <null: null>
----
-> .=> public static void java.lang.System.arraycopy(int[],int,int[],int,int)
-> .<= public static void java.lang.System.arraycopy(int[],int,int[],int,int) -> <null: null>
-> .=> public static void java.lang.System.arraycopy(char[],int,char[],int,int)
-> .<= public static void java.lang.System.arraycopy(char[],int,char[],int,int) -> <null: null>
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 43684f8..86adb73 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -865,7 +865,13 @@
     fi
 
     # System libraries needed by libarttestd.so
-    PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libdexfile.so:libdexfiled.so:libbase.so:libnativehelper.so
+    PUBLIC_LIBS=libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
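+    # Add the debug or non-debug variants of the ART libraries to match the flavor under test.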
+    if [ "$TEST_IS_NDEBUG" = "y" ]; then
+      PUBLIC_LIBS=$PUBLIC_LIBS:libart.so:libdexfile.so
+    else
+      PUBLIC_LIBS=$PUBLIC_LIBS:libartd.so:libdexfiled.so
+    fi
 
     # Create a script with the command. The command can get longer than the longest
     # allowed adb command and there is no way to get the exit status from a adb shell
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 5fb7819..b2f579d 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -934,7 +934,6 @@
           "946-obsolete-throw",
           "948-change-annotations",
           "950-redefine-intrinsic",
-          "952-invoke-custom",
           "954-invoke-polymorphic-verifier",
           "955-methodhandles-smali",
           "956-methodhandles",
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 5556962..70efce5 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -17,6 +17,17 @@
 import tempfile
 import subprocess
 
+# begin import $ANDROID_BUILD_TOP/art/tools/build/var_cache.py
+_THIS_DIR = os.path.dirname(os.path.realpath(__file__))
+_TOP = os.path.join(_THIS_DIR, "../../..")
+_VAR_CACHE_DIR = os.path.join(_TOP, "art/tools/build/")
+
+import sys
+sys.path.append(_VAR_CACHE_DIR)
+import var_cache
+# end import var_cache.py
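+# Note: var_cache.py is a standalone helper rather than a package, hence the sys.path manipulation above.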
+
 _env = dict(os.environ)
 
 def _getEnvBoolean(var, default):
@@ -28,55 +38,8 @@
       return False
   return default
 
-_DUMP_MANY_VARS_LIST = ['HOST_2ND_ARCH_PREFIX',
-                        'TARGET_2ND_ARCH',
-                        'TARGET_ARCH',
-                        'HOST_PREFER_32_BIT',
-                        'HOST_OUT_EXECUTABLES',
-                        'ANDROID_JAVA_TOOLCHAIN',
-                        'ANDROID_COMPILE_WITH_JACK',
-                        'USE_D8_BY_DEFAULT']
-_DUMP_MANY_VARS = None  # To be set to a dictionary with above list being the keys,
-                        # and the build variable being the value.
-def _dump_many_vars(var_name):
-  """
-  Reach into the Android build system to dump many build vars simultaneously.
-  Since the make system is so slow, we want to avoid calling into build frequently.
-  """
-  global _DUMP_MANY_VARS
-  global _DUMP_MANY_VARS_LIST
-
-  # Look up var from cache.
-  if _DUMP_MANY_VARS:
-    return _DUMP_MANY_VARS[var_name]
-
-  all_vars=" ".join(_DUMP_MANY_VARS_LIST)
-
-  # The command is taken from build/envsetup.sh to fetch build variables.
-  command = ("build/soong/soong_ui.bash --dumpvars-mode --vars=\"%s\"") % (all_vars)
-
-  config = subprocess.Popen(command,
-                            stdout=subprocess.PIPE,
-                            universal_newlines=True,
-                            shell=True,
-                            cwd=ANDROID_BUILD_TOP).communicate()[0] # read until EOF, select stdin
-  # Prints out something like:
-  # TARGET_ARCH='arm64'
-  # HOST_ARCH='x86_64'
-  _DUMP_MANY_VARS = {}
-  for line in config.split("\n"):
-    # Split out "$key='$value'" via regex.
-    match = re.search("([^=]+)='([^']*)", line)
-    if not match:
-      continue
-    key = match.group(1)
-    value = match.group(2)
-    _DUMP_MANY_VARS[key] = value
-
-  return _DUMP_MANY_VARS[var_name]
-
 def _get_build_var(var_name):
-  return _dump_many_vars(var_name)
+  return var_cache.get_build_var(var_name)
 
 def _get_build_var_boolean(var, default):
   val = _get_build_var(var)
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index fcc5505..e0ccc3e 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 # Copyright 2017, The Android Open Source Project
 #
@@ -45,9 +45,9 @@
 ##########
 
 if options.list:
-  print "List of all known build_target: "
-  for k in sorted(target_config.iterkeys()):
-    print " * " + k
+  print("List of all known build_target: ")
+  for k in sorted(target_config.keys()):
+    print(" * " + k)
   # TODO: would be nice if this was the same order as the target config file.
   sys.exit(1)
 
@@ -59,10 +59,10 @@
 n_threads = options.n_threads
 custom_env = target.get('env', {})
 custom_env['SOONG_ALLOW_MISSING_DEPENDENCIES'] = 'true'
-print custom_env
+print(custom_env)
 os.environ.update(custom_env)
 
-if target.has_key('make'):
+if 'make' in target:
   build_command = 'make'
   build_command += ' DX='
   build_command += ' -j' + str(n_threads)
@@ -75,7 +75,7 @@
   if subprocess.call(build_command.split()):
     sys.exit(1)
 
-if target.has_key('golem'):
+if 'golem' in target:
   machine_type = target.get('golem')
   # use art-opt-cc by default since it mimics the default preopt config.
   default_golem_config = 'art-opt-cc'
@@ -93,7 +93,7 @@
   if subprocess.call(cmd):
     sys.exit(1)
 
-if target.has_key('run-test'):
+if 'run-test' in target:
   run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
                                    'art/test/testrunner/testrunner.py')]
   test_flags = target.get('run-test', [])
diff --git a/tools/bisection_search/bisection_search.py b/tools/bisection_search/bisection_search.py
index 27bd599..a1ac72d 100755
--- a/tools/bisection_search/bisection_search.py
+++ b/tools/bisection_search/bisection_search.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python3
 #
 # Copyright (C) 2016 The Android Open Source Project
 #
diff --git a/tools/bisection_search/bisection_test.py b/tools/bisection_search/bisection_test.py
index 9aa08fb..b6a73c0 100755
--- a/tools/bisection_search/bisection_test.py
+++ b/tools/bisection_search/bisection_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python3
 #
 # Copyright (C) 2016 The Android Open Source Project
 #
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index f710de9..dca209d 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -21,7 +21,7 @@
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 TOP="$DIR/../.."
 
-source "${TOP}/build/envsetup.sh" >&/dev/null # import get_build_var
+source "${TOP}/art/tools/build/var_cache.sh" >&/dev/null # import get_build_var
 
 selected_env_var=
 core_jars_only=n
diff --git a/tools/build/var_cache.py b/tools/build/var_cache.py
new file mode 100644
index 0000000..9e616fa
--- /dev/null
+++ b/tools/build/var_cache.py
@@ -0,0 +1,148 @@
+# Copyright 2018, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# !!! Keep up-to-date with var_cache.sh
+#
+
+#
+# Provide a soong-build variable query mechanism that is cached
+# in the current process and in any child process that knows
+# how to parse the exported variable:
+#
+# export ART_TOOLS_BUILD_VAR_CACHE="..."
+#
+# Of the format:
+#
+#   <key1>='<value1>'\n
+#   <key2>='<value2>'\n
+#   ...
+#   <keyN>='<valueN>'
+#
+# Note: This is intentionally the same output format as
+#     build/soong/soong_ui.bash --dumpvars-mode --vars "key1 key2 ... keyN"
+#
+# For example, this would be a valid var-cache:
+#
+# export ART_TOOLS_BUILD_VAR_CACHE="TARGET_CORE_JARS='core-oj core-libart'
+#   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex'"
+#
+# Calling into soong repeatedly is very slow; whenever it needs to be done
+# more than once, the var_cache.py or var_cache.sh script should be used instead.
+#
+
+import os
+import subprocess
+import sys
+
+def get_build_var(name):
+  """
+  Query soong build for a variable value and return it as a string.
+
+  Var lookups are cached; subsequent lookups in this process, or in any
+  child process that inherits the var-cache, are free. The var must be
+  in 'var_list' to participate in the cache.
+
+  Example:
+     host_out = var_cache.get_build_var('HOST_OUT')
+
+  Note that build vars can often have spaces in them,
+  so the caller must take care to ensure space-correctness.
+
+  Raises KeyError if the variable name is not in 'var_list'.
+  """
+  _populate()
+  _build_dict()
+
+  value = _var_cache_dict.get(name)
+  if value is None:
+    _debug(_var_cache_dict)
+    raise KeyError("The variable '%s' is not in 'var_list'; can't look it up" % name)
+
+  return value
+
+_var_cache_dict = None
+_THIS_DIR = os.path.dirname(os.path.realpath(__file__))
+_TOP = os.path.join(_THIS_DIR, "../../..")
+_VAR_LIST_PATH = os.path.join(_THIS_DIR, "var_list")
+_SOONG_UI_SCRIPT = os.path.join(_TOP, "build/soong/soong_ui.bash")
+_DEBUG = False
+
+def _populate():
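+  # Reuse a cache already exported by this or a parent process, if present.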
+  if os.environ.get('ART_TOOLS_BUILD_VAR_CACHE'):
+    return
+
+  _debug("Varcache missing (PY)... repopulate")
+
+  interesting_vars=[]
+  with open(_VAR_LIST_PATH) as f:
+    for line in f.readlines():
+      line = line.strip()
+      if not line or line.startswith('#'):
+        continue
+
+      _debug(line)
+
+      interesting_vars.append(line)
+
+  _debug("Interesting vars: ", interesting_vars)
+
+  # Invoke soong exactly once for optimal performance.
+  var_values = subprocess.check_output([
+      _SOONG_UI_SCRIPT, '--dumpvars-mode', '-vars', " ".join(interesting_vars)],
+      cwd=_TOP)
+
+  # Export the ART_TOOLS_BUILD_VAR_CACHE in the same format as soong_ui.bash --dumpvars-mode.
+  os.environb[b'ART_TOOLS_BUILD_VAR_CACHE'] = var_values
+
+  _debug("Soong output: ", var_values)
+
+def _build_dict():
+  global _var_cache_dict
+
+  if _var_cache_dict:
+    return
+
+  _debug("_var_cache_build_dict()")
+
+  _var_cache_dict = {}
+
+  # Parse $ART_TOOLS_BUILD_VAR_CACHE, e.g.
+  #   TARGET_CORE_JARS='core-oj core-libart conscrypt okhttp bouncycastle apache-xml'
+  #   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex ...'
+
+  for line in os.environ['ART_TOOLS_BUILD_VAR_CACHE'].splitlines():
+    _debug(line)
+    var_name, var_value = line.split("=", 1)  # split on the first '=' only
+    var_value = var_value.strip("'")
+
+    _debug("Var name =", var_name)
+    _debug("Var value =", var_value)
+
+    _var_cache_dict[var_name] = var_value
+
+  _debug("Num entries in dict: ", len(_var_cache_dict))
+
+def _debug(*args):
+  if _DEBUG:
+    print(*args, file=sys.stderr)
+
+# Below definitions are for interactive use only, e.g.
+# python -c 'import var_cache; var_cache._dump_cache()'
+
+def _dump_cache():
+  _populate()
+  print(os.environ['ART_TOOLS_BUILD_VAR_CACHE'])
+
diff --git a/tools/build/var_cache.sh b/tools/build/var_cache.sh
new file mode 100755
index 0000000..26e9770
--- /dev/null
+++ b/tools/build/var_cache.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# !!! Keep up-to-date with var_cache.py
+#
+
+#
+# Provide a soong-build variable query mechanism that is cached
+# in the current process and in any child process that knows
+# how to parse the exported variable:
+#
+# export ART_TOOLS_BUILD_VAR_CACHE="..."
+#
+# Of the format:
+#
+#   <key1>='<value1>'\n
+#   <key2>='<value2>'\n
+#   ...
+#   <keyN>='<valueN>'
+#
+# Note: This is intentionally the same output format as
+#     build/soong/soong_ui.bash --dumpvars-mode --vars "key1 key2 ... keyN"
+#
+# For example, this would be a valid var-cache:
+#
+# export ART_TOOLS_BUILD_VAR_CACHE="TARGET_CORE_JARS='core-oj core-libart'
+#   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex'"
+#
+# Calling into soong repeatedly is very slow; whenever it needs to be done
+# more than once, the var_cache.py or var_cache.sh script should be used instead.
+#
+
+# -------------------------------------------------------
+
+# Echoes the result of get_build_var <var_name>.
+# Var lookups are cached; subsequent lookups in this process, or in any
+# child process that inherits the var-cache, are free. The var must be
+# in 'var_list' to participate in the cache.
+#
+# Example:
+#    local host_out="$(get_build_var HOST_OUT)"
+#
+# Note that build vars can often have spaces in them,
+# so the caller must take care to ensure space-correctness.
+get_build_var() {
+  local var_name="$1"
+
+  _var_cache_populate
+  _var_cache_build_dict
+
+  if [[ ${_VAR_CACHE_DICT[$var_name]+exists} ]]; then
+    echo "${_VAR_CACHE_DICT[$var_name]}"
+    return 0
+  else
+    echo "[ERROR] get_build_var: The variable '$var_name' is not in 'var_list', can't lookup." >&2
+    return 1
+  fi
+}
+
+# The above functions are "public" and are intentionally not exported.
+# User scripts must use "source var_cache.sh" to take advantage of caching.
+
+# -------------------------------------------------------
+# Below functions are "private";
+# do not call them outside of this file.
+
+_var_cache_populate() {
+  if [[ -n $ART_TOOLS_BUILD_VAR_CACHE ]]; then
+    _var_cache_debug "ART_TOOLS_BUILD_VAR_CACHE preset to (quotes added)..."
+    _var_cache_debug \""$ART_TOOLS_BUILD_VAR_CACHE"\"
+    return 0
+  fi
+
+  _var_cache_debug "Varcache missing... repopulate"
+
+  local this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+  local top="$this_dir/../../.."
+
+  local interesting_vars=()
+  while read -r line; do
+    if [[ -z $line ]] || [[ $line == '#'* ]]; then
+      continue;
+    fi
+    interesting_vars+=("$line")
+  done < "$this_dir"/var_list
+
+  _var_cache_debug "Interesting vars: " ${interesting_vars[@]}
+
+  local flat_vars="${interesting_vars[*]}"
+
+  local var_values
+  _var_cache_show_command "$top"/build/soong/soong_ui.bash --dumpvars-mode -vars=\"${interesting_vars[*]}\"
+
+  # Invoke soong exactly once for optimal performance.
+  # soong_ui.bash must be invoked from $ANDROID_BUILD_TOP or it gets confused and breaks.
+  var_values="$(cd "$top" && "$top"/build/soong/soong_ui.bash --dumpvars-mode -vars="$flat_vars")"
+
+  # Export the ART_TOOLS_BUILD_VAR_CACHE in the same format as soong_ui.bash --dumpvars-mode.
+  export ART_TOOLS_BUILD_VAR_CACHE="$var_values"
+
+  _var_cache_debug ART_TOOLS_BUILD_VAR_CACHE=\"$var_values\"
+}
+
+_var_cache_build_dict() {
+  # Associative arrays cannot be exported, so this separate step
+  # reconstructs the associative array from the flat exported variable.
+  if [[ ${#_VAR_CACHE_DICT[@]} -ne 0 ]]; then
+    # Already built.
+    return 0
+  fi
+
+  # Parse $ART_TOOLS_BUILD_VAR_CACHE, e.g.
+  #   TARGET_CORE_JARS='core-oj core-libart conscrypt okhttp bouncycastle apache-xml'
+  #   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex ...'
+
+  local var_name
+  local var_value
+  local strip_quotes
+
+  _var_cache_debug "_var_cache_build_dict()"
+
+  declare -g -A _VAR_CACHE_DICT  # global associative array.
+  while IFS='=' read -r var_name var_value; do
+    if [[ -z $var_name ]]; then
+      # skip empty lines, e.g. blank newline at the end
+      continue
+    fi
+    _var_cache_debug "Var_name was $var_name"
+    _var_cache_debug "Var_value was $var_value"
+    strip_quotes=${var_value//\'/}
+    _VAR_CACHE_DICT["$var_name"]="$strip_quotes"
+  done < <(echo "$ART_TOOLS_BUILD_VAR_CACHE")
+
+  _var_cache_debug "Num entries in dict: ${#_VAR_CACHE_DICT[@]}"
+}
+
+_var_cache_debug() {
+  if ((_var_cache_debug_enabled)); then
+    echo "[DBG]: " "$@" >&2
+  fi
+}
+
+_var_cache_show_command() {
+  if (( _var_cache_show_commands || _var_cache_debug_enabled)); then
+    echo "$@" >&2
+  fi
+}
+
+while true; do
+  case $1 in
+    --help)
+      echo "Usage: $0 [--debug] [--show-commands] [--dump-cache] [--var <name>] [--var <name2>...]"
+      echo ""
+      echo "Exposes a function 'get_build_var' which returns the result of"
+      echo "a soong build variable."
+      echo ""
+      echo "Primarily intended to be used as 'source var_cache.sh',"
+      echo "but also allows interactive command line usage for simplifying development."
+      exit 0
+      ;;
+    --var)
+      echo -ne "$2="
+      get_build_var "$2"
+      shift
+      ;;
+    --debug)
+      _var_cache_debug_enabled=1
+      ;;
+    --show-commands)
+      _var_cache_show_commands=1
+      ;;
+    --dump-cache)
+      _var_cache_populate
+      echo "ART_TOOLS_BUILD_VAR_CACHE=\"$ART_TOOLS_BUILD_VAR_CACHE\""
+      ;;
+    *)
+      break
+      ;;
+  esac
+  shift
+done
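+
+# Examples of the interactive command-line mode described in --help
+# (illustrative; actual values depend on the configured build):
+#
+#   ./var_cache.sh --var TARGET_CORE_JARS
+#   ./var_cache.sh --debug --dump-cache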
diff --git a/tools/build/var_list b/tools/build/var_list
new file mode 100644
index 0000000..3727741
--- /dev/null
+++ b/tools/build/var_list
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This file contains a list of all the build vars that need to be eagerly cached
+# by the var_cache.sh and var_cache.py scripts.
+# Lines starting with '#' or blank lines are ignored.
+#
+
+# javac-helper.sh
+TARGET_CORE_JARS
+PRODUCT_BOOT_JARS
+TARGET_OUT_COMMON_INTERMEDIATES
+HOST_CORE_JARS
+HOST_OUT_COMMON_INTERMEDIATES
+
+# testrunner/env.py
+HOST_2ND_ARCH_PREFIX
+TARGET_2ND_ARCH
+TARGET_ARCH
+HOST_PREFER_32_BIT
+HOST_OUT_EXECUTABLES
+ANDROID_JAVA_TOOLCHAIN
+ANDROID_COMPILE_WITH_JACK
+USE_D8_BY_DEFAULT
+
diff --git a/tools/common/common.py b/tools/common/common.py
index b822dca..735bbaa 100755
--- a/tools/common/common.py
+++ b/tools/common/common.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python3
 #
 # Copyright (C) 2016 The Android Open Source Project
 #
diff --git a/tools/external_oj_libjdwp_art_failures.txt b/tools/external_oj_libjdwp_art_failures.txt
index 6c2206f..9b6ff98 100644
--- a/tools/external_oj_libjdwp_art_failures.txt
+++ b/tools/external_oj_libjdwp_art_failures.txt
@@ -13,12 +13,6 @@
   name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroup002Test#testThreadGroup002"
 },
 {
-  description: "Test fails due to modifiers not including ACC_SUPER",
-  result: EXEC_FAILED,
-  bug: 66906055,
-  name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.ModifiersTest#testModifiers001"
-},
-{
   description: "Test fails due to static values not being set correctly.",
   result: EXEC_FAILED,
   bug: 66905894,
diff --git a/tools/hiddenapi/find_api_violations.pl b/tools/hiddenapi/find_api_violations.pl
new file mode 100755
index 0000000..a022999
--- /dev/null
+++ b/tools/hiddenapi/find_api_violations.pl
@@ -0,0 +1,124 @@
+#!/usr/bin/perl
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+use strict;
+use warnings;
+use Getopt::Long;
+use Pod::Usage;
+
+=pod
+
+=head1 DESCRIPTION
+
+This script parses API violations from C<adb logcat>. Output is in CSV format
+with columns C<package>, C<symbol>, C<list>.
+
+The package name is mapped from a PID, parsed from the same log. To ensure you
+get all package names, you should process the logcat from device boot time.
+
+=head1 SYNOPSIS
+
+  adb logcat | perl find_api_violations.pl > violations.csv
+  cat bugreport.txt | perl find_api_violations.pl --bugreport > violations.csv
+
+=head1 OPTIONS
+
+=over
+
+=item --[no]lightgrey
+
+(Don't) show light grey list accesses (default true)
+
+=item --[no]darkgrey
+
+(Don't) show dark grey list accesses (default true)
+
+=item --[no]black
+
+(Don't) show black list accesses (default true)
+
+=item --bugreport|-b
+
+Process a bugreport, rather than raw logcat
+
+=item --help
+
+=back
+
+=cut
+
+my $lightgrey = 1;
+my $darkgrey = 1;
+my $black = 1;
+my $bugreport = 0;
+my $help = 0;
+
+GetOptions("lightgrey!"  => \$lightgrey,
+           "darkgrey!"   => \$darkgrey,
+           "black!"      => \$black,
+           "bugreport|b" => \$bugreport,
+           "help"        => \$help)
+  or pod2usage(q(-verbose) => 1);
+
+pod2usage(q(-verbose) => 2) if ($help);
+
+my $in;
+
+if ($bugreport) {
+  my $found_main = 0;
+  while (my $line = <>) {
+    chomp $line;
+    if ($line =~ m/^------ SYSTEM LOG /) {
+      $found_main = 1;
+      last;
+    }
+  }
+  if (!$found_main) {
+    die "Couldn't find main log in bugreport\n";
+  }
+}
+
+my $procmap = {};
+print "package,symbol,list\n";
+while (my $line = <>) {
+  chomp $line;
+  last if $bugreport and $line =~ m/^------ \d+\.\d+s was the duration of 'SYSTEM LOG' ------/;
+  next if $line =~ m/^--------- beginning of/;
+  unless ($line =~ m/^\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}\s+(?:\w+\s+)?(\d+)\s+(\d+)\s+([VWIDE])\s+(.*?): (.*)$/) {
+    die "Cannot match line: $line\n";
+  }
+  my ($pid, $tid, $class, $tag, $msg) = ($1, $2, $3, $4, $5);
+  if ($tag eq "ActivityManager" && $msg =~ m/^Start proc (\d+):(.*?) for /) {
+    my ($new_pid, $proc_name) = ($1, $2);
+    my $package;
+    if ($proc_name =~ m/^(.*?)(:.*?)?\/(.*)$/) {
+      $package = $1;
+    } else {
+      $package = $proc_name;
+    }
+    $procmap->{$new_pid} = $package;
+  }
+  if ($tag eq "zygote" || $tag eq "zygote64") {
+    if ($msg =~ m/Accessing hidden (\w+) (L.*?) \((.*list), (.*?)\)/) {
+      my ($member_type, $symbol, $list, $access_type) = ($1, $2, $3, $4);
+      my $package = $procmap->{$pid} || "unknown($pid)";
+      print "$package,$symbol,$list\n"
+        if (($list =~ m/light/ && $lightgrey)
+          || ($list =~ m/dark/ && $darkgrey)
+          || ($list =~ m/black/ && $black));
+    }
+  }
+}
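+
+# For the illustrative line above (assuming PID 1234 appeared in an earlier
+# ActivityManager "Start proc" line for com.example.app), the CSV row would be:
+#   com.example.app,Lfoo/Bar;->baz()V,dark greylist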
+
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index 203e03d..47fe072 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python3
 #
 # Copyright (C) 2016 The Android Open Source Project
 #
diff --git a/tools/jfuzz/run_jfuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
index 4a54a3a..3ff9f45 100755
--- a/tools/jfuzz/run_jfuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python3
 #
 # Copyright (C) 2016 The Android Open Source Project
 #
diff --git a/tools/jfuzz/run_jfuzz_test_nightly.py b/tools/jfuzz/run_jfuzz_test_nightly.py
index e6c216d..fecf116 100755
--- a/tools/jfuzz/run_jfuzz_test_nightly.py
+++ b/tools/jfuzz/run_jfuzz_test_nightly.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python3
 #
 # Copyright (C) 2016 The Android Open Source Project
 #