Merge "Revert "Remove support for Valgrind in ART.""
diff --git a/Android.mk b/Android.mk
index d6472be..08a1a10 100644
--- a/Android.mk
+++ b/Android.mk
@@ -487,6 +487,7 @@
 	sed -i '/libartd.so/d' $(TARGET_OUT)/etc/public.libraries.txt
 	sed -i '/libdexfiled.so/d' $(TARGET_OUT)/etc/public.libraries.txt
 	sed -i '/libprofiled.so/d' $(TARGET_OUT)/etc/public.libraries.txt
+	sed -i '/libartbased.so/d' $(TARGET_OUT)/etc/public.libraries.txt
 
 ########################################################################
 # Phony target for building what go/lem requires on host.
diff --git a/adbconnection/Android.bp b/adbconnection/Android.bp
index 441b706..95fc274 100644
--- a/adbconnection/Android.bp
+++ b/adbconnection/Android.bp
@@ -65,6 +65,7 @@
     defaults: ["adbconnection-defaults"],
     shared_libs: [
         "libart",
+        "libartbase",
     ],
 }
 
@@ -76,5 +77,6 @@
     ],
     shared_libs: [
         "libartd",
+        "libartbased",
     ],
 }
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 3daaf01..b481352 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -277,6 +277,16 @@
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
   dexdump2-target
 
+# The dexanalyze test requires an image and the dexanalyze utility.
+ART_GTEST_dexanalyze_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_DEFAULT_64) \
+  $(HOST_CORE_IMAGE_DEFAULT_32) \
+  dexanalyze-host
+ART_GTEST_dexanalyze_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_DEFAULT_64) \
+  $(TARGET_CORE_IMAGE_DEFAULT_32) \
+  dexanalyze-target
+
 # The dexlayout test requires an image and the dexlayout utility.
 # TODO: rename into dexdump when migration completes
 ART_GTEST_dexlayout_test_HOST_DEPS := \
diff --git a/build/art.go b/build/art.go
index 59480a0..3dabce3 100644
--- a/build/art.go
+++ b/build/art.go
@@ -278,6 +278,7 @@
 	android.RegisterModuleType("art_cc_test", artTest)
 	android.RegisterModuleType("art_cc_test_library", artTestLibrary)
 	android.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
+	android.RegisterModuleType("libart_cc_defaults", libartDefaultsFactory)
 	android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
 	android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
 }
@@ -304,6 +305,33 @@
 	return module
 }
 
+func libartDefaultsFactory() android.Module {
+	c := &codegenProperties{}
+	module := cc.DefaultsFactory(c)
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) {
+		codegen(ctx, c, true)
+
+		type props struct {
+			Target struct {
+				Android struct {
+					Shared_libs []string
+				}
+			}
+		}
+
+		p := &props{}
+		// TODO: express this in .bp instead b/79671158
+		if !envTrue(ctx, "ART_TARGET_LINUX") {
+			p.Target.Android.Shared_libs = []string{
+				"libmetricslogger",
+			}
+		}
+		ctx.AppendProperties(p)
+	})
+
+	return module
+}
+
 func artLibrary() android.Module {
 	m, _ := cc.NewLibrary(android.HostAndDeviceSupported)
 	module := m.Init()
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 5884a54..be963fb 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -246,6 +246,7 @@
         "libart",
         "libprofile",
         "libdexfile",
+        "libartbase",
     ],
 
     target: {
@@ -295,6 +296,7 @@
         "libartd",
         "libprofiled",
         "libdexfiled",
+        "libartbased",
     ],
 }
 
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 893cad2..87e679f 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -207,11 +207,10 @@
       std::vector<DexRegisterMap> dex_reg_maps;
       if (accessor.HasCodeItem() && mi->code_info != nullptr) {
         const CodeInfo code_info(mi->code_info);
-        CodeInfoEncoding encoding = code_info.ExtractEncoding();
-        for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) {
-          const StackMap& stack_map = code_info.GetStackMapAt(s, encoding);
+        for (size_t s = 0; s < code_info.GetNumberOfStackMaps(); ++s) {
+          const StackMap stack_map = code_info.GetStackMapAt(s);
           dex_reg_maps.push_back(code_info.GetDexRegisterMapOf(
-              stack_map, encoding, accessor.RegistersSize()));
+              stack_map, accessor.RegistersSize()));
         }
       }
 
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 44504c1..a7adab5 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -100,15 +100,14 @@
       if (mi->code_info != nullptr) {
         // Use stack maps to create mapping table from pc to dex.
         const CodeInfo code_info(mi->code_info);
-        const CodeInfoEncoding encoding = code_info.ExtractEncoding();
-        pc2dex_map.reserve(code_info.GetNumberOfStackMaps(encoding));
-        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
-          StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+        pc2dex_map.reserve(code_info.GetNumberOfStackMaps());
+        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+          StackMap stack_map = code_info.GetStackMapAt(s);
           DCHECK(stack_map.IsValid());
-          const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
-          const int32_t dex = stack_map.GetDexPc(encoding.stack_map.encoding);
+          const uint32_t pc = stack_map.GetNativePcOffset(isa);
+          const int32_t dex = stack_map.GetDexPc();
           pc2dex_map.push_back({pc, dex});
-          if (stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+          if (stack_map.HasDexRegisterMap()) {
             // Guess that the first map with local variables is the end of prologue.
             prologue_end = std::min(prologue_end, pc);
           }
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 9ea9f01..c1bf915 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -99,12 +99,11 @@
   // Get stack maps sorted by pc (they might not be sorted internally).
   // TODO(dsrbecky) Remove this once stackmaps get sorted by pc.
   const CodeInfo code_info(method_info->code_info);
-  const CodeInfoEncoding encoding = code_info.ExtractEncoding();
   std::map<uint32_t, uint32_t> stack_maps;  // low_pc -> stack_map_index.
-  for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
-    StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+  for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+    StackMap stack_map = code_info.GetStackMapAt(s);
     DCHECK(stack_map.IsValid());
-    if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+    if (!stack_map.HasDexRegisterMap()) {
       // The compiler creates stackmaps without register maps at the start of
       // basic blocks in order to keep instruction-accurate line number mapping.
       // However, we never stop at those (breakpoint locations always have map).
@@ -112,7 +111,7 @@
       // The main reason for this is to save space by avoiding undefined gaps.
       continue;
     }
-    const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa);
+    const uint32_t pc_offset = stack_map.GetNativePcOffset(isa);
     DCHECK_LE(pc_offset, method_info->code_size);
     DCHECK_LE(compilation_unit_code_address, method_info->code_address);
     const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
@@ -124,7 +123,7 @@
   for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) {
     const uint32_t low_pc = it->first;
     const uint32_t stack_map_index = it->second;
-    const StackMap& stack_map = code_info.GetStackMapAt(stack_map_index, encoding);
+    const StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
     auto next_it = it;
     next_it++;
     const uint32_t high_pc = next_it != stack_maps.end()
@@ -136,7 +135,7 @@
     }
 
     // Check that the stack map is in the requested range.
-    uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
+    uint32_t dex_pc = stack_map.GetDexPc();
     if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
       // The variable is not in scope at this PC. Therefore omit the entry.
       // Note that this is different to None() entry which means in scope, but unknown location.
@@ -151,10 +150,10 @@
     DCHECK(dex_register_map.IsValid());
     CodeItemDataAccessor accessor(*method_info->dex_file, method_info->code_item);
     reg_lo = dex_register_map.GetDexRegisterLocation(
-        vreg, accessor.RegistersSize(), code_info, encoding);
+        vreg, accessor.RegistersSize(), code_info);
     if (is64bitValue) {
       reg_hi = dex_register_map.GetDexRegisterLocation(
-          vreg + 1, accessor.RegistersSize(), code_info, encoding);
+          vreg + 1, accessor.RegistersSize(), code_info);
     }
 
     // Add location entry for this address range.
diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h
index a88c5cb..fd132f4 100644
--- a/compiler/debug/elf_gnu_debugdata_writer.h
+++ b/compiler/debug/elf_gnu_debugdata_writer.h
@@ -41,23 +41,23 @@
   Lzma2EncProps_Normalize(&lzma2Props);
   CXzProps props;
   XzProps_Init(&props);
-  props.lzma2Props = &lzma2Props;
+  props.lzma2Props = lzma2Props;
   // Implement the required interface for communication (written in C so no virtual methods).
   struct XzCallbacks : public ISeqInStream, public ISeqOutStream, public ICompressProgress {
-    static SRes ReadImpl(void* p, void* buf, size_t* size) {
-      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqInStream*>(p));
+    static SRes ReadImpl(const ISeqInStream* p, void* buf, size_t* size) {
+      auto* ctx = static_cast<XzCallbacks*>(const_cast<ISeqInStream*>(p));
       *size = std::min(*size, ctx->src_->size() - ctx->src_pos_);
       memcpy(buf, ctx->src_->data() + ctx->src_pos_, *size);
       ctx->src_pos_ += *size;
       return SZ_OK;
     }
-    static size_t WriteImpl(void* p, const void* buf, size_t size) {
-      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqOutStream*>(p));
+    static size_t WriteImpl(const ISeqOutStream* p, const void* buf, size_t size) {
+      auto* ctx = static_cast<const XzCallbacks*>(p);
       const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
       ctx->dst_->insert(ctx->dst_->end(), buffer, buffer + size);
       return size;
     }
-    static SRes ProgressImpl(void* , UInt64, UInt64) {
+    static SRes ProgressImpl(const ICompressProgress* , UInt64, UInt64) {
       return SZ_OK;
     }
     size_t src_pos_;
@@ -113,4 +113,3 @@
 }  // namespace art
 
 #endif  // ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_
-
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index 19b1900..082e609 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -20,6 +20,7 @@
 #include "common_compiler_test.h"
 #include "compiled_method-inl.h"
 #include "compiler_callbacks.h"
+#include "dex/class_accessor-inl.h"
 #include "dex/dex_file.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
@@ -82,30 +83,21 @@
 
     // Unquicken the dex file.
     for (uint32_t i = 0; i < updated_dex_file->NumClassDefs(); ++i) {
-      const DexFile::ClassDef& class_def = updated_dex_file->GetClassDef(i);
-      const uint8_t* class_data = updated_dex_file->GetClassData(class_def);
-      if (class_data == nullptr) {
-        continue;
-      }
-      ClassDataItemIterator it(*updated_dex_file, class_data);
-      it.SkipAllFields();
-
       // Unquicken each method.
-      while (it.HasNextMethod()) {
-        uint32_t method_idx = it.GetMemberIndex();
-        CompiledMethod* compiled_method =
-            compiler_driver_->GetCompiledMethod(MethodReference(updated_dex_file, method_idx));
+      ClassAccessor accessor(*updated_dex_file, updated_dex_file->GetClassDef(i));
+      accessor.VisitMethods([&](const ClassAccessor::Method& method) {
+        CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(
+            MethodReference(updated_dex_file,
+                            method.GetIndex()));
         ArrayRef<const uint8_t> table;
         if (compiled_method != nullptr) {
           table = compiled_method->GetVmapTable();
         }
         optimizer::ArtDecompileDEX(*updated_dex_file,
-                                   *it.GetMethodCodeItem(),
+                                   *accessor.GetCodeItem(method),
                                    table,
                                    /* decompile_return_instruction */ true);
-        it.Next();
-      }
-      DCHECK(!it.HasNext());
+      });
     }
 
     // Make sure after unquickening we go back to the same contents as the original dex file.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 39ed825..7dc44fa 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2160,6 +2160,9 @@
         DCHECK(failure_kind == verifier::FailureKind::kNoFailure) << failure_kind;
         failure_kind = verifier::FailureKind::kSoftFailure;
       }
+    } else if (&klass->GetDexFile() != &dex_file) {
+      // Skip a duplicate class (as the resolved class is from another, earlier dex file).
+      return;  // Do not update state.
     } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
       CHECK(klass->IsResolved()) << klass->PrettyClass();
       failure_kind = class_linker->VerifyClass(soa.Self(), klass, log_level_);
@@ -2804,6 +2807,9 @@
       dex_cache = hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
     } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
       return;
+    } else if (&klass->GetDexFile() != &dex_file) {
+      // Skip a duplicate class (as the resolved class is from another, earlier dex file).
+      return;  // Do not update state.
     } else {
       dex_cache = hs.NewHandle(klass->GetDexCache());
     }
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1e44311..de1be5b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -736,6 +736,26 @@
   }
 }
 
+void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
+    HLoadMethodHandle* method_handle,
+    Location runtime_proto_index_location,
+    Location runtime_return_location) {
+  DCHECK_EQ(method_handle->InputCount(), 1u);
+  LocationSummary* locations =
+      new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
+          method_handle, LocationSummary::kCallOnMainOnly);
+  locations->SetInAt(0, Location::NoLocation());
+  locations->AddTemp(runtime_proto_index_location);
+  locations->SetOut(runtime_return_location);
+}
+
+void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
+  LocationSummary* locations = method_handle->GetLocations();
+  MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
+  CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
+  InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
+}
+
 void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
     HLoadMethodType* method_type,
     Location runtime_proto_index_location,
@@ -751,7 +771,7 @@
 
 void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
   LocationSummary* locations = method_type->GetLocations();
-  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex());
+  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
   CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
   InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
 }
@@ -955,11 +975,10 @@
                         const CodeInfo& code_info,
                         const ArenaVector<HSuspendCheck*>& loop_headers,
                         ArenaVector<size_t>* covered) {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
   for (size_t i = 0; i < loop_headers.size(); ++i) {
     if (loop_headers[i]->GetDexPc() == dex_pc) {
       if (graph.IsCompilingOsr()) {
-        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
+        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
       }
       ++(*covered)[i];
     }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 7e84a44..a340446 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -35,7 +35,6 @@
 #include "optimizing_compiler_stats.h"
 #include "read_barrier_option.h"
 #include "stack.h"
-#include "stack_map.h"
 #include "utils/label.h"
 
 namespace art {
@@ -564,6 +563,11 @@
                                                         Location runtime_return_location);
   void GenerateLoadClassRuntimeCall(HLoadClass* cls);
 
+  static void CreateLoadMethodHandleRuntimeCallLocationSummary(HLoadMethodHandle* method_handle,
+                                                             Location runtime_handle_index_location,
+                                                             Location runtime_return_location);
+  void GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle);
+
   static void CreateLoadMethodTypeRuntimeCallLocationSummary(HLoadMethodType* method_type,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0601d2d..6f173e1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5144,6 +5144,16 @@
   }
 }
 
+void LocationsBuilderARM64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  InvokeRuntimeCallingConvention calling_convention;
+  Location location = LocationFrom(calling_convention.GetRegisterAt(0));
+  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, location, location);
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
+}
+
 void LocationsBuilderARM64::VisitLoadMethodType(HLoadMethodType* load) {
   InvokeRuntimeCallingConvention calling_convention;
   Location location = LocationFrom(calling_convention.GetRegisterAt(0));
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index aa343b1..e7fe5b7 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -17,7 +17,6 @@
 #ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
 
-#include "arch/arm64/quick_method_frame_info_arm64.h"
 #include "base/bit_field.h"
 #include "code_generator.h"
 #include "common_arm64.h"
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 33304c6..859e159 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7527,6 +7527,16 @@
   }
 }
 
+void LocationsBuilderARMVIXL::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  InvokeRuntimeCallingConventionARMVIXL calling_convention;
+  Location location = LocationFrom(calling_convention.GetRegisterAt(0));
+  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, location, location);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
+}
+
 void LocationsBuilderARMVIXL::VisitLoadMethodType(HLoadMethodType* load) {
   InvokeRuntimeCallingConventionARMVIXL calling_convention;
   Location location = LocationFrom(calling_convention.GetRegisterAt(0));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 3a3fcff..7f3441f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -8226,6 +8226,16 @@
   }
 }
 
+void LocationsBuilderMIPS::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  InvokeRuntimeCallingConvention calling_convention;
+  Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
+  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, loc, loc);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
+}
+
 void LocationsBuilderMIPS::VisitLoadMethodType(HLoadMethodType* load) {
   InvokeRuntimeCallingConvention calling_convention;
   Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index d6fc9a1..ee32b96 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -6262,6 +6262,16 @@
   }
 }
 
+void LocationsBuilderMIPS64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  InvokeRuntimeCallingConvention calling_convention;
+  Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
+  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, loc, loc);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
+}
+
 void LocationsBuilderMIPS64::VisitLoadMethodType(HLoadMethodType* load) {
   InvokeRuntimeCallingConvention calling_convention;
   Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d18a750..9e31538 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6539,6 +6539,16 @@
   }
 }
 
+void LocationsBuilderX86::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  InvokeRuntimeCallingConvention calling_convention;
+  Location location = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
+  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, location, location);
+}
+
+void InstructionCodeGeneratorX86::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
+}
+
 void LocationsBuilderX86::VisitLoadMethodType(HLoadMethodType* load) {
   InvokeRuntimeCallingConvention calling_convention;
   Location location = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 450c857..f739704 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5908,6 +5908,16 @@
   }
 }
 
+void LocationsBuilderX86_64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  // Custom calling convention: RAX serves as both input and output.
+  Location location = Location::RegisterLocation(RAX);
+  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, location, location);
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
+  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
+}
+
 void LocationsBuilderX86_64::VisitLoadMethodType(HLoadMethodType* load) {
   // Custom calling convention: RAX serves as both input and output.
   Location location = Location::RegisterLocation(RAX);
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index be26e67..5ac6e46 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -216,6 +216,21 @@
         Size(result_type) > Size(input_type);
   }
 
+  static Type ToSigned(Type type) {
+    switch (type) {
+      case Type::kUint8:
+        return Type::kInt8;
+      case Type::kUint16:
+        return Type::kInt16;
+      case Type::kUint32:
+        return Type::kInt32;
+      case Type::kUint64:
+        return Type::kInt64;
+      default:
+        return type;
+    }
+  }
+
   static const char* PrettyDescriptor(Type type);
 
  private:
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 87ce1f0..d65ad40 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -386,6 +386,11 @@
         << load_class->NeedsAccessCheck() << std::noboolalpha;
   }
 
+  void VisitLoadMethodHandle(HLoadMethodHandle* load_method_handle) OVERRIDE {
+    StartAttributeStream("load_kind") << "RuntimeCall";
+    StartAttributeStream("method_handle_index") << load_method_handle->GetMethodHandleIndex();
+  }
+
   void VisitLoadMethodType(HLoadMethodType* load_method_type) OVERRIDE {
     StartAttributeStream("load_kind") << "RuntimeCall";
     const DexFile& dex_file = load_method_type->GetDexFile();
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 61730a8..24dc2ee 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -448,11 +448,9 @@
       invoke_type,
       target_method,
       HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
+  RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
   HandleInvoke(invoke,
-               in_vregs,
-               /* args */ nullptr,
-               graph_->GetNumberOfVRegs() - in_vregs,
-               /* is_range */ true,
+               operands,
                dex_file_->GetMethodShorty(method_idx),
                /* clinit_check */ nullptr,
                /* is_unresolved */ false);
@@ -916,10 +914,7 @@
 bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
                                       uint32_t dex_pc,
                                       uint32_t method_idx,
-                                      uint32_t number_of_vreg_arguments,
-                                      bool is_range,
-                                      uint32_t* args,
-                                      uint32_t register_index) {
+                                      const InstructionOperands& operands) {
   InvokeType invoke_type = GetInvokeTypeFromOpCode(instruction.Opcode());
   const char* descriptor = dex_file_->GetMethodShorty(method_idx);
   DataType::Type return_type = DataType::FromShorty(descriptor[0]);
@@ -943,12 +938,9 @@
                                                          method_idx,
                                                          invoke_type);
     return HandleInvoke(invoke,
-                        number_of_vreg_arguments,
-                        args,
-                        register_index,
-                        is_range,
+                        operands,
                         descriptor,
-                        nullptr, /* clinit_check */
+                        nullptr /* clinit_check */,
                         true /* is_unresolved */);
   }
 
@@ -976,12 +968,7 @@
         invoke_type,
         target_method,
         HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
-    return HandleStringInit(invoke,
-                            number_of_vreg_arguments,
-                            args,
-                            register_index,
-                            is_range,
-                            descriptor);
+    return HandleStringInit(invoke, operands, descriptor);
   }
 
   // Potential class initialization check, in the case of a static method call.
@@ -1042,26 +1029,16 @@
                                                ImTable::GetImtIndex(resolved_method));
   }
 
-  return HandleInvoke(invoke,
-                      number_of_vreg_arguments,
-                      args,
-                      register_index,
-                      is_range,
-                      descriptor,
-                      clinit_check,
-                      false /* is_unresolved */);
+  return HandleInvoke(invoke, operands, descriptor, clinit_check, false /* is_unresolved */);
 }
 
 bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction ATTRIBUTE_UNUSED,
                                                  uint32_t dex_pc,
                                                  uint32_t method_idx,
-                                                 uint32_t proto_idx,
-                                                 uint32_t number_of_vreg_arguments,
-                                                 bool is_range,
-                                                 uint32_t* args,
-                                                 uint32_t register_index) {
+                                                 dex::ProtoIndex proto_idx,
+                                                 const InstructionOperands& operands) {
   const char* descriptor = dex_file_->GetShorty(proto_idx);
-  DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), number_of_vreg_arguments);
+  DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), operands.GetNumberOfOperands());
   DataType::Type return_type = DataType::FromShorty(descriptor[0]);
   size_t number_of_arguments = strlen(descriptor);
   HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
@@ -1070,10 +1047,7 @@
                                                         dex_pc,
                                                         method_idx);
   return HandleInvoke(invoke,
-                      number_of_vreg_arguments,
-                      args,
-                      register_index,
-                      is_range,
+                      operands,
                       descriptor,
                       nullptr /* clinit_check */,
                       false /* is_unresolved */);
@@ -1222,26 +1196,22 @@
 }
 
 bool HInstructionBuilder::SetupInvokeArguments(HInvoke* invoke,
-                                               uint32_t number_of_vreg_arguments,
-                                               uint32_t* args,
-                                               uint32_t register_index,
-                                               bool is_range,
+                                               const InstructionOperands& operands,
                                                const char* descriptor,
                                                size_t start_index,
                                                size_t* argument_index) {
   uint32_t descriptor_index = 1;  // Skip the return type.
-
+  const size_t number_of_operands = operands.GetNumberOfOperands();
   for (size_t i = start_index;
        // Make sure we don't go over the expected arguments or over the number of
        // dex registers given. If the instruction was seen as dead by the verifier,
        // it hasn't been properly checked.
-       (i < number_of_vreg_arguments) && (*argument_index < invoke->GetNumberOfArguments());
+       (i < number_of_operands) && (*argument_index < invoke->GetNumberOfArguments());
        i++, (*argument_index)++) {
     DataType::Type type = DataType::FromShorty(descriptor[descriptor_index++]);
     bool is_wide = (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
-    if (!is_range
-        && is_wide
-        && ((i + 1 == number_of_vreg_arguments) || (args[i] + 1 != args[i + 1]))) {
+    if (is_wide && ((i + 1 == number_of_operands) ||
+                    (operands.GetOperand(i) + 1 != operands.GetOperand(i + 1)))) {
       // Longs and doubles should be in pairs, that is, sequential registers. The verifier should
       // reject any class where this is violated. However, the verifier only does these checks
       // on non trivially dead instructions, so we just bailout the compilation.
@@ -1252,7 +1222,7 @@
                       MethodCompilationStat::kNotCompiledMalformedOpcode);
       return false;
     }
-    HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type);
+    HInstruction* arg = LoadLocal(operands.GetOperand(i), type);
     invoke->SetArgumentAt(*argument_index, arg);
     if (is_wide) {
       i++;
@@ -1279,10 +1249,7 @@
 }
 
 bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
-                                       uint32_t number_of_vreg_arguments,
-                                       uint32_t* args,
-                                       uint32_t register_index,
-                                       bool is_range,
+                                       const InstructionOperands& operands,
                                        const char* descriptor,
                                        HClinitCheck* clinit_check,
                                        bool is_unresolved) {
@@ -1291,7 +1258,7 @@
   size_t start_index = 0;
   size_t argument_index = 0;
   if (invoke->GetInvokeType() != InvokeType::kStatic) {  // Instance call.
-    uint32_t obj_reg = is_range ? register_index : args[0];
+    uint32_t obj_reg = operands.GetOperand(0);
     HInstruction* arg = is_unresolved
         ? LoadLocal(obj_reg, DataType::Type::kReference)
         : LoadNullCheckedLocal(obj_reg, invoke->GetDexPc());
@@ -1300,14 +1267,7 @@
     argument_index = 1;
   }
 
-  if (!SetupInvokeArguments(invoke,
-                            number_of_vreg_arguments,
-                            args,
-                            register_index,
-                            is_range,
-                            descriptor,
-                            start_index,
-                            &argument_index)) {
+  if (!SetupInvokeArguments(invoke, operands, descriptor, start_index, &argument_index)) {
     return false;
   }
 
@@ -1327,24 +1287,14 @@
 }
 
 bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
-                                           uint32_t number_of_vreg_arguments,
-                                           uint32_t* args,
-                                           uint32_t register_index,
-                                           bool is_range,
+                                           const InstructionOperands& operands,
                                            const char* descriptor) {
   DCHECK(invoke->IsInvokeStaticOrDirect());
   DCHECK(invoke->AsInvokeStaticOrDirect()->IsStringInit());
 
   size_t start_index = 1;
   size_t argument_index = 0;
-  if (!SetupInvokeArguments(invoke,
-                            number_of_vreg_arguments,
-                            args,
-                            register_index,
-                            is_range,
-                            descriptor,
-                            start_index,
-                            &argument_index)) {
+  if (!SetupInvokeArguments(invoke, operands, descriptor, start_index, &argument_index)) {
     return false;
   }
 
@@ -1352,7 +1302,7 @@
 
   // This is a StringFactory call, not an actual String constructor. Its result
   // replaces the empty String pre-allocated by NewInstance.
-  uint32_t orig_this_reg = is_range ? register_index : args[0];
+  uint32_t orig_this_reg = operands.GetOperand(0);
   HInstruction* arg_this = LoadLocal(orig_this_reg, DataType::Type::kReference);
 
   // Replacing the NewInstance might render it redundant. Keep a list of these
@@ -1360,9 +1310,15 @@
   if (arg_this->IsNewInstance()) {
     ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
   } else {
+    // The only reason a HPhi can flow in a String.<init> is when there is an
+    // irreducible loop, which will create HPhi for all dex registers at loop entry.
     DCHECK(arg_this->IsPhi());
-    // NewInstance is not the direct input of the StringFactory call. It might
-    // be redundant but optimizing this case is not worth the effort.
+    DCHECK(graph_->HasIrreducibleLoops());
+    // Don't bother compiling a method in that situation. While we could look at all
+    // phis related to the HNewInstance, it's not worth the trouble.
+    MaybeRecordStat(compilation_stats_,
+                    MethodCompilationStat::kNotCompiledIrreducibleAndStringInit);
+    return false;
   }
 
   // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
@@ -1699,11 +1655,9 @@
 
 HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
                                                     dex::TypeIndex type_index,
-                                                    uint32_t number_of_vreg_arguments,
-                                                    bool is_range,
-                                                    uint32_t* args,
-                                                    uint32_t register_index) {
-  HInstruction* length = graph_->GetIntConstant(number_of_vreg_arguments, dex_pc);
+                                                    const InstructionOperands& operands) {
+  const size_t number_of_operands = operands.GetNumberOfOperands();
+  HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
   HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
   HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
   AppendInstruction(object);
@@ -1717,8 +1671,8 @@
   bool is_reference_array = (primitive == 'L') || (primitive == '[');
   DataType::Type type = is_reference_array ? DataType::Type::kReference : DataType::Type::kInt32;
 
-  for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
-    HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
+  for (size_t i = 0; i < number_of_operands; ++i) {
+    HInstruction* value = LoadLocal(operands.GetOperand(i), type);
     HInstruction* index = graph_->GetIntConstant(i, dex_pc);
     HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
     ssa_builder_->MaybeAddAmbiguousArraySet(aset);
@@ -1896,10 +1850,17 @@
   }
 }
 
-void HInstructionBuilder::BuildLoadMethodType(uint16_t proto_idx, uint32_t dex_pc) {
+void HInstructionBuilder::BuildLoadMethodHandle(uint16_t method_handle_index, uint32_t dex_pc) {
+  const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
+  HLoadMethodHandle* load_method_handle = new (allocator_) HLoadMethodHandle(
+      graph_->GetCurrentMethod(), method_handle_index, dex_file, dex_pc);
+  AppendInstruction(load_method_handle);
+}
+
+void HInstructionBuilder::BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc) {
   const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
   HLoadMethodType* load_method_type =
-      new (allocator_) HLoadMethodType(graph_->GetCurrentMethod(), proto_idx, dex_file, dex_pc);
+      new (allocator_) HLoadMethodType(graph_->GetCurrentMethod(), proto_index, dex_file, dex_pc);
   AppendInstruction(load_method_type);
 }
 
@@ -2144,11 +2105,10 @@
       } else {
         method_idx = instruction.VRegB_35c();
       }
-      uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
-      instruction.GetVarArgs(args);
-      if (!BuildInvoke(instruction, dex_pc, method_idx,
-                       number_of_vreg_arguments, false, args, -1)) {
+      uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+      VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+      if (!BuildInvoke(instruction, dex_pc, method_idx, operands)) {
         return false;
       }
       break;
@@ -2171,10 +2131,8 @@
       } else {
         method_idx = instruction.VRegB_3rc();
       }
-      uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
-      uint32_t register_index = instruction.VRegC();
-      if (!BuildInvoke(instruction, dex_pc, method_idx,
-                       number_of_vreg_arguments, true, nullptr, register_index)) {
+      RangeInstructionOperands operands(instruction.VRegC(), instruction.VRegA_3rc());
+      if (!BuildInvoke(instruction, dex_pc, method_idx, operands)) {
         return false;
       }
       break;
@@ -2182,33 +2140,18 @@
 
     case Instruction::INVOKE_POLYMORPHIC: {
       uint16_t method_idx = instruction.VRegB_45cc();
-      uint16_t proto_idx = instruction.VRegH_45cc();
-      uint32_t number_of_vreg_arguments = instruction.VRegA_45cc();
+      dex::ProtoIndex proto_idx(instruction.VRegH_45cc());
       uint32_t args[5];
-      instruction.GetVarArgs(args);
-      return BuildInvokePolymorphic(instruction,
-                                    dex_pc,
-                                    method_idx,
-                                    proto_idx,
-                                    number_of_vreg_arguments,
-                                    false,
-                                    args,
-                                    -1);
+      uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+      VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+      return BuildInvokePolymorphic(instruction, dex_pc, method_idx, proto_idx, operands);
     }
 
     case Instruction::INVOKE_POLYMORPHIC_RANGE: {
       uint16_t method_idx = instruction.VRegB_4rcc();
-      uint16_t proto_idx = instruction.VRegH_4rcc();
-      uint32_t number_of_vreg_arguments = instruction.VRegA_4rcc();
-      uint32_t register_index = instruction.VRegC_4rcc();
-      return BuildInvokePolymorphic(instruction,
-                                    dex_pc,
-                                    method_idx,
-                                    proto_idx,
-                                    number_of_vreg_arguments,
-                                    true,
-                                    nullptr,
-                                    register_index);
+      dex::ProtoIndex proto_idx(instruction.VRegH_4rcc());
+      RangeInstructionOperands operands(instruction.VRegC_4rcc(), instruction.VRegA_4rcc());
+      return BuildInvokePolymorphic(instruction, dex_pc, method_idx, proto_idx, operands);
     }
 
     case Instruction::NEG_INT: {
@@ -2756,30 +2699,19 @@
     }
 
     case Instruction::FILLED_NEW_ARRAY: {
-      uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       dex::TypeIndex type_index(instruction.VRegB_35c());
       uint32_t args[5];
-      instruction.GetVarArgs(args);
-      HNewArray* new_array = BuildFilledNewArray(dex_pc,
-                                                 type_index,
-                                                 number_of_vreg_arguments,
-                                                 /* is_range */ false,
-                                                 args,
-                                                 /* register_index */ 0);
+      uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+      VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+      HNewArray* new_array = BuildFilledNewArray(dex_pc, type_index, operands);
       BuildConstructorFenceForAllocation(new_array);
       break;
     }
 
     case Instruction::FILLED_NEW_ARRAY_RANGE: {
-      uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       dex::TypeIndex type_index(instruction.VRegB_3rc());
-      uint32_t register_index = instruction.VRegC_3rc();
-      HNewArray* new_array = BuildFilledNewArray(dex_pc,
-                                                 type_index,
-                                                 number_of_vreg_arguments,
-                                                 /* is_range */ true,
-                                                 /* args*/ nullptr,
-                                                 register_index);
+      RangeInstructionOperands operands(instruction.VRegC_3rc(), instruction.VRegA_3rc());
+      HNewArray* new_array = BuildFilledNewArray(dex_pc, type_index, operands);
       BuildConstructorFenceForAllocation(new_array);
       break;
     }
@@ -2934,8 +2866,15 @@
       break;
     }
 
+    case Instruction::CONST_METHOD_HANDLE: {
+      uint16_t method_handle_idx = instruction.VRegB_21c();
+      BuildLoadMethodHandle(method_handle_idx, dex_pc);
+      UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+      break;
+    }
+
     case Instruction::CONST_METHOD_TYPE: {
-      uint16_t proto_idx = instruction.VRegB_21c();
+      dex::ProtoIndex proto_idx(instruction.VRegB_21c());
       BuildLoadMethodType(proto_idx, dex_pc);
       UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
       break;
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 3fde54c..2218a69 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -38,6 +38,7 @@
 class DexCompilationUnit;
 class HBasicBlockBuilder;
 class Instruction;
+class InstructionOperands;
 class OptimizingCompilerStats;
 class ScopedObjectAccess;
 class SsaBuilder;
@@ -168,29 +169,20 @@
   bool BuildInvoke(const Instruction& instruction,
                    uint32_t dex_pc,
                    uint32_t method_idx,
-                   uint32_t number_of_vreg_arguments,
-                   bool is_range,
-                   uint32_t* args,
-                   uint32_t register_index);
+                   const InstructionOperands& operands);
 
   // Builds an invocation node for invoke-polymorphic and returns whether the
   // instruction is supported.
   bool BuildInvokePolymorphic(const Instruction& instruction,
                               uint32_t dex_pc,
                               uint32_t method_idx,
-                              uint32_t proto_idx,
-                              uint32_t number_of_vreg_arguments,
-                              bool is_range,
-                              uint32_t* args,
-                              uint32_t register_index);
+                              dex::ProtoIndex proto_idx,
+                              const InstructionOperands& operands);
 
   // Builds a new array node and the instructions that fill it.
   HNewArray* BuildFilledNewArray(uint32_t dex_pc,
                                  dex::TypeIndex type_index,
-                                 uint32_t number_of_vreg_arguments,
-                                 bool is_range,
-                                 uint32_t* args,
-                                 uint32_t register_index);
+                                 const InstructionOperands& operands);
 
   void BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc);
 
@@ -240,8 +232,11 @@
   bool LoadClassNeedsAccessCheck(Handle<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Builds a `HLoadMethodHandle` loading the given `method_handle_index`.
+  void BuildLoadMethodHandle(uint16_t method_handle_idx, uint32_t dex_pc);
+
   // Builds a `HLoadMethodType` loading the given `proto_index`.
-  void BuildLoadMethodType(uint16_t proto_idx, uint32_t dex_pc);
+  void BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc);
 
   // Returns the outer-most compiling method's class.
   ObjPtr<mirror::Class> GetOutermostCompilingClass() const;
@@ -257,28 +252,19 @@
                                      HInvoke* invoke);
 
   bool SetupInvokeArguments(HInvoke* invoke,
-                            uint32_t number_of_vreg_arguments,
-                            uint32_t* args,
-                            uint32_t register_index,
-                            bool is_range,
+                            const InstructionOperands& operands,
                             const char* descriptor,
                             size_t start_index,
                             size_t* argument_index);
 
   bool HandleInvoke(HInvoke* invoke,
-                    uint32_t number_of_vreg_arguments,
-                    uint32_t* args,
-                    uint32_t register_index,
-                    bool is_range,
+                    const InstructionOperands& operands,
                     const char* descriptor,
                     HClinitCheck* clinit_check,
                     bool is_unresolved);
 
   bool HandleStringInit(HInvoke* invoke,
-                        uint32_t number_of_vreg_arguments,
-                        uint32_t* args,
-                        uint32_t register_index,
-                        bool is_range,
+                        const InstructionOperands& operands,
                         const char* descriptor);
   void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
 
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index f84846d..769a3f1 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -94,11 +94,13 @@
   static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
 
   HeapLocation(ReferenceInfo* ref_info,
+               DataType::Type type,
                size_t offset,
                HInstruction* index,
                size_t vector_length,
                int16_t declaring_class_def_index)
       : ref_info_(ref_info),
+        type_(DataType::ToSigned(type)),
         offset_(offset),
         index_(index),
         vector_length_(vector_length),
@@ -116,6 +118,7 @@
   }
 
   ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
+  DataType::Type GetType() const { return type_; }
   size_t GetOffset() const { return offset_; }
   HInstruction* GetIndex() const { return index_; }
   size_t GetVectorLength() const { return vector_length_; }
@@ -149,6 +152,10 @@
  private:
   // Reference for instance/static field, array element or vector data.
   ReferenceInfo* const ref_info_;
+  // Type of data residing at HeapLocation (always signed for integral
+  // data since e.g. a[i] and a[i] & 0xff are represented by differently
+  // signed types; char vs short are disambiguated through the reference).
+  const DataType::Type type_;
   // Offset of static/instance field.
   // Invalid when this HeapLocation is not field.
   const size_t offset_;
@@ -237,19 +244,31 @@
     DCHECK(object != nullptr);
     DCHECK(field != nullptr);
     return FindHeapLocationIndex(FindReferenceInfoOf(HuntForOriginalReference(object)),
+                                 field->GetFieldType(),
                                  field->GetFieldOffset().SizeValue(),
                                  nullptr,
                                  HeapLocation::kScalar,
                                  field->GetDeclaringClassDefIndex());
   }
 
-  size_t GetArrayHeapLocation(HInstruction* array,
-                              HInstruction* index,
-                              size_t vector_length = HeapLocation::kScalar) const {
-    DCHECK(array != nullptr);
-    DCHECK(index != nullptr);
-    DCHECK_GE(vector_length, HeapLocation::kScalar);
+  size_t GetArrayHeapLocation(HInstruction* instruction) const {
+    DCHECK(instruction != nullptr);
+    HInstruction* array = instruction->InputAt(0);
+    HInstruction* index = instruction->InputAt(1);
+    DataType::Type type = instruction->GetType();
+    size_t vector_length = HeapLocation::kScalar;
+    if (instruction->IsArraySet()) {
+      type = instruction->AsArraySet()->GetComponentType();
+    } else if (instruction->IsVecStore() ||
+               instruction->IsVecLoad()) {
+      HVecOperation* vec_op = instruction->AsVecOperation();
+      type = vec_op->GetPackedType();
+      vector_length = vec_op->GetVectorLength();
+    } else {
+      DCHECK(instruction->IsArrayGet());
+    }
     return FindHeapLocationIndex(FindReferenceInfoOf(HuntForOriginalReference(array)),
+                                 type,
                                  HeapLocation::kInvalidFieldOffset,
                                  index,
                                  vector_length,
@@ -279,13 +298,16 @@
   // In later analysis, ComputeMayAlias() and MayAlias() compute and tell whether
   // these indexes alias.
   size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
+                               DataType::Type type,
                                size_t offset,
                                HInstruction* index,
                                size_t vector_length,
                                int16_t declaring_class_def_index) const {
+    DataType::Type lookup_type = DataType::ToSigned(type);
     for (size_t i = 0; i < heap_locations_.size(); i++) {
       HeapLocation* loc = heap_locations_[i];
       if (loc->GetReferenceInfo() == ref_info &&
+          loc->GetType() == lookup_type &&
           loc->GetOffset() == offset &&
           loc->GetIndex() == index &&
           loc->GetVectorLength() == vector_length &&
@@ -425,6 +447,7 @@
   }
 
   HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
+                                        DataType::Type type,
                                         size_t offset,
                                         HInstruction* index,
                                         size_t vector_length,
@@ -432,10 +455,10 @@
     HInstruction* original_ref = HuntForOriginalReference(ref);
     ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
     size_t heap_location_idx = FindHeapLocationIndex(
-        ref_info, offset, index, vector_length, declaring_class_def_index);
+        ref_info, type, offset, index, vector_length, declaring_class_def_index);
     if (heap_location_idx == kHeapLocationNotFound) {
       HeapLocation* heap_loc = new (GetGraph()->GetAllocator())
-          HeapLocation(ref_info, offset, index, vector_length, declaring_class_def_index);
+          HeapLocation(ref_info, type, offset, index, vector_length, declaring_class_def_index);
       heap_locations_.push_back(heap_loc);
       return heap_loc;
     }
@@ -446,17 +469,23 @@
     if (field_info.IsVolatile()) {
       has_volatile_ = true;
     }
+    DataType::Type type = field_info.GetFieldType();
     const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
     const size_t offset = field_info.GetFieldOffset().SizeValue();
     return GetOrCreateHeapLocation(ref,
+                                   type,
                                    offset,
                                    nullptr,
                                    HeapLocation::kScalar,
                                    declaring_class_def_index);
   }
 
-  void VisitArrayAccess(HInstruction* array, HInstruction* index, size_t vector_length) {
+  void VisitArrayAccess(HInstruction* array,
+                        HInstruction* index,
+                        DataType::Type type,
+                        size_t vector_length) {
     GetOrCreateHeapLocation(array,
+                            type,
                             HeapLocation::kInvalidFieldOffset,
                             index,
                             vector_length,
@@ -510,28 +539,32 @@
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
-    VisitArrayAccess(array, index, HeapLocation::kScalar);
+    DataType::Type type = instruction->GetType();
+    VisitArrayAccess(array, index, type, HeapLocation::kScalar);
     CreateReferenceInfoForReferenceType(instruction);
   }
 
   void VisitArraySet(HArraySet* instruction) OVERRIDE {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
-    VisitArrayAccess(array, index, HeapLocation::kScalar);
+    DataType::Type type = instruction->GetComponentType();
+    VisitArrayAccess(array, index, type, HeapLocation::kScalar);
     has_heap_stores_ = true;
   }
 
   void VisitVecLoad(HVecLoad* instruction) OVERRIDE {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
-    VisitArrayAccess(array, index, instruction->GetVectorLength());
+    DataType::Type type = instruction->GetPackedType();
+    VisitArrayAccess(array, index, type, instruction->GetVectorLength());
     CreateReferenceInfoForReferenceType(instruction);
   }
 
   void VisitVecStore(HVecStore* instruction) OVERRIDE {
     HInstruction* array = instruction->InputAt(0);
     HInstruction* index = instruction->InputAt(1);
-    VisitArrayAccess(array, index, instruction->GetVectorLength());
+    DataType::Type type = instruction->GetPackedType();
+    VisitArrayAccess(array, index, type, instruction->GetVectorLength());
     has_heap_stores_ = true;
   }
 
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index 56361a8..bfe7a4f 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -78,12 +78,16 @@
 
   // Test queries on HeapLocationCollector's ref info and index records.
   ReferenceInfo* ref = heap_location_collector.FindReferenceInfoOf(array);
+  DataType::Type type = DataType::Type::kInt32;
   size_t field = HeapLocation::kInvalidFieldOffset;
   size_t vec = HeapLocation::kScalar;
   size_t class_def = HeapLocation::kDeclaringClassDefIndexForArrays;
-  size_t loc1 = heap_location_collector.FindHeapLocationIndex(ref, field, c1, vec, class_def);
-  size_t loc2 = heap_location_collector.FindHeapLocationIndex(ref, field, c2, vec, class_def);
-  size_t loc3 = heap_location_collector.FindHeapLocationIndex(ref, field, index, vec, class_def);
+  size_t loc1 = heap_location_collector.FindHeapLocationIndex(
+      ref, type, field, c1, vec, class_def);
+  size_t loc2 = heap_location_collector.FindHeapLocationIndex(
+      ref, type, field, c2, vec, class_def);
+  size_t loc3 = heap_location_collector.FindHeapLocationIndex(
+      ref, type, field, index, vec, class_def);
   // must find this reference info for array in HeapLocationCollector.
   ASSERT_TRUE(ref != nullptr);
   // must find these heap locations;
@@ -246,28 +250,28 @@
   size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
 
   // Test alias: array[0] and array[1]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c1);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set1);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set2);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+0] and array[i-0]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub0);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set3);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set5);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+1] and array[i-1]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub1);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set4);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set6);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+1] and array[1-i]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, rev_sub1);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set4);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set7);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+1] and array[i-(-1)]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add1);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_neg1);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set4);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set8);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 }
 
@@ -409,70 +413,75 @@
   size_t loc1, loc2;
 
   // Test alias: array[0] and array[0,1,2,3]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_0);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_0);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
+  // Test alias: array[0] and array[1,2,3,4]
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_0);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_1);
+  ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
   // Test alias: array[0] and array[8,9,10,11]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_0);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_8);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[1] and array[8,9,10,11]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_1);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_8);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[1] and array[0,1,2,3]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_1);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_0);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[0,1,2,3] and array[8,9,10,11]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c8, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(vstore_0);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_8);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[0,1,2,3] and array[1,2,3,4]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c1, 4);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(vstore_0);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_1);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[0] and array[i,i+1,i+2,i+3]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, c0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_0);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_i);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i] and array[0,1,2,3]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, c0, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_0);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i] and array[i,i+1,i+2,i+3]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_i);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i] and array[i+8,i+9,i+10,i+11]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, index);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_i_add8);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+6,i+7,i+8,i+9] and array[i+8,i+9,i+10,i+11]
   // Test partial overlap.
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 4);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(vstore_i_add6);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_i_add8);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+6,i+7] and array[i,i+1,i+2,i+3]
   // Test different vector lengths.
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 2);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, index, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(vstore_i_add6_vlen2);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_i);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+6,i+7] and array[i+8,i+9,i+10,i+11]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, i_add6, 2);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, i_add8, 4);
+  loc1 = heap_location_collector.GetArrayHeapLocation(vstore_i_add6_vlen2);
+  loc2 = heap_location_collector.GetArrayHeapLocation(vstore_i_add8);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 }
 
@@ -563,33 +572,33 @@
   size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
 
   // Test alias: array[i+0x80000000] and array[i-0x80000000]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x80000000);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_1);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_2);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+0x10] and array[i-0xFFFFFFF0]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x10);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0xFFFFFFF0);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_3);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_4);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+0x7FFFFFFF] and array[i-0x80000001]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0x7FFFFFFF);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000001);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_5);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_6);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Test alias: array[i+0] and array[i-0]
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_7);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_8);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Should not alias:
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000001);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_2);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_6);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 
   // Should not alias:
-  loc1 = heap_location_collector.GetArrayHeapLocation(array, add_0);
-  loc2 = heap_location_collector.GetArrayHeapLocation(array, sub_0x80000000);
+  loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_7);
+  loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_2);
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
 }
 
@@ -647,10 +656,10 @@
   // times the original reference has been transformed by BoundType,
   // NullCheck, IntermediateAddress, etc.
   ASSERT_EQ(heap_location_collector.GetNumberOfHeapLocations(), 1U);
-  size_t loc1 = heap_location_collector.GetArrayHeapLocation(array, c1);
-  size_t loc2 = heap_location_collector.GetArrayHeapLocation(bound_type, c1);
-  size_t loc3 = heap_location_collector.GetArrayHeapLocation(null_check, c1);
-  size_t loc4 = heap_location_collector.GetArrayHeapLocation(inter_addr, c1);
+  size_t loc1 = heap_location_collector.GetArrayHeapLocation(array_get1);
+  size_t loc2 = heap_location_collector.GetArrayHeapLocation(array_get2);
+  size_t loc3 = heap_location_collector.GetArrayHeapLocation(array_get3);
+  size_t loc4 = heap_location_collector.GetArrayHeapLocation(array_get4);
   ASSERT_TRUE(loc1 != HeapLocationCollector::kHeapLocationNotFound);
   ASSERT_EQ(loc1, loc2);
   ASSERT_EQ(loc1, loc3);
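
In the updated tests the heap location is looked up from the access instruction itself
(GetArrayHeapLocation now takes the HArrayGet/HArraySet/HVecLoad/HVecStore), since the
array, index, type, and vector length can all be recovered from it. The aliasing
expectations for constant indices follow a plain interval-overlap rule; a small
self-contained sketch of that rule, not the ART implementation:

    #include <cassert>
    #include <cstdint>

    // Two constant-index accesses cover [idx, idx + vector_length); scalars have
    // length 1. They may alias exactly when the intervals overlap.
    bool ConstantIndexesMayAlias(int64_t idx1, size_t len1, int64_t idx2, size_t len2) {
      int64_t end1 = idx1 + static_cast<int64_t>(len1);
      int64_t end2 = idx2 + static_cast<int64_t>(len2);
      return idx1 < end2 && idx2 < end1;
    }

    int main() {
      assert(ConstantIndexesMayAlias(0, 1, 0, 4));   // array[0] vs array[0,1,2,3]
      assert(!ConstantIndexesMayAlias(0, 1, 1, 4));  // array[0] vs array[1,2,3,4]
      assert(ConstantIndexesMayAlias(1, 1, 0, 4));   // array[1] vs array[0,1,2,3]
      assert(!ConstantIndexesMayAlias(0, 4, 8, 4));  // array[0,1,2,3] vs array[8,9,10,11]
      return 0;
    }

The assertions mirror the expectations in the test hunks above.
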
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index d598ff5..28ac942 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -160,7 +160,7 @@
 
   // Scan the list of removed loads to see if we can reuse `type_conversion`, if
   // the other removed load has the same substitute and type and is dominated
-  // by `type_conversioni`.
+  // by `type_conversion`.
   void TryToReuseTypeConversion(HInstruction* type_conversion, size_t index) {
     size_t size = removed_loads_.size();
     HInstruction* load = removed_loads_[index];
@@ -458,8 +458,13 @@
       }
       if (from_all_predecessors) {
         if (ref_info->IsSingletonAndRemovable() &&
-            block->IsSingleReturnOrReturnVoidAllowingPhis()) {
-          // Values in the singleton are not needed anymore.
+            (block->IsSingleReturnOrReturnVoidAllowingPhis() ||
+             (block->EndsWithReturn() && (merged_value != kUnknownHeapValue ||
+                                          merged_store_value != kUnknownHeapValue)))) {
+          // Values in the singleton are not needed anymore:
+          // (1) if this block consists of a sole return, or
+          // (2) if this block returns and a usable merged value is obtained
+          //     (loads prior to the return will always use that value).
         } else if (!IsStore(merged_value)) {
           // We don't track merged value as a store anymore. We have to
           // hold the stores in predecessors live here.
@@ -542,16 +547,7 @@
     }
   }
 
-  void VisitGetLocation(HInstruction* instruction,
-                        HInstruction* ref,
-                        size_t offset,
-                        HInstruction* index,
-                        size_t vector_length,
-                        int16_t declaring_class_def_index) {
-    HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
-    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
-    size_t idx = heap_location_collector_.FindHeapLocationIndex(
-        ref_info, offset, index, vector_length, declaring_class_def_index);
+  void VisitGetLocation(HInstruction* instruction, size_t idx) {
     DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
     ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
@@ -569,23 +565,7 @@
       heap_values[idx] = instruction;
       KeepStoresIfAliasedToLocation(heap_values, idx);
     } else {
-      if (DataType::Kind(heap_value->GetType()) != DataType::Kind(instruction->GetType())) {
-        // The only situation where the same heap location has different type is when
-        // we do an array get on an instruction that originates from the null constant
-        // (the null could be behind a field access, an array access, a null check or
-        // a bound type).
-        // In order to stay properly typed on primitive types, we do not eliminate
-        // the array gets.
-        if (kIsDebugBuild) {
-          DCHECK(heap_value->IsArrayGet()) << heap_value->DebugName();
-          DCHECK(instruction->IsArrayGet()) << instruction->DebugName();
-        }
-        // Load isn't eliminated. Put the load as the value into the HeapLocation.
-        // This acts like GVN but with better aliasing analysis.
-        heap_values[idx] = instruction;
-        KeepStoresIfAliasedToLocation(heap_values, idx);
-        return;
-      }
+      // Load is eliminated; the type is part of the heap location, so the types match.
       AddRemovedLoad(instruction, heap_value);
       TryRemovingNullCheck(instruction);
     }
@@ -610,21 +590,11 @@
     return false;
   }
 
-  void VisitSetLocation(HInstruction* instruction,
-                        HInstruction* ref,
-                        size_t offset,
-                        HInstruction* index,
-                        size_t vector_length,
-                        int16_t declaring_class_def_index,
-                        HInstruction* value) {
+  void VisitSetLocation(HInstruction* instruction, size_t idx, HInstruction* value) {
+    DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
     DCHECK(!IsStore(value)) << value->DebugName();
     // value may already have a substitute.
     value = FindSubstitute(value);
-    HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
-    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
-    size_t idx = heap_location_collector_.FindHeapLocationIndex(
-        ref_info, offset, index, vector_length, declaring_class_def_index);
-    DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
     ScopedArenaVector<HInstruction*>& heap_values =
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
     HInstruction* heap_value = heap_values[idx];
@@ -644,7 +614,8 @@
       } else if (!loop_info->IsIrreducible()) {
         // instruction is a store in the loop so the loop must do write.
         DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
-        if (ref_info->IsSingleton() && !loop_info->IsDefinedOutOfTheLoop(original_ref)) {
+        ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(idx)->GetReferenceInfo();
+        if (ref_info->IsSingleton() && !loop_info->IsDefinedOutOfTheLoop(ref_info->GetReference())) {
           // original_ref is created inside the loop. Value stored to it isn't needed at
           // the loop header. This is true for outer loops also.
           possibly_redundant = true;
@@ -686,79 +657,39 @@
   }
 
   void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
-    HInstruction* obj = instruction->InputAt(0);
-    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
-    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
-    VisitGetLocation(instruction,
-                     obj,
-                     offset,
-                     nullptr,
-                     HeapLocation::kScalar,
-                     declaring_class_def_index);
+    HInstruction* object = instruction->InputAt(0);
+    const FieldInfo& field = instruction->GetFieldInfo();
+    VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(object, &field));
   }
 
   void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
-    HInstruction* obj = instruction->InputAt(0);
-    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
-    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    HInstruction* object = instruction->InputAt(0);
+    const FieldInfo& field = instruction->GetFieldInfo();
     HInstruction* value = instruction->InputAt(1);
-    VisitSetLocation(instruction,
-                     obj,
-                     offset,
-                     nullptr,
-                     HeapLocation::kScalar,
-                     declaring_class_def_index,
-                     value);
+    size_t idx = heap_location_collector_.GetFieldHeapLocation(object, &field);
+    VisitSetLocation(instruction, idx, value);
   }
 
   void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
     HInstruction* cls = instruction->InputAt(0);
-    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
-    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
-    VisitGetLocation(instruction,
-                     cls,
-                     offset,
-                     nullptr,
-                     HeapLocation::kScalar,
-                     declaring_class_def_index);
+    const FieldInfo& field = instruction->GetFieldInfo();
+    VisitGetLocation(instruction, heap_location_collector_.GetFieldHeapLocation(cls, &field));
   }
 
   void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
     HInstruction* cls = instruction->InputAt(0);
-    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
-    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
-    HInstruction* value = instruction->InputAt(1);
-    VisitSetLocation(instruction,
-                     cls,
-                     offset,
-                     nullptr,
-                     HeapLocation::kScalar,
-                     declaring_class_def_index,
-                     value);
+    const FieldInfo& field = instruction->GetFieldInfo();
+    size_t idx = heap_location_collector_.GetFieldHeapLocation(cls, &field);
+    VisitSetLocation(instruction, idx, instruction->InputAt(1));
   }
 
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
-    HInstruction* array = instruction->InputAt(0);
-    HInstruction* index = instruction->InputAt(1);
-    VisitGetLocation(instruction,
-                     array,
-                     HeapLocation::kInvalidFieldOffset,
-                     index,
-                     HeapLocation::kScalar,
-                     HeapLocation::kDeclaringClassDefIndexForArrays);
+    VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction));
   }
 
   void VisitArraySet(HArraySet* instruction) OVERRIDE {
-    HInstruction* array = instruction->InputAt(0);
-    HInstruction* index = instruction->InputAt(1);
-    HInstruction* value = instruction->InputAt(2);
-    VisitSetLocation(instruction,
-                     array,
-                     HeapLocation::kInvalidFieldOffset,
-                     index,
-                     HeapLocation::kScalar,
-                     HeapLocation::kDeclaringClassDefIndexForArrays,
-                     value);
+    size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
+    VisitSetLocation(instruction, idx, instruction->InputAt(2));
   }
 
   void VisitDeoptimize(HDeoptimize* instruction) {
@@ -971,6 +902,7 @@
     lse_visitor.VisitBasicBlock(block);
   }
   lse_visitor.RemoveInstructions();
+
   return true;
 }
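
The load-store-elimination visitors above now resolve a heap-location index once, via
GetFieldHeapLocation or GetArrayHeapLocation, and pass only that index to
VisitGetLocation/VisitSetLocation. A condensed sketch of that shape, with hypothetical
stand-in types rather than the ART visitor:

    #include <cstddef>
    #include <vector>

    struct Instruction {};  // stand-in for HInstruction

    constexpr size_t kNotFound = static_cast<size_t>(-1);

    class LseSketch {
     public:
      explicit LseSketch(size_t num_locations) : heap_values_(num_locations, nullptr) {}

      // Loads: reuse the cached value for this location if one is known,
      // otherwise remember the load itself as the location's value.
      void VisitGet(Instruction* load, size_t idx) {
        if (idx == kNotFound) return;
        if (heap_values_[idx] != nullptr) {
          removed_loads_.push_back(load);  // replaceable by the cached value
        } else {
          heap_values_[idx] = load;
        }
      }

      // Stores: the stored value becomes the known value for this location.
      void VisitSet(size_t idx, Instruction* value) {
        if (idx == kNotFound) return;
        heap_values_[idx] = value;
      }

     private:
      std::vector<Instruction*> heap_values_;
      std::vector<Instruction*> removed_loads_;
    };
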
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 5f2833e..7f78dc2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1951,6 +1951,11 @@
   return !GetInstructions().IsEmpty() && GetLastInstruction()->IsControlFlow();
 }
 
+bool HBasicBlock::EndsWithReturn() const {
+  return !GetInstructions().IsEmpty() &&
+      (GetLastInstruction()->IsReturn() || GetLastInstruction()->IsReturnVoid());
+}
+
 bool HBasicBlock::EndsWithIf() const {
   return !GetInstructions().IsEmpty() && GetLastInstruction()->IsIf();
 }
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 54882ff..09d9c57 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1285,6 +1285,7 @@
   void SetLifetimeEnd(size_t end) { lifetime_end_ = end; }
 
   bool EndsWithControlFlowInstruction() const;
+  bool EndsWithReturn() const;
   bool EndsWithIf() const;
   bool EndsWithTryBoundary() const;
   bool HasSinglePhi() const;
@@ -1383,6 +1384,7 @@
   M(LessThanOrEqual, Condition)                                         \
   M(LoadClass, Instruction)                                             \
   M(LoadException, Instruction)                                         \
+  M(LoadMethodHandle, Instruction)                                      \
   M(LoadMethodType, Instruction)                                        \
   M(LoadString, Instruction)                                            \
   M(LongConstant, Constant)                                             \
@@ -6500,18 +6502,18 @@
   special_input->AddUseAt(this, 0);
 }
 
-class HLoadMethodType FINAL : public HInstruction {
+class HLoadMethodHandle FINAL : public HInstruction {
  public:
-  HLoadMethodType(HCurrentMethod* current_method,
-                  uint16_t proto_idx,
+  HLoadMethodHandle(HCurrentMethod* current_method,
+                  uint16_t method_handle_idx,
                   const DexFile& dex_file,
                   uint32_t dex_pc)
-      : HInstruction(kLoadMethodType,
+      : HInstruction(kLoadMethodHandle,
                      DataType::Type::kReference,
                      SideEffectsForArchRuntimeCalls(),
                      dex_pc),
         special_input_(HUserRecord<HInstruction*>(current_method)),
-        proto_idx_(proto_idx),
+        method_handle_idx_(method_handle_idx),
         dex_file_(dex_file) {
   }
 
@@ -6523,7 +6525,51 @@
 
   bool IsClonable() const OVERRIDE { return true; }
 
-  uint16_t GetProtoIndex() const { return proto_idx_; }
+  uint16_t GetMethodHandleIndex() const { return method_handle_idx_; }
+
+  const DexFile& GetDexFile() const { return dex_file_; }
+
+  static SideEffects SideEffectsForArchRuntimeCalls() {
+    return SideEffects::CanTriggerGC();
+  }
+
+  DECLARE_INSTRUCTION(LoadMethodHandle);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(LoadMethodHandle);
+
+ private:
+  // The special input is the HCurrentMethod for kRuntimeCall.
+  HUserRecord<HInstruction*> special_input_;
+
+  const uint16_t method_handle_idx_;
+  const DexFile& dex_file_;
+};
+
+class HLoadMethodType FINAL : public HInstruction {
+ public:
+  HLoadMethodType(HCurrentMethod* current_method,
+                  dex::ProtoIndex proto_index,
+                  const DexFile& dex_file,
+                  uint32_t dex_pc)
+      : HInstruction(kLoadMethodType,
+                     DataType::Type::kReference,
+                     SideEffectsForArchRuntimeCalls(),
+                     dex_pc),
+        special_input_(HUserRecord<HInstruction*>(current_method)),
+        proto_index_(proto_index),
+        dex_file_(dex_file) {
+  }
+
+  using HInstruction::GetInputRecords;  // Keep the const version visible.
+  ArrayRef<HUserRecord<HInstruction*>> GetInputRecords() OVERRIDE FINAL {
+    return ArrayRef<HUserRecord<HInstruction*>>(
+        &special_input_, (special_input_.GetInstruction() != nullptr) ? 1u : 0u);
+  }
+
+  bool IsClonable() const OVERRIDE { return true; }
+
+  dex::ProtoIndex GetProtoIndex() const { return proto_index_; }
 
   const DexFile& GetDexFile() const { return dex_file_; }
 
@@ -6540,7 +6586,7 @@
   // The special input is the HCurrentMethod for kRuntimeCall.
   HUserRecord<HInstruction*> special_input_;
 
-  uint16_t proto_idx_;
+  const dex::ProtoIndex proto_index_;
   const DexFile& dex_file_;
 };
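
With the split above, HLoadMethodType carries a dex::ProtoIndex while HLoadMethodHandle
keeps a raw uint16_t method-handle index. A typed index wrapper keeps the two kinds of
indices from being interchanged by accident; a minimal sketch of the idea (simplified,
not the actual dex::ProtoIndex definition):

    #include <cstdint>

    // Explicit construction plus a distinct type: a ProtoIndex cannot be passed
    // where a plain method-handle index (uint16_t) is expected, and vice versa.
    class ProtoIndex {
     public:
      explicit constexpr ProtoIndex(uint16_t value) : value_(value) {}
      constexpr uint16_t Value() const { return value_; }

     private:
      uint16_t value_;
    };

    // Hypothetical signatures showing how the distinct types keep call sites honest.
    void LoadMethodType(ProtoIndex proto_index);
    void LoadMethodHandle(uint16_t method_handle_idx);
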
 
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9a26f2f..f246228 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -50,6 +50,7 @@
   kNotCompiledThrowCatchLoop,
   kNotCompiledAmbiguousArrayOp,
   kNotCompiledHugeMethod,
+  kNotCompiledIrreducibleAndStringInit,
   kNotCompiledLargeMethodNoBranches,
   kNotCompiledMalformedOpcode,
   kNotCompiledNoCodegen,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index b15a0ea..ecfa790 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -59,6 +59,12 @@
   return GetRootHandle(handles_, ClassLinker::kJavaLangClass, &class_class_handle_);
 }
 
+ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetMethodHandleClassHandle() {
+  return GetRootHandle(handles_,
+                       ClassLinker::kJavaLangInvokeMethodHandleImpl,
+                       &method_handle_class_handle_);
+}
+
 ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetMethodTypeClassHandle() {
   return GetRootHandle(handles_,
                        ClassLinker::kJavaLangInvokeMethodType,
@@ -95,6 +101,7 @@
   void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
   void VisitInstanceOf(HInstanceOf* load_class) OVERRIDE;
   void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
+  void VisitLoadMethodHandle(HLoadMethodHandle* instr) OVERRIDE;
   void VisitLoadMethodType(HLoadMethodType* instr) OVERRIDE;
   void VisitLoadString(HLoadString* instr) OVERRIDE;
   void VisitLoadException(HLoadException* instr) OVERRIDE;
@@ -675,6 +682,12 @@
   instr->SetReferenceTypeInfo(instr->InputAt(0)->GetReferenceTypeInfo());
 }
 
+void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) {
+  instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
+      handle_cache_->GetMethodHandleClassHandle(),
+      /* is_exact */ true));
+}
+
 void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) {
   instr->SetReferenceTypeInfo(
       ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true));
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index da2193d..d36d592 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -75,6 +75,7 @@
 
     ReferenceTypeInfo::TypeHandle GetObjectClassHandle();
     ReferenceTypeInfo::TypeHandle GetClassClassHandle();
+    ReferenceTypeInfo::TypeHandle GetMethodHandleClassHandle();
     ReferenceTypeInfo::TypeHandle GetMethodTypeClassHandle();
     ReferenceTypeInfo::TypeHandle GetStringClassHandle();
     ReferenceTypeInfo::TypeHandle GetThrowableClassHandle();
@@ -84,6 +85,7 @@
 
     ReferenceTypeInfo::TypeHandle object_class_handle_;
     ReferenceTypeInfo::TypeHandle class_class_handle_;
+    ReferenceTypeInfo::TypeHandle method_handle_class_handle_;
     ReferenceTypeInfo::TypeHandle method_type_class_handle_;
     ReferenceTypeInfo::TypeHandle string_class_handle_;
     ReferenceTypeInfo::TypeHandle throwable_class_handle_;
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index e014efa..588ea03 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -70,19 +70,19 @@
   return false;
 }
 
-size_t SchedulingGraph::ArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const {
+size_t SchedulingGraph::ArrayAccessHeapLocation(HInstruction* instruction) const {
   DCHECK(heap_location_collector_ != nullptr);
-  size_t heap_loc = heap_location_collector_->GetArrayHeapLocation(array, index);
+  size_t heap_loc = heap_location_collector_->GetArrayHeapLocation(instruction);
   // This array access should be analyzed and added to HeapLocationCollector before.
   DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
   return heap_loc;
 }
 
-bool SchedulingGraph::ArrayAccessMayAlias(const HInstruction* node,
-                                          const HInstruction* other) const {
+bool SchedulingGraph::ArrayAccessMayAlias(HInstruction* node,
+                                          HInstruction* other) const {
   DCHECK(heap_location_collector_ != nullptr);
-  size_t node_heap_loc = ArrayAccessHeapLocation(node->InputAt(0), node->InputAt(1));
-  size_t other_heap_loc = ArrayAccessHeapLocation(other->InputAt(0), other->InputAt(1));
+  size_t node_heap_loc = ArrayAccessHeapLocation(node);
+  size_t other_heap_loc = ArrayAccessHeapLocation(other);
 
   // For example: arr[0] and arr[0]
   if (node_heap_loc == other_heap_loc) {
@@ -194,8 +194,8 @@
   return true;
 }
 
-bool SchedulingGraph::HasMemoryDependency(const HInstruction* node,
-                                          const HInstruction* other) const {
+bool SchedulingGraph::HasMemoryDependency(HInstruction* node,
+                                          HInstruction* other) const {
   if (!MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
     return false;
   }
@@ -264,8 +264,8 @@
 
 // Check whether `node` depends on `other`, taking into account `SideEffect`
 // information and `CanThrow` information.
-bool SchedulingGraph::HasSideEffectDependency(const HInstruction* node,
-                                              const HInstruction* other) const {
+bool SchedulingGraph::HasSideEffectDependency(HInstruction* node,
+                                              HInstruction* other) const {
   if (HasMemoryDependency(node, other)) {
     return true;
   }
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 51cd20a..8e98f19 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -310,12 +310,12 @@
   void AddOtherDependency(SchedulingNode* node, SchedulingNode* dependency) {
     AddDependency(node, dependency, /*is_data_dependency*/false);
   }
-  bool HasMemoryDependency(const HInstruction* node, const HInstruction* other) const;
+  bool HasMemoryDependency(HInstruction* node, HInstruction* other) const;
   bool HasExceptionDependency(const HInstruction* node, const HInstruction* other) const;
-  bool HasSideEffectDependency(const HInstruction* node, const HInstruction* other) const;
-  bool ArrayAccessMayAlias(const HInstruction* node, const HInstruction* other) const;
+  bool HasSideEffectDependency(HInstruction* node, HInstruction* other) const;
+  bool ArrayAccessMayAlias(HInstruction* node, HInstruction* other) const;
   bool FieldAccessMayAlias(const HInstruction* node, const HInstruction* other) const;
-  size_t ArrayAccessHeapLocation(HInstruction* array, HInstruction* index) const;
+  size_t ArrayAccessHeapLocation(HInstruction* instruction) const;
   size_t FieldAccessHeapLocation(HInstruction* obj, const FieldInfo* field) const;
 
   // Add dependencies nodes for the given `HInstruction`: inputs, environments, and side-effects.
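
With ArrayAccessHeapLocation taking the access instruction itself, the scheduler's
dependency test reduces to comparing heap locations and asking the collector whether
they may alias. A rough stand-alone sketch of that decision (simplified; the real
HasMemoryDependency/HasSideEffectDependency also consider side-effect flags, field
accesses, and exceptions):

    #include <cstddef>

    // Minimal model: each access has a resolved heap location and a read/write flag.
    struct Access {
      size_t heap_location;
      bool is_write;
    };

    // Hypothetical alias oracle standing in for HeapLocationCollector::MayAlias.
    bool MayAlias(size_t loc1, size_t loc2) {
      return loc1 == loc2;  // placeholder: only identical locations alias
    }

    // Two accesses need an ordering (scheduling) dependency if they may touch the
    // same location and at least one of them writes.
    bool HasMemoryDependency(const Access& node, const Access& other) {
      if (!node.is_write && !other.is_write) {
        return false;  // two reads never conflict
      }
      return MayAlias(node.heap_location, other.heap_location);
    }
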
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index fb15fc8..d4cae72 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -296,38 +296,38 @@
     size_t loc2 = HeapLocationCollector::kHeapLocationNotFound;
 
     // Test side effect dependency: array[0] and array[1]
-    loc1 = heap_location_collector.GetArrayHeapLocation(arr, c0);
-    loc2 = heap_location_collector.GetArrayHeapLocation(arr, c1);
+    loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_0);
+    loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_1);
     ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
     ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_1, arr_set_0));
 
     // Test side effect dependency based on LSA analysis: array[i] and array[j]
-    loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
-    loc2 = heap_location_collector.GetArrayHeapLocation(arr, j);
+    loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+    loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_j);
     ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
     ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_j, arr_set_i));
 
     // Test side effect dependency based on LSA analysis: array[i] and array[i+0]
-    loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
-    loc2 = heap_location_collector.GetArrayHeapLocation(arr, add0);
+    loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+    loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_add0);
     ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
     ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_add0, arr_set_i));
 
     // Test side effect dependency based on LSA analysis: array[i] and array[i-0]
-    loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
-    loc2 = heap_location_collector.GetArrayHeapLocation(arr, sub0);
+    loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+    loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_sub0);
     ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
     ASSERT_TRUE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub0, arr_set_i));
 
     // Test side effect dependency based on LSA analysis: array[i] and array[i+1]
-    loc1 = heap_location_collector.GetArrayHeapLocation(arr, i);
-    loc2 = heap_location_collector.GetArrayHeapLocation(arr, add1);
+    loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_i);
+    loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_add1);
     ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
     ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_add1, arr_set_i));
 
     // Test side effect dependency based on LSA analysis: array[i+1] and array[i-1]
-    loc1 = heap_location_collector.GetArrayHeapLocation(arr, add1);
-    loc2 = heap_location_collector.GetArrayHeapLocation(arr, sub1);
+    loc1 = heap_location_collector.GetArrayHeapLocation(arr_set_add1);
+    loc2 = heap_location_collector.GetArrayHeapLocation(arr_set_sub1);
     ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
     ASSERT_FALSE(scheduling_graph.HasImmediateOtherDependency(arr_set_sub1, arr_set_add1));
 
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 7010e3f..aa28c8b 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -51,15 +51,7 @@
   if (sp_mask != nullptr) {
     stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
   }
-  if (inlining_depth > 0) {
-    number_of_stack_maps_with_inline_info_++;
-  }
 
-  // Note: dex_pc can be kNoDexPc for native method intrinsics.
-  if (dex_pc != dex::kDexNoIndex && (dex_pc_max_ == dex::kDexNoIndex || dex_pc_max_ < dex_pc)) {
-    dex_pc_max_ = dex_pc;
-  }
-  register_mask_max_ = std::max(register_mask_max_, register_mask);
   current_dex_register_ = 0;
 }
 
@@ -146,51 +138,6 @@
   current_inline_info_ = InlineInfoEntry();
 }
 
-CodeOffset StackMapStream::ComputeMaxNativePcCodeOffset() const {
-  CodeOffset max_native_pc_offset;
-  for (const StackMapEntry& entry : stack_maps_) {
-    max_native_pc_offset = std::max(max_native_pc_offset, entry.native_pc_code_offset);
-  }
-  return max_native_pc_offset;
-}
-
-size_t StackMapStream::PrepareForFillIn() {
-  CodeInfoEncoding encoding;
-  encoding.dex_register_map.num_entries = 0;  // TODO: Remove this field.
-  encoding.dex_register_map.num_bytes = ComputeDexRegisterMapsSize();
-  encoding.location_catalog.num_entries = location_catalog_entries_.size();
-  encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize();
-  encoding.inline_info.num_entries = inline_infos_.size();
-  // Must be done before calling ComputeInlineInfoEncoding since ComputeInlineInfoEncoding requires
-  // dex_method_index_idx to be filled in.
-  PrepareMethodIndices();
-  ComputeInlineInfoEncoding(&encoding.inline_info.encoding,
-                            encoding.dex_register_map.num_bytes);
-  CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
-  // Prepare the CodeInfo variable-sized encoding.
-  encoding.stack_mask.encoding.num_bits = stack_mask_max_ + 1;  // Need room for max element too.
-  encoding.stack_mask.num_entries = PrepareStackMasks(encoding.stack_mask.encoding.num_bits);
-  encoding.register_mask.encoding.num_bits = MinimumBitsToStore(register_mask_max_);
-  encoding.register_mask.num_entries = PrepareRegisterMasks();
-  encoding.stack_map.num_entries = stack_maps_.size();
-  encoding.stack_map.encoding.SetFromSizes(
-      // The stack map contains compressed native PC offsets.
-      max_native_pc_offset.CompressedValue(),
-      dex_pc_max_,
-      encoding.dex_register_map.num_bytes,
-      encoding.inline_info.num_entries,
-      encoding.register_mask.num_entries,
-      encoding.stack_mask.num_entries);
-  ComputeInvokeInfoEncoding(&encoding);
-  DCHECK_EQ(code_info_encoding_.size(), 0u);
-  encoding.Compress(&code_info_encoding_);
-  encoding.ComputeTableOffsets();
-  // Compute table offsets so we can get the non header size.
-  DCHECK_EQ(encoding.HeaderSize(), code_info_encoding_.size());
-  needed_size_ = code_info_encoding_.size() + encoding.NonHeaderSize();
-  return needed_size_;
-}
-
 size_t StackMapStream::ComputeDexRegisterLocationCatalogSize() const {
   size_t size = DexRegisterLocationCatalog::kFixedSize;
   for (const DexRegisterLocation& dex_register_location : location_catalog_entries_) {
@@ -204,6 +151,10 @@
   if (num_dex_registers == 0u) {
     return 0u;  // No register map will be emitted.
   }
+  size_t number_of_live_dex_registers = live_dex_registers_mask->NumSetBits();
+  if (number_of_live_dex_registers == 0) {
+    return 0u;  // No register map will be emitted.
+  }
   DCHECK(live_dex_registers_mask != nullptr);
 
   // Size of the map in bytes.
@@ -211,7 +162,6 @@
   // Add the live bit mask for the Dex register liveness.
   size += DexRegisterMap::GetLiveBitMaskSize(num_dex_registers);
   // Compute the size of the set of live Dex register entries.
-  size_t number_of_live_dex_registers = live_dex_registers_mask->NumSetBits();
   size_t map_entries_size_in_bits =
       DexRegisterMap::SingleEntrySizeInBits(catalog_size) * number_of_live_dex_registers;
   size_t map_entries_size_in_bytes =
@@ -220,86 +170,6 @@
   return size;
 }
 
-size_t StackMapStream::ComputeDexRegisterMapsSize() const {
-  size_t size = 0;
-  for (const DexRegisterMapEntry& entry : dex_register_entries_) {
-    size += entry.ComputeSize(location_catalog_entries_.size());
-  }
-  return size;
-}
-
-void StackMapStream::ComputeInvokeInfoEncoding(CodeInfoEncoding* encoding) {
-  DCHECK(encoding != nullptr);
-  uint32_t native_pc_max = 0;
-  uint16_t method_index_max = 0;
-  size_t invoke_infos_count = 0;
-  size_t invoke_type_max = 0;
-  for (const StackMapEntry& entry : stack_maps_) {
-    if (entry.dex_method_index != dex::kDexNoIndex) {
-      native_pc_max = std::max(native_pc_max, entry.native_pc_code_offset.CompressedValue());
-      method_index_max = std::max(method_index_max, static_cast<uint16_t>(entry.dex_method_index));
-      invoke_type_max = std::max(invoke_type_max, static_cast<size_t>(entry.invoke_type));
-      ++invoke_infos_count;
-    }
-  }
-  encoding->invoke_info.num_entries = invoke_infos_count;
-  encoding->invoke_info.encoding.SetFromSizes(native_pc_max, invoke_type_max, method_index_max);
-}
-
-void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
-                                               size_t dex_register_maps_bytes) {
-  uint32_t method_index_max = 0;
-  uint32_t dex_pc_max = dex::kDexNoIndex;
-  uint32_t extra_data_max = 0;
-
-  uint32_t inline_info_index = 0;
-  for (const StackMapEntry& entry : stack_maps_) {
-    for (size_t j = 0; j < entry.inlining_depth; ++j) {
-      InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
-      if (inline_entry.method == nullptr) {
-        method_index_max = std::max(method_index_max, inline_entry.dex_method_index_idx);
-        extra_data_max = std::max(extra_data_max, 1u);
-      } else {
-        method_index_max = std::max(
-            method_index_max, High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-        extra_data_max = std::max(
-            extra_data_max, Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-      }
-      if (inline_entry.dex_pc != dex::kDexNoIndex &&
-          (dex_pc_max == dex::kDexNoIndex || dex_pc_max < inline_entry.dex_pc)) {
-        dex_pc_max = inline_entry.dex_pc;
-      }
-    }
-  }
-  DCHECK_EQ(inline_info_index, inline_infos_.size());
-
-  encoding->SetFromSizes(method_index_max, dex_pc_max, extra_data_max, dex_register_maps_bytes);
-}
-
-size_t StackMapStream::MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
-                                               size_t* current_offset,
-                                               MemoryRegion dex_register_locations_region) {
-  DCHECK(current_offset != nullptr);
-  if ((entry.num_dex_registers == 0) || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
-    // No dex register map needed.
-    return StackMap::kNoDexRegisterMap;
-  }
-  if (entry.offset == DexRegisterMapEntry::kOffsetUnassigned) {
-    // Not already copied, need to copy and and assign an offset.
-    entry.offset = *current_offset;
-    const size_t entry_size = entry.ComputeSize(location_catalog_entries_.size());
-    DexRegisterMap dex_register_map(
-        dex_register_locations_region.Subregion(entry.offset, entry_size));
-    *current_offset += entry_size;
-    // Fill in the map since it was just added.
-    FillInDexRegisterMap(dex_register_map,
-                         entry.num_dex_registers,
-                         *entry.live_dex_registers_mask,
-                         entry.locations_start_index);
-  }
-  return entry.offset;
-}
-
 void StackMapStream::FillInMethodInfo(MemoryRegion region) {
   {
     MethodInfo info(region.begin(), method_indices_.size());
@@ -318,30 +188,64 @@
   }
 }
 
-void StackMapStream::FillInCodeInfo(MemoryRegion region) {
-  DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
-  DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
+template<typename Vector>
+static MemoryRegion EncodeMemoryRegion(Vector* out, size_t* bit_offset, uint32_t bit_length) {
+  uint32_t byte_length = BitsToBytesRoundUp(bit_length);
+  EncodeVarintBits(out, bit_offset, byte_length);
+  *bit_offset = RoundUp(*bit_offset, kBitsPerByte);
+  out->resize(out->size() + byte_length);
+  MemoryRegion region(out->data() + *bit_offset / kBitsPerByte, byte_length);
+  *bit_offset += kBitsPerByte * byte_length;
+  return region;
+}
 
-  DCHECK_EQ(region.size(), needed_size_);
+template<uint32_t NumColumns>
+using ScopedBitTableBuilder = BitTableBuilder<NumColumns, ScopedArenaAllocatorAdapter<uint32_t>>;
 
-  // Note that the memory region does not have to be zeroed when we JIT code
-  // because we do not use the arena allocator there.
+size_t StackMapStream::PrepareForFillIn() {
+  size_t bit_offset = 0;
+  out_.clear();
 
-  // Write the CodeInfo header.
-  region.CopyFrom(0, MemoryRegion(code_info_encoding_.data(), code_info_encoding_.size()));
+  // Decide the offsets of dex register map entries, but do not write them out yet.
+  // Needs to be done first as it modifies the stack map entry.
+  size_t dex_register_map_bytes = 0;
+  for (DexRegisterMapEntry& entry : dex_register_entries_) {
+    size_t size = entry.ComputeSize(location_catalog_entries_.size());
+    entry.offset = size == 0 ? DexRegisterMapEntry::kOffsetUnassigned : dex_register_map_bytes;
+    dex_register_map_bytes += size;
+  }
 
-  CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  DCHECK_EQ(encoding.stack_map.num_entries, stack_maps_.size());
+  // Must be done before calling ComputeInlineInfoEncoding since ComputeInlineInfoEncoding requires
+  // dex_method_index_idx to be filled in.
+  PrepareMethodIndices();
 
-  MemoryRegion dex_register_locations_region = region.Subregion(
-      encoding.dex_register_map.byte_offset,
-      encoding.dex_register_map.num_bytes);
+  // Dedup stack masks. Needs to be done first as it modifies the stack map entry.
+  size_t stack_mask_bits = stack_mask_max_ + 1;  // Need room for max element too.
+  size_t num_stack_masks = PrepareStackMasks(stack_mask_bits);
 
-  // Set the Dex register location catalog.
-  MemoryRegion dex_register_location_catalog_region = region.Subregion(
-      encoding.location_catalog.byte_offset,
-      encoding.location_catalog.num_bytes);
+  // Dedup register masks. Needs to be done first as it modifies the stack map entry.
+  size_t num_register_masks = PrepareRegisterMasks();
+
+  // Write dex register maps.
+  MemoryRegion dex_register_map_region =
+      EncodeMemoryRegion(&out_, &bit_offset, dex_register_map_bytes * kBitsPerByte);
+  for (DexRegisterMapEntry& entry : dex_register_entries_) {
+    size_t entry_size = entry.ComputeSize(location_catalog_entries_.size());
+    if (entry_size != 0) {
+      DexRegisterMap dex_register_map(
+          dex_register_map_region.Subregion(entry.offset, entry_size));
+      FillInDexRegisterMap(dex_register_map,
+                           entry.num_dex_registers,
+                           *entry.live_dex_registers_mask,
+                           entry.locations_start_index);
+    }
+  }
+
+  // Write dex register catalog.
+  EncodeVarintBits(&out_, &bit_offset, location_catalog_entries_.size());
+  size_t location_catalog_bytes = ComputeDexRegisterLocationCatalogSize();
+  MemoryRegion dex_register_location_catalog_region =
+      EncodeMemoryRegion(&out_, &bit_offset, location_catalog_bytes * kBitsPerByte);
   DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
   // Offset in `dex_register_location_catalog` where to store the next
   // register location.
@@ -353,93 +257,87 @@
   // Ensure we reached the end of the Dex registers location_catalog.
   DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
 
-  ArenaBitVector empty_bitmask(allocator_, 0, /* expandable */ false, kArenaAllocStackMapStream);
-  uintptr_t next_dex_register_map_offset = 0;
-  uintptr_t next_inline_info_index = 0;
-  size_t invoke_info_idx = 0;
-  for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
-    StackMap stack_map = code_info.GetStackMapAt(i, encoding);
-    StackMapEntry entry = stack_maps_[i];
-
-    stack_map.SetDexPc(encoding.stack_map.encoding, entry.dex_pc);
-    stack_map.SetNativePcCodeOffset(encoding.stack_map.encoding, entry.native_pc_code_offset);
-    stack_map.SetRegisterMaskIndex(encoding.stack_map.encoding, entry.register_mask_index);
-    stack_map.SetStackMaskIndex(encoding.stack_map.encoding, entry.stack_mask_index);
-
-    size_t offset = MaybeCopyDexRegisterMap(dex_register_entries_[entry.dex_register_map_index],
-                                            &next_dex_register_map_offset,
-                                            dex_register_locations_region);
-    stack_map.SetDexRegisterMapOffset(encoding.stack_map.encoding, offset);
-
+  // Write stack maps.
+  ScopedArenaAllocatorAdapter<void> adapter = allocator_->Adapter(kArenaAllocStackMapStream);
+  ScopedBitTableBuilder<StackMap::Field::kCount> stack_map_builder((adapter));
+  ScopedBitTableBuilder<InvokeInfo::Field::kCount> invoke_info_builder((adapter));
+  ScopedBitTableBuilder<InlineInfo::Field::kCount> inline_info_builder((adapter));
+  for (const StackMapEntry& entry : stack_maps_) {
     if (entry.dex_method_index != dex::kDexNoIndex) {
-      InvokeInfo invoke_info(code_info.GetInvokeInfo(encoding, invoke_info_idx));
-      invoke_info.SetNativePcCodeOffset(encoding.invoke_info.encoding, entry.native_pc_code_offset);
-      invoke_info.SetInvokeType(encoding.invoke_info.encoding, entry.invoke_type);
-      invoke_info.SetMethodIndexIdx(encoding.invoke_info.encoding, entry.dex_method_index_idx);
-      ++invoke_info_idx;
+      invoke_info_builder.AddRow(
+          entry.native_pc_code_offset.CompressedValue(),
+          entry.invoke_type,
+          entry.dex_method_index_idx);
     }
 
     // Set the inlining info.
-    if (entry.inlining_depth != 0) {
-      InlineInfo inline_info = code_info.GetInlineInfo(next_inline_info_index, encoding);
-
-      // Fill in the index.
-      stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, next_inline_info_index);
-      DCHECK_EQ(next_inline_info_index, entry.inline_infos_start_index);
-      next_inline_info_index += entry.inlining_depth;
-
-      inline_info.SetDepth(encoding.inline_info.encoding, entry.inlining_depth);
-      DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
-
-      for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
-        InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
-        if (inline_entry.method != nullptr) {
-          inline_info.SetMethodIndexIdxAtDepth(
-              encoding.inline_info.encoding,
-              depth,
-              High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-          inline_info.SetExtraDataAtDepth(
-              encoding.inline_info.encoding,
-              depth,
-              Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
-        } else {
-          inline_info.SetMethodIndexIdxAtDepth(encoding.inline_info.encoding,
-                                               depth,
-                                               inline_entry.dex_method_index_idx);
-          inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
-        }
-        inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
-        size_t dex_register_map_offset = MaybeCopyDexRegisterMap(
-            dex_register_entries_[inline_entry.dex_register_map_index],
-            &next_dex_register_map_offset,
-            dex_register_locations_region);
-        inline_info.SetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding,
-                                                   depth,
-                                                   dex_register_map_offset);
+    uint32_t inline_info_index = StackMap::kNoValue;
+    DCHECK_LE(entry.inline_infos_start_index + entry.inlining_depth, inline_infos_.size());
+    for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
+      InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
+      uint32_t method_index_idx = inline_entry.dex_method_index_idx;
+      uint32_t extra_data = 1;
+      if (inline_entry.method != nullptr) {
+        method_index_idx = High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method));
+        extra_data = Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method));
       }
-    } else if (encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      stack_map.SetInlineInfoIndex(encoding.stack_map.encoding, StackMap::kNoInlineInfo);
-    }
-  }
-
-  // Write stack masks table.
-  const size_t stack_mask_bits = encoding.stack_mask.encoding.BitSize();
-  if (stack_mask_bits > 0) {
-    size_t stack_mask_bytes = RoundUp(stack_mask_bits, kBitsPerByte) / kBitsPerByte;
-    for (size_t i = 0; i < encoding.stack_mask.num_entries; ++i) {
-      MemoryRegion source(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes);
-      BitMemoryRegion stack_mask = code_info.GetStackMask(i, encoding);
-      for (size_t bit_index = 0; bit_index < stack_mask_bits; ++bit_index) {
-        stack_mask.StoreBit(bit_index, source.LoadBit(bit_index));
+      uint32_t index = inline_info_builder.AddRow(
+          (depth == entry.inlining_depth - 1) ? InlineInfo::kLast : InlineInfo::kMore,
+          method_index_idx,
+          inline_entry.dex_pc,
+          extra_data,
+          dex_register_entries_[inline_entry.dex_register_map_index].offset);
+      if (depth == 0) {
+        inline_info_index = index;
       }
     }
+    stack_map_builder.AddRow(
+        entry.native_pc_code_offset.CompressedValue(),
+        entry.dex_pc,
+        dex_register_entries_[entry.dex_register_map_index].offset,
+        inline_info_index,
+        entry.register_mask_index,
+        entry.stack_mask_index);
   }
+  stack_map_builder.Encode(&out_, &bit_offset);
+  invoke_info_builder.Encode(&out_, &bit_offset);
+  inline_info_builder.Encode(&out_, &bit_offset);
 
   // Write register masks table.
-  for (size_t i = 0; i < encoding.register_mask.num_entries; ++i) {
-    BitMemoryRegion register_mask = code_info.GetRegisterMask(i, encoding);
-    register_mask.StoreBits(0, register_masks_[i], encoding.register_mask.encoding.BitSize());
+  ScopedBitTableBuilder<1> register_mask_builder((adapter));
+  for (size_t i = 0; i < num_register_masks; ++i) {
+    register_mask_builder.AddRow(register_masks_[i]);
   }
+  register_mask_builder.Encode(&out_, &bit_offset);
+
+  // Write stack masks table.
+  EncodeVarintBits(&out_, &bit_offset, stack_mask_bits);
+  out_.resize(BitsToBytesRoundUp(bit_offset + stack_mask_bits * num_stack_masks));
+  BitMemoryRegion stack_mask_region(MemoryRegion(out_.data(), out_.size()),
+                                    bit_offset,
+                                    stack_mask_bits * num_stack_masks);
+  if (stack_mask_bits > 0) {
+    for (size_t i = 0; i < num_stack_masks; ++i) {
+      size_t stack_mask_bytes = BitsToBytesRoundUp(stack_mask_bits);
+      BitMemoryRegion src(MemoryRegion(&stack_masks_[i * stack_mask_bytes], stack_mask_bytes));
+      BitMemoryRegion dst = stack_mask_region.Subregion(i * stack_mask_bits, stack_mask_bits);
+      for (size_t bit_index = 0; bit_index < stack_mask_bits; bit_index += BitSizeOf<uint32_t>()) {
+        size_t num_bits = std::min<size_t>(stack_mask_bits - bit_index, BitSizeOf<uint32_t>());
+        dst.StoreBits(bit_index, src.LoadBits(bit_index, num_bits), num_bits);
+      }
+    }
+  }
+
+  return UnsignedLeb128Size(out_.size()) + out_.size();
+}
+
+void StackMapStream::FillInCodeInfo(MemoryRegion region) {
+  DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+  DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before FillIn";
+  DCHECK_EQ(region.size(), UnsignedLeb128Size(out_.size()) + out_.size());
+
+  uint8_t* ptr = EncodeUnsignedLeb128(region.begin(), out_.size());
+  region.CopyFromVector(ptr - region.begin(), out_);
 
   // Verify all written data in debug build.
   if (kIsDebugBuild) {
@@ -527,7 +425,6 @@
                                          size_t num_dex_registers,
                                          BitVector* live_dex_registers_mask,
                                          size_t dex_register_locations_index) const {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
   for (size_t reg = 0; reg < num_dex_registers; reg++) {
     // Find the location we tried to encode.
     DexRegisterLocation expected = DexRegisterLocation::None();
@@ -542,7 +439,7 @@
     } else {
       DCHECK(dex_register_map.IsDexRegisterLive(reg));
       DexRegisterLocation seen = dex_register_map.GetDexRegisterLocation(
-          reg, num_dex_registers, code_info, encoding);
+          reg, num_dex_registers, code_info);
       DCHECK_EQ(expected.GetKind(), seen.GetKind());
       DCHECK_EQ(expected.GetValue(), seen.GetValue());
     }
@@ -600,8 +497,9 @@
   for (StackMapEntry& stack_map : stack_maps_) {
     size_t index = dedup.size();
     MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);
+    BitMemoryRegion stack_mask_bits(stack_mask);
     for (size_t i = 0; i < entry_size_in_bits; i++) {
-      stack_mask.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i));
+      stack_mask_bits.StoreBit(i, stack_map.sp_mask != nullptr && stack_map.sp_mask->IsBitSet(i));
     }
     stack_map.stack_mask_index = dedup.emplace(stack_mask, index).first->second;
   }
@@ -611,23 +509,23 @@
 // Check that all StackMapStream inputs are correctly encoded by trying to read them back.
 void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  DCHECK_EQ(code_info.GetNumberOfStackMaps(encoding), stack_maps_.size());
+  DCHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
+  DCHECK_EQ(code_info.GetNumberOfStackMaskBits(), static_cast<uint32_t>(stack_mask_max_ + 1));
+  DCHECK_EQ(code_info.GetNumberOfLocationCatalogEntries(), location_catalog_entries_.size());
   size_t invoke_info_index = 0;
   for (size_t s = 0; s < stack_maps_.size(); ++s) {
-    const StackMap stack_map = code_info.GetStackMapAt(s, encoding);
-    const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
+    const StackMap stack_map = code_info.GetStackMapAt(s);
     StackMapEntry entry = stack_maps_[s];
 
     // Check main stack map fields.
-    DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding, instruction_set_),
+    DCHECK_EQ(stack_map.GetNativePcOffset(instruction_set_),
               entry.native_pc_code_offset.Uint32Value(instruction_set_));
-    DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc);
-    DCHECK_EQ(stack_map.GetRegisterMaskIndex(stack_map_encoding), entry.register_mask_index);
-    DCHECK_EQ(code_info.GetRegisterMaskOf(encoding, stack_map), entry.register_mask);
-    const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits(encoding);
-    DCHECK_EQ(stack_map.GetStackMaskIndex(stack_map_encoding), entry.stack_mask_index);
-    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+    DCHECK_EQ(stack_map.GetDexPc(), entry.dex_pc);
+    DCHECK_EQ(stack_map.GetRegisterMaskIndex(), entry.register_mask_index);
+    DCHECK_EQ(code_info.GetRegisterMaskOf(stack_map), entry.register_mask);
+    const size_t num_stack_mask_bits = code_info.GetNumberOfStackMaskBits();
+    DCHECK_EQ(stack_map.GetStackMaskIndex(), entry.stack_mask_index);
+    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     if (entry.sp_mask != nullptr) {
       DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits());
       for (size_t b = 0; b < num_stack_mask_bits; b++) {
@@ -639,38 +537,36 @@
       }
     }
     if (entry.dex_method_index != dex::kDexNoIndex) {
-      InvokeInfo invoke_info = code_info.GetInvokeInfo(encoding, invoke_info_index);
-      DCHECK_EQ(invoke_info.GetNativePcOffset(encoding.invoke_info.encoding, instruction_set_),
+      InvokeInfo invoke_info = code_info.GetInvokeInfo(invoke_info_index);
+      DCHECK_EQ(invoke_info.GetNativePcOffset(instruction_set_),
                 entry.native_pc_code_offset.Uint32Value(instruction_set_));
-      DCHECK_EQ(invoke_info.GetInvokeType(encoding.invoke_info.encoding), entry.invoke_type);
-      DCHECK_EQ(invoke_info.GetMethodIndexIdx(encoding.invoke_info.encoding),
-                entry.dex_method_index_idx);
+      DCHECK_EQ(invoke_info.GetInvokeType(), entry.invoke_type);
+      DCHECK_EQ(invoke_info.GetMethodIndexIdx(), entry.dex_method_index_idx);
       invoke_info_index++;
     }
     CheckDexRegisterMap(code_info,
                         code_info.GetDexRegisterMapOf(
-                            stack_map, encoding, entry.dex_register_entry.num_dex_registers),
+                            stack_map, entry.dex_register_entry.num_dex_registers),
                         entry.dex_register_entry.num_dex_registers,
                         entry.dex_register_entry.live_dex_registers_mask,
                         entry.dex_register_entry.locations_start_index);
 
     // Check inline info.
-    DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
+    DCHECK_EQ(stack_map.HasInlineInfo(), (entry.inlining_depth != 0));
     if (entry.inlining_depth != 0) {
-      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-      DCHECK_EQ(inline_info.GetDepth(encoding.inline_info.encoding), entry.inlining_depth);
+      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+      DCHECK_EQ(inline_info.GetDepth(), entry.inlining_depth);
       for (size_t d = 0; d < entry.inlining_depth; ++d) {
         size_t inline_info_index = entry.inline_infos_start_index + d;
         DCHECK_LT(inline_info_index, inline_infos_.size());
         InlineInfoEntry inline_entry = inline_infos_[inline_info_index];
-        DCHECK_EQ(inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, d),
-                  inline_entry.dex_pc);
-        if (inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, d)) {
-          DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d),
+        DCHECK_EQ(inline_info.GetDexPcAtDepth(d), inline_entry.dex_pc);
+        if (inline_info.EncodesArtMethodAtDepth(d)) {
+          DCHECK_EQ(inline_info.GetArtMethodAtDepth(d),
                     inline_entry.method);
         } else {
           const size_t method_index_idx =
-              inline_info.GetMethodIndexIdxAtDepth(encoding.inline_info.encoding, d);
+              inline_info.GetMethodIndexIdxAtDepth(d);
           DCHECK_EQ(method_index_idx, inline_entry.dex_method_index_idx);
           DCHECK_EQ(method_indices_[method_index_idx], inline_entry.method_index);
         }
@@ -679,7 +575,6 @@
                             code_info.GetDexRegisterMapAtDepth(
                                 d,
                                 inline_info,
-                                encoding,
                                 inline_entry.dex_register_entry.num_dex_registers),
                             inline_entry.dex_register_entry.num_dex_registers,
                             inline_entry.dex_register_entry.live_dex_registers_mask,
@@ -690,7 +585,7 @@
 }
 
 size_t StackMapStream::ComputeMethodInfoSize() const {
-  DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before " << __FUNCTION__;
+  DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before " << __FUNCTION__;
   return MethodInfo::ComputeSize(method_indices_.size());
 }
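
The rewritten PrepareForFillIn above copies each deduplicated stack mask into the output bit region in 32-bit chunks. A minimal standalone sketch of that chunked copy over plain byte buffers follows; the helpers are illustrative stand-ins, not ART's BitMemoryRegion API.

// Standalone sketch (not ART's BitMemoryRegion): copy a run of bits between
// packed byte buffers in 32-bit chunks, the same pattern the stack-mask
// writer above uses via LoadBits/StoreBits.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Read `count` bits (count <= 32) starting at `bit_offset`, LSB first.
static uint32_t LoadBits(const std::vector<uint8_t>& buf, size_t bit_offset, size_t count) {
  uint32_t value = 0;
  for (size_t i = 0; i < count; ++i) {
    size_t bit = bit_offset + i;
    value |= static_cast<uint32_t>((buf[bit / 8] >> (bit % 8)) & 1u) << i;
  }
  return value;
}

// Write `count` bits (count <= 32) of `value` starting at `bit_offset`, LSB first.
static void StoreBits(std::vector<uint8_t>* buf, size_t bit_offset, uint32_t value, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    size_t bit = bit_offset + i;
    uint8_t mask = static_cast<uint8_t>(1u << (bit % 8));
    if ((value >> i) & 1u) {
      (*buf)[bit / 8] |= mask;
    } else {
      (*buf)[bit / 8] &= static_cast<uint8_t>(~mask);
    }
  }
}

// Chunked copy, as in the `bit_index += BitSizeOf<uint32_t>()` loop above.
static void CopyBits(const std::vector<uint8_t>& src, size_t src_bit,
                     std::vector<uint8_t>* dst, size_t dst_bit, size_t num_bits) {
  for (size_t i = 0; i < num_bits; i += 32) {
    size_t chunk = std::min<size_t>(num_bits - i, 32);
    StoreBits(dst, dst_bit + i, LoadBits(src, src_bit + i, chunk), chunk);
  }
}

Copying whole 32-bit chunks rather than single bits keeps the number of load/store operations proportional to the mask size divided by the word size.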
 
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 268e9bd..ea97cf6 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -73,36 +73,32 @@
         method_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
         stack_mask_max_(-1),
-        dex_pc_max_(kNoDexPc),
-        register_mask_max_(0),
-        number_of_stack_maps_with_inline_info_(0),
+        out_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(),
                                            allocator->Adapter(kArenaAllocStackMapStream)),
         current_entry_(),
         current_inline_info_(),
-        code_info_encoding_(allocator->Adapter(kArenaAllocStackMapStream)),
-        needed_size_(0),
         current_dex_register_(0),
         in_inline_frame_(false) {
     stack_maps_.reserve(10);
+    out_.reserve(64);
     location_catalog_entries_.reserve(4);
     dex_register_locations_.reserve(10 * 4);
     inline_infos_.reserve(2);
-    code_info_encoding_.reserve(16);
   }
 
   // A dex register map entry for a single stack map entry, contains what registers are live as
   // well as indices into the location catalog.
   class DexRegisterMapEntry {
    public:
-    static const size_t kOffsetUnassigned = -1;
+    static const uint32_t kOffsetUnassigned = -1;
 
     BitVector* live_dex_registers_mask;
     uint32_t num_dex_registers;
     size_t locations_start_index;
     // Computed fields
     size_t hash = 0;
-    size_t offset = kOffsetUnassigned;
+    uint32_t offset = kOffsetUnassigned;
 
     size_t ComputeSize(size_t catalog_size) const;
   };
@@ -113,7 +109,7 @@
     CodeOffset native_pc_code_offset;
     uint32_t register_mask;
     BitVector* sp_mask;
-    uint8_t inlining_depth;
+    uint32_t inlining_depth;
     size_t inline_infos_start_index;
     uint32_t stack_mask_index;
     uint32_t register_mask_index;
@@ -174,11 +170,6 @@
 
  private:
   size_t ComputeDexRegisterLocationCatalogSize() const;
-  size_t ComputeDexRegisterMapsSize() const;
-  void ComputeInlineInfoEncoding(InlineInfoEncoding* encoding,
-                                 size_t dex_register_maps_bytes);
-
-  CodeOffset ComputeMaxNativePcCodeOffset() const;
 
   // Returns the number of unique stack masks.
   size_t PrepareStackMasks(size_t entry_size_in_bits);
@@ -197,24 +188,11 @@
   bool DexRegisterMapEntryEquals(const DexRegisterMapEntry& a, const DexRegisterMapEntry& b) const;
 
   // Fill in the corresponding entries of a register map.
-  void ComputeInvokeInfoEncoding(CodeInfoEncoding* encoding);
-
-  // Returns the index of an entry with the same dex register map as the current_entry,
-  // or kNoSameDexMapFound if no such entry exists.
-  size_t FindEntryWithTheSameDexMap();
-  bool HaveTheSameDexMaps(const StackMapEntry& a, const StackMapEntry& b) const;
-
-  // Fill in the corresponding entries of a register map.
   void FillInDexRegisterMap(DexRegisterMap dex_register_map,
                             uint32_t num_dex_registers,
                             const BitVector& live_dex_registers_mask,
                             uint32_t start_index_in_dex_register_locations) const;
 
-  // Returns the offset for the dex register inside of the dex register location region. See FillIn.
-  // Only copies the dex register map if the offset for the entry is not already assigned.
-  size_t MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry,
-                                 size_t* current_offset,
-                                 MemoryRegion dex_register_locations_region);
   void CheckDexRegisterMap(const CodeInfo& code_info,
                            const DexRegisterMap& dex_register_map,
                            size_t num_dex_registers,
@@ -244,21 +222,16 @@
   ScopedArenaVector<uint32_t> method_indices_;
   ScopedArenaVector<DexRegisterMapEntry> dex_register_entries_;
   int stack_mask_max_;
-  uint32_t dex_pc_max_;
-  uint32_t register_mask_max_;
-  size_t number_of_stack_maps_with_inline_info_;
+
+  ScopedArenaVector<uint8_t> out_;
 
   ScopedArenaSafeMap<uint32_t, ScopedArenaVector<uint32_t>> dex_map_hash_to_stack_map_indices_;
 
   StackMapEntry current_entry_;
   InlineInfoEntry current_inline_info_;
-  ScopedArenaVector<uint8_t> code_info_encoding_;
-  size_t needed_size_;
   uint32_t current_dex_register_;
   bool in_inline_frame_;
 
-  static constexpr uint32_t kNoSameDexMapFound = -1;
-
   DISALLOW_COPY_AND_ASSIGN(StackMapStream);
 };
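
The header change above drops the precomputed needed_size_ in favor of the flat out_ byte buffer, which FillInCodeInfo (earlier in this patch) prefixes with its size as an unsigned LEB128 value. A minimal standalone sketch of that encoding, assuming only the C++ standard library; the helper names echo ART's leb128 utilities but this is not the ART implementation.

// Standalone sketch (not the ART implementation): unsigned LEB128, the
// variable-length encoding used to prefix the flat CodeInfo byte stream with
// its size. Seven payload bits per byte; the high bit marks "more bytes follow".
#include <cstddef>
#include <cstdint>
#include <vector>

static size_t UnsignedLeb128Size(uint32_t value) {
  size_t size = 1;
  while (value >= 0x80) {
    value >>= 7;
    ++size;
  }
  return size;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(value | 0x80));  // low 7 bits + continuation bit
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));  // final byte, high bit clear
}

// Example: 0x1234 (4660) encodes as {0xB4, 0x24}, and UnsignedLeb128Size(0x1234) == 2.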
 
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index e36c592..9db7588 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -29,14 +29,13 @@
 // to the given bit vector. Returns true if they are same.
 static bool CheckStackMask(
     const CodeInfo& code_info,
-    const CodeInfoEncoding& encoding,
     const StackMap& stack_map,
     const BitVector& bit_vector) {
-  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
-  if (bit_vector.GetNumberOfBits() > encoding.stack_mask.encoding.BitSize()) {
+  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
+  if (bit_vector.GetNumberOfBits() > code_info.GetNumberOfStackMaskBits()) {
     return false;
   }
-  for (size_t i = 0; i < encoding.stack_mask.encoding.BitSize(); ++i) {
+  for (size_t i = 0; i < code_info.GetNumberOfStackMaskBits(); ++i) {
     if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
       return false;
     }
@@ -65,30 +64,29 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(2u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - one 1-byte short Dex register location, and
   // - one 5-byte large Dex register location.
   size_t expected_location_catalog_size = 1u + 5u;
   ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
 
-  StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  StackMap stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask));
+  ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask));
 
-  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+  ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map =
-      code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
   ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -99,16 +97,16 @@
   ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
   ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
-                1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
-                1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
   ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(
-                0, number_of_dex_registers, code_info, encoding));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
+  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
 
   size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
       0, number_of_dex_registers, number_of_catalog_entries);
@@ -125,7 +123,7 @@
   ASSERT_EQ(0, location0.GetValue());
   ASSERT_EQ(-2, location1.GetValue());
 
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 TEST(StackMapTest, Test2) {
@@ -179,12 +177,11 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(4u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(7u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - six 1-byte short Dex register locations, and
   // - one 5-byte large Dex register location.
@@ -193,18 +190,18 @@
 
   // First stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-    ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(0);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+    ASSERT_EQ(0u, stack_map.GetDexPc());
+    ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -215,16 +212,16 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(
-                  0, number_of_dex_registers, code_info, encoding));
-    ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
+    ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -241,29 +238,29 @@
     ASSERT_EQ(0, location0.GetValue());
     ASSERT_EQ(-2, location1.GetValue());
 
-    ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
-    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    ASSERT_EQ(2u, inline_info.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+    ASSERT_TRUE(stack_map.HasInlineInfo());
+    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+    ASSERT_EQ(2u, inline_info.GetDepth());
+    ASSERT_EQ(3u, inline_info.GetDexPcAtDepth(0));
+    ASSERT_EQ(2u, inline_info.GetDexPcAtDepth(1));
+    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(0));
+    ASSERT_TRUE(inline_info.EncodesArtMethodAtDepth(1));
   }
 
   // Second stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(1, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
-    ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(1);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
+    ASSERT_EQ(1u, stack_map.GetDexPc());
+    ASSERT_EQ(128u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0xFFu, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask2));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask2));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -274,17 +271,17 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(18, dex_register_map.GetMachineRegister(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(3, dex_register_map.GetMachineRegister(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -301,23 +298,23 @@
     ASSERT_EQ(18, location0.GetValue());
     ASSERT_EQ(3, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo());
   }
 
   // Third stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(2, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
-    ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(2);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u)));
+    ASSERT_EQ(2u, stack_map.GetDexPc());
+    ASSERT_EQ(192u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0xABu, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask3));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask3));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -328,17 +325,17 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(6, dex_register_map.GetMachineRegister(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(8, dex_register_map.GetMachineRegister(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -355,23 +352,23 @@
     ASSERT_EQ(6, location0.GetValue());
     ASSERT_EQ(8, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo());
   }
 
   // Fourth stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(3, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
-    ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(3);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u)));
+    ASSERT_EQ(3u, stack_map.GetDexPc());
+    ASSERT_EQ(256u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0xCDu, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask4));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask4));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
     ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -382,17 +379,17 @@
     ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationInternalKind(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationInternalKind(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
     ASSERT_EQ(3, dex_register_map.GetMachineRegister(
-                  0, number_of_dex_registers, code_info, encoding));
+                  0, number_of_dex_registers, code_info));
     ASSERT_EQ(1, dex_register_map.GetMachineRegister(
-                  1, number_of_dex_registers, code_info, encoding));
+                  1, number_of_dex_registers, code_info));
 
     size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
         0, number_of_dex_registers, number_of_catalog_entries);
@@ -409,7 +406,7 @@
     ASSERT_EQ(3, location0.GetValue());
     ASSERT_EQ(1, location1.GetValue());
 
-    ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_FALSE(stack_map.HasInlineInfo());
   }
 }
 
@@ -440,12 +437,11 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(2u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - one 1-byte short Dex register locations, and
   // - one 5-byte large Dex register location.
@@ -454,17 +450,17 @@
 
   // First stack map.
   {
-    StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-    ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-    ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+    StackMap stack_map = code_info.GetStackMapAt(0);
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+    ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+    ASSERT_EQ(0u, stack_map.GetDexPc());
+    ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+    ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-    ASSERT_TRUE(CheckStackMask(code_info, encoding, stack_map, sp_mask1));
+    ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1));
 
-    ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
-    DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+    ASSERT_TRUE(stack_map.HasDexRegisterMap());
+    DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers));
     ASSERT_TRUE(map.IsDexRegisterLive(0));
     ASSERT_TRUE(map.IsDexRegisterLive(1));
     ASSERT_EQ(2u, map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -474,15 +470,15 @@
     size_t expected_map_size = 1u + 1u;
     ASSERT_EQ(expected_map_size, map.Size());
 
-    ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info, encoding));
+    ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstant,
-              map.GetLocationKind(1, number_of_dex_registers, code_info, encoding));
+              map.GetLocationKind(1, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kInStack,
-              map.GetLocationInternalKind(0, number_of_dex_registers, code_info, encoding));
+              map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
     ASSERT_EQ(Kind::kConstantLargeValue,
-              map.GetLocationInternalKind(1, number_of_dex_registers, code_info, encoding));
-    ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info, encoding));
-    ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+              map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+    ASSERT_EQ(0, map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
+    ASSERT_EQ(-2, map.GetConstant(1, number_of_dex_registers, code_info));
 
     const size_t index0 =
         map.GetLocationCatalogEntryIndex(0, number_of_dex_registers, number_of_catalog_entries);
@@ -501,10 +497,10 @@
 
     // Test that the inline info dex register map deduplicated to the same offset as the stack map
     // one.
-    ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
-    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    EXPECT_EQ(inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, 0),
-              stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+    ASSERT_TRUE(stack_map.HasInlineInfo());
+    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+    EXPECT_EQ(inline_info.GetDexRegisterMapOffsetAtDepth(0),
+              stack_map.GetDexRegisterMapOffset());
   }
 }
 
@@ -527,27 +523,26 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(1u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   // The Dex register location catalog contains:
   // - one 5-byte large Dex register location.
   size_t expected_location_catalog_size = 5u;
   ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
 
-  StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  StackMap stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
+  ASSERT_TRUE(stack_map.HasDexRegisterMap());
   DexRegisterMap dex_register_map =
-      code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
   ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
   ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
   ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
@@ -558,14 +553,14 @@
   ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
 
   ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(
-                1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationInternalKind(
-                0, number_of_dex_registers, code_info, encoding));
+                0, number_of_dex_registers, code_info));
   ASSERT_EQ(Kind::kConstantLargeValue, dex_register_map.GetLocationInternalKind(
-                1, number_of_dex_registers, code_info, encoding));
-  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
+                1, number_of_dex_registers, code_info));
+  ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
 
   size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
       0, number_of_dex_registers, number_of_catalog_entries);
@@ -582,7 +577,7 @@
   ASSERT_EQ(0, location0.GetValue());
   ASSERT_EQ(-2, location1.GetValue());
 
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 // Generate a stack map whose dex register offset is
@@ -620,11 +615,10 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
   // The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
   // and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
   // has a size of 1 bit.
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(2u, number_of_catalog_entries);
   ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_catalog_entries));
 
@@ -635,21 +629,21 @@
   //   locations (that is, 127 bytes of data).
   // Hence it has a size of 255 bytes, and therefore...
   ASSERT_EQ(128u, DexRegisterMap::GetLiveBitMaskSize(number_of_dex_registers));
-  StackMap stack_map0 = code_info.GetStackMapAt(0, encoding);
+  StackMap stack_map0 = code_info.GetStackMapAt(0);
   DexRegisterMap dex_register_map0 =
-      code_info.GetDexRegisterMapOf(stack_map0, encoding, number_of_dex_registers);
+      code_info.GetDexRegisterMapOf(stack_map0, number_of_dex_registers);
   ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers,
                                                                number_of_catalog_entries));
   ASSERT_EQ(255u, dex_register_map0.Size());
 
-  StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
-  ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map.encoding));
+  StackMap stack_map1 = code_info.GetStackMapAt(1);
+  ASSERT_TRUE(stack_map1.HasDexRegisterMap());
   // ...the offset of the second Dex register map (relative to the
   // beginning of the Dex register maps region) is 255 (i.e.,
   // kNoDexRegisterMapSmallEncoding).
-  ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            StackMap::kNoDexRegisterMap);
-  ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map.encoding), 0xFFu);
+  ASSERT_NE(stack_map1.GetDexRegisterMapOffset(),
+            StackMap::kNoValue);
+  ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(), 0xFFu);
 }
 
 TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -682,33 +676,32 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo ci(region);
-  CodeInfoEncoding encoding = ci.ExtractEncoding();
 
   // Verify first stack map.
-  StackMap sm0 = ci.GetStackMapAt(0, encoding);
-  DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, encoding, number_of_dex_registers);
-  ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
-  ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci, encoding));
+  StackMap sm0 = ci.GetStackMapAt(0);
+  DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers);
+  ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci));
+  ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci));
 
   // Verify second stack map.
-  StackMap sm1 = ci.GetStackMapAt(1, encoding);
-  DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, encoding, number_of_dex_registers);
-  ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
-  ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci, encoding));
+  StackMap sm1 = ci.GetStackMapAt(1);
+  DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers);
+  ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci));
+  ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci));
 
   // Verify third stack map.
-  StackMap sm2 = ci.GetStackMapAt(2, encoding);
-  DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, encoding, number_of_dex_registers);
-  ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci, encoding));
-  ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci, encoding));
+  StackMap sm2 = ci.GetStackMapAt(2);
+  DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers);
+  ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci));
+  ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci));
 
   // Verify dex register map offsets.
-  ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding));
-  ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
-  ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map.encoding),
-            sm2.GetDexRegisterMapOffset(encoding.stack_map.encoding));
+  ASSERT_EQ(sm0.GetDexRegisterMapOffset(),
+            sm1.GetDexRegisterMapOffset());
+  ASSERT_NE(sm0.GetDexRegisterMapOffset(),
+            sm2.GetDexRegisterMapOffset());
+  ASSERT_NE(sm1.GetDexRegisterMapOffset(),
+            sm2.GetDexRegisterMapOffset());
 }
 
 TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -732,33 +725,32 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
-  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   ASSERT_EQ(0u, number_of_catalog_entries);
-  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
+  DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
   ASSERT_EQ(0u, location_catalog.Size());
 
-  StackMap stack_map = code_info.GetStackMapAt(0, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
-  ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  StackMap stack_map = code_info.GetStackMapAt(0);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+  ASSERT_EQ(0u, stack_map.GetDexPc());
+  ASSERT_EQ(64u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasDexRegisterMap());
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 
-  stack_map = code_info.GetStackMapAt(1, encoding);
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
-  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68, encoding)));
-  ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map.encoding));
-  ASSERT_EQ(68u, stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA));
-  ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(encoding, stack_map));
+  stack_map = code_info.GetStackMapAt(1);
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1)));
+  ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(68)));
+  ASSERT_EQ(1u, stack_map.GetDexPc());
+  ASSERT_EQ(68u, stack_map.GetNativePcOffset(kRuntimeISA));
+  ASSERT_EQ(0x4u, code_info.GetRegisterMaskOf(stack_map));
 
-  ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map.encoding));
-  ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map.encoding));
+  ASSERT_FALSE(stack_map.HasDexRegisterMap());
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 TEST(StackMapTest, InlineTest) {
@@ -835,100 +827,99 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo ci(region);
-  CodeInfoEncoding encoding = ci.ExtractEncoding();
 
   {
     // Verify first stack map.
-    StackMap sm0 = ci.GetStackMapAt(0, encoding);
+    StackMap sm0 = ci.GetStackMapAt(0);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, encoding, 2);
-    ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
-    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, 2);
+    ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
 
-    InlineInfo if0 = ci.GetInlineInfoOf(sm0, encoding);
-    ASSERT_EQ(2u, if0.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(2u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(3u, if0.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
+    InlineInfo if0 = ci.GetInlineInfoOf(sm0);
+    ASSERT_EQ(2u, if0.GetDepth());
+    ASSERT_EQ(2u, if0.GetDexPcAtDepth(0));
+    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(0));
+    ASSERT_EQ(3u, if0.GetDexPcAtDepth(1));
+    ASSERT_TRUE(if0.EncodesArtMethodAtDepth(1));
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, encoding, 1);
-    ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
+    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if0, 1);
+    ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, encoding, 3);
-    ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci, encoding));
-    ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci, encoding));
-    ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
+    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if0, 3);
+    ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
+    ASSERT_EQ(20, dex_registers2.GetConstant(1, 3, ci));
+    ASSERT_EQ(15, dex_registers2.GetMachineRegister(2, 3, ci));
   }
 
   {
     // Verify second stack map.
-    StackMap sm1 = ci.GetStackMapAt(1, encoding);
+    StackMap sm1 = ci.GetStackMapAt(1);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, encoding, 2);
-    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
-    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, 2);
+    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
 
-    InlineInfo if1 = ci.GetInlineInfoOf(sm1, encoding);
-    ASSERT_EQ(3u, if1.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(2u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(3u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_EQ(5u, if1.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
-    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
+    InlineInfo if1 = ci.GetInlineInfoOf(sm1);
+    ASSERT_EQ(3u, if1.GetDepth());
+    ASSERT_EQ(2u, if1.GetDexPcAtDepth(0));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(0));
+    ASSERT_EQ(3u, if1.GetDexPcAtDepth(1));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(1));
+    ASSERT_EQ(5u, if1.GetDexPcAtDepth(2));
+    ASSERT_TRUE(if1.EncodesArtMethodAtDepth(2));
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, encoding, 1);
-    ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci, encoding));
+    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, if1, 1);
+    ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0, 1, ci));
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, encoding, 3);
-    ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci, encoding));
-    ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci, encoding));
-    ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci, encoding));
+    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, if1, 3);
+    ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0, 3, ci));
+    ASSERT_EQ(10, dex_registers2.GetConstant(1, 3, ci));
+    ASSERT_EQ(5, dex_registers2.GetMachineRegister(2, 3, ci));
 
-    ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 2));
+    ASSERT_FALSE(if1.HasDexRegisterMapAtDepth(2));
   }
 
   {
     // Verify third stack map.
-    StackMap sm2 = ci.GetStackMapAt(2, encoding);
+    StackMap sm2 = ci.GetStackMapAt(2);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, encoding, 2);
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, 2);
     ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
-    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
-    ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map.encoding));
+    ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci));
+    ASSERT_FALSE(sm2.HasInlineInfo());
   }
 
   {
     // Verify fourth stack map.
-    StackMap sm3 = ci.GetStackMapAt(3, encoding);
+    StackMap sm3 = ci.GetStackMapAt(3);
 
-    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, encoding, 2);
-    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci, encoding));
-    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci, encoding));
+    DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, 2);
+    ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0, 2, ci));
+    ASSERT_EQ(0, dex_registers0.GetConstant(1, 2, ci));
 
-    InlineInfo if2 = ci.GetInlineInfoOf(sm3, encoding);
-    ASSERT_EQ(3u, if2.GetDepth(encoding.inline_info.encoding));
-    ASSERT_EQ(2u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 0));
-    ASSERT_EQ(5u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 1));
-    ASSERT_EQ(10u, if2.GetDexPcAtDepth(encoding.inline_info.encoding, 2));
-    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(encoding.inline_info.encoding, 2));
+    InlineInfo if2 = ci.GetInlineInfoOf(sm3);
+    ASSERT_EQ(3u, if2.GetDepth());
+    ASSERT_EQ(2u, if2.GetDexPcAtDepth(0));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(0));
+    ASSERT_EQ(5u, if2.GetDexPcAtDepth(1));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(1));
+    ASSERT_EQ(10u, if2.GetDexPcAtDepth(2));
+    ASSERT_TRUE(if2.EncodesArtMethodAtDepth(2));
 
-    ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, 0));
+    ASSERT_FALSE(if2.HasDexRegisterMapAtDepth(0));
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, encoding, 1);
-    ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci, encoding));
+    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, if2, 1);
+    ASSERT_EQ(2, dex_registers1.GetMachineRegister(0, 1, ci));
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, encoding, 2);
+    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, if2, 2);
     ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0));
-    ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci, encoding));
+    ASSERT_EQ(3, dex_registers2.GetMachineRegister(1, 2, ci));
   }
 }
 
 TEST(StackMapTest, CodeOffsetTest) {
-  // Test minimum alignments, encoding, and decoding.
+  // Test minimum alignments and decoding.
   CodeOffset offset_thumb2 =
       CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
   CodeOffset offset_arm64 =
@@ -969,13 +960,12 @@
   stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
-  StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4, encoding);
-  StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8, encoding);
-  EXPECT_EQ(stack_map1.GetStackMaskIndex(encoding.stack_map.encoding),
-            stack_map2.GetStackMaskIndex(encoding.stack_map.encoding));
+  StackMap stack_map1 = code_info.GetStackMapForNativePcOffset(4);
+  StackMap stack_map2 = code_info.GetStackMapForNativePcOffset(8);
+  EXPECT_EQ(stack_map1.GetStackMaskIndex(),
+            stack_map2.GetStackMaskIndex());
 }
 
 TEST(StackMapTest, TestInvokeInfo) {
@@ -1007,26 +997,25 @@
 
   CodeInfo code_info(code_info_region);
   MethodInfo method_info(method_info_region.begin());
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  ASSERT_EQ(3u, code_info.GetNumberOfStackMaps(encoding));
+  ASSERT_EQ(3u, code_info.GetNumberOfStackMaps());
 
-  InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4, encoding));
-  InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8, encoding));
-  InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16, encoding));
-  InvokeInfo invoke_invalid(code_info.GetInvokeInfoForNativePcOffset(12, encoding));
+  InvokeInfo invoke1(code_info.GetInvokeInfoForNativePcOffset(4));
+  InvokeInfo invoke2(code_info.GetInvokeInfoForNativePcOffset(8));
+  InvokeInfo invoke3(code_info.GetInvokeInfoForNativePcOffset(16));
+  InvokeInfo invoke_invalid(code_info.GetInvokeInfoForNativePcOffset(12));
   EXPECT_FALSE(invoke_invalid.IsValid());  // No entry for that index.
   EXPECT_TRUE(invoke1.IsValid());
   EXPECT_TRUE(invoke2.IsValid());
   EXPECT_TRUE(invoke3.IsValid());
-  EXPECT_EQ(invoke1.GetInvokeType(encoding.invoke_info.encoding), kSuper);
-  EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding, method_info), 1u);
-  EXPECT_EQ(invoke1.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 4u);
-  EXPECT_EQ(invoke2.GetInvokeType(encoding.invoke_info.encoding), kStatic);
-  EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding, method_info), 3u);
-  EXPECT_EQ(invoke2.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 8u);
-  EXPECT_EQ(invoke3.GetInvokeType(encoding.invoke_info.encoding), kDirect);
-  EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding, method_info), 65535u);
-  EXPECT_EQ(invoke3.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 16u);
+  EXPECT_EQ(invoke1.GetInvokeType(), kSuper);
+  EXPECT_EQ(invoke1.GetMethodIndex(method_info), 1u);
+  EXPECT_EQ(invoke1.GetNativePcOffset(kRuntimeISA), 4u);
+  EXPECT_EQ(invoke2.GetInvokeType(), kStatic);
+  EXPECT_EQ(invoke2.GetMethodIndex(method_info), 3u);
+  EXPECT_EQ(invoke2.GetNativePcOffset(kRuntimeISA), 8u);
+  EXPECT_EQ(invoke3.GetInvokeType(), kDirect);
+  EXPECT_EQ(invoke3.GetMethodIndex(method_info), 65535u);
+  EXPECT_EQ(invoke3.GetNativePcOffset(kRuntimeISA), 16u);
 }
 
 }  // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index ff3e1ba..19c405e 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -153,7 +153,7 @@
   " 21c:	f8d9 8034 	ldr.w	r8, [r9, #52]	; 0x34\n",
   " 220:	4770      	bx	lr\n",
   " 222:	4660      	mov	r0, ip\n",
-  " 224:	f8d9 c2c8 	ldr.w	ip, [r9, #712]	; 0x2c8\n",
+  " 224:	f8d9 c2cc 	ldr.w	ip, [r9, #716]	; 0x2cc\n",
   " 228:	47e0      	blx	ip\n",
   nullptr
 };
diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h
index 73ef028..a782b16 100644
--- a/compiler/utils/x86/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -40,21 +40,6 @@
   kNoByteRegister = -1  // Signals an illegal register.
 };
 
-
-enum XmmRegister {
-  XMM0 = 0,
-  XMM1 = 1,
-  XMM2 = 2,
-  XMM3 = 3,
-  XMM4 = 4,
-  XMM5 = 5,
-  XMM6 = 6,
-  XMM7 = 7,
-  kNumberOfXmmRegisters = 8,
-  kNoXmmRegister = -1  // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
-
 enum X87Register {
   ST0 = 0,
   ST1 = 1,
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 553d131..06f0bcd 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -236,6 +236,8 @@
         if (cls == nullptr) {
           CHECK(soa.Self()->IsExceptionPending());
           soa.Self()->ClearException();
+        } else if (&cls->GetDexFile() != dex_file) {
+          // Ignore classes from different dex files.
         } else if (unverified_classes.find(class_def.class_idx_) == unverified_classes.end()) {
           ASSERT_EQ(cls->GetStatus(), ClassStatus::kVerified);
         } else {
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 18548ba..e74947a 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -202,6 +202,7 @@
         "libart-dexlayout",
         "libart",
         "libdexfile",
+        "libartbase",
         "libbase",
         "liblz4",
         "libsigchain",
@@ -239,6 +240,7 @@
         "libartd-dexlayout",
         "libartd",
         "libdexfiled",
+        "libartbased",
         "libbase",
         "liblz4",
         "libsigchain",
@@ -267,12 +269,13 @@
         "-z muldefs",
     ],
     static_libs: [
-        "libprofile",
         "libart-dex2oat",
         "libart-compiler",
         "libart-dexlayout",
         "libart",
+        "libartbase",
         "libdexfile",
+        "libprofile",
         "libvixl-arm",
         "libvixl-arm64",
     ] + art_static_dependencies,
@@ -309,6 +312,7 @@
         "libartd-compiler",
         "libartd-dexlayout",
         "libartd",
+        "libartbased",
         "libprofiled",
         "libdexfiled",
         "libvixld-arm",
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 9e5cd80..01726af 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -248,7 +248,6 @@
 
     CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
     switch (image_storage_mode_) {
-      case ImageHeader::kStorageModeLZ4HC:  // Fall-through.
       case ImageHeader::kStorageModeLZ4: {
         const size_t compressed_max_size = LZ4_compressBound(image_data_size);
         compressed_data.reset(new char[compressed_max_size]);
@@ -257,22 +256,20 @@
             &compressed_data[0],
             image_data_size,
             compressed_max_size);
-
         break;
       }
-      /*
-       * Disabled due to image_test64 flakyness. Both use same decompression. b/27560444
       case ImageHeader::kStorageModeLZ4HC: {
         // The bound is the same as for the non-HC case.
         const size_t compressed_max_size = LZ4_compressBound(image_data_size);
         compressed_data.reset(new char[compressed_max_size]);
-        data_size = LZ4_compressHC(
+        data_size = LZ4_compress_HC(
             reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
             &compressed_data[0],
-            image_data_size);
+            image_data_size,
+            compressed_max_size,
+            LZ4HC_CLEVEL_MAX);
         break;
       }
-      */
       case ImageHeader::kStorageModeUncompressed: {
         data_size = image_data_size;
         image_data_to_write = image_data;
@@ -1067,18 +1064,12 @@
     }
     if (method == nullptr || i < stored_index) {
       if (last_class != nullptr) {
-        const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
-        Signature signature = dex_file.GetMethodSignature(method_id);
-        if (last_class->IsInterface()) {
-          method = last_class->FindInterfaceMethod(name, signature, target_ptr_size_);
-        } else {
-          method = last_class->FindClassMethod(name, signature, target_ptr_size_);
-        }
-        if (method != nullptr) {
-          // If the referenced class is in the image, the defining class must also be there.
-          DCHECK(KeepClass(method->GetDeclaringClass()));
-          dex_cache->SetResolvedMethod(i, method, target_ptr_size_);
-        }
+        // Try to resolve the method with the class linker, which will insert
+        // it into the dex cache if successful.
+        method = class_linker->FindResolvedMethod(last_class, dex_cache, class_loader, i);
+        // If the referenced class is in the image, the defining class must also be there.
+        DCHECK(method == nullptr || KeepClass(method->GetDeclaringClass()));
+        DCHECK(method == nullptr || dex_cache->GetResolvedMethod(i, target_ptr_size_) == method);
       }
     } else {
       DCHECK_EQ(i, stored_index);
@@ -1112,14 +1103,10 @@
     }
     if (field == nullptr || i < stored_index) {
       if (last_class != nullptr) {
-        const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
-        const char* type = dex_file.StringByTypeIdx(field_id.type_idx_);
-        field = mirror::Class::FindField(Thread::Current(), last_class, name, type);
-        if (field != nullptr) {
-          // If the referenced class is in the image, the defining class must also be there.
-          DCHECK(KeepClass(field->GetDeclaringClass()));
-          dex_cache->SetResolvedField(i, field, target_ptr_size_);
-        }
+        field = class_linker->FindResolvedFieldJLS(last_class, dex_cache, class_loader, i);
+        // If the referenced class is in the image, the defining class must also be there.
+        DCHECK(field == nullptr || KeepClass(field->GetDeclaringClass()));
+        DCHECK(field == nullptr || dex_cache->GetResolvedField(i, target_ptr_size_) == field);
       }
     } else {
       DCHECK_EQ(i, stored_index);
@@ -1208,7 +1195,9 @@
     }
   }
   for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
-    PruneAndPreloadDexCache(dex_cache, class_loader);
+    // Pass the class loader associated with the DexCache. This can either be
+    // the app's `class_loader` or `nullptr` for the boot class loader.
+    PruneAndPreloadDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : class_loader);
   }
 
   // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
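The re-enabled kStorageModeLZ4HC case above replaces the deprecated LZ4_compressHC() with LZ4_compress_HC(), which takes an explicit destination capacity and compression level. A minimal standalone sketch of that call pattern, assuming liblz4's public headers; the buffer handling is illustrative rather than ART's:

#include <vector>

#include "lz4.h"
#include "lz4hc.h"

// Compress `src` with LZ4HC at the maximum level, mirroring the image writer's usage.
static std::vector<char> CompressLz4Hc(const char* src, int src_size) {
  const int max_size = LZ4_compressBound(src_size);  // Worst-case compressed size.
  std::vector<char> out(static_cast<size_t>(max_size));
  const int written = LZ4_compress_HC(src, out.data(), src_size, max_size, LZ4HC_CLEVEL_MAX);
  out.resize(written > 0 ? static_cast<size_t>(written) : 0u);  // 0 means compression failed.
  return out;
}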
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 7bd9f0f..df0641c 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -497,7 +497,7 @@
   EXPECT_EQ(76U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
   EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
-  EXPECT_EQ(163 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+  EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
             sizeof(QuickEntryPoints));
 }
 
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index c63d6c3..2f0962c 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -35,6 +35,7 @@
     host_supported: true,
     shared_libs: [
         "libdexfile",
+        "libartbase",
         "libbase",
     ],
 }
@@ -46,6 +47,7 @@
     device_supported: false,
     static_libs: [
         "libdexfile",
+        "libartbase",
     ] + art_static_dependencies,
     target: {
         darwin: {
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 9536381..f5a13f0 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -45,6 +45,7 @@
 #include "android-base/logging.h"
 #include "android-base/stringprintf.h"
 
+#include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_exception_helpers.h"
@@ -611,19 +612,11 @@
           pClassDef.class_data_off_, pClassDef.class_data_off_);
 
   // Fields and methods.
-  const u1* pEncodedData = pDexFile->GetClassData(pClassDef);
-  if (pEncodedData != nullptr) {
-    ClassDataItemIterator pClassData(*pDexFile, pEncodedData);
-    fprintf(gOutFile, "static_fields_size  : %d\n", pClassData.NumStaticFields());
-    fprintf(gOutFile, "instance_fields_size: %d\n", pClassData.NumInstanceFields());
-    fprintf(gOutFile, "direct_methods_size : %d\n", pClassData.NumDirectMethods());
-    fprintf(gOutFile, "virtual_methods_size: %d\n", pClassData.NumVirtualMethods());
-  } else {
-    fprintf(gOutFile, "static_fields_size  : 0\n");
-    fprintf(gOutFile, "instance_fields_size: 0\n");
-    fprintf(gOutFile, "direct_methods_size : 0\n");
-    fprintf(gOutFile, "virtual_methods_size: 0\n");
-  }
+  ClassAccessor accessor(*pDexFile, pClassDef);
+  fprintf(gOutFile, "static_fields_size  : %d\n", accessor.NumStaticFields());
+  fprintf(gOutFile, "instance_fields_size: %d\n", accessor.NumInstanceFields());
+  fprintf(gOutFile, "direct_methods_size : %d\n", accessor.NumDirectMethods());
+  fprintf(gOutFile, "virtual_methods_size: %d\n", accessor.NumVirtualMethods());
   fprintf(gOutFile, "\n");
 }
 
@@ -786,11 +779,10 @@
 static std::unique_ptr<char[]> indexString(const DexFile* pDexFile,
                                            const Instruction* pDecInsn,
                                            size_t bufSize) {
-  static const u4 kInvalidIndex = std::numeric_limits<u4>::max();
   std::unique_ptr<char[]> buf(new char[bufSize]);
   // Determine index and width of the string.
   u4 index = 0;
-  u4 secondary_index = kInvalidIndex;
+  u2 secondary_index = 0;
   u4 width = 4;
   switch (Instruction::FormatOf(pDecInsn->Opcode())) {
     // SOME NOT SUPPORTED:
@@ -898,7 +890,7 @@
                                              signature.ToString().c_str());
       }
       if (secondary_index < pDexFile->GetHeader().proto_ids_size_) {
-        const DexFile::ProtoId& protoId = pDexFile->GetProtoId(secondary_index);
+        const DexFile::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(secondary_index));
         const Signature signature = pDexFile->GetProtoSignature(protoId);
         proto = signature.ToString();
       }
@@ -916,7 +908,7 @@
       break;
     case Instruction::kIndexProtoRef:
       if (index < pDexFile->GetHeader().proto_ids_size_) {
-        const DexFile::ProtoId& protoId = pDexFile->GetProtoId(index);
+        const DexFile::ProtoId& protoId = pDexFile->GetProtoId(dex::ProtoIndex(index));
         const Signature signature = pDexFile->GetProtoSignature(protoId);
         const std::string& proto = signature.ToString();
         outSize = snprintf(buf.get(), bufSize, "%s // proto@%0*x", proto.c_str(), width, index);
@@ -1705,7 +1697,7 @@
   dex::StringIndex method_name_idx = static_cast<dex::StringIndex>(it.GetJavaValue().i);
   const char* method_name = pDexFile->StringDataByIdx(method_name_idx);
   it.Next();
-  uint32_t method_type_idx = static_cast<uint32_t>(it.GetJavaValue().i);
+  dex::ProtoIndex method_type_idx = static_cast<dex::ProtoIndex>(it.GetJavaValue().i);
   const DexFile::ProtoId& method_type_id = pDexFile->GetProtoId(method_type_idx);
   std::string method_type = pDexFile->GetProtoSignature(method_type_id).ToString();
   it.Next();
@@ -1763,7 +1755,7 @@
         break;
       case EncodedArrayValueIterator::ValueType::kMethodType: {
         type = "MethodType";
-        uint32_t proto_idx = static_cast<uint32_t>(it.GetJavaValue().i);
+        dex::ProtoIndex proto_idx = static_cast<dex::ProtoIndex>(it.GetJavaValue().i);
         const DexFile::ProtoId& proto_id = pDexFile->GetProtoId(proto_idx);
         value = pDexFile->GetProtoSignature(proto_id).ToString();
         break;
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index b009774..147af0c 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -39,8 +39,8 @@
         "dex2oat-pgo-defaults",
     ],
     shared_libs: [
-        "libart",
         "libdexfile",
+        "libartbase",
         "libprofile",
     ],
 
@@ -60,8 +60,8 @@
       "art_debug_defaults",
     ],
     shared_libs: [
-        "libartd",
         "libdexfiled",
+        "libartbased",
         "libprofiled",
     ],
 }
@@ -70,7 +70,6 @@
     name: "dexlayout-defaults",
     defaults: ["art_defaults"],
     host_supported: true,
-    srcs: ["dexlayout_main.cc"],
     shared_libs: [
         "libbase",
     ],
@@ -79,9 +78,11 @@
 art_cc_binary {
     name: "dexlayout",
     defaults: ["dexlayout-defaults"],
+    srcs: ["dexlayout_main.cc"],
     shared_libs: [
+        "libdexfile",
         "libprofile",
-        "libart",
+        "libartbase",
         "libart-dexlayout",
     ],
 }
@@ -92,9 +93,11 @@
         "art_debug_defaults",
         "dexlayout-defaults",
     ],
+    srcs: ["dexlayout_main.cc"],
     shared_libs: [
+        "libdexfiled",
         "libprofiled",
-        "libartd",
+        "libartbased",
         "libartd-dexlayout",
     ],
 }
@@ -117,6 +120,8 @@
     cflags: ["-Wall"],
     shared_libs: [
         "libart",
+        "libdexfile",
+        "libartbase",
         "libart-dexlayout",
         "libbase",
     ],
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 1525d53..b7d9db6 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -332,7 +332,7 @@
 }
 
 void Collections::CreateProtoId(const DexFile& dex_file, uint32_t i) {
-  const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(i);
+  const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
   const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
   TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_);
 
@@ -353,7 +353,7 @@
 void Collections::CreateMethodId(const DexFile& dex_file, uint32_t i) {
   const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
   MethodId* method_id = new MethodId(GetTypeId(disk_method_id.class_idx_.index_),
-                                     GetProtoId(disk_method_id.proto_idx_),
+                                     GetProtoId(disk_method_id.proto_idx_.index_),
                                      GetStringId(disk_method_id.name_idx_.index_));
   AddIndexedItem(method_ids_, method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i);
 }
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index 6cb141f..aa4e6d0 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -27,7 +27,6 @@
 #include "android-base/stringprintf.h"
 
 #include "base/logging.h"  // For InitLogging.
-#include "base/mutex.h"
 #include "base/stringpiece.h"
 
 #include "dexlayout.h"
@@ -37,7 +36,6 @@
 #ifdef ART_TARGET_ANDROID
 #include "pagemap/pagemap.h"
 #endif
-#include "runtime.h"
 #include "vdex_file.h"
 
 namespace art {
@@ -446,6 +444,11 @@
   PrintLetterKey();
 }
 
+NO_RETURN static void Abort(const char* msg) {
+  std::cerr << msg;
+  exit(1);
+}
+
 static int DexDiagMain(int argc, char* argv[]) {
   if (argc < 2) {
     Usage(argv[0]);
@@ -471,8 +474,7 @@
   }
 
   // Art specific set up.
-  Locks::Init();
-  InitLogging(argv, Runtime::Abort);
+  InitLogging(argv, Abort);
   MemMap::Init();
 
 #ifdef ART_TARGET_ANDROID
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 185c142..71e56d19 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -34,7 +34,6 @@
 #include "base/logging.h"  // For InitLogging.
 #include "base/mem_map.h"
 #include "profile/profile_compilation_info.h"
-#include "runtime.h"
 
 namespace art {
 
@@ -66,12 +65,17 @@
   LOG(ERROR) << " -x : compact dex generation level, either 'none' or 'fast'";
 }
 
+NO_RETURN static void Abort(const char* msg) {
+  LOG(ERROR) << msg;
+  exit(1);
+}
+
 /*
  * Main driver of the dexlayout utility.
  */
 int DexlayoutDriver(int argc, char** argv) {
   // Art specific set up.
-  InitLogging(argv, Runtime::Abort);
+  InitLogging(argv, Abort);
   MemMap::Init();
 
   Options options;
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
index 2703732..bd521ac 100644
--- a/dexlist/Android.bp
+++ b/dexlist/Android.bp
@@ -17,7 +17,11 @@
     host_supported: true,
     srcs: ["dexlist.cc"],
     cflags: ["-Wall", "-Werror"],
-    shared_libs: ["libdexfile", "libbase"],
+    shared_libs: [
+        "libdexfile",
+        "libartbase",
+        "libbase"
+    ],
     // TODO: fix b/72216369 and remove the need for this.
     include_dirs: [
         "art/runtime"  // dex utils.
diff --git a/dexoptanalyzer/Android.bp b/dexoptanalyzer/Android.bp
index 33366ad..99a11cd 100644
--- a/dexoptanalyzer/Android.bp
+++ b/dexoptanalyzer/Android.bp
@@ -38,6 +38,7 @@
     defaults: ["dexoptanalyzer-defaults"],
     shared_libs: [
         "libart",
+        "libartbase",
     ],
 }
 
@@ -49,6 +50,7 @@
     ],
     shared_libs: [
         "libartd",
+        "libartbased",
     ],
 }
 
diff --git a/imgdiag/Android.bp b/imgdiag/Android.bp
index 2b89497..972c8f7 100644
--- a/imgdiag/Android.bp
+++ b/imgdiag/Android.bp
@@ -57,6 +57,7 @@
     defaults: ["imgdiag-defaults"],
     shared_libs: [
         "libart",
+        "libartbase",
         "libart-compiler",
     ],
 }
@@ -69,6 +70,7 @@
     ],
     shared_libs: [
         "libartd",
+        "libartbased",
         "libartd-compiler",
     ],
 }
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 50abdd3..adf0ad6 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -19,11 +19,13 @@
     defaults: ["art_defaults"],
     host_supported: true,
     srcs: [
+        "arch/instruction_set.cc",
         "base/allocator.cc",
         "base/arena_allocator.cc",
         "base/arena_bit_vector.cc",
         "base/bit_vector.cc",
         "base/file_magic.cc",
+        "base/file_utils.cc",
         "base/hex_dump.cc",
         "base/logging.cc",
         "base/malloc_arena_pool.cc",
@@ -62,7 +64,6 @@
     generated_sources: ["art_libartbase_operator_srcs"],
     cflags: ["-DBUILDING_LIBART=1"],
     shared_libs: [
-        "libbacktrace",
         "liblog",
         // For ashmem.
         "libcutils",
@@ -81,6 +82,7 @@
     cmd: "$(location generate_operator_out) art/libartbase $(in) > $(out)",
     tools: ["generate_operator_out"],
     srcs: [
+        "arch/instruction_set.h",
         "base/allocator.h",
         "base/callee_save_type.h",
         "base/unix_file/fd_file.h",
@@ -142,12 +144,16 @@
         "art_gtest_defaults",
     ],
     srcs: [
+        "arch/instruction_set_test.cc",
         "base/arena_allocator_test.cc",
         "base/bit_field_test.cc",
+        "base/bit_memory_region_test.cc",
         "base/bit_string_test.cc",
         "base/bit_struct_test.cc",
+        "base/bit_table_test.cc",
         "base/bit_utils_test.cc",
         "base/bit_vector_test.cc",
+        "base/file_utils_test.cc",
         "base/hash_set_test.cc",
         "base/hex_dump_test.cc",
         "base/histogram_test.cc",
diff --git a/runtime/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
similarity index 88%
rename from runtime/arch/instruction_set.cc
rename to libartbase/arch/instruction_set.cc
index b848eb2..a187663 100644
--- a/runtime/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -16,8 +16,6 @@
 
 #include "instruction_set.h"
 
-// Explicitly include our own elf.h to avoid Linux and other dependencies.
-#include "../elf.h"
 #include "android-base/logging.h"
 #include "base/bit_utils.h"
 #include "base/globals.h"
@@ -83,29 +81,6 @@
   return InstructionSet::kNone;
 }
 
-InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
-  switch (e_machine) {
-    case EM_ARM:
-      return InstructionSet::kArm;
-    case EM_AARCH64:
-      return InstructionSet::kArm64;
-    case EM_386:
-      return InstructionSet::kX86;
-    case EM_X86_64:
-      return InstructionSet::kX86_64;
-    case EM_MIPS: {
-      if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
-          (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
-        return InstructionSet::kMips;
-      } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
-        return InstructionSet::kMips64;
-      }
-      break;
-    }
-  }
-  return InstructionSet::kNone;
-}
-
 size_t GetInstructionSetAlignment(InstructionSet isa) {
   switch (isa) {
     case InstructionSet::kArm:
diff --git a/runtime/arch/instruction_set.h b/libartbase/arch/instruction_set.h
similarity index 97%
rename from runtime/arch/instruction_set.h
rename to libartbase/arch/instruction_set.h
index 6434005..06bd53a 100644
--- a/runtime/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
-#define ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
+#ifndef ART_LIBARTBASE_ARCH_INSTRUCTION_SET_H_
+#define ART_LIBARTBASE_ARCH_INSTRUCTION_SET_H_
 
 #include <iosfwd>
 #include <string>
@@ -89,8 +89,6 @@
 // Note: Returns kNone when the string cannot be parsed to a known value.
 InstructionSet GetInstructionSetFromString(const char* instruction_set);
 
-InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags);
-
 // Fatal logging out of line to keep the header clean of logging.h.
 NO_RETURN void InstructionSetAbort(InstructionSet isa);
 
@@ -299,4 +297,4 @@
 
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
+#endif  // ART_LIBARTBASE_ARCH_INSTRUCTION_SET_H_
diff --git a/runtime/arch/instruction_set_test.cc b/libartbase/arch/instruction_set_test.cc
similarity index 100%
rename from runtime/arch/instruction_set_test.cc
rename to libartbase/arch/instruction_set_test.cc
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index f3926bc..3f4d0ba 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -19,6 +19,9 @@
 
 #include "memory_region.h"
 
+#include "bit_utils.h"
+#include "memory_tool.h"
+
 namespace art {
 
 // Bit memory region is a bit offset subregion of a normal memory region. This is useful for
@@ -26,46 +29,126 @@
 class BitMemoryRegion FINAL : public ValueObject {
  public:
   BitMemoryRegion() = default;
-  ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_size) {
-    bit_start_ = bit_offset % kBitsPerByte;
-    const size_t start = bit_offset / kBitsPerByte;
-    const size_t end = (bit_offset + bit_size + kBitsPerByte - 1) / kBitsPerByte;
-    region_ = region.Subregion(start, end - start);
+  ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
+    : data_(reinterpret_cast<uintptr_t*>(AlignDown(region.pointer(), sizeof(uintptr_t)))),
+      bit_start_(8 * (reinterpret_cast<uintptr_t>(region.pointer()) % sizeof(uintptr_t))),
+      bit_size_(region.size_in_bits()) {
+  }
+  ALWAYS_INLINE BitMemoryRegion(MemoryRegion region, size_t bit_offset, size_t bit_length)
+    : BitMemoryRegion(region) {
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    bit_start_ += bit_offset;
+    bit_size_ = bit_length;
   }
 
-  void* pointer() const { return region_.pointer(); }
-  size_t size() const { return region_.size(); }
-  size_t BitOffset() const { return bit_start_; }
+  ALWAYS_INLINE bool IsValid() const { return data_ != nullptr; }
+
   size_t size_in_bits() const {
-    return region_.size_in_bits();
+    return bit_size_;
   }
 
-  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_size) const {
-    return BitMemoryRegion(region_, bit_start_ + bit_offset, bit_size);
+  ALWAYS_INLINE BitMemoryRegion Subregion(size_t bit_offset, size_t bit_length) const {
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    BitMemoryRegion result = *this;
+    result.bit_start_ += bit_offset;
+    result.bit_size_ = bit_length;
+    return result;
   }
 
   // Load a single bit in the region. The bit at offset 0 is the least
   // significant bit in the first byte.
+  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
   ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
-    return region_.LoadBit(bit_offset + bit_start_);
+    DCHECK_LT(bit_offset, bit_size_);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
+    return ((data_[index] >> shift) & 1) != 0;
   }
 
   ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) const {
-    region_.StoreBit(bit_offset + bit_start_, value);
+    DCHECK_LT(bit_offset, bit_size_);
+    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+    data[index] &= ~(1 << shift);  // Clear bit.
+    data[index] |= (value ? 1 : 0) << shift;  // Set bit.
+    DCHECK_EQ(value, LoadBit(bit_offset));
   }
 
-  ALWAYS_INLINE uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
-    return region_.LoadBits(bit_offset + bit_start_, length);
+  // Load `bit_length` bits from `data` starting at the given `bit_offset`.
+  // The least significant bit is stored in the smallest memory offset.
+  ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
+  ALWAYS_INLINE uint32_t LoadBits(size_t bit_offset, size_t bit_length) const {
+    DCHECK(IsAligned<sizeof(uintptr_t)>(data_));
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
+    if (bit_length == 0) {
+      return 0;
+    }
+    uintptr_t mask = std::numeric_limits<uintptr_t>::max() >> (kBitsPerIntPtrT - bit_length);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
+    uintptr_t value = data_[index] >> shift;
+    size_t finished_bits = kBitsPerIntPtrT - shift;
+    if (finished_bits < bit_length) {
+      value |= data_[index + 1] << finished_bits;
+    }
+    return value & mask;
   }
 
-  // Store at a bit offset from inside the bit memory region.
-  ALWAYS_INLINE void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
-    region_.StoreBits(bit_offset + bit_start_, value, length);
+  // Load bits starting at the given `bit_offset`, and advance the `bit_offset`.
+  ALWAYS_INLINE uint32_t LoadBitsAndAdvance(size_t* bit_offset, size_t bit_length) const {
+    uint32_t result = LoadBits(*bit_offset, bit_length);
+    *bit_offset += bit_length;
+    return result;
+  }
+
+  // Store `bit_length` bits in `data` starting at the given `bit_offset`.
+  // The least significant bit is stored in the smallest memory offset.
+  ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
+    DCHECK_LE(bit_offset, bit_size_);
+    DCHECK_LE(bit_length, bit_size_ - bit_offset);
+    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
+    DCHECK_LE(value, MaxInt<uint32_t>(bit_length));
+    if (bit_length == 0) {
+      return;
+    }
+    // Write data byte by byte to avoid races with other threads
+    // on bytes that do not overlap with this region.
+    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+    uint32_t mask = std::numeric_limits<uint32_t>::max() >> (BitSizeOf<uint32_t>() - bit_length);
+    size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+    size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+    data[index] &= ~(mask << shift);  // Clear bits.
+    data[index] |= (value << shift);  // Set bits.
+    size_t finished_bits = kBitsPerByte - shift;
+    for (int i = 1; finished_bits < bit_length; i++, finished_bits += kBitsPerByte) {
+      data[index + i] &= ~(mask >> finished_bits);  // Clear bits.
+      data[index + i] |= (value >> finished_bits);  // Set bits.
+    }
+    DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
+  }
+
+  // Store bits starting at the given `bit_offset`, and advance the `bit_offset`.
+  ALWAYS_INLINE void StoreBitsAndAdvance(size_t* bit_offset, uint32_t value, size_t bit_length) {
+    StoreBits(*bit_offset, value, bit_length);
+    *bit_offset += bit_length;
+  }
+
+  ALWAYS_INLINE bool Equals(const BitMemoryRegion& other) const {
+    return data_ == other.data_ &&
+           bit_start_ == other.bit_start_ &&
+           bit_size_ == other.bit_size_;
   }
 
  private:
-  MemoryRegion region_;
+  // The data pointer must be naturally aligned. This makes loading code faster.
+  uintptr_t* data_ = nullptr;
   size_t bit_start_ = 0;
+  size_t bit_size_ = 0;
 };
 
 }  // namespace art
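The rewritten LoadBits()/StoreBits() above read whole uintptr_t words but keep the same LSB-first packing: bit 0 of the region is the least significant bit of the first byte. A byte-wise sketch of the equivalent load, for readability only (assumes bit_length <= 32; not ART code):

#include <cstddef>
#include <cstdint>

// Read `bit_length` bits starting at `bit_offset`, least significant bit first.
static uint32_t LoadBitsBytewise(const uint8_t* data, size_t bit_offset, size_t bit_length) {
  uint32_t value = 0;
  for (size_t i = 0; i < bit_length; ++i) {
    const size_t bit = bit_offset + i;
    value |= ((data[bit / 8] >> (bit % 8)) & 1u) << i;  // Bit i of the result comes from bit `bit` of memory.
  }
  return value;
}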
diff --git a/libartbase/base/bit_memory_region_test.cc b/libartbase/base/bit_memory_region_test.cc
new file mode 100644
index 0000000..b754698
--- /dev/null
+++ b/libartbase/base/bit_memory_region_test.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_memory_region.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+static void CheckBits(uint8_t* data,
+                      size_t size,
+                      uint32_t init,
+                      size_t offset,
+                      size_t length,
+                      uint32_t value) {
+  for (size_t i = 0; i < size * kBitsPerByte; i++) {
+    uint8_t expected = (offset <= i && i < offset + length) ? value >> (i - offset) : init;
+    uint8_t actual = data[i / kBitsPerByte] >> (i % kBitsPerByte);
+    EXPECT_EQ(expected & 1, actual & 1);
+  }
+}
+
+TEST(BitMemoryRegion, TestBit) {
+  uint8_t data[sizeof(uint32_t) * 2];
+  for (size_t bit_offset = 0; bit_offset < 2 * sizeof(uint32_t) * kBitsPerByte; ++bit_offset) {
+    for (uint32_t initial_value = 0; initial_value <= 1; initial_value++) {
+      for (uint32_t value = 0; value <= 1; value++) {
+        // Check Store and Load with bit_offset set on the region.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr1(MemoryRegion(&data, sizeof(data)), bit_offset, 1);
+        bmr1.StoreBit(0, value);
+        EXPECT_EQ(bmr1.LoadBit(0), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, 1, value);
+        // Check Store and Load with bit_offset set on the methods.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr2(MemoryRegion(&data, sizeof(data)));
+        bmr2.StoreBit(bit_offset, value);
+        EXPECT_EQ(bmr2.LoadBit(bit_offset), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, 1, value);
+      }
+    }
+  }
+}
+
+TEST(BitMemoryRegion, TestBits) {
+  uint8_t data[sizeof(uint32_t) * 4];
+  for (size_t bit_offset = 0; bit_offset < 3 * sizeof(uint32_t) * kBitsPerByte; ++bit_offset) {
+    uint32_t mask = 0;
+    for (size_t bit_length = 0; bit_length < sizeof(uint32_t) * kBitsPerByte; ++bit_length) {
+      const uint32_t value = 0xDEADBEEF & mask;
+      for (uint32_t initial_value = 0; initial_value <= 1; initial_value++) {
+        // Check Store and Load with bit_offset set on the region.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr1(MemoryRegion(&data, sizeof(data)), bit_offset, bit_length);
+        bmr1.StoreBits(0, value, bit_length);
+        EXPECT_EQ(bmr1.LoadBits(0, bit_length), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, bit_length, value);
+        // Check Store and Load with bit_offset set on the methods.
+        std::fill_n(data, sizeof(data), initial_value * 0xFF);
+        BitMemoryRegion bmr2(MemoryRegion(&data, sizeof(data)));
+        bmr2.StoreBits(bit_offset, value, bit_length);
+        EXPECT_EQ(bmr2.LoadBits(bit_offset, bit_length), value);
+        CheckBits(data, sizeof(data), initial_value, bit_offset, bit_length, value);
+      }
+      mask = (mask << 1) | 1;
+    }
+  }
+}
+
+}  // namespace art
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
new file mode 100644
index 0000000..24bdd13
--- /dev/null
+++ b/libartbase/base/bit_table.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_BIT_TABLE_H_
+#define ART_LIBARTBASE_BASE_BIT_TABLE_H_
+
+#include <vector>
+
+#include "base/bit_memory_region.h"
+#include "base/bit_utils.h"
+#include "base/memory_region.h"
+
+namespace art {
+
+constexpr uint32_t kVarintHeaderBits = 4;
+constexpr uint32_t kVarintSmallValue = 11;  // Maximum value which is stored as-is.
+
+// Load a variable-length bit-packed integer from `region` starting at `bit_offset`.
+// The first four bits determine the variable length of the encoded integer:
+//   Values 0..11 represent the result as-is, with no further following bits.
+//   Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
+ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryRegion region, size_t* bit_offset) {
+  uint32_t x = region.LoadBitsAndAdvance(bit_offset, kVarintHeaderBits);
+  if (x > kVarintSmallValue) {
+    x = region.LoadBitsAndAdvance(bit_offset, (x - kVarintSmallValue) * kBitsPerByte);
+  }
+  return x;
+}
+
+// Store a variable-length bit-packed integer into `out` starting at `bit_offset`.
+template<typename Vector>
+ALWAYS_INLINE static inline void EncodeVarintBits(Vector* out, size_t* bit_offset, uint32_t value) {
+  if (value <= kVarintSmallValue) {
+    out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits));
+    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
+    region.StoreBitsAndAdvance(bit_offset, value, kVarintHeaderBits);
+  } else {
+    uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
+    out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits + num_bits));
+    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
+    uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
+    region.StoreBitsAndAdvance(bit_offset, header, kVarintHeaderBits);
+    region.StoreBitsAndAdvance(bit_offset, value, num_bits);
+  }
+}
+
+template<uint32_t kNumColumns>
+class BitTable {
+ public:
+  class Accessor {
+   public:
+    static constexpr uint32_t kNoValue = std::numeric_limits<uint32_t>::max();
+
+    Accessor(const BitTable* table, uint32_t row) : table_(table), row_(row) {}
+
+    ALWAYS_INLINE uint32_t Row() const { return row_; }
+
+    ALWAYS_INLINE bool IsValid() const { return table_ != nullptr && row_ < table_->NumRows(); }
+
+    template<uint32_t Column>
+    ALWAYS_INLINE uint32_t Get() const {
+      static_assert(Column < kNumColumns, "Column out of bounds");
+      return table_->Get(row_, Column);
+    }
+
+    ALWAYS_INLINE bool Equals(const Accessor& other) {
+      return this->table_ == other.table_ && this->row_ == other.row_;
+    }
+
+    Accessor& operator++() {
+      row_++;
+      return *this;
+    }
+
+   protected:
+    const BitTable* table_;
+    uint32_t row_;
+  };
+
+  static constexpr uint32_t kValueBias = -1;
+
+  BitTable() {}
+  BitTable(void* data, size_t size, size_t* bit_offset = nullptr) {
+    Decode(BitMemoryRegion(MemoryRegion(data, size)), bit_offset);
+  }
+
+  ALWAYS_INLINE void Decode(BitMemoryRegion region, size_t* bit_offset) {
+    // Decode row count and column sizes from the table header.
+    num_rows_ = DecodeVarintBits(region, bit_offset);
+    if (num_rows_ != 0) {
+      column_offset_[0] = 0;
+      for (uint32_t i = 0; i < kNumColumns; i++) {
+        size_t column_end = column_offset_[i] + DecodeVarintBits(region, bit_offset);
+        column_offset_[i + 1] = column_end;
+        DCHECK_EQ(column_offset_[i + 1], column_end) << "Overflow";
+      }
+    }
+
+    // Record the region which contains the table data and skip past it.
+    table_data_ = region.Subregion(*bit_offset, num_rows_ * NumRowBits());
+    *bit_offset += table_data_.size_in_bits();
+  }
+
+  ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
+    DCHECK_LT(row, num_rows_);
+    DCHECK_LT(column, kNumColumns);
+    size_t offset = row * NumRowBits() + column_offset_[column];
+    return table_data_.LoadBits(offset, NumColumnBits(column)) + kValueBias;
+  }
+
+  size_t NumRows() const { return num_rows_; }
+
+  uint32_t NumRowBits() const { return column_offset_[kNumColumns]; }
+
+  constexpr size_t NumColumns() const { return kNumColumns; }
+
+  uint32_t NumColumnBits(uint32_t column) const {
+    return column_offset_[column + 1] - column_offset_[column];
+  }
+
+  size_t DataBitSize() const { return num_rows_ * column_offset_[kNumColumns]; }
+
+ protected:
+  BitMemoryRegion table_data_;
+  size_t num_rows_ = 0;
+
+  uint16_t column_offset_[kNumColumns + 1] = {};
+};
+
+template<uint32_t kNumColumns>
+constexpr uint32_t BitTable<kNumColumns>::Accessor::kNoValue;
+
+template<uint32_t kNumColumns>
+constexpr uint32_t BitTable<kNumColumns>::kValueBias;
+
+template<uint32_t kNumColumns, typename Alloc = std::allocator<uint32_t>>
+class BitTableBuilder {
+ public:
+  explicit BitTableBuilder(Alloc alloc = Alloc()) : buffer_(alloc) {}
+
+  template<typename ... T>
+  uint32_t AddRow(T ... values) {
+    constexpr size_t count = sizeof...(values);
+    static_assert(count == kNumColumns, "Incorrect argument count");
+    uint32_t data[count] = { values... };
+    buffer_.insert(buffer_.end(), data, data + count);
+    return num_rows_++;
+  }
+
+  ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column) const {
+    return buffer_[row * kNumColumns + column];
+  }
+
+  template<typename Vector>
+  void Encode(Vector* out, size_t* bit_offset) {
+    constexpr uint32_t bias = BitTable<kNumColumns>::kValueBias;
+    size_t initial_bit_offset = *bit_offset;
+    // Measure data size.
+    uint32_t max_column_value[kNumColumns] = {};
+    for (uint32_t r = 0; r < num_rows_; r++) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        max_column_value[c] |= Get(r, c) - bias;
+      }
+    }
+    // Write table header.
+    uint32_t table_data_bits = 0;
+    uint32_t column_bits[kNumColumns] = {};
+    EncodeVarintBits(out, bit_offset, num_rows_);
+    if (num_rows_ != 0) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        column_bits[c] = MinimumBitsToStore(max_column_value[c]);
+        EncodeVarintBits(out, bit_offset, column_bits[c]);
+        table_data_bits += num_rows_ * column_bits[c];
+      }
+    }
+    // Write table data.
+    out->resize(BitsToBytesRoundUp(*bit_offset + table_data_bits));
+    BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
+    for (uint32_t r = 0; r < num_rows_; r++) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        region.StoreBitsAndAdvance(bit_offset, Get(r, c) - bias, column_bits[c]);
+      }
+    }
+    // Verify the written data.
+    if (kIsDebugBuild) {
+      BitTable<kNumColumns> table;
+      table.Decode(region, &initial_bit_offset);
+      DCHECK_EQ(this->num_rows_, table.NumRows());
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        DCHECK_EQ(column_bits[c], table.NumColumnBits(c));
+      }
+      for (uint32_t r = 0; r < num_rows_; r++) {
+        for (uint32_t c = 0; c < kNumColumns; c++) {
+          DCHECK_EQ(this->Get(r, c), table.Get(r, c)) << " (" << r << ", " << c << ")";
+        }
+      }
+    }
+  }
+
+ protected:
+  std::vector<uint32_t, Alloc> buffer_;
+  uint32_t num_rows_ = 0;
+};
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_BIT_TABLE_H_
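A byte-aligned sketch of the varint scheme documented at the top of bit_table.h, assuming the same header semantics (values 0..11 stored inline, 12..15 selecting a 1..4 byte payload). The real code packs the 4-bit header and the payload at bit granularity through BitMemoryRegion; these helper names are illustrative only:

#include <cstddef>
#include <cstdint>
#include <vector>

// Encode `value` as a one-byte header, optionally followed by 1..4 payload bytes.
static void EncodeVarintSketch(std::vector<uint8_t>* out, uint32_t value) {
  if (value <= 11) {
    out->push_back(static_cast<uint8_t>(value));  // Small values fit in the header.
    return;
  }
  uint32_t num_bytes = 1;
  while (num_bytes < 4 && value >= (1u << (8 * num_bytes))) {
    ++num_bytes;
  }
  out->push_back(static_cast<uint8_t>(11 + num_bytes));  // Headers 12..15.
  for (uint32_t i = 0; i < num_bytes; ++i) {
    out->push_back(static_cast<uint8_t>(value >> (8 * i)));  // Little-endian payload.
  }
}

static uint32_t DecodeVarintSketch(const uint8_t* data, size_t* pos) {
  const uint8_t header = data[(*pos)++];
  if (header <= 11) {
    return header;
  }
  const uint32_t num_bytes = header - 11;
  uint32_t value = 0;
  for (uint32_t i = 0; i < num_bytes; ++i) {
    value |= static_cast<uint32_t>(data[(*pos)++]) << (8 * i);
  }
  return value;
}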
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
new file mode 100644
index 0000000..25bfcf0
--- /dev/null
+++ b/libartbase/base/bit_table_test.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bit_table.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(BitTableTest, TestVarint) {
+  for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
+    uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, ~1u, ~0u };
+    for (uint32_t value : values) {
+      std::vector<uint8_t> buffer;
+      size_t encode_bit_offset = start_bit_offset;
+      EncodeVarintBits(&buffer, &encode_bit_offset, value);
+
+      size_t decode_bit_offset = start_bit_offset;
+      BitMemoryRegion region(MemoryRegion(buffer.data(), buffer.size()));
+      uint32_t result = DecodeVarintBits(region, &decode_bit_offset);
+      EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+      EXPECT_EQ(value, result);
+    }
+  }
+}
+
+TEST(BitTableTest, TestEmptyTable) {
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<1> builder;
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  EXPECT_EQ(0u, table.NumRows());
+}
+
+TEST(BitTableTest, TestSingleColumnTable) {
+  constexpr uint32_t kNoValue = -1;
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<1> builder;
+  builder.AddRow(42u);
+  builder.AddRow(kNoValue);
+  builder.AddRow(1000u);
+  builder.AddRow(kNoValue);
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  EXPECT_EQ(4u, table.NumRows());
+  EXPECT_EQ(42u, table.Get(0));
+  EXPECT_EQ(kNoValue, table.Get(1));
+  EXPECT_EQ(1000u, table.Get(2));
+  EXPECT_EQ(kNoValue, table.Get(3));
+  EXPECT_EQ(10u, table.NumColumnBits(0));
+}
+
+TEST(BitTableTest, TestUnalignedTable) {
+  for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
+    std::vector<uint8_t> buffer;
+    size_t encode_bit_offset = start_bit_offset;
+    BitTableBuilder<1> builder;
+    builder.AddRow(42u);
+    builder.Encode(&buffer, &encode_bit_offset);
+
+    size_t decode_bit_offset = start_bit_offset;
+    BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
+    EXPECT_EQ(encode_bit_offset, decode_bit_offset) << " start_bit_offset=" << start_bit_offset;
+    EXPECT_EQ(1u, table.NumRows());
+    EXPECT_EQ(42u, table.Get(0));
+  }
+}
+
+TEST(BitTableTest, TestBigTable) {
+  constexpr uint32_t kNoValue = -1;
+  std::vector<uint8_t> buffer;
+  size_t encode_bit_offset = 0;
+  BitTableBuilder<4> builder;
+  builder.AddRow(42u, kNoValue, 0u, static_cast<uint32_t>(-2));
+  builder.AddRow(62u, kNoValue, 63u, static_cast<uint32_t>(-3));
+  builder.Encode(&buffer, &encode_bit_offset);
+
+  size_t decode_bit_offset = 0;
+  BitTable<4> table(buffer.data(), buffer.size(), &decode_bit_offset);
+  EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+  EXPECT_EQ(2u, table.NumRows());
+  EXPECT_EQ(42u, table.Get(0, 0));
+  EXPECT_EQ(kNoValue, table.Get(0, 1));
+  EXPECT_EQ(0u, table.Get(0, 2));
+  EXPECT_EQ(static_cast<uint32_t>(-2), table.Get(0, 3));
+  EXPECT_EQ(62u, table.Get(1, 0));
+  EXPECT_EQ(kNoValue, table.Get(1, 1));
+  EXPECT_EQ(63u, table.Get(1, 2));
+  EXPECT_EQ(static_cast<uint32_t>(-3), table.Get(1, 3));
+  EXPECT_EQ(6u, table.NumColumnBits(0));
+  EXPECT_EQ(0u, table.NumColumnBits(1));
+  EXPECT_EQ(7u, table.NumColumnBits(2));
+  EXPECT_EQ(32u, table.NumColumnBits(3));
+}
+
+}  // namespace art
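The column widths asserted in these tests follow from the kValueBias arithmetic in bit_table.h: each cell is stored as value - bias, i.e. value + 1 modulo 2^32, and a column's width is the minimum bit count of its largest stored cell. A worked check under that assumption:

#include <cstdint>

// Stand-in for MinimumBitsToStore() from bit_utils.h, for this worked example only.
static constexpr uint32_t MinimumBits(uint32_t v) {
  return v == 0 ? 0 : 1 + MinimumBits(v >> 1);
}

// TestSingleColumnTable stores {42, kNoValue, 1000, kNoValue}; with the -1 bias these
// become {43, 0, 1001, 0}, and 1001 needs 10 bits, matching EXPECT_EQ(10u, NumColumnBits(0)).
static_assert(MinimumBits(1000u + 1u) == 10, "largest stored cell drives the column width");
// kNoValue (0xFFFFFFFF) wraps to 0 after the bias, so it never widens a column by itself.
static_assert(static_cast<uint32_t>(0xFFFFFFFFu + 1u) == 0u, "kNoValue encodes as zero");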
diff --git a/libartbase/base/bit_utils.h b/libartbase/base/bit_utils.h
index 04f0e85..58cc78c 100644
--- a/libartbase/base/bit_utils.h
+++ b/libartbase/base/bit_utils.h
@@ -22,6 +22,7 @@
 
 #include <android-base/logging.h>
 
+#include "globals.h"
 #include "stl_util_identity.h"
 
 namespace art {
@@ -499,6 +500,10 @@
   return bitfield_unsigned;
 }
 
+inline static constexpr size_t BitsToBytesRoundUp(size_t num_bits) {
+  return RoundUp(num_bits, kBitsPerByte) / kBitsPerByte;
+}
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_BIT_UTILS_H_
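Since the new BitsToBytesRoundUp() helper is constexpr, its rounding can be pinned down with static_asserts; a small sketch assuming bit_utils.h is on the include path and kBitsPerByte == 8:

#include "base/bit_utils.h"

static_assert(art::BitsToBytesRoundUp(0) == 0u, "zero bits need zero bytes");
static_assert(art::BitsToBytesRoundUp(1) == 1u, "a single bit still occupies one byte");
static_assert(art::BitsToBytesRoundUp(13) == 2u, "13 bits round up to two bytes");
static_assert(art::BitsToBytesRoundUp(16) == 2u, "exact multiples of eight do not round up");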
diff --git a/runtime/base/file_utils.cc b/libartbase/base/file_utils.cc
similarity index 92%
rename from runtime/base/file_utils.cc
rename to libartbase/base/file_utils.cc
index 7921985..56934ac 100644
--- a/runtime/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -47,7 +47,6 @@
 #include "base/os.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
-#include "dex/dex_file_loader.h"
 
 #if defined(__APPLE__)
 #include <crt_externs.h>
@@ -64,6 +63,8 @@
 using android::base::StringAppendF;
 using android::base::StringPrintf;
 
+static constexpr const char* kClassesDex = "classes.dex";
+
 bool ReadFileToString(const std::string& file_name, std::string* result) {
   File file(file_name, O_RDONLY, false);
   if (!file.IsOpened()) {
@@ -224,7 +225,7 @@
       !android::base::EndsWith(location, ".art") &&
       !android::base::EndsWith(location, ".oat")) {
     cache_file += "/";
-    cache_file += DexFileLoader::kClassesDex;
+    cache_file += kClassesDex;
   }
   std::replace(cache_file.begin(), cache_file.end(), '/', '@');
   *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
@@ -261,12 +262,13 @@
   }
 }
 
-bool LocationIsOnSystem(const char* location) {
-  UniqueCPtr<const char[]> path(realpath(location, nullptr));
-  return path != nullptr && android::base::StartsWith(path.get(), GetAndroidRoot().c_str());
+bool LocationIsOnSystem(const char* path) {
+  UniqueCPtr<const char[]> full_path(realpath(path, nullptr));
+  return full_path != nullptr &&
+      android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
 }
 
-bool LocationIsOnSystemFramework(const char* location) {
+bool LocationIsOnSystemFramework(const char* full_path) {
   std::string error_msg;
   std::string root_path = GetAndroidRootSafe(&error_msg);
   if (root_path.empty()) {
@@ -275,12 +277,7 @@
     return false;
   }
   std::string framework_path = root_path + "/framework/";
-
-  // Warning: Bionic implementation of realpath() allocates > 12KB on the stack.
-  // Do not run this code on a small stack, e.g. in signal handler.
-  UniqueCPtr<const char[]> path(realpath(location, nullptr));
-  return path != nullptr &&
-         android::base::StartsWith(path.get(), framework_path.c_str());
+  return android::base::StartsWith(full_path, framework_path);
 }
 
 }  // namespace art
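With this change LocationIsOnSystemFramework() expects an already-canonicalized path, so the realpath() call (and its large stack cost on Bionic) moves to the caller. A hypothetical caller-side sketch; IsFrameworkDex and its error handling are illustrative, not part of this patch:

#include <stdlib.h>

#include "base/file_utils.h"

// Canonicalize the location once, then ask about the resolved path.
static bool IsFrameworkDex(const char* location) {
  char* full_path = realpath(location, nullptr);  // malloc()ed canonical path, or nullptr.
  const bool result = full_path != nullptr && art::LocationIsOnSystemFramework(full_path);
  free(full_path);  // free(nullptr) is a no-op.
  return result;
}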
diff --git a/runtime/base/file_utils.h b/libartbase/base/file_utils.h
similarity index 95%
rename from runtime/base/file_utils.h
rename to libartbase/base/file_utils.h
index d4f6c57..063393b 100644
--- a/runtime/base/file_utils.h
+++ b/libartbase/base/file_utils.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_BASE_FILE_UTILS_H_
-#define ART_RUNTIME_BASE_FILE_UTILS_H_
+#ifndef ART_LIBARTBASE_BASE_FILE_UTILS_H_
+#define ART_LIBARTBASE_BASE_FILE_UTILS_H_
 
 #include <stdlib.h>
 
@@ -46,6 +46,7 @@
 // Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
 // could not be found.
 std::string GetDalvikCache(const char* subdir);
+
 // Return true if we found the dalvik cache and stored it in the dalvik_cache argument.
 // have_android_data will be set to true if we have an ANDROID_DATA that exists, and
 // dalvik_cache_exists will be true if a dalvik-cache directory is present.
@@ -79,4 +80,4 @@
 
 }  // namespace art
 
-#endif  // ART_RUNTIME_BASE_FILE_UTILS_H_
+#endif  // ART_LIBARTBASE_BASE_FILE_UTILS_H_
diff --git a/runtime/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
similarity index 100%
rename from runtime/base/file_utils_test.cc
rename to libartbase/base/file_utils_test.cc
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 69d1a64..39e0c50 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -38,6 +38,9 @@
 // compile-time constant so the compiler can generate better code.
 static constexpr int kPageSize = 4096;
 
+// Size of Dex virtual registers.
+static constexpr size_t kVRegSize = 4;
+
 // Returns whether the given memory offset can be used for generating
 // an implicit null check.
 static inline bool CanDoImplicitNullCheckOn(uintptr_t offset) {
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 9a1392c..c455fed 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -29,7 +29,6 @@
 
 #include "android-base/stringprintf.h"
 #include "android-base/unique_fd.h"
-#include "backtrace/BacktraceMap.h"
 #include "cutils/ashmem.h"
 
 #include "allocator.h"
@@ -57,21 +56,6 @@
 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
 
-static std::ostream& operator<<(
-    std::ostream& os,
-    std::pair<BacktraceMap::iterator, BacktraceMap::iterator> iters) {
-  for (BacktraceMap::iterator it = iters.first; it != iters.second; ++it) {
-    const backtrace_map_t* entry = *it;
-    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
-                       static_cast<uint32_t>(entry->start),
-                       static_cast<uint32_t>(entry->end),
-                       (entry->flags & PROT_READ) ? 'r' : '-',
-                       (entry->flags & PROT_WRITE) ? 'w' : '-',
-                       (entry->flags & PROT_EXEC) ? 'x' : '-', entry->name.c_str());
-  }
-  return os;
-}
-
 std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
   os << "MemMap:" << std::endl;
   for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
@@ -149,8 +133,6 @@
   uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
   uintptr_t end = begin + size;
 
-  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
-  // further.
   {
     std::lock_guard<std::mutex> mu(*mem_maps_lock_);
     for (auto& pair : *gMaps) {
@@ -162,22 +144,6 @@
     }
   }
 
-  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
-  if (map == nullptr) {
-    if (error_msg != nullptr) {
-      *error_msg = StringPrintf("Failed to build process map");
-    }
-    return false;
-  }
-
-  ScopedBacktraceMapIteratorLock lock(map.get());
-  for (BacktraceMap::iterator it = map->begin(); it != map->end(); ++it) {
-    const backtrace_map_t* entry = *it;
-    if ((begin >= entry->start && begin < entry->end)     // start of new within old
-        && (end > entry->start && end <= entry->end)) {   // end of new within old
-      return true;
-    }
-  }
   if (error_msg != nullptr) {
     PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
     *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
@@ -186,36 +152,6 @@
   return false;
 }
 
-// Return true if the address range does not conflict with any /proc/self/maps entry.
-static bool CheckNonOverlapping(uintptr_t begin,
-                                uintptr_t end,
-                                std::string* error_msg) {
-  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
-  if (map.get() == nullptr) {
-    *error_msg = StringPrintf("Failed to build process map");
-    return false;
-  }
-  ScopedBacktraceMapIteratorLock lock(map.get());
-  for (BacktraceMap::iterator it = map->begin(); it != map->end(); ++it) {
-    const backtrace_map_t* entry = *it;
-    if ((begin >= entry->start && begin < entry->end)      // start of new within old
-        || (end > entry->start && end < entry->end)        // end of new within old
-        || (begin <= entry->start && end > entry->end)) {  // start/end of new includes all of old
-      std::ostringstream map_info;
-      map_info << std::make_pair(it, map->end());
-      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
-                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
-                                begin, end,
-                                static_cast<uintptr_t>(entry->start),
-                                static_cast<uintptr_t>(entry->end),
-                                entry->name.c_str(),
-                                map_info.str().c_str());
-      return false;
-    }
-  }
-  return true;
-}
-
 // CheckMapRequest to validate a non-MAP_FAILED mmap result based on
 // the expected value, calling munmap if validation fails, giving the
 // reason in error_msg.
@@ -236,7 +172,6 @@
 
   uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
   uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
-  uintptr_t limit = expected + byte_count;
 
   if (expected_ptr == actual_ptr) {
     return true;
@@ -256,15 +191,16 @@
     //   true, even if there is no overlap
     // - There might have been an overlap at the point of mmap, but the
     //   overlapping region has since been unmapped.
-    std::string error_detail;
-    CheckNonOverlapping(expected, limit, &error_detail);
+
+    // Tell the client the mappings that were in place at the time.
+    if (kIsDebugBuild) {
+      PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+    }
+
     std::ostringstream os;
     os <<  StringPrintf("Failed to mmap at expected address, mapped at "
                         "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                         actual, expected);
-    if (!error_detail.empty()) {
-      os << " : " << error_detail;
-    }
     *error_msg = os.str();
   }
   return false;
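For reference, a stand-alone sketch (not ART code; the helper name is invented) of the kind of /proc/self/maps containment check that the removed BacktraceMap path performed, and whose raw data the new kIsDebugBuild PrintFileToLog call now surfaces instead:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <string>

static bool ContainedInExistingMapping(uintptr_t begin, uintptr_t end) {
  std::ifstream maps("/proc/self/maps");
  std::string line;
  while (std::getline(maps, line)) {
    uintptr_t map_begin = 0;
    uintptr_t map_end = 0;
    // Each line starts with "<start>-<end> <perms> ...", addresses in hex.
    if (std::sscanf(line.c_str(), "%" SCNxPTR "-%" SCNxPTR, &map_begin, &map_end) == 2 &&
        begin >= map_begin && end <= map_end) {
      return true;  // The requested range lies entirely inside one existing mapping.
    }
  }
  return false;
}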
diff --git a/libartbase/base/memory_region.cc b/libartbase/base/memory_region.cc
index 862ff73..d207872 100644
--- a/libartbase/base/memory_region.cc
+++ b/libartbase/base/memory_region.cc
@@ -29,36 +29,4 @@
   memmove(reinterpret_cast<void*>(begin() + offset), from.pointer(), from.size());
 }
 
-void MemoryRegion::StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
-  DCHECK_LE(value, MaxInt<uint32_t>(length));
-  DCHECK_LE(length, BitSizeOf<uint32_t>());
-  DCHECK_LE(bit_offset + length, size_in_bits());
-  if (length == 0) {
-    return;
-  }
-  // Bits are stored in this order {7 6 5 4 3 2 1 0}.
-  // How many remaining bits in current byte is (bit_offset % kBitsPerByte) + 1.
-  uint8_t* out = ComputeInternalPointer<uint8_t>(bit_offset >> kBitsPerByteLog2);
-  size_t orig_len = length;
-  uint32_t orig_value = value;
-  uintptr_t bit_remainder = bit_offset % kBitsPerByte;
-  while (true) {
-    const uintptr_t remaining_bits = kBitsPerByte - bit_remainder;
-    if (length <= remaining_bits) {
-      // Length is smaller than all of remainder bits.
-      size_t mask = ((1 << length) - 1) << bit_remainder;
-      *out = (*out & ~mask) | (value << bit_remainder);
-      break;
-    }
-    // Copy remaining bits in current byte.
-    size_t value_mask = (1 << remaining_bits) - 1;
-    *out = (*out & ~(value_mask << bit_remainder)) | ((value & value_mask) << bit_remainder);
-    value >>= remaining_bits;
-    bit_remainder = 0;
-    length -= remaining_bits;
-    ++out;
-  }
-  DCHECK_EQ(LoadBits(bit_offset, orig_len), orig_value) << bit_offset << " " << orig_len;
-}
-
 }  // namespace art
diff --git a/libartbase/base/memory_region.h b/libartbase/base/memory_region.h
index 3d00f5b..2060329 100644
--- a/libartbase/base/memory_region.h
+++ b/libartbase/base/memory_region.h
@@ -109,67 +109,6 @@
     return ComputeInternalPointer<T>(offset);
   }
 
-  // Load a single bit in the region. The bit at offset 0 is the least
-  // significant bit in the first byte.
-  ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
-    uint8_t bit_mask;
-    uint8_t byte = *ComputeBitPointer(bit_offset, &bit_mask);
-    return byte & bit_mask;
-  }
-
-  ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) const {
-    uint8_t bit_mask;
-    uint8_t* byte = ComputeBitPointer(bit_offset, &bit_mask);
-    if (value) {
-      *byte |= bit_mask;
-    } else {
-      *byte &= ~bit_mask;
-    }
-  }
-
-  // Load `length` bits from the region starting at bit offset `bit_offset`.
-  // The bit at the smallest offset is the least significant bit in the
-  // loaded value.  `length` must not be larger than the number of bits
-  // contained in the return value (32).
-  ALWAYS_INLINE uint32_t LoadBits(uintptr_t bit_offset, size_t length) const {
-    DCHECK_LE(length, BitSizeOf<uint32_t>());
-    DCHECK_LE(bit_offset + length, size_in_bits());
-    if (UNLIKELY(length == 0)) {
-      // Do not touch any memory if the range is empty.
-      return 0;
-    }
-    const uint8_t* address = begin() + bit_offset / kBitsPerByte;
-    const uint32_t shift = bit_offset & (kBitsPerByte - 1);
-    // Load the value (reading only the strictly needed bytes).
-    const uint32_t load_bit_count = shift + length;
-    uint32_t value = address[0] >> shift;
-    if (load_bit_count > 8) {
-      value |= static_cast<uint32_t>(address[1]) << (8 - shift);
-      if (load_bit_count > 16) {
-        value |= static_cast<uint32_t>(address[2]) << (16 - shift);
-        if (load_bit_count > 24) {
-          value |= static_cast<uint32_t>(address[3]) << (24 - shift);
-          if (load_bit_count > 32) {
-            value |= static_cast<uint32_t>(address[4]) << (32 - shift);
-          }
-        }
-      }
-    }
-    // Clear unwanted most significant bits.
-    uint32_t clear_bit_count = BitSizeOf(value) - length;
-    value = (value << clear_bit_count) >> clear_bit_count;
-    for (size_t i = 0; i < length; ++i) {
-      DCHECK_EQ((value >> i) & 1, LoadBit(bit_offset + i));
-    }
-    return value;
-  }
-
-  // Store `value` on `length` bits in the region starting at bit offset
-  // `bit_offset`.  The bit at the smallest offset is the least significant
-  // bit of the stored `value`.  `value` must not be larger than `length`
-  // bits.
-  void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length);
-
   void CopyFrom(size_t offset, const MemoryRegion& from) const;
 
   template<class Vector>
diff --git a/libartbase/base/memory_region_test.cc b/libartbase/base/memory_region_test.cc
index e3aead4..72e03a4 100644
--- a/libartbase/base/memory_region_test.cc
+++ b/libartbase/base/memory_region_test.cc
@@ -18,8 +18,6 @@
 
 #include "gtest/gtest.h"
 
-#include "bit_memory_region.h"
-
 namespace art {
 
 TEST(MemoryRegion, LoadUnaligned) {
@@ -57,35 +55,4 @@
   }
 }
 
-TEST(MemoryRegion, TestBits) {
-  const size_t n = 8;
-  uint8_t data[n] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-  MemoryRegion region(&data, n);
-  uint32_t value = 0xDEADBEEF;
-  // Try various offsets and lengths.
-  for (size_t bit_offset = 0; bit_offset < 2 * kBitsPerByte; ++bit_offset) {
-    for (size_t length = 0; length < 2 * kBitsPerByte; ++length) {
-      const uint32_t length_mask = (1 << length) - 1;
-      uint32_t masked_value = value & length_mask;
-      BitMemoryRegion bmr(region, bit_offset, length);
-      region.StoreBits(bit_offset, masked_value, length);
-      EXPECT_EQ(region.LoadBits(bit_offset, length), masked_value);
-      EXPECT_EQ(bmr.LoadBits(0, length), masked_value);
-      // Check adjacent bits to make sure they were not incorrectly cleared.
-      EXPECT_EQ(region.LoadBits(0, bit_offset), (1u << bit_offset) - 1);
-      EXPECT_EQ(region.LoadBits(bit_offset + length, length), length_mask);
-      region.StoreBits(bit_offset, length_mask, length);
-      // Store with bit memory region.
-      bmr.StoreBits(0, masked_value, length);
-      EXPECT_EQ(bmr.LoadBits(0, length), masked_value);
-      // Check adjacent bits to make sure they were not incorrectly cleared.
-      EXPECT_EQ(region.LoadBits(0, bit_offset), (1u << bit_offset) - 1);
-      EXPECT_EQ(region.LoadBits(bit_offset + length, length), length_mask);
-      region.StoreBits(bit_offset, length_mask, length);
-      // Flip the value to try different edge bit combinations.
-      value = ~value;
-    }
-  }
-}
-
 }  // namespace art
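With MemoryRegion losing its bit-level accessors here, equivalent stores go through BitMemoryRegion, which the deleted test already cross-checked against. A short sketch, assuming the headers live under base/:

#include <cstdint>

#include "base/bit_memory_region.h"  // Assumed header location for BitMemoryRegion.
#include "base/memory_region.h"

void StoreTwelveBits(art::MemoryRegion region) {
  // View 12 bits starting at bit offset 4 of the region, then store and reload them.
  art::BitMemoryRegion bits(region, /* bit_offset */ 4, /* length */ 12);
  bits.StoreBits(/* bit_offset */ 0, /* value */ 0xABCu, /* length */ 12);
  uint32_t value = bits.LoadBits(/* bit_offset */ 0, /* length */ 12);
  (void) value;
}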
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index b2c041c..3818624 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -19,6 +19,7 @@
     defaults: ["art_defaults"],
     host_supported: true,
     srcs: [
+        "dex/art_dex_file_loader.cc",
         "dex/compact_dex_file.cc",
         "dex/compact_offset_table.cc",
         "dex/descriptors_names.cc",
@@ -55,24 +56,20 @@
     },
     generated_sources: ["dexfile_operator_srcs"],
     shared_libs: [
-        // Important note: relying on libartbase's header_lib is perfectly acceptable.
-        // However, relying on the libartbase shared library introduces further, possibly cyclic,
-	// dependencies for clients outside of ART.
+        // For MemMap.
+        "libartbase",
         "liblog",
+        // For atrace.
+        "libcutils",
         // For common macros.
         "libbase",
         "libz",
     ],
-    header_libs: [
-        "art_libartbase_headers",
-    ],
     export_include_dirs: ["."],
     export_shared_lib_headers: [
+        "libartbase",
         "libbase",
     ],
-    export_header_lib_headers: [
-        "art_libartbase_headers",
-    ],
 }
 
 gensrcs {
@@ -114,6 +111,7 @@
         "art_gtest_defaults",
     ],
     srcs: [
+        "dex/art_dex_file_loader_test.cc",
         "dex/code_item_accessors_test.cc",
         "dex/compact_dex_file_test.cc",
         "dex/compact_offset_table_test.cc",
diff --git a/runtime/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
similarity index 98%
rename from runtime/dex/art_dex_file_loader.cc
rename to libdexfile/dex/art_dex_file_loader.cc
index 415e451..392ce1e 100644
--- a/runtime/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -534,7 +534,10 @@
   // Check if this dex file is located in the framework directory.
   // If it is, set a flag on the dex file. This is used by hidden API
   // policy decision logic.
-  if (dex_file != nullptr && LocationIsOnSystemFramework(location.c_str())) {
+  // The location can contain a multidex suffix, so fetch its canonical version. Note
+  // that this will call `realpath`.
+  std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str());
+  if (dex_file != nullptr && LocationIsOnSystemFramework(path.c_str())) {
     dex_file->SetIsPlatformDexFile();
   }
 
diff --git a/runtime/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
similarity index 97%
rename from runtime/dex/art_dex_file_loader.h
rename to libdexfile/dex/art_dex_file_loader.h
index 7577945..a460aee 100644
--- a/runtime/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
-#define ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
+#ifndef ART_LIBDEXFILE_DEX_ART_DEX_FILE_LOADER_H_
+#define ART_LIBDEXFILE_DEX_ART_DEX_FILE_LOADER_H_
 
 #include <cstdint>
 #include <memory>
@@ -137,4 +137,4 @@
 
 }  // namespace art
 
-#endif  // ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
+#endif  // ART_LIBDEXFILE_DEX_ART_DEX_FILE_LOADER_H_
diff --git a/runtime/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
similarity index 69%
rename from runtime/dex/art_dex_file_loader_test.cc
rename to libdexfile/dex/art_dex_file_loader_test.cc
index aee397b..d353c26 100644
--- a/runtime/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -43,34 +43,7 @@
   dst_stream << src_stream.rdbuf();
 }
 
-class ArtDexFileLoaderTest : public CommonRuntimeTest {
- public:
-  virtual void SetUp() {
-    CommonRuntimeTest::SetUp();
-
-    std::string dex_location = GetTestDexFileName("Main");
-
-    data_location_path_ = android_data_ + "/foo.jar";
-    system_location_path_ = GetAndroidRoot() + "/foo.jar";
-    system_framework_location_path_ = GetAndroidRoot() + "/framework/foo.jar";
-
-    Copy(dex_location, data_location_path_);
-    Copy(dex_location, system_location_path_);
-    Copy(dex_location, system_framework_location_path_);
-  }
-
-  virtual void TearDown() {
-    remove(data_location_path_.c_str());
-    remove(system_location_path_.c_str());
-    remove(system_framework_location_path_.c_str());
-    CommonRuntimeTest::TearDown();
-  }
-
- protected:
-  std::string data_location_path_;
-  std::string system_location_path_;
-  std::string system_framework_location_path_;
-};
+class ArtDexFileLoaderTest : public CommonRuntimeTest {};
 
 // TODO: Port OpenTestDexFile(s) need to be ported to use non-ART utilities, and
 // the tests that depend upon them should be moved to dex_file_loader_test.cc
@@ -272,7 +245,7 @@
 
 TEST_F(ArtDexFileLoaderTest, FindProtoId) {
   for (size_t i = 0; i < java_lang_dex_file_->NumProtoIds(); i++) {
-    const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(i);
+    const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(dex::ProtoIndex(i));
     const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
     std::vector<dex::TypeIndex> to_find_types;
     if (to_find_tl != nullptr) {
@@ -283,7 +256,7 @@
     const DexFile::ProtoId* found =
         java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
     ASSERT_TRUE(found != nullptr);
-    EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), i);
+    EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), dex::ProtoIndex(i));
   }
 }
 
@@ -339,21 +312,24 @@
   ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
 }
 
-TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile) {
-  ArtDexFileLoader loader;
-  bool success;
-  std::string error_msg;
-  std::vector<std::unique_ptr<const DexFile>> dex_files;
-
+TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_DataDir) {
   // Load file from a non-system directory and check that it is not flagged as framework.
-  ASSERT_FALSE(LocationIsOnSystemFramework(data_location_path_.c_str()));
-  success = loader.Open(data_location_path_.c_str(),
-                        data_location_path_,
-                        /* verify */ false,
-                        /* verify_checksum */ false,
-                        &error_msg,
-                        &dex_files);
+  std::string data_location_path = android_data_ + "/foo.jar";
+  ASSERT_FALSE(LocationIsOnSystemFramework(data_location_path.c_str()));
+
+  Copy(GetTestDexFileName("Main"), data_location_path);
+
+  ArtDexFileLoader loader;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  std::string error_msg;
+  bool success = loader.Open(data_location_path.c_str(),
+                             data_location_path,
+                             /* verify */ false,
+                             /* verify_checksum */ false,
+                             &error_msg,
+                             &dex_files);
   ASSERT_TRUE(success) << error_msg;
+
   ASSERT_GE(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
     ASSERT_FALSE(dex_file->IsPlatformDexFile());
@@ -361,15 +337,27 @@
 
   dex_files.clear();
 
+  ASSERT_EQ(0, remove(data_location_path.c_str()));
+}
+
+TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemDir) {
   // Load file from a system, non-framework directory and check that it is not flagged as framework.
-  ASSERT_FALSE(LocationIsOnSystemFramework(system_location_path_.c_str()));
-  success = loader.Open(system_location_path_.c_str(),
-                        system_location_path_,
-                        /* verify */ false,
-                        /* verify_checksum */ false,
-                        &error_msg,
-                        &dex_files);
-  ASSERT_TRUE(success);
+  std::string system_location_path = GetAndroidRoot() + "/foo.jar";
+  ASSERT_FALSE(LocationIsOnSystemFramework(system_location_path.c_str()));
+
+  Copy(GetTestDexFileName("Main"), system_location_path);
+
+  ArtDexFileLoader loader;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  std::string error_msg;
+  bool success = loader.Open(system_location_path.c_str(),
+                             system_location_path,
+                             /* verify */ false,
+                             /* verify_checksum */ false,
+                             &error_msg,
+                             &dex_files);
+  ASSERT_TRUE(success) << error_msg;
+
   ASSERT_GE(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
     ASSERT_FALSE(dex_file->IsPlatformDexFile());
@@ -377,19 +365,121 @@
 
   dex_files.clear();
 
+  ASSERT_EQ(0, remove(system_location_path.c_str()));
+}
+
+TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemFrameworkDir) {
   // Load file from a system/framework directory and check that it is flagged as a framework dex.
-  ASSERT_TRUE(LocationIsOnSystemFramework(system_framework_location_path_.c_str()));
-  success = loader.Open(system_framework_location_path_.c_str(),
-                        system_framework_location_path_,
-                        /* verify */ false,
-                        /* verify_checksum */ false,
-                        &error_msg,
-                        &dex_files);
-  ASSERT_TRUE(success);
+  std::string system_framework_location_path = GetAndroidRoot() + "/framework/foo.jar";
+  ASSERT_TRUE(LocationIsOnSystemFramework(system_framework_location_path.c_str()));
+
+  Copy(GetTestDexFileName("Main"), system_framework_location_path);
+
+  ArtDexFileLoader loader;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  std::string error_msg;
+  bool success = loader.Open(system_framework_location_path.c_str(),
+                             system_framework_location_path,
+                             /* verify */ false,
+                             /* verify_checksum */ false,
+                             &error_msg,
+                             &dex_files);
+  ASSERT_TRUE(success) << error_msg;
+
   ASSERT_GE(dex_files.size(), 1u);
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
     ASSERT_TRUE(dex_file->IsPlatformDexFile());
   }
+
+  dex_files.clear();
+
+  ASSERT_EQ(0, remove(system_framework_location_path.c_str()));
+}
+
+TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_DataDir_MultiDex) {
+  // Load multidex file from a non-system directory and check that it is not flagged as framework.
+  std::string data_multi_location_path = android_data_ + "/multifoo.jar";
+  ASSERT_FALSE(LocationIsOnSystemFramework(data_multi_location_path.c_str()));
+
+  Copy(GetTestDexFileName("MultiDex"), data_multi_location_path);
+
+  ArtDexFileLoader loader;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  std::string error_msg;
+  bool success = loader.Open(data_multi_location_path.c_str(),
+                             data_multi_location_path,
+                             /* verify */ false,
+                             /* verify_checksum */ false,
+                             &error_msg,
+                             &dex_files);
+  ASSERT_TRUE(success) << error_msg;
+
+  ASSERT_GT(dex_files.size(), 1u);
+  for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
+    ASSERT_FALSE(dex_file->IsPlatformDexFile());
+  }
+
+  dex_files.clear();
+
+  ASSERT_EQ(0, remove(data_multi_location_path.c_str()));
+}
+
+TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemDir_MultiDex) {
+  // Load multidex file from a system, non-framework directory and check that it is not flagged
+  // as framework.
+  std::string system_multi_location_path = GetAndroidRoot() + "/multifoo.jar";
+  ASSERT_FALSE(LocationIsOnSystemFramework(system_multi_location_path.c_str()));
+
+  Copy(GetTestDexFileName("MultiDex"), system_multi_location_path);
+
+  ArtDexFileLoader loader;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  std::string error_msg;
+  bool success = loader.Open(system_multi_location_path.c_str(),
+                             system_multi_location_path,
+                             /* verify */ false,
+                             /* verify_checksum */ false,
+                             &error_msg,
+                             &dex_files);
+  ASSERT_TRUE(success) << error_msg;
+
+  ASSERT_GT(dex_files.size(), 1u);
+  for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
+    ASSERT_FALSE(dex_file->IsPlatformDexFile());
+  }
+
+  dex_files.clear();
+
+  ASSERT_EQ(0, remove(system_multi_location_path.c_str()));
+}
+
+TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile_SystemFrameworkDir_MultiDex) {
+  // Load multidex file from a system/framework directory and check that it is flagged as a
+  // framework dex.
+  std::string system_framework_multi_location_path = GetAndroidRoot() + "/framework/multifoo.jar";
+  ASSERT_TRUE(LocationIsOnSystemFramework(system_framework_multi_location_path.c_str()));
+
+  Copy(GetTestDexFileName("MultiDex"), system_framework_multi_location_path);
+
+  ArtDexFileLoader loader;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  std::string error_msg;
+  bool success = loader.Open(system_framework_multi_location_path.c_str(),
+                             system_framework_multi_location_path,
+                             /* verify */ false,
+                             /* verify_checksum */ false,
+                             &error_msg,
+                             &dex_files);
+  ASSERT_TRUE(success) << error_msg;
+
+  ASSERT_GT(dex_files.size(), 1u);
+  for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
+    ASSERT_TRUE(dex_file->IsPlatformDexFile());
+  }
+
+  dex_files.clear();
+
+  ASSERT_EQ(0, remove(system_framework_multi_location_path.c_str()));
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
new file mode 100644
index 0000000..bcd0a7b
--- /dev/null
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_INL_H_
+#define ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_INL_H_
+
+#include "class_accessor.h"
+
+#include "base/leb128.h"
+
+namespace art {
+
+inline ClassAccessor::ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def)
+    : ClassAccessor(dex_file, dex_file.GetClassData(class_def)) {}
+
+inline ClassAccessor::ClassAccessor(const DexFile& dex_file, const uint8_t* class_data)
+    : dex_file_(dex_file),
+      ptr_pos_(class_data),
+      num_static_fields_(class_data != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
+      num_instance_fields_(class_data != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
+      num_direct_methods_(class_data != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
+      num_virtual_methods_(class_data != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u) {}
+
+inline const uint8_t* ClassAccessor::Method::Read(const uint8_t* ptr) {
+  method_idx_ += DecodeUnsignedLeb128(&ptr);
+  access_flags_ = DecodeUnsignedLeb128(&ptr);
+  code_off_ = DecodeUnsignedLeb128(&ptr);
+  return ptr;
+}
+
+inline const uint8_t* ClassAccessor::Field::Read(const uint8_t* ptr) {
+  field_idx_ += DecodeUnsignedLeb128(&ptr);
+  access_flags_ = DecodeUnsignedLeb128(&ptr);
+  return ptr;
+}
+
+template <typename StaticFieldVisitor,
+          typename InstanceFieldVisitor,
+          typename DirectMethodVisitor,
+          typename VirtualMethodVisitor>
+inline void ClassAccessor::VisitMethodsAndFields(
+    const StaticFieldVisitor& static_field_visitor,
+    const InstanceFieldVisitor& instance_field_visitor,
+    const DirectMethodVisitor& direct_method_visitor,
+    const VirtualMethodVisitor& virtual_method_visitor) const {
+  const uint8_t* ptr = ptr_pos_;
+  for (size_t i = 0; i < num_static_fields_; ++i) {
+    Field data;
+    ptr = data.Read(ptr);
+    static_field_visitor(data);
+  }
+  for (size_t i = 0; i < num_instance_fields_; ++i) {
+    Field data;
+    ptr = data.Read(ptr);
+    instance_field_visitor(data);
+  }
+  for (size_t i = 0; i < num_direct_methods_; ++i) {
+    Method data;
+    ptr = data.Read(ptr);
+    direct_method_visitor(data);
+  }
+  for (size_t i = 0; i < num_virtual_methods_; ++i) {
+    Method data;
+    ptr = data.Read(ptr);
+    virtual_method_visitor(data);
+  }
+}
+
+template <typename DirectMethodVisitor,
+          typename VirtualMethodVisitor>
+inline void ClassAccessor::VisitMethods(const DirectMethodVisitor& direct_method_visitor,
+                                        const VirtualMethodVisitor& virtual_method_visitor) const {
+  VisitMethodsAndFields(VoidFunctor(),
+                        VoidFunctor(),
+                        direct_method_visitor,
+                        virtual_method_visitor);
+}
+
+// Visit direct and virtual methods.
+template <typename MethodVisitor>
+inline void ClassAccessor::VisitMethods(const MethodVisitor& method_visitor) const {
+  VisitMethods(method_visitor, method_visitor);
+}
+
+inline const DexFile::CodeItem* ClassAccessor::GetCodeItem(const Method& method) const {
+  return dex_file_.GetCodeItem(method.GetCodeItemOffset());
+}
+
+}  // namespace art
+
+#endif  // ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_INL_H_
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
new file mode 100644
index 0000000..59a6b5d
--- /dev/null
+++ b/libdexfile/dex/class_accessor.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
+#define ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
+
+#include "base/utils.h"
+#include "dex_file.h"
+
+namespace art {
+
+// Classes to access Dex data.
+class ClassAccessor {
+ public:
+  // Class method data.
+  class Method {
+   public:
+    uint32_t GetIndex() const {
+      return method_idx_;
+    }
+
+    uint32_t GetAccessFlags() const {
+      return access_flags_;
+    }
+
+    uint32_t GetCodeItemOffset() const {
+      return code_off_;
+    }
+
+   private:
+    const uint8_t* Read(const uint8_t* ptr);
+
+    // A decoded version of the method of a class_data_item.
+    uint32_t method_idx_ = 0u;
+    uint32_t access_flags_ = 0u;
+    uint32_t code_off_ = 0u;
+
+    friend class ClassAccessor;
+  };
+
+  // Class field data.
+  class Field {
+   public:
+    uint32_t GetIndex() const {
+      return field_idx_;
+    }
+
+    uint32_t GetAccessFlags() const {
+      return access_flags_;
+    }
+
+   private:
+    const uint8_t* Read(const uint8_t* ptr);
+
+    // A decoded version of the field of a class_data_item.
+    uint32_t field_idx_ = 0u;
+    uint32_t access_flags_ = 0u;
+
+    friend class ClassAccessor;
+  };
+
+  ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def);
+
+  // Return the code item for a method.
+  const DexFile::CodeItem* GetCodeItem(const Method& method) const;
+
+  // The underlying data is not iterator friendly; use visitors to get around this.
+  template <typename StaticFieldVisitor,
+            typename InstanceFieldVisitor,
+            typename DirectMethodVisitor,
+            typename VirtualMethodVisitor>
+  void VisitMethodsAndFields(const StaticFieldVisitor& static_field_visitor,
+                             const InstanceFieldVisitor& instance_field_visitor,
+                             const DirectMethodVisitor& direct_method_visitor,
+                             const VirtualMethodVisitor& virtual_method_visitor) const;
+
+  template <typename DirectMethodVisitor,
+            typename VirtualMethodVisitor>
+  void VisitMethods(const DirectMethodVisitor& direct_method_visitor,
+                    const VirtualMethodVisitor& virtual_method_visitor) const;
+
+  // Visit direct and virtual methods.
+  template <typename MethodVisitor>
+  void VisitMethods(const MethodVisitor& method_visitor) const;
+
+  uint32_t NumStaticFields() const {
+    return num_static_fields_;
+  }
+
+  uint32_t NumInstanceFields() const {
+    return num_instance_fields_;
+  }
+
+  uint32_t NumDirectMethods() const {
+    return num_direct_methods_;
+  }
+
+  uint32_t NumVirtualMethods() const {
+    return num_virtual_methods_;
+  }
+
+ protected:
+  ALWAYS_INLINE ClassAccessor(const DexFile& dex_file, const uint8_t* class_data);
+
+  const DexFile& dex_file_;
+  const uint8_t* ptr_pos_ = nullptr;  // Pointer into stream of class_data_item.
+  const uint32_t num_static_fields_ = 0u;
+  const uint32_t num_instance_fields_ = 0u;
+  const uint32_t num_direct_methods_ = 0u;
+  const uint32_t num_virtual_methods_ = 0u;
+  // Only cache descriptor.
+  const void* class_def_ = nullptr;
+  const void* class_data_ = nullptr;
+};
+
+}  // namespace art
+
+#endif  // ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_H_
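A hypothetical usage sketch of the new accessor (the function and its arguments are invented; the calls match the API declared above):

#include <cstdio>

#include "dex/class_accessor-inl.h"
#include "dex/dex_file.h"

void DumpClassMethods(const art::DexFile& dex_file, const art::DexFile::ClassDef& class_def) {
  art::ClassAccessor accessor(dex_file, class_def);
  // The single-visitor overload is invoked for both direct and virtual methods.
  accessor.VisitMethods([&](const art::ClassAccessor::Method& method) {
    const art::DexFile::CodeItem* code_item = accessor.GetCodeItem(method);
    std::printf("method %u flags 0x%x has code: %d\n",
                method.GetIndex(), method.GetAccessFlags(), code_item != nullptr);
  });
}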
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index d1b3200..e78e8d7 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -127,7 +127,7 @@
   return StringByTypeIdx(proto_id.return_type_idx_);
 }
 
-inline const char* DexFile::GetShorty(uint32_t proto_idx) const {
+inline const char* DexFile::GetShorty(dex::ProtoIndex proto_idx) const {
   const ProtoId& proto_id = GetProtoId(proto_idx);
   return StringDataByIdx(proto_id.shorty_idx_);
 }
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index 8cfee66..9de260c 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -281,7 +281,7 @@
   // Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx
   const dex::TypeIndex class_idx = GetIndexForTypeId(declaring_klass);
   const dex::StringIndex name_idx = GetIndexForStringId(name);
-  const uint16_t proto_idx = GetIndexForProtoId(signature);
+  const dex::ProtoIndex proto_idx = GetIndexForProtoId(signature);
   int32_t lo = 0;
   int32_t hi = NumMethodIds() - 1;
   while (hi >= lo) {
@@ -373,7 +373,8 @@
   int32_t hi = NumProtoIds() - 1;
   while (hi >= lo) {
     int32_t mid = (hi + lo) / 2;
-    const DexFile::ProtoId& proto = GetProtoId(mid);
+    const dex::ProtoIndex proto_idx = static_cast<dex::ProtoIndex>(mid);
+    const DexFile::ProtoId& proto = GetProtoId(proto_idx);
     int compare = return_type_idx.index_ - proto.return_type_idx_.index_;
     if (compare == 0) {
       DexFileParameterIterator it(*this, proto);
@@ -777,6 +778,11 @@
 
 namespace dex {
 
+std::ostream& operator<<(std::ostream& os, const ProtoIndex& index) {
+  os << "ProtoIndex[" << index.index_ << "]";
+  return os;
+}
+
 std::ostream& operator<<(std::ostream& os, const StringIndex& index) {
   os << "StringIndex[" << index.index_ << "]";
   return os;
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 4ca735a..87d2c48 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -189,7 +189,7 @@
   // Raw method_id_item.
   struct MethodId {
     dex::TypeIndex class_idx_;   // index into type_ids_ array for defining class
-    uint16_t proto_idx_;         // index into proto_ids_ array for method prototype
+    dex::ProtoIndex proto_idx_;  // index into proto_ids_ array for method prototype
     dex::StringIndex name_idx_;  // index into string_ids_ array for method name
 
    private:
@@ -692,15 +692,15 @@
   }
 
   // Returns the ProtoId at the specified index.
-  const ProtoId& GetProtoId(uint16_t idx) const {
-    DCHECK_LT(idx, NumProtoIds()) << GetLocation();
-    return proto_ids_[idx];
+  const ProtoId& GetProtoId(dex::ProtoIndex idx) const {
+    DCHECK_LT(idx.index_, NumProtoIds()) << GetLocation();
+    return proto_ids_[idx.index_];
   }
 
-  uint16_t GetIndexForProtoId(const ProtoId& proto_id) const {
+  dex::ProtoIndex GetIndexForProtoId(const ProtoId& proto_id) const {
     CHECK_GE(&proto_id, proto_ids_) << GetLocation();
     CHECK_LT(&proto_id, proto_ids_ + header_->proto_ids_size_) << GetLocation();
-    return &proto_id - proto_ids_;
+    return dex::ProtoIndex(&proto_id - proto_ids_);
   }
 
   // Looks up a proto id for a given return type and signature type list
@@ -722,7 +722,7 @@
   const Signature CreateSignature(const StringPiece& signature) const;
 
   // Returns the short form method descriptor for the given prototype.
-  const char* GetShorty(uint32_t proto_idx) const;
+  const char* GetShorty(dex::ProtoIndex proto_idx) const;
 
   const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
     return DataPointer<TypeList>(proto_id.parameters_off_);
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 1e0f5ac..457addf 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -191,6 +191,8 @@
   std::string base_location = GetBaseLocation(dex_location);
   const char* suffix = dex_location + base_location.size();
   DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
+  // Warning: the Bionic implementation of realpath() allocates > 12KB on the stack.
+  // Do not run this code on a small stack, e.g. in a signal handler.
   UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
   if (path != nullptr && path.get() != base_location) {
     return std::string(path.get()) + suffix;
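To illustrate the canonicalization the warning applies to, a self-contained sketch (not the ART implementation: it hard-codes '!' as the multidex separator and skips the comparison against the original base location):

#include <stdlib.h>

#include <memory>
#include <string>

std::string CanonicalDexLocation(const std::string& dex_location) {
  // Split "base.jar!classes2.dex" into the base file and the multidex suffix.
  const size_t pos = dex_location.find('!');
  const std::string base = dex_location.substr(0, pos);
  const std::string suffix = (pos == std::string::npos) ? "" : dex_location.substr(pos);
  // Bionic's realpath() can use a large stack buffer internally (see the warning above).
  std::unique_ptr<char, decltype(&free)> path(realpath(base.c_str(), nullptr), &free);
  return (path != nullptr) ? std::string(path.get()) + suffix : dex_location;
}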
diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h
index 28cdfc1..0153220 100644
--- a/libdexfile/dex/dex_file_loader.h
+++ b/libdexfile/dex/dex_file_loader.h
@@ -71,7 +71,7 @@
   //     of the dex file. In the second case (oat) it will include the file name
   //     and possibly some multidex annotation to uniquely identify it.
   // canonical_dex_location:
-  //     the dex_location where it's file name part has been made canonical.
+  //     the dex_location where its file name part has been made canonical.
   static std::string GetDexCanonicalLocation(const char* dex_location);
 
   // For normal dex files, location and base location coincide. If a dex file is part of a multidex
diff --git a/libdexfile/dex/dex_file_types.h b/libdexfile/dex/dex_file_types.h
index 2bb70ff..d4fb3de 100644
--- a/libdexfile/dex/dex_file_types.h
+++ b/libdexfile/dex/dex_file_types.h
@@ -25,73 +25,67 @@
 
 constexpr uint32_t kDexNoIndex = 0xFFFFFFFF;
 
-class StringIndex {
+template<typename T>
+class DexIndex {
  public:
-  uint32_t index_;
+  T index_;
 
-  constexpr StringIndex() : index_(std::numeric_limits<decltype(index_)>::max()) {}
-  explicit constexpr StringIndex(uint32_t idx) : index_(idx) {}
+  constexpr DexIndex() : index_(std::numeric_limits<decltype(index_)>::max()) {}
+  explicit constexpr DexIndex(T idx) : index_(idx) {}
 
   bool IsValid() const {
     return index_ != std::numeric_limits<decltype(index_)>::max();
   }
-  static StringIndex Invalid() {
-    return StringIndex(std::numeric_limits<decltype(index_)>::max());
+  static constexpr DexIndex Invalid() {
+    return DexIndex(std::numeric_limits<decltype(index_)>::max());
   }
-
-  bool operator==(const StringIndex& other) const {
+  bool operator==(const DexIndex& other) const {
     return index_ == other.index_;
   }
-  bool operator!=(const StringIndex& other) const {
+  bool operator!=(const DexIndex& other) const {
     return index_ != other.index_;
   }
-  bool operator<(const StringIndex& other) const {
+  bool operator<(const DexIndex& other) const {
     return index_ < other.index_;
   }
-  bool operator<=(const StringIndex& other) const {
+  bool operator<=(const DexIndex& other) const {
     return index_ <= other.index_;
   }
-  bool operator>(const StringIndex& other) const {
+  bool operator>(const DexIndex& other) const {
     return index_ > other.index_;
   }
-  bool operator>=(const StringIndex& other) const {
+  bool operator>=(const DexIndex& other) const {
     return index_ >= other.index_;
   }
 };
+
+class ProtoIndex : public DexIndex<uint16_t> {
+ public:
+  ProtoIndex() {}
+  explicit constexpr ProtoIndex(uint16_t index) : DexIndex<decltype(index_)>(index) {}
+  static constexpr ProtoIndex Invalid() {
+    return ProtoIndex(std::numeric_limits<decltype(index_)>::max());
+  }
+};
+std::ostream& operator<<(std::ostream& os, const ProtoIndex& index);
+
+class StringIndex : public DexIndex<uint32_t> {
+ public:
+  StringIndex() {}
+  explicit constexpr StringIndex(uint32_t index) : DexIndex<decltype(index_)>(index) {}
+  static constexpr StringIndex Invalid() {
+    return StringIndex(std::numeric_limits<decltype(index_)>::max());
+  }
+};
 std::ostream& operator<<(std::ostream& os, const StringIndex& index);
 
-class TypeIndex {
+class TypeIndex : public DexIndex<uint16_t> {
  public:
-  uint16_t index_;
-
-  constexpr TypeIndex() : index_(std::numeric_limits<decltype(index_)>::max()) {}
-  explicit constexpr TypeIndex(uint16_t idx) : index_(idx) {}
-
-  bool IsValid() const {
-    return index_ != std::numeric_limits<decltype(index_)>::max();
-  }
-  static TypeIndex Invalid() {
+  TypeIndex() {}
+  explicit constexpr TypeIndex(uint16_t index) : DexIndex<uint16_t>(index) {}
+  static constexpr TypeIndex Invalid() {
     return TypeIndex(std::numeric_limits<decltype(index_)>::max());
   }
-
-  bool operator==(const TypeIndex& other) const {
-    return index_ == other.index_;
-  }
-  bool operator!=(const TypeIndex& other) const {
-    return index_ != other.index_;
-  }
-  bool operator<(const TypeIndex& other) const {
-    return index_ < other.index_;
-  }
-  bool operator<=(const TypeIndex& other) const {
-    return index_ <= other.index_;
-  }
-  bool operator>(const TypeIndex& other) const {
-    return index_ > other.index_;
-  }
-  bool operator>=(const TypeIndex& other) const {
-    return index_ >= other.index_;
-  }
 };
 std::ostream& operator<<(std::ostream& os, const TypeIndex& index);
 
@@ -100,15 +94,21 @@
 
 namespace std {
 
+template<> struct hash<art::dex::ProtoIndex> {
+  size_t operator()(const art::dex::ProtoIndex& index) const {
+    return hash<decltype(index.index_)>()(index.index_);
+  }
+};
+
 template<> struct hash<art::dex::StringIndex> {
   size_t operator()(const art::dex::StringIndex& index) const {
-    return hash<uint32_t>()(index.index_);
+    return hash<decltype(index.index_)>()(index.index_);
   }
 };
 
 template<> struct hash<art::dex::TypeIndex> {
   size_t operator()(const art::dex::TypeIndex& index) const {
-    return hash<uint16_t>()(index.index_);
+    return hash<decltype(index.index_)>()(index.index_);
   }
 };
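A small sketch (not ART code) of what the shared DexIndex base provides: the explicit constructors keep raw integers from silently becoming typed indices, and the std::hash specializations keep the wrappers usable as container keys:

#include <unordered_set>

#include "dex/dex_file_types.h"

void TypedIndexExample() {
  art::dex::ProtoIndex proto(5u);
  // proto == 5u;  // Would not compile: the explicit constructor rejects raw integers.
  std::unordered_set<art::dex::ProtoIndex> seen;  // Enabled by the std::hash specialization.
  seen.insert(proto);
  bool is_valid = proto.IsValid();  // True; only the max uint16_t value is the invalid sentinel.
  (void) is_valid;
}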
 
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index a32f64e..78db8b9 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -127,8 +127,9 @@
   return &dex_file_->GetMethodId(idx);
 }
 
-const DexFile::ProtoId* DexFileVerifier::CheckLoadProtoId(uint32_t idx, const char* err_string) {
-  if (UNLIKELY(!CheckIndex(idx, dex_file_->NumProtoIds(), err_string))) {
+const DexFile::ProtoId* DexFileVerifier::CheckLoadProtoId(dex::ProtoIndex idx,
+                                                          const char* err_string) {
+  if (UNLIKELY(!CheckIndex(idx.index_, dex_file_->NumProtoIds(), err_string))) {
     return nullptr;
   }
   return &dex_file_->GetProtoId(idx);
@@ -2208,7 +2209,7 @@
   }
 
   // Check that the proto id is valid.
-  if (UNLIKELY(!CheckIndex(item->proto_idx_, dex_file_->NumProtoIds(),
+  if (UNLIKELY(!CheckIndex(item->proto_idx_.index_, dex_file_->NumProtoIds(),
                            "inter_method_id_item proto_idx"))) {
     return false;
   }
diff --git a/libdexfile/dex/dex_file_verifier.h b/libdexfile/dex/dex_file_verifier.h
index 04d8d71..43d1093 100644
--- a/libdexfile/dex/dex_file_verifier.h
+++ b/libdexfile/dex/dex_file_verifier.h
@@ -164,7 +164,7 @@
   // error if not. If there is an error, null is returned.
   const DexFile::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
   const DexFile::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
-  const DexFile::ProtoId* CheckLoadProtoId(uint32_t idx, const char* error_fmt);
+  const DexFile::ProtoId* CheckLoadProtoId(dex::ProtoIndex idx, const char* error_fmt);
 
   void ErrorStringPrintf(const char* fmt, ...)
       __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index f82081f..c9bac0f 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -161,7 +161,7 @@
       "method_id_proto_idx",
       [](DexFile* dex_file) {
         DexFile::MethodId* method_id = const_cast<DexFile::MethodId*>(&dex_file->GetMethodId(0));
-        method_id->proto_idx_ = 0xFF;
+        method_id->proto_idx_ = dex::ProtoIndex(0xFF);
       },
       "inter_method_id_item proto_idx");
 
@@ -1425,12 +1425,13 @@
           CHECK_LT(method_idx + 1u, dex_file->NumMethodIds());
           CHECK_EQ(dex_file->GetMethodId(method_idx).name_idx_,
                    dex_file->GetMethodId(method_idx + 1).name_idx_);
-          CHECK_EQ(dex_file->GetMethodId(method_idx).proto_idx_ + 1u,
-                   dex_file->GetMethodId(method_idx + 1).proto_idx_);
+          CHECK_EQ(dex_file->GetMethodId(method_idx).proto_idx_.index_ + 1u,
+                   dex_file->GetMethodId(method_idx + 1).proto_idx_.index_);
           // Their return types should be the same.
-          uint32_t proto1_idx = dex_file->GetMethodId(method_idx).proto_idx_;
+          dex::ProtoIndex proto1_idx = dex_file->GetMethodId(method_idx).proto_idx_;
           const DexFile::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx);
-          const DexFile::ProtoId& proto2 = dex_file->GetProtoId(proto1_idx + 1u);
+          dex::ProtoIndex proto2_idx(proto1_idx.index_ + 1u);
+          const DexFile::ProtoId& proto2 = dex_file->GetProtoId(proto2_idx);
           CHECK_EQ(proto1.return_type_idx_, proto2.return_type_idx_);
           // And the first should not have any parameters while the second should have some.
           CHECK(!DexFileParameterIterator(*dex_file, proto1).HasNext());
diff --git a/libdexfile/dex/dex_instruction-inl.h b/libdexfile/dex/dex_instruction-inl.h
index 6bef18c..e0cffdd 100644
--- a/libdexfile/dex/dex_instruction-inl.h
+++ b/libdexfile/dex/dex_instruction-inl.h
@@ -508,7 +508,7 @@
   return (FormatOf(Opcode()) == k35c) || (FormatOf(Opcode()) == k45cc);
 }
 
-inline void Instruction::GetVarArgs(uint32_t arg[kMaxVarArgRegs], uint16_t inst_data) const {
+inline uint32_t Instruction::GetVarArgs(uint32_t arg[kMaxVarArgRegs], uint16_t inst_data) const {
   DCHECK(HasVarArgs());
 
   /*
@@ -551,6 +551,7 @@
     default:  // case 0
       break;  // Valid, but no need to do anything.
   }
+  return count;
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/dex_instruction.cc b/libdexfile/dex/dex_instruction.cc
index 8862181..8378211 100644
--- a/libdexfile/dex/dex_instruction.cc
+++ b/libdexfile/dex/dex_instruction.cc
@@ -467,10 +467,10 @@
     case k45cc: {
       uint32_t arg[kMaxVarArgRegs];
       GetVarArgs(arg);
-      uint32_t method_idx = VRegB_45cc();
-      uint32_t proto_idx = VRegH_45cc();
+      uint16_t method_idx = VRegB_45cc();
+      dex::ProtoIndex proto_idx(VRegH_45cc());
       os << opcode << " {";
-      for (int i = 0; i < VRegA_45cc(); ++i) {
+      for (uint32_t i = 0; i < VRegA_45cc(); ++i) {
         if (i != 0) {
           os << ", ";
         }
@@ -478,7 +478,8 @@
       }
       os << "}";
       if (file != nullptr) {
-        os << ", " << file->PrettyMethod(method_idx) << ", " << file->GetShorty(proto_idx)
+        os << ", " << file->PrettyMethod(method_idx)
+           << ", " << file->GetShorty(proto_idx)
            << " // ";
       } else {
         os << ", ";
@@ -490,18 +491,19 @@
       switch (Opcode()) {
         case INVOKE_POLYMORPHIC_RANGE: {
           if (file != nullptr) {
-            uint32_t method_idx = VRegB_4rcc();
-            uint32_t proto_idx = VRegH_4rcc();
+            uint16_t method_idx = VRegB_4rcc();
+            dex::ProtoIndex proto_idx(VRegH_4rcc());
             os << opcode << ", {v" << VRegC_4rcc() << " .. v" << (VRegC_4rcc() + VRegA_4rcc())
-               << "}, " << file->PrettyMethod(method_idx) << ", " << file->GetShorty(proto_idx)
+               << "}, " << file->PrettyMethod(method_idx)
+               << ", " << file->GetShorty(proto_idx)
                << " // method@" << method_idx << ", proto@" << proto_idx;
             break;
           }
         }
         FALLTHROUGH_INTENDED;
         default: {
-          uint32_t method_idx = VRegB_4rcc();
-          uint32_t proto_idx = VRegH_4rcc();
+          uint16_t method_idx = VRegB_4rcc();
+          dex::ProtoIndex proto_idx(VRegH_4rcc());
           os << opcode << ", {v" << VRegC_4rcc() << " .. v" << (VRegC_4rcc() + VRegA_4rcc())
              << "}, method@" << method_idx << ", proto@" << proto_idx;
         }
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index bf50836..6807025 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -462,8 +462,8 @@
 
-  // Fills the given array with the 'arg' array of the instruction.
+  // Fills the given array with the 'arg' array of the instruction and returns the arg count.
   bool HasVarArgs() const;
-  void GetVarArgs(uint32_t args[kMaxVarArgRegs], uint16_t inst_data) const;
-  void GetVarArgs(uint32_t args[kMaxVarArgRegs]) const {
+  uint32_t GetVarArgs(uint32_t args[kMaxVarArgRegs], uint16_t inst_data) const;
+  uint32_t GetVarArgs(uint32_t args[kMaxVarArgRegs]) const {
     return GetVarArgs(args, Fetch16(0));
   }
 
diff --git a/libprofile/Android.bp b/libprofile/Android.bp
index bcb90cb..b9883f6 100644
--- a/libprofile/Android.bp
+++ b/libprofile/Android.bp
@@ -45,5 +45,6 @@
     shared_libs: [
         "libdexfile",
+        "libartbase",
 	// For atrace.
         "libcutils",
     ],
@@ -97,5 +98,6 @@
     shared_libs: [
         "libdexfiled",
+        "libartbased",
         "libziparchive",
     ],
 }
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index 535acdf..77dede3 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -39,6 +39,7 @@
         "libart-compiler",
         "libart-disassembler",
         "libdexfile",
+        "libartbase",
         "libprofile",
         "libbase",
     ],
@@ -55,6 +56,7 @@
         "libartd-compiler",
         "libartd-disassembler",
         "libdexfiled",
+        "libartbased",
         "libprofiled",
         "libbase",
     ],
@@ -82,6 +84,7 @@
         "libart",
         "libdexfile",
         "libprofile",
+        "libartbase",
         "libart-compiler",
         "libart-disassembler",
         "libvixl-arm",
@@ -117,6 +120,7 @@
         "libartd",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
         "libartd-compiler",
         "libartd-disassembler",
         "libvixld-arm",
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 44050ff..5c20efa 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -753,7 +753,7 @@
       kByteKindQuickMethodHeader,
       kByteKindCodeInfoLocationCatalog,
       kByteKindCodeInfoDexRegisterMap,
-      kByteKindCodeInfoEncoding,
+      kByteKindCodeInfo,
       kByteKindCodeInfoInvokeInfo,
       kByteKindCodeInfoStackMasks,
       kByteKindCodeInfoRegisterMasks,
@@ -800,7 +800,7 @@
       if (sum > 0) {
         Dump(os, "Code                            ", bits[kByteKindCode], sum);
         Dump(os, "QuickMethodHeader               ", bits[kByteKindQuickMethodHeader], sum);
-        Dump(os, "CodeInfoEncoding                ", bits[kByteKindCodeInfoEncoding], sum);
+        Dump(os, "CodeInfo                        ", bits[kByteKindCodeInfo], sum);
         Dump(os, "CodeInfoLocationCatalog         ", bits[kByteKindCodeInfoLocationCatalog], sum);
         Dump(os, "CodeInfoDexRegisterMap          ", bits[kByteKindCodeInfoDexRegisterMap], sum);
         Dump(os, "CodeInfoStackMasks              ", bits[kByteKindCodeInfoStackMasks], sum);
@@ -819,7 +819,7 @@
                stack_map_bits,
                "stack map");
           Dump(os,
-               "StackMapDexPcEncoding         ",
+               "StackMapDexPc                 ",
                bits[kByteKindStackMapDexPc],
                stack_map_bits,
                "stack map");
@@ -1732,8 +1732,7 @@
    public:
     explicit StackMapsHelper(const uint8_t* raw_code_info, InstructionSet instruction_set)
         : code_info_(raw_code_info),
-          encoding_(code_info_.ExtractEncoding()),
-          number_of_stack_maps_(code_info_.GetNumberOfStackMaps(encoding_)),
+          number_of_stack_maps_(code_info_.GetNumberOfStackMaps()),
           indexes_(),
           offset_(static_cast<uint32_t>(-1)),
           stack_map_index_(0u),
@@ -1741,11 +1740,11 @@
       if (number_of_stack_maps_ != 0u) {
         // Check if native PCs are ordered.
         bool ordered = true;
-        StackMap last = code_info_.GetStackMapAt(0u, encoding_);
+        StackMap last = code_info_.GetStackMapAt(0u);
         for (size_t i = 1; i != number_of_stack_maps_; ++i) {
-          StackMap current = code_info_.GetStackMapAt(i, encoding_);
-          if (last.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set) >
-              current.GetNativePcOffset(encoding_.stack_map.encoding, instruction_set)) {
+          StackMap current = code_info_.GetStackMapAt(i);
+          if (last.GetNativePcOffset(instruction_set) >
+              current.GetNativePcOffset(instruction_set)) {
             ordered = false;
             break;
           }
@@ -1760,18 +1759,15 @@
           std::sort(indexes_.begin(),
                     indexes_.end(),
                     [this](size_t lhs, size_t rhs) {
-                      StackMap left = code_info_.GetStackMapAt(lhs, encoding_);
-                      uint32_t left_pc = left.GetNativePcOffset(encoding_.stack_map.encoding,
-                                                                instruction_set_);
-                      StackMap right = code_info_.GetStackMapAt(rhs, encoding_);
-                      uint32_t right_pc = right.GetNativePcOffset(encoding_.stack_map.encoding,
-                                                                  instruction_set_);
+                      StackMap left = code_info_.GetStackMapAt(lhs);
+                      uint32_t left_pc = left.GetNativePcOffset(instruction_set_);
+                      StackMap right = code_info_.GetStackMapAt(rhs);
+                      uint32_t right_pc = right.GetNativePcOffset(instruction_set_);
                       // If the PCs are the same, compare indexes to preserve the original order.
                       return (left_pc < right_pc) || (left_pc == right_pc && lhs < rhs);
                     });
         }
-        offset_ = GetStackMapAt(0).GetNativePcOffset(encoding_.stack_map.encoding,
-                                                     instruction_set_);
+        offset_ = GetStackMapAt(0).GetNativePcOffset(instruction_set_);
       }
     }
 
@@ -1779,10 +1775,6 @@
       return code_info_;
     }
 
-    const CodeInfoEncoding& GetEncoding() const {
-      return encoding_;
-    }
-
     uint32_t GetOffset() const {
       return offset_;
     }
@@ -1795,8 +1787,7 @@
       ++stack_map_index_;
       offset_ = (stack_map_index_ == number_of_stack_maps_)
           ? static_cast<uint32_t>(-1)
-          : GetStackMapAt(stack_map_index_).GetNativePcOffset(encoding_.stack_map.encoding,
-                                                              instruction_set_);
+          : GetStackMapAt(stack_map_index_).GetNativePcOffset(instruction_set_);
     }
 
    private:
@@ -1805,11 +1796,10 @@
         i = indexes_[i];
       }
       DCHECK_LT(i, number_of_stack_maps_);
-      return code_info_.GetStackMapAt(i, encoding_);
+      return code_info_.GetStackMapAt(i);
     }
 
     const CodeInfo code_info_;
-    const CodeInfoEncoding encoding_;
     const size_t number_of_stack_maps_;
     dchecked_vector<size_t> indexes_;  // Used if stack map native PCs are not ordered.
     uint32_t offset_;
@@ -1835,79 +1825,75 @@
       StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
       MethodInfo method_info(oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo());
       {
-        CodeInfoEncoding encoding(helper.GetEncoding());
-        StackMapEncoding stack_map_encoding(encoding.stack_map.encoding);
-        const size_t num_stack_maps = encoding.stack_map.num_entries;
-        if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfoEncoding,
-                                   encoding.HeaderSize() * kBitsPerByte,
+        const CodeInfo code_info = helper.GetCodeInfo();
+        const BitTable<StackMap::kCount>& stack_maps = code_info.stack_maps_;
+        const size_t num_stack_maps = stack_maps.NumRows();
+        if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfo,
+                                   code_info.size_ * kBitsPerByte,
                                    oat_method.GetVmapTable())) {
           // Stack maps
           stats_.AddBits(
               Stats::kByteKindStackMapNativePc,
-              stack_map_encoding.GetNativePcEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kNativePcOffset) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapDexPc,
-              stack_map_encoding.GetDexPcEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kDexPc) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapDexRegisterMap,
-              stack_map_encoding.GetDexRegisterMapEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kDexRegisterMapOffset) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapInlineInfoIndex,
-              stack_map_encoding.GetInlineInfoEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kInlineInfoIndex) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapRegisterMaskIndex,
-              stack_map_encoding.GetRegisterMaskIndexEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kRegisterMaskIndex) * num_stack_maps);
           stats_.AddBits(
               Stats::kByteKindStackMapStackMaskIndex,
-              stack_map_encoding.GetStackMaskIndexEncoding().BitSize() * num_stack_maps);
+              stack_maps.NumColumnBits(StackMap::kStackMaskIndex) * num_stack_maps);
 
           // Stack masks
           stats_.AddBits(
               Stats::kByteKindCodeInfoStackMasks,
-              encoding.stack_mask.encoding.BitSize() * encoding.stack_mask.num_entries);
+              code_info.stack_masks_.size_in_bits());
 
           // Register masks
           stats_.AddBits(
               Stats::kByteKindCodeInfoRegisterMasks,
-              encoding.register_mask.encoding.BitSize() * encoding.register_mask.num_entries);
+              code_info.register_masks_.DataBitSize());
 
           // Invoke infos
-          if (encoding.invoke_info.num_entries > 0u) {
-            stats_.AddBits(
-                Stats::kByteKindCodeInfoInvokeInfo,
-                encoding.invoke_info.encoding.BitSize() * encoding.invoke_info.num_entries);
-          }
+          stats_.AddBits(
+              Stats::kByteKindCodeInfoInvokeInfo,
+              code_info.invoke_infos_.DataBitSize());
 
           // Location catalog
           const size_t location_catalog_bytes =
-              helper.GetCodeInfo().GetDexRegisterLocationCatalogSize(encoding);
+              helper.GetCodeInfo().GetDexRegisterLocationCatalogSize();
           stats_.AddBits(Stats::kByteKindCodeInfoLocationCatalog,
                          kBitsPerByte * location_catalog_bytes);
           // Dex register bytes.
           const size_t dex_register_bytes =
-              helper.GetCodeInfo().GetDexRegisterMapsSize(encoding,
-                                                          code_item_accessor.RegistersSize());
+              helper.GetCodeInfo().GetDexRegisterMapsSize(code_item_accessor.RegistersSize());
           stats_.AddBits(
               Stats::kByteKindCodeInfoDexRegisterMap,
               kBitsPerByte * dex_register_bytes);
 
           // Inline infos.
-          const size_t num_inline_infos = encoding.inline_info.num_entries;
+          const BitTable<InlineInfo::kCount>& inline_infos = code_info.inline_infos_;
+          const size_t num_inline_infos = inline_infos.NumRows();
           if (num_inline_infos > 0u) {
             stats_.AddBits(
                 Stats::kByteKindInlineInfoMethodIndexIdx,
-                encoding.inline_info.encoding.GetMethodIndexIdxEncoding().BitSize() *
-                    num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kMethodIndexIdx) * num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoDexPc,
-                encoding.inline_info.encoding.GetDexPcEncoding().BitSize() * num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kDexPc) * num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoExtraData,
-                encoding.inline_info.encoding.GetExtraDataEncoding().BitSize() * num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kExtraData) * num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoDexRegisterMap,
-                encoding.inline_info.encoding.GetDexRegisterMapEncoding().BitSize() *
-                    num_inline_infos);
+                inline_infos.NumColumnBits(InlineInfo::kDexRegisterMapOffset) * num_inline_infos);
             stats_.AddBits(Stats::kByteKindInlineInfoIsLast, num_inline_infos);
           }
         }
@@ -1922,7 +1908,6 @@
           DCHECK(stack_map.IsValid());
           stack_map.Dump(vios,
                          helper.GetCodeInfo(),
-                         helper.GetEncoding(),
                          method_info,
                          oat_method.GetCodeOffset(),
                          code_item_accessor.RegistersSize(),
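The stats above now read their figures straight from the BitTable-backed CodeInfo: each per-column byte kind is simply that column's fixed bit width multiplied by the number of rows, and DataBitSize() sums the same product over every column. A simplified stand-in (not ART's real BitTable API, illustration only):

```cpp
#include <cstddef>
#include <vector>

// Simplified model of a bit-packed table: every row stores each column with the
// same fixed width, so per-column and whole-table sizes are plain products.
struct SimpleBitTable {
  size_t num_rows = 0;
  std::vector<size_t> column_bits;  // bit width of each column

  // What NumColumnBits(column) * num_rows tallies above.
  size_t ColumnDataBits(size_t column) const { return column_bits[column] * num_rows; }

  // Analogue of DataBitSize() above: the whole table body.
  size_t DataBitSize() const {
    size_t total = 0;
    for (size_t bits : column_bits) {
      total += bits * num_rows;
    }
    return total;
  }
};
```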
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index b85730d..bbe89ca 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -162,7 +162,6 @@
         // Code and dex code do not show up if list only.
         expected_prefixes.push_back("DEX CODE:");
         expected_prefixes.push_back("CODE:");
-        expected_prefixes.push_back("CodeInfoEncoding");
         expected_prefixes.push_back("CodeInfoInlineInfo");
       }
       if (mode == kModeArt) {
diff --git a/openjdkjvm/Android.bp b/openjdkjvm/Android.bp
index a178993..907315e 100644
--- a/openjdkjvm/Android.bp
+++ b/openjdkjvm/Android.bp
@@ -29,7 +29,10 @@
 art_cc_library {
     name: "libopenjdkjvm",
     defaults: ["libopenjdkjvm_defaults"],
-    shared_libs: ["libart"],
+    shared_libs: [
+        "libart",
+        "libartbase",
+    ],
 }
 
 art_cc_library {
@@ -38,5 +41,8 @@
         "art_debug_defaults",
         "libopenjdkjvm_defaults",
     ],
-    shared_libs: ["libartd"],
+    shared_libs: [
+        "libartd",
+        "libartbased",
+    ],
 }
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index 81b69e8..d8902d6 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -71,6 +71,7 @@
         "libart-compiler",
         "libart-dexlayout",
         "libdexfile",
+        "libartbase",
     ],
 }
 
@@ -85,5 +86,6 @@
         "libartd-compiler",
         "libartd-dexlayout",
         "libdexfiled",
+        "libartbased",
     ],
 }
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
index 0e8e517..13c8f47 100644
--- a/patchoat/Android.bp
+++ b/patchoat/Android.bp
@@ -25,6 +25,7 @@
         },
     },
     shared_libs: [
+        "libartbase",
         "libbase",
         "libcrypto", // For computing the digest of image file
     ],
@@ -58,7 +59,6 @@
         "patchoat_test.cc",
     ],
     shared_libs: [
-        "libartd",
         "libcrypto", // For computing the digest of image file
     ],
 }
diff --git a/profman/Android.bp b/profman/Android.bp
index 3c8c72c..5aaccb0 100644
--- a/profman/Android.bp
+++ b/profman/Android.bp
@@ -42,6 +42,7 @@
         "libart",
         "libprofile",
         "libdexfile",
+        "libartbase",
     ],
 }
 
@@ -55,6 +56,7 @@
         "libartd",
         "libprofiled",
         "libdexfiled",
+        "libartbased",
     ],
 }
 
diff --git a/profman/profman.cc b/profman/profman.cc
index cd88d03..1f77239 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -33,6 +33,7 @@
 
 #include "base/dumpable.h"
 #include "base/logging.h"  // For InitLogging.
+#include "base/mem_map.h"
 #include "base/scoped_flock.h"
 #include "base/stringpiece.h"
 #include "base/time_utils.h"
@@ -49,7 +50,6 @@
 #include "dex/type_reference.h"
 #include "profile/profile_compilation_info.h"
 #include "profile_assistant.h"
-#include "runtime.h"
 
 namespace art {
 
@@ -177,6 +177,11 @@
 static constexpr char kMethodFlagStringStartup = 'S';
 static constexpr char kMethodFlagStringPostStartup = 'P';
 
+NO_RETURN static void Abort(const char* msg) {
+  LOG(ERROR) << msg;
+  exit(1);
+}
+
 // TODO(calin): This class has grown too much from its initial design. Split the functionality
 // into smaller, more contained pieces.
 class ProfMan FINAL {
@@ -202,8 +207,8 @@
     original_argc = argc;
     original_argv = argv;
 
-    Locks::Init();
-    InitLogging(argv, Runtime::Abort);
+    MemMap::Init();
+    InitLogging(argv, Abort);
 
     // Skip over the command name.
     argv++;
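profman now starts without the full runtime: it initializes MemMap directly and installs a local Abort() as the logging abort hook instead of Runtime::Abort. A generic, self-contained sketch of that wiring (none of the names below are ART APIs; they only illustrate the callback pattern):

```cpp
#include <cstdio>
#include <cstdlib>

using AbortFunction = void (*)(const char*);

static AbortFunction g_abort_hook = nullptr;

// Stand-in for InitLogging(argv, Abort) in the hunk above: remember the hook.
void InitLoggingLike(AbortFunction hook) { g_abort_hook = hook; }

// Stand-in for a fatal log: report, then funnel through the installed hook.
[[noreturn]] void FatalLike(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  g_abort_hook(msg);
  std::abort();  // unreachable; keeps [[noreturn]] honest if the hook returned
}

// Mirrors profman's new Abort(): log and exit(1) instead of Runtime::Abort().
[[noreturn]] static void Abort(const char* msg) {
  std::fprintf(stderr, "%s\n", msg);
  std::exit(1);
}

int main(int, char** /*argv*/) {
  InitLoggingLike(Abort);
  // A fatal condition in the tool would now go FatalLike(...) -> Abort -> exit(1).
  return 0;
}
```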
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 64e6796..92607f5 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -23,7 +23,7 @@
     "-Wl,--keep-unique,__dex_debug_register_code"
 ]
 
-cc_defaults {
+libart_cc_defaults {
     name: "libart_defaults",
     defaults: ["art_defaults"],
     host_supported: true,
@@ -32,7 +32,6 @@
         "art_field.cc",
         "art_method.cc",
         "barrier.cc",
-        "base/file_utils.cc",
         "base/mem_map_arena_pool.cc",
         "base/mutex.cc",
         "base/quasi_atomic.cc",
@@ -46,7 +45,6 @@
         "compiler_filter.cc",
         "debug_print.cc",
         "debugger.cc",
-        "dex/art_dex_file_loader.cc",
         "dex/dex_file_annotations.cc",
         "dex_to_dex_decompiler.cc",
         "elf_file.cc",
@@ -196,6 +194,7 @@
         "ti/agent.cc",
         "trace.cc",
         "transaction.cc",
+        "var_handles.cc",
         "vdex_file.cc",
         "verifier/instruction_flags.cc",
         "verifier/method_verifier.cc",
@@ -207,7 +206,6 @@
         "well_known_classes.cc",
 
         "arch/context.cc",
-        "arch/instruction_set.cc",
         "arch/instruction_set_features.cc",
         "arch/memcmp16.cc",
         "arch/arm/instruction_set_features_arm.cc",
@@ -396,7 +394,6 @@
         "libbacktrace",
         "liblz4",
         "liblog",
-        "libmetricslogger",
         // For atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
         "libcutils",
         // For common macros.
@@ -420,7 +417,6 @@
     cmd: "$(location generate_operator_out) art/runtime $(in) > $(out)",
     tools: ["generate_operator_out"],
     srcs: [
-        "arch/instruction_set.h",
         "base/mutex.h",
         "class_loader_context.h",
         "class_status.h",
@@ -467,9 +463,9 @@
         keep_symbols: true,
     },
     whole_static_libs: [
-        "libartbase",
     ],
     shared_libs: [
+        "libartbase",
         "libdexfile",
         "libprofile",
     ],
@@ -492,9 +488,9 @@
         "libart_defaults",
     ],
     whole_static_libs: [
-        "libartbased",
     ],
     shared_libs: [
+        "libartbased",
         "libdexfiled",
         "libprofiled",
     ],
@@ -531,7 +527,6 @@
     ],
     srcs: [
         "arch/arch_test.cc",
-        "arch/instruction_set_test.cc",
         "arch/instruction_set_features_test.cc",
         "arch/memcmp16_test.cc",
         "arch/stub_test.cc",
@@ -542,7 +537,6 @@
         "arch/x86/instruction_set_features_x86_test.cc",
         "arch/x86_64/instruction_set_features_x86_64_test.cc",
         "barrier_test.cc",
-        "base/file_utils_test.cc",
         "base/mutex_test.cc",
         "base/timing_logger_test.cc",
         "cha_test.cc",
@@ -550,7 +544,6 @@
         "class_loader_context_test.cc",
         "class_table_test.cc",
         "compiler_filter_test.cc",
-        "dex/art_dex_file_loader_test.cc",
         "entrypoints/math_entrypoints_test.cc",
         "entrypoints/quick/quick_trampoline_entrypoints_test.cc",
         "entrypoints_order_test.cc",
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 1ba4070..d4ceede 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -18,6 +18,7 @@
 
 #include "art_method-inl.h"
 #include "base/callee_save_type.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "common_runtime_test.h"
 #include "quick/quick_method_frame_info.h"
 
@@ -57,21 +58,6 @@
   void FinalizeSetup() OVERRIDE {
     ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
   }
-
-  static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
-      NO_THREAD_SAFETY_ANALYSIS {
-    Runtime* const runtime = Runtime::Current();
-    Thread* const self = Thread::Current();
-    ScopedObjectAccess soa(self);  // So we can create callee-save methods.
-
-    runtime->SetInstructionSet(isa);
-    ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
-    runtime->SetCalleeSaveMethod(save_method, type);
-    QuickMethodFrameInfo frame_info =  runtime->GetRuntimeMethodFrameInfo(save_method);
-    EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
-        << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
-        << frame_info.FpSpillMask() << std::dec;
-  }
 };
 
 TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
@@ -205,26 +191,20 @@
 }  // namespace x86_64
 
 // Check architecture specific constants are sound.
-#define TEST_ARCH(Arch, arch)                                       \
-  TEST_F(ArchTest, Arch) {                                          \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveAllCalleeSaves,             \
-                   arch::kFrameSizeSaveAllCalleeSaves);             \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveRefsOnly,                   \
-                   arch::kFrameSizeSaveRefsOnly);                   \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveRefsAndArgs,                \
-                   arch::kFrameSizeSaveRefsAndArgs);                \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveEverything,                 \
-                   arch::kFrameSizeSaveEverything);                 \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveEverythingForClinit,        \
-                   arch::kFrameSizeSaveEverythingForClinit);        \
-    CheckFrameSize(InstructionSet::k##Arch,                         \
-                   CalleeSaveType::kSaveEverythingForSuspendCheck,  \
-                   arch::kFrameSizeSaveEverythingForSuspendCheck);  \
+// We expect the return PC to be stored at the highest address slot in the frame.
+#define TEST_ARCH_TYPE(Arch, arch, type)                                              \
+  EXPECT_EQ(arch::Arch##CalleeSaveFrame::GetFrameSize(CalleeSaveType::k##type),       \
+            arch::kFrameSize##type);                                                  \
+  EXPECT_EQ(arch::Arch##CalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::k##type),  \
+            arch::kFrameSize##type - static_cast<size_t>(k##Arch##PointerSize))
+#define TEST_ARCH(Arch, arch)                                   \
+  TEST_F(ArchTest, Arch) {                                      \
+    TEST_ARCH_TYPE(Arch, arch, SaveAllCalleeSaves);             \
+    TEST_ARCH_TYPE(Arch, arch, SaveRefsOnly);                   \
+    TEST_ARCH_TYPE(Arch, arch, SaveRefsAndArgs);                \
+    TEST_ARCH_TYPE(Arch, arch, SaveEverything);                 \
+    TEST_ARCH_TYPE(Arch, arch, SaveEverythingForClinit);        \
+    TEST_ARCH_TYPE(Arch, arch, SaveEverythingForSuspendCheck);  \
   }
 TEST_ARCH(Arm, arm)
 TEST_ARCH(Arm64, arm64)
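With CheckFrameSize gone, the test no longer creates callee-save methods through the runtime; it compares the constexpr frame layout against the per-arch constants directly. For one concrete case, TEST_ARCH_TYPE(Arm, arm, SaveAllCalleeSaves) expands to the two checks below (reproduced here only to show what the macro asserts, not extra test code):

```cpp
EXPECT_EQ(arm::ArmCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveAllCalleeSaves),
          arm::kFrameSizeSaveAllCalleeSaves);
EXPECT_EQ(arm::ArmCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveAllCalleeSaves),
          arm::kFrameSizeSaveAllCalleeSaves - static_cast<size_t>(kArmPointerSize));
```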
diff --git a/runtime/arch/arm/callee_save_frame_arm.h b/runtime/arch/arm/callee_save_frame_arm.h
new file mode 100644
index 0000000..11eefb9
--- /dev/null
+++ b/runtime/arch/arm/callee_save_frame_arm.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_arm.h"
+
+namespace art {
+namespace arm {
+
+static constexpr uint32_t kArmCalleeSaveAlwaysSpills =
+    (1 << art::arm::LR);
+static constexpr uint32_t kArmCalleeSaveRefSpills =
+    (1 << art::arm::R5) | (1 << art::arm::R6)  | (1 << art::arm::R7) | (1 << art::arm::R8) |
+    (1 << art::arm::R10) | (1 << art::arm::R11);
+static constexpr uint32_t kArmCalleeSaveArgSpills =
+    (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
+static constexpr uint32_t kArmCalleeSaveAllSpills =
+    (1 << art::arm::R4) | (1 << art::arm::R9);
+static constexpr uint32_t kArmCalleeSaveEverythingSpills =
+    (1 << art::arm::R0) | (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3) |
+    (1 << art::arm::R4) | (1 << art::arm::R9) | (1 << art::arm::R12);
+
+static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
+static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
+static constexpr uint32_t kArmCalleeSaveFpArgSpills =
+    (1 << art::arm::S0)  | (1 << art::arm::S1)  | (1 << art::arm::S2)  | (1 << art::arm::S3)  |
+    (1 << art::arm::S4)  | (1 << art::arm::S5)  | (1 << art::arm::S6)  | (1 << art::arm::S7)  |
+    (1 << art::arm::S8)  | (1 << art::arm::S9)  | (1 << art::arm::S10) | (1 << art::arm::S11) |
+    (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15);
+static constexpr uint32_t kArmCalleeSaveFpAllSpills =
+    (1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
+    (1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
+    (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
+    (1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
+static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
+    kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
+
+class ArmCalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
+  }
+
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kArmPointerSize);
+  }
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
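As a cross-check on the constants this new header centralizes, the kSaveAllCalleeSaves frame can be re-derived by hand (assuming ART's usual 16-byte kStackAlignment, which is not shown in this diff); the result matches the FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12 guard in quick_entrypoints_arm.S further down. A standalone sketch of the arithmetic, not ART code:

```cpp
// For kSaveAllCalleeSaves:
//   core spills: LR | {R5,R6,R7,R8,R10,R11} | {R4,R9}  ->  9 registers
//   fp   spills: {S16..S31}                            -> 16 registers
//   slots: 9 + 16 + 1 (ArtMethod*) = 26; 26 * 4 = 104 bytes, rounded up to 112.
#include <cstdint>
#include <cstdio>

constexpr uint32_t RoundUpTo(uint32_t x, uint32_t n) { return ((x + n - 1u) / n) * n; }

int main() {
  constexpr uint32_t kGprSpills = 9u;    // POPCOUNT(GetCoreSpills(kSaveAllCalleeSaves))
  constexpr uint32_t kFprSpills = 16u;   // POPCOUNT(GetFpSpills(kSaveAllCalleeSaves))
  constexpr uint32_t kPointerSize = 4u;  // kArmPointerSize
  constexpr uint32_t kStackAlign = 16u;  // assumed kStackAlignment
  constexpr uint32_t kFrameSize =
      RoundUpTo((kGprSpills + kFprSpills + 1u /* ArtMethod* */) * kPointerSize, kStackAlign);
  static_assert(kFrameSize == 112u, "36 + 64 + 12, as checked in the assembly");
  static_assert(kFrameSize - kPointerSize == 108u, "return PC lives in the top slot");
  std::printf("frame=%u bytes, return PC offset=%u\n", kFrameSize, kFrameSize - kPointerSize);
  return 0;
}
```

The identity GetReturnPcOffset(type) == GetFrameSize(type) - kArmPointerSize is exactly what the rewritten arch_test above now asserts for every callee-save type.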
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 801254f..608999b 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -46,9 +46,11 @@
       "cortex-a53",
       "cortex-a53.a57",
       "cortex-a53.a72",
+      "cortex-a55",
       "cortex-a57",
       "cortex-a72",
       "cortex-a73",
+      "cortex-a75",
       "exynos-m1",
       "denver",
       "kryo"
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 1a1f4ed..311e838 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -55,7 +55,7 @@
     @ Load kSaveAllCalleeSaves Method* into rTemp.
     ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
     str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 
      // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
@@ -86,7 +86,7 @@
     @ Load kSaveRefsOnly Method* into rTemp.
     ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
     str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_REFS_ONLY != 28 + 4)
@@ -147,13 +147,13 @@
     @ Load kSaveRefsAndArgs Method* into rTemp.
     ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
     str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 .endm
 
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
     SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
     str r0, [sp, #0]                              @ Store ArtMethod* to bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 .endm
 
 .macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
@@ -193,7 +193,7 @@
     @ Load kSaveEverything Method* into rTemp.
     ldr \rTemp, [\rTemp, #\runtime_method_offset]
     str \rTemp, [sp, #0]                @ Place Method* at bottom of stack.
-    str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 
     // Ugly compile-time check, but we only have the preprocessor.
 #if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8)
@@ -301,7 +301,7 @@
      * exception is Thread::Current()->exception_ when the runtime method frame is ready.
      */
 .macro DELIVER_PENDING_EXCEPTION_FRAME_READY
-    mov    r0, r9                              @ pass Thread::Current
+    mov    r0, rSELF                           @ pass Thread::Current
     bl     artDeliverPendingExceptionFromCode  @ artDeliverPendingExceptionFromCode(Thread*)
 .endm
 
@@ -318,7 +318,7 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0       @ save all registers as basis for long jump context
-    mov r0, r9                      @ pass Thread::Current
+    mov r0, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
@@ -327,7 +327,7 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_EVERYTHING_FRAME r0  @ save all registers as basis for long jump context
-    mov r0, r9                      @ pass Thread::Current
+    mov r0, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
@@ -336,7 +336,7 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1       @ save all registers as basis for long jump context
-    mov r1, r9                      @ pass Thread::Current
+    mov r1, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
@@ -345,13 +345,13 @@
     .extern \cxx_name
 ENTRY \c_name
     SETUP_SAVE_EVERYTHING_FRAME r2  @ save all registers as basis for long jump context
-    mov r2, r9                      @ pass Thread::Current
+    mov r2, rSELF                   @ pass Thread::Current
     bl  \cxx_name                   @ \cxx_name(Thread*)
 END \c_name
 .endm
 
 .macro  RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
-    ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET]   // Get exception field.
+    ldr \reg, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ Get exception field.
     cbnz \reg, 1f
     bx lr
 1:
@@ -377,7 +377,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r1        @ save callee saves in case of GC
-    mov    r1, r9                        @ pass Thread::Current
+    mov    r1, rSELF                     @ pass Thread::Current
     bl     \entrypoint                   @ (uint32_t field_idx, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -389,7 +389,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r2        @ save callee saves in case of GC
-    mov    r2, r9                        @ pass Thread::Current
+    mov    r2, rSELF                     @ pass Thread::Current
     bl     \entrypoint                   @ (field_idx, Object*, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -401,7 +401,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r3        @ save callee saves in case of GC
-    mov    r3, r9                        @ pass Thread::Current
+    mov    r3, rSELF                     @ pass Thread::Current
     bl     \entrypoint                   @ (field_idx, Object*, new_val, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME         @ TODO: we can clearly save an add here
     REFRESH_MARKING_REGISTER
@@ -448,7 +448,7 @@
     @ save all registers as basis for long jump context
     SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1
     mov r0, lr                      @ pass the fault address stored in LR by the fault handler.
-    mov r1, r9                      @ pass Thread::Current
+    mov r1, rSELF                   @ pass Thread::Current
     bl  artThrowNullPointerExceptionFromSignal  @ (Thread*)
 END art_quick_throw_null_pointer_exception_from_signal
 
@@ -494,7 +494,7 @@
 .macro INVOKE_TRAMPOLINE_BODY cxx_name
     .extern \cxx_name
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2     @ save callee saves in case allocation triggers GC
-    mov    r2, r9                         @ pass Thread::Current
+    mov    r2, rSELF                      @ pass Thread::Current
     mov    r3, sp
     bl     \cxx_name                      @ (method_idx, this, Thread*, SP)
     mov    r12, r1                        @ save Method*->code_
@@ -682,50 +682,48 @@
      */
     .extern artLockObjectFromCode
 ENTRY art_quick_lock_object
+    ldr    r1, [rSELF, #THREAD_ID_OFFSET]
     cbz    r0, .Lslow_lock
 .Lretry_lock:
-    ldr    r2, [r9, #THREAD_ID_OFFSET]
-    ldrex  r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
-    mov    r3, r1
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits
-    cbnz   r3, .Lnot_unlocked         @ already thin locked
-    @ unlocked case - r1: original lock word that's zero except for the read barrier bits.
-    orr    r2, r1, r2                 @ r2 holds thread id with count of 0 with preserved read barrier bits
-    strex  r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
-    cbnz   r3, .Llock_strex_fail      @ store failed, retry
-    dmb    ish                        @ full (LoadLoad|LoadStore) memory barrier
+    ldrex  r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    eor    r3, r2, r1                 @ Prepare the value to store if unlocked
+                                      @   (thread id, count of 0 and preserved read barrier bits),
+                                      @ or prepare to compare thread id for recursive lock check
+                                      @   (lock_word.ThreadId() ^ self->ThreadId()).
+    ands   ip, r2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ Test the non-gc bits.
+    bne    .Lnot_unlocked             @ Check if unlocked.
+    @ unlocked case - store r3: original lock word plus thread id, preserved read barrier bits.
+    strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    cbnz   r2, .Llock_strex_fail      @ If store failed, retry.
+    dmb    ish                        @ Full (LoadLoad|LoadStore) memory barrier.
     bx lr
-.Lnot_unlocked:  @ r1: original lock word, r2: thread_id with count of 0 and zero read barrier bits
-    lsr    r3, r1, LOCK_WORD_STATE_SHIFT
-    cbnz   r3, .Lslow_lock            @ if either of the top two bits are set, go slow path
-    eor    r2, r1, r2                 @ lock_word.ThreadId() ^ self->ThreadId()
-    uxth   r2, r2                     @ zero top 16 bits
-    cbnz   r2, .Lslow_lock            @ lock word and self thread id's match -> recursive lock
-                                      @ else contention, go to slow path
-    mov    r3, r1                     @ copy the lock word to check count overflow.
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits.
-    add    r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ increment count in lock word placing in r2 to check overflow
-    lsr    r3, r2, #LOCK_WORD_GC_STATE_SHIFT    @ if the first gc state bit is set, we overflowed.
-    cbnz   r3, .Lslow_lock            @ if we overflow the count go slow path
-    add    r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ increment count for real
-    strex  r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
-    cbnz   r3, .Llock_strex_fail      @ strex failed, retry
+.Lnot_unlocked:  @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+                                      @ Check lock word state and thread id together,
+    bfc    r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+    cbnz   r3, .Lslow_lock            @ if either of the top two bits is set, or the lock word's
+                                      @ thread id did not match, go slow path.
+    add    r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ Increment the recursive lock count.
+                                      @ Extract the new thin lock count for overflow check.
+    ubfx   r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+    cbz    r2, .Lslow_lock            @ Zero as the new count indicates overflow, go slow path.
+    strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits.
+    cbnz   r2, .Llock_strex_fail      @ If strex failed, retry.
     bx lr
 .Llock_strex_fail:
     b      .Lretry_lock               @ retry
-.Lslow_lock:
-    SETUP_SAVE_REFS_ONLY_FRAME r1     @ save callee saves in case we block
-    mov    r1, r9                     @ pass Thread::Current
-    bl     artLockObjectFromCode      @ (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually art_quick_lock_object_no_inline (a tail call).
 END art_quick_lock_object
 
 ENTRY art_quick_lock_object_no_inline
+    // This is also the slow path for art_quick_lock_object. Note that we
+    // need a local label; the assembler complains about the branch target
+    // being out of range if we try to jump to `art_quick_lock_object_no_inline`.
+.Lslow_lock:
     SETUP_SAVE_REFS_ONLY_FRAME r1     @ save callee saves in case we block
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     artLockObjectFromCode      @ (Object* obj, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
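The rewritten fast path folds the lock-state check and the owner check into a single eor/bfc sequence; the #if above is what makes that legal, since one bfc can only clear the count and gc-state fields together if they are adjacent. A hedged C++ sketch of the same decision logic (bit positions are illustrative stand-ins for the LOCK_WORD_* constants; only the adjacency asserted above and the 16-bit owner field implied by the old uxth comparison are assumed, and the null check plus the strex retry loop are left out):

```cpp
#include <cstdint>
#include <optional>

constexpr uint32_t kCountShift  = 16u;  // LOCK_WORD_THIN_LOCK_COUNT_SHIFT (illustrative value)
constexpr uint32_t kCountSize   = 12u;  // LOCK_WORD_THIN_LOCK_COUNT_SIZE  (illustrative value)
constexpr uint32_t kGcStateSize = 2u;   // LOCK_WORD_GC_STATE_SIZE         (illustrative value)
constexpr uint32_t kCountOne    = 1u << kCountShift;
constexpr uint32_t kCountMask   = ((1u << kCountSize) - 1u) << kCountShift;
constexpr uint32_t kGcStateMask = ((1u << kGcStateSize) - 1u) << (kCountShift + kCountSize);
constexpr uint32_t kBfcMask     = kCountMask | kGcStateMask;  // the field span the bfc clears

// Returns the word to strex back, or nullopt to take the slow path
// (art_quick_lock_object_no_inline).
std::optional<uint32_t> ThinLockFastPath(uint32_t lock_word, uint32_t thread_id) {
  const uint32_t x = lock_word ^ thread_id;         // eor r3, r2, r1
  if ((lock_word & ~kGcStateMask) == 0u) {          // ands/bne: only gc bits set -> unlocked
    return x;                                       // owner := thread_id, gc bits preserved
  }
  if ((x & ~kBfcMask) != 0u) {                      // bfc + cbnz: fat/hash state or another owner
    return std::nullopt;
  }
  const uint32_t relocked = lock_word + kCountOne;  // bump the recursion count
  if ((relocked & kCountMask) == 0u) {              // ubfx + cbz: count wrapped -> overflow
    return std::nullopt;
  }
  return relocked;
}
```

The unlock path in the next hunk mirrors this: after the same eor, a result whose non-gc bits are all zero means the lock is held exactly once by the current thread, and that xor result is already the unlocked word to store back.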
@@ -739,62 +737,59 @@
      */
     .extern artUnlockObjectFromCode
 ENTRY art_quick_unlock_object
+    ldr    r1, [rSELF, #THREAD_ID_OFFSET]
     cbz    r0, .Lslow_unlock
 .Lretry_unlock:
 #ifndef USE_READ_BARRIER
-    ldr    r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    ldr    r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #else
-    ldrex  r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ Need to use atomic instructions for read barrier
+                                      @ Need to use atomic instructions for read barrier.
+    ldrex  r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #endif
-    lsr    r2, r1, #LOCK_WORD_STATE_SHIFT
-    cbnz   r2, .Lslow_unlock          @ if either of the top two bits are set, go slow path
-    ldr    r2, [r9, #THREAD_ID_OFFSET]
-    mov    r3, r1                     @ copy lock word to check thread id equality
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits
-    eor    r3, r3, r2                 @ lock_word.ThreadId() ^ self->ThreadId()
-    uxth   r3, r3                     @ zero top 16 bits
-    cbnz   r3, .Lslow_unlock          @ do lock word and self thread id's match?
-    mov    r3, r1                     @ copy lock word to detect transition to unlocked
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ zero the gc bits
-    cmp    r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
-    bpl    .Lrecursive_thin_unlock
-    @ transition to unlocked
-    mov    r3, r1
-    and    r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED  @ r3: zero except for the preserved gc bits
-    dmb    ish                        @ full (LoadStore|StoreStore) memory barrier
+    eor    r3, r2, r1                 @ Prepare the value to store if simply locked
+                                      @   (mostly 0s, and preserved read barrier bits),
+                                      @ or prepare to compare thread id for recursive lock check
+                                      @   (lock_word.ThreadId() ^ self->ThreadId()).
+    ands   ip, r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  @ Test the non-gc bits.
+    bne    .Lnot_simply_locked        @ Locked recursively or by other thread?
+    @ Transition to unlocked.
+    dmb    ish                        @ Full (LoadStore|StoreStore) memory barrier.
 #ifndef USE_READ_BARRIER
     str    r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #else
     strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits
-    cbnz   r2, .Lunlock_strex_fail    @ store failed, retry
+    cbnz   r2, .Lunlock_strex_fail    @ If the store failed, retry.
 #endif
     bx     lr
-.Lrecursive_thin_unlock:  @ r1: original lock word
-    sub    r1, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ decrement count
+.Lnot_simply_locked:  @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+                                      @ Check lock word state and thread id together,
+    bfc    r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+    cbnz   r3, .Lslow_unlock          @ if either of the top two bits is set, or the lock word's
+                                      @ thread id did not match, go slow path.
+    sub    r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  @ Decrement recursive lock count.
 #ifndef USE_READ_BARRIER
-    str    r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    str    r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
 #else
-    strex  r2, r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits
-    cbnz   r2, .Lunlock_strex_fail    @ store failed, retry
+    strex  r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]  @ strex necessary for read barrier bits.
+    cbnz   r2, .Lunlock_strex_fail    @ If the store failed, retry.
 #endif
     bx     lr
 .Lunlock_strex_fail:
     b      .Lretry_unlock             @ retry
-.Lslow_unlock:
-    @ save callee saves in case exception allocation triggers GC
-    SETUP_SAVE_REFS_ONLY_FRAME r1
-    mov    r1, r9                     @ pass Thread::Current
-    bl     artUnlockObjectFromCode    @ (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually art_quick_unlock_object_no_inline (a tail call).
 END art_quick_unlock_object
 
 ENTRY art_quick_unlock_object_no_inline
+    // This is also the slow path for art_quick_unlock_object. Note that we
+    // need a local label; the assembler complains about the branch target
+    // being out of range if we try to jump to `art_quick_unlock_object_no_inline`.
+.Lslow_unlock:
     @ save callee saves in case exception allocation triggers GC
     SETUP_SAVE_REFS_ONLY_FRAME r1
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     artUnlockObjectFromCode    @ (Object* obj, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -832,7 +827,7 @@
 
 .Lthrow_class_cast_exception_for_bitstring_check:
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2       @ save all registers as basis for long jump context
-    mov r2, r9                      @ pass Thread::Current
+    mov r2, rSELF                   @ pass Thread::Current
     bl  artThrowClassCastExceptionForObject  @ (Object*, Class*, Thread*)
     bkpt
 END art_quick_check_instance_of
@@ -917,7 +912,7 @@
     add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
     POISON_HEAP_REF r2
     str r2, [r3, r1, lsl #2]
-    ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+    ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
     lsr r0, r0, #CARD_TABLE_CARD_SHIFT
     strb r3, [r3, r0]
     blx lr
@@ -945,7 +940,7 @@
     add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
     POISON_HEAP_REF r2
     str r2, [r3, r1, lsl #2]
-    ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+    ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
     lsr r0, r0, #CARD_TABLE_CARD_SHIFT
     strb r3, [r3, r0]
     blx lr
@@ -954,7 +949,7 @@
     /* No need to repeat restore cfi directives, the ones above apply here. */
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
     mov r1, r2
-    mov r2, r9                     @ pass Thread::Current
+    mov r2, rSELF                  @ pass Thread::Current
     bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
     bkpt                           @ unreached
 END art_quick_aput_obj
@@ -964,7 +959,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r1     @ save callee saves in case of GC
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     \entrypoint     @ (uint32_t type_idx, Method* method, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -977,7 +972,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r2     @ save callee saves in case of GC
-    mov    r2, r9                     @ pass Thread::Current
+    mov    r2, rSELF                  @ pass Thread::Current
     bl     \entrypoint     @ (uint32_t type_idx, Method* method, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -990,7 +985,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r3     @ save callee saves in case of GC
-    mov    r3, r9                     @ pass Thread::Current
+    mov    r3, rSELF                  @ pass Thread::Current
     @ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
     bl     \entrypoint
     RESTORE_SAVE_REFS_ONLY_FRAME
@@ -1004,7 +999,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_REFS_ONLY_FRAME r12    @ save callee saves in case of GC
-    str    r9, [sp, #-16]!            @ expand the frame and pass Thread::Current
+    str    rSELF, [sp, #-16]!         @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
     bl     \entrypoint
     add    sp, #16                    @ strip the extra frame
@@ -1023,7 +1018,7 @@
     .extern \entrypoint
 ENTRY \name
     SETUP_SAVE_EVERYTHING_FRAME r1, \runtime_method_offset    @ save everything in case of GC
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     \entrypoint                @ (uint32_t index, Thread*)
     cbz    r0, 1f                     @ If result is null, deliver the OOME.
     .cfi_remember_state
@@ -1043,6 +1038,7 @@
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
 
@@ -1064,9 +1060,9 @@
     .extern artGet64StaticFromCompiledCode
 ENTRY art_quick_get64_static
     SETUP_SAVE_REFS_ONLY_FRAME r2        @ save callee saves in case of GC
-    mov    r1, r9                        @ pass Thread::Current
-    bl     artGet64StaticFromCompiledCode        @ (uint32_t field_idx, Thread*)
-    ldr    r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    mov    r1, rSELF                     @ pass Thread::Current
+    bl     artGet64StaticFromCompiledCode  @ (uint32_t field_idx, Thread*)
+    ldr    r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
     cbnz   r2, 1f                        @ success if no exception pending
@@ -1090,9 +1086,9 @@
     .extern artGet64InstanceFromCompiledCode
 ENTRY art_quick_get64_instance
     SETUP_SAVE_REFS_ONLY_FRAME r2        @ save callee saves in case of GC
-    mov    r2, r9                        @ pass Thread::Current
-    bl     artGet64InstanceFromCompiledCode      @ (field_idx, Object*, Thread*)
-    ldr    r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    mov    r2, rSELF                     @ pass Thread::Current
+    bl     artGet64InstanceFromCompiledCode  @ (field_idx, Object*, Thread*)
+    ldr    r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
     cbnz   r2, 1f                        @ success if no exception pending
@@ -1124,7 +1120,7 @@
 ENTRY art_quick_set64_instance
     SETUP_SAVE_REFS_ONLY_FRAME r12       @ save callee saves in case of GC
                                          @ r2:r3 contain the wide argument
-    str    r9, [sp, #-16]!               @ expand the frame and pass Thread::Current
+    str    rSELF, [sp, #-16]!            @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
     bl     artSet64InstanceFromCompiledCode      @ (field_idx, Object*, new_val, Thread*)
     add    sp, #16                       @ release out args
@@ -1139,7 +1135,7 @@
 ENTRY art_quick_set64_static
     SETUP_SAVE_REFS_ONLY_FRAME r12        @ save callee saves in case of GC
                                           @ r2:r3 contain the wide argument
-    str    r9, [sp, #-16]!                @ expand the frame and pass Thread::Current
+    str    rSELF, [sp, #-16]!             @ expand the frame and pass Thread::Current
     .cfi_adjust_cfa_offset 16
     bl     artSet64StaticFromCompiledCode @ (field_idx, new_val, Thread*)
     add    sp, #16                        @ release out args
@@ -1184,12 +1180,12 @@
 .macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
 ENTRY \c_name
     // Fast path rosalloc allocation.
-    // r0: type/return value, r9: Thread::Current
+    // r0: type/return value, rSELF (r9): Thread::Current
     // r1, r2, r3, r12: free.
-    ldr    r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]     // Check if the thread local
+    ldr    r3, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]  // Check if the thread local
                                                               // allocation stack has room.
                                                               // TODO: consider using ldrd.
-    ldr    r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+    ldr    r12, [rSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
     cmp    r3, r12
     bhs    .Lslow_path\c_name
 
@@ -1207,7 +1203,7 @@
                                                               // from the size. Since the size is
                                                               // already aligned we can combine the
                                                               // two shifts together.
-    add    r12, r9, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+    add    r12, rSELF, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
                                                               // Subtract pointer size since there
                                                               // are no runs for 0 byte allocations
                                                               // and the size is already aligned.
@@ -1235,9 +1231,9 @@
                                                               // local allocation stack and
                                                               // increment the thread local
                                                               // allocation stack top.
-    ldr    r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+    ldr    r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
     str    r3, [r1], #COMPRESSED_REFERENCE_SIZE               // (Increment r1 as a side effect.)
-    str    r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+    str    r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
                                                               // Decrement the size of the free list
 
     // After this "STR" the object is published to the thread local allocation stack,
@@ -1286,7 +1282,7 @@
 
 .Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME r2     @ save callee saves in case of GC
-    mov    r1, r9                     @ pass Thread::Current
+    mov    r1, rSELF                  @ pass Thread::Current
     bl     \cxx_name                  @ (mirror::Class* cls, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -1300,7 +1296,7 @@
 // The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
 // and art_quick_alloc_object_resolved/initialized_region_tlab.
 //
-// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
+// r0: type, rSELF (r9): Thread::Current, r1, r2, r3, r12: free.
 // Need to preserve r0 to the slow path.
 //
 // If isInitialized=1 then the compiler assumes the object's class has already been initialized.
@@ -1312,7 +1308,7 @@
 #if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
 #error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
 #endif
-    ldrd   r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
+    ldrd   r12, r3, [rSELF, #THREAD_LOCAL_POS_OFFSET]
     sub    r12, r3, r12                                       // Compute the remaining buf size.
     ldr    r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (r3).
     cmp    r3, r12                                            // Check if it fits.
@@ -1325,9 +1321,9 @@
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
                                                               // Reload old thread_local_pos (r0)
                                                               // for the return value.
-    ldr    r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+    ldr    r2, [rSELF, #THREAD_LOCAL_POS_OFFSET]
     add    r1, r2, r3
-    str    r1, [r9, #THREAD_LOCAL_POS_OFFSET]                 // Store new thread_local_pos.
+    str    r1, [rSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
     // After this "STR" the object is published to the thread local allocation stack,
     // and it will be observable from a runtime internal (eg. Heap::VisitObjects) point of view.
     // It is not yet visible to the running (user) compiled code until after the return.
@@ -1345,9 +1341,9 @@
     //
     // (Note: The actual check is done by checking that the object's class pointer is non-null.
     // Also, unlike rosalloc, the object can never be observed as null).
-    ldr    r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]             // Increment thread_local_objects.
+    ldr    r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
     add    r1, r1, #1
-    str    r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+    str    r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
     POISON_HEAP_REF r0
     str    r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
                                                               // Fence. This is "ish" not "ishst" so
@@ -1374,12 +1370,12 @@
 .macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
 ENTRY \name
     // Fast path tlab allocation.
-    // r0: type, r9: Thread::Current
+    // r0: type, rSELF (r9): Thread::Current
     // r1, r2, r3, r12: free.
     ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name, \isInitialized
 .Lslow_path\name:
     SETUP_SAVE_REFS_ONLY_FRAME r2                             // Save callee saves in case of GC.
-    mov    r1, r9                                             // Pass Thread::Current.
+    mov    r1, rSELF                                          // Pass Thread::Current.
     bl     \entrypoint                                        // (mirror::Class* klass, Thread*)
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -1396,7 +1392,7 @@
 // The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
 // and art_quick_alloc_array_resolved/initialized_region_tlab.
 //
-// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// r0: type, r1: component_count, r2: total_size, rSELF (r9): Thread::Current, r3, r12: free.
 // Need to preserve r0 and r1 to the slow path.
 .macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
     and    r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED             // Apply alignment mask
@@ -1408,7 +1404,7 @@
 #if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
 #error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
 #endif
-    ldrd   r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+    ldrd   r3, r12, [rSELF, #THREAD_LOCAL_POS_OFFSET]
     sub    r12, r12, r3                                       // Compute the remaining buf size.
     cmp    r2, r12                                            // Check if the total_size fits.
     // The array class is always initialized here. Unlike new-instance,
@@ -1416,10 +1412,10 @@
     bhi    \slowPathLabel
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
     add    r2, r2, r3
-    str    r2, [r9, #THREAD_LOCAL_POS_OFFSET]                 // Store new thread_local_pos.
-    ldr    r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]             // Increment thread_local_objects.
+    str    r2, [rSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
+    ldr    r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
     add    r2, r2, #1
-    str    r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+    str    r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
     POISON_HEAP_REF r0
     str    r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]              // Store the class pointer.
     str    r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET]              // Store the array length.
@@ -1442,7 +1438,7 @@
     // Fast path array allocation for region tlab allocation.
     // r0: mirror::Class* type
     // r1: int32_t component_count
-    // r9: thread
+    // rSELF (r9): thread
     // r2, r3, r12: free.
     \size_setup .Lslow_path\name
     ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
@@ -1451,7 +1447,7 @@
     // r1: int32_t component_count
     // r2: Thread* self
     SETUP_SAVE_REFS_ONLY_FRAME r2  // save callee saves in case of GC
-    mov    r2, r9                  // pass Thread::Current
+    mov    r2, rSELF               // pass Thread::Current
     bl     \entrypoint
     RESTORE_SAVE_REFS_ONLY_FRAME
     REFRESH_MARKING_REGISTER
@@ -1574,10 +1570,10 @@
      .extern artQuickProxyInvokeHandler
 ENTRY art_quick_proxy_invoke_handler
     SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
-    mov     r2, r9                 @ pass Thread::Current
+    mov     r2, rSELF              @ pass Thread::Current
     mov     r3, sp                 @ pass SP
     blx     artQuickProxyInvokeHandler  @ (Method* proxy method, receiver, Thread*, SP)
-    ldr     r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr     r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     // Tear down the callee-save frame. Skip arg registers.
     add     sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
     .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1705,7 +1701,7 @@
     .extern artQuickResolutionTrampoline
 ENTRY art_quick_resolution_trampoline
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2
-    mov     r2, r9                 @ pass Thread::Current
+    mov     r2, rSELF              @ pass Thread::Current
     mov     r3, sp                 @ pass SP
     blx     artQuickResolutionTrampoline  @ (Method* called, receiver, Thread*, SP)
     cbz     r0, 1f                 @ is code pointer null? goto exception
@@ -1779,10 +1775,10 @@
     blx artQuickGenericJniEndTrampoline
 
     // Restore self pointer.
-    mov r9, r11
+    mov rSELF, r11
 
     // Pending exceptions possible.
-    ldr r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     cbnz r2, .Lexception_in_native
 
     // Tear down the alloca.
@@ -1803,7 +1799,7 @@
     .cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
 
 .Lexception_in_native:
-    ldr ip, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+    ldr ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
     add ip, ip, #-1  // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE.
     mov sp, ip
     .cfi_def_cfa_register sp
@@ -1814,10 +1810,10 @@
     .extern artQuickToInterpreterBridge
 ENTRY art_quick_to_interpreter_bridge
     SETUP_SAVE_REFS_AND_ARGS_FRAME r1
-    mov     r1, r9                 @ pass Thread::Current
+    mov     r1, rSELF              @ pass Thread::Current
     mov     r2, sp                 @ pass SP
     blx     artQuickToInterpreterBridge    @ (Method* method, Thread*, SP)
-    ldr     r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr     r2, [rSELF, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     // Tear down the callee-save frame. Skip arg registers.
     add     sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
     .cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1845,7 +1841,7 @@
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2
     @ preserve r0 (not normally an arg) knowing there is a spare slot in kSaveRefsAndArgs.
     str   r0, [sp, #4]
-    mov   r2, r9         @ pass Thread::Current
+    mov   r2, rSELF      @ pass Thread::Current
     mov   r3, sp         @ pass SP
     blx   artInstrumentationMethodEntryFromCode  @ (Method*, Object*, Thread*, SP)
     cbz   r0, .Ldeliver_instrumentation_entry_exception
@@ -1871,7 +1867,7 @@
     add   r3, sp, #8     @ store fpr_res pointer, in kSaveEverything frame
     add   r2, sp, #136   @ store gpr_res pointer, in kSaveEverything frame
     mov   r1, sp         @ pass SP
-    mov   r0, r9         @ pass Thread::Current
+    mov   r0, rSELF      @ pass Thread::Current
     blx   artInstrumentationMethodExitFromCode  @ (Thread*, SP, gpr_res*, fpr_res*)
 
     cbz   r0, .Ldo_deliver_instrumentation_exception
@@ -1900,7 +1896,7 @@
     .extern artDeoptimize
 ENTRY art_quick_deoptimize
     SETUP_SAVE_EVERYTHING_FRAME r0
-    mov    r0, r9         @ pass Thread::Current
+    mov    r0, rSELF      @ pass Thread::Current
     blx    artDeoptimize  @ (Thread*)
 END art_quick_deoptimize
 
@@ -1911,7 +1907,7 @@
     .extern artDeoptimizeFromCompiledCode
 ENTRY art_quick_deoptimize_from_compiled_code
     SETUP_SAVE_EVERYTHING_FRAME r1
-    mov    r1, r9                         @ pass Thread::Current
+    mov    r1, rSELF                      @ pass Thread::Current
     blx    artDeoptimizeFromCompiledCode  @ (DeoptimizationKind, Thread*)
 END art_quick_deoptimize_from_compiled_code
 
@@ -2690,7 +2686,7 @@
 .extern artInvokePolymorphic
 ENTRY art_quick_invoke_polymorphic
     SETUP_SAVE_REFS_AND_ARGS_FRAME r2
-    mov     r2, r9                 @ pass Thread::Current
+    mov     r2, rSELF              @ pass Thread::Current
     mov     r3, sp                 @ pass SP
     mov     r0, #0                 @ initialize 64-bit JValue as zero.
     str     r0, [sp, #-4]!
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
deleted file mode 100644
index 5c5b81b..0000000
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
-#define ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_arm.h"
-
-namespace art {
-namespace arm {
-
-static constexpr uint32_t kArmCalleeSaveAlwaysSpills =
-    (1 << art::arm::LR);
-static constexpr uint32_t kArmCalleeSaveRefSpills =
-    (1 << art::arm::R5) | (1 << art::arm::R6)  | (1 << art::arm::R7) | (1 << art::arm::R8) |
-    (1 << art::arm::R10) | (1 << art::arm::R11);
-static constexpr uint32_t kArmCalleeSaveArgSpills =
-    (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
-static constexpr uint32_t kArmCalleeSaveAllSpills =
-    (1 << art::arm::R4) | (1 << art::arm::R9);
-static constexpr uint32_t kArmCalleeSaveEverythingSpills =
-    (1 << art::arm::R0) | (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3) |
-    (1 << art::arm::R4) | (1 << art::arm::R9) | (1 << art::arm::R12);
-
-static constexpr uint32_t kArmCalleeSaveFpAlwaysSpills = 0;
-static constexpr uint32_t kArmCalleeSaveFpRefSpills = 0;
-static constexpr uint32_t kArmCalleeSaveFpArgSpills =
-    (1 << art::arm::S0)  | (1 << art::arm::S1)  | (1 << art::arm::S2)  | (1 << art::arm::S3)  |
-    (1 << art::arm::S4)  | (1 << art::arm::S5)  | (1 << art::arm::S6)  | (1 << art::arm::S7)  |
-    (1 << art::arm::S8)  | (1 << art::arm::S9)  | (1 << art::arm::S10) | (1 << art::arm::S11) |
-    (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15);
-static constexpr uint32_t kArmCalleeSaveFpAllSpills =
-    (1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
-    (1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
-    (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
-    (1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
-static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
-    kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
-
-constexpr uint32_t ArmCalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t ArmCalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t ArmCalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(ArmCalleeSaveFrameSize(type),
-                              ArmCalleeSaveCoreSpills(type),
-                              ArmCalleeSaveFpSpills(type));
-}
-
-constexpr size_t ArmCalleeSaveFpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return ArmCalleeSaveFrameSize(type) -
-         (POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
-          POPCOUNT(ArmCalleeSaveFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
-}
-
-constexpr size_t ArmCalleeSaveGpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return ArmCalleeSaveFrameSize(type) -
-         POPCOUNT(ArmCalleeSaveCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
-}
-
-constexpr size_t ArmCalleeSaveLrOffset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return ArmCalleeSaveFrameSize(type) -
-      POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * static_cast<size_t>(kArmPointerSize);
-}
-
-}  // namespace arm
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/callee_save_frame_arm64.h
similarity index 61%
rename from runtime/arch/arm64/quick_method_frame_info_arm64.h
rename to runtime/arch/arm64/callee_save_frame_arm64.h
index 2d2b500..bc36bfa 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/callee_save_frame_arm64.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
-#define ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#ifndef ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
@@ -79,57 +79,56 @@
     (1 << art::arm64::D27) | (1 << art::arm64::D28) | (1 << art::arm64::D29) |
     (1 << art::arm64::D30) | (1 << art::arm64::D31);
 
-constexpr uint32_t Arm64CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
-}
+class Arm64CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
+  }
 
-constexpr uint32_t Arm64CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
-}
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
+  }
 
-constexpr uint32_t Arm64CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
-}
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
+  }
 
-constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(Arm64CalleeSaveFrameSize(type),
-                              Arm64CalleeSaveCoreSpills(type),
-                              Arm64CalleeSaveFpSpills(type));
-}
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
 
-constexpr size_t Arm64CalleeSaveFpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return Arm64CalleeSaveFrameSize(type) -
-         (POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
-          POPCOUNT(Arm64CalleeSaveFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
-}
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
+  }
 
-constexpr size_t Arm64CalleeSaveGpr1Offset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return Arm64CalleeSaveFrameSize(type) -
-         POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
-}
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
+  }
 
-constexpr size_t Arm64CalleeSaveLrOffset(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return Arm64CalleeSaveFrameSize(type) -
-      POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) *
-      static_cast<size_t>(kArm64PointerSize);
-}
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kArm64PointerSize);
+  }
+};
 
 }  // namespace arm64
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#endif  // ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
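
The rename above also folds the free Arm64CalleeSave*() helpers into a single Arm64CalleeSaveFrame class of static constexpr members. A minimal usage sketch (not part of this change; CalleeSaveType::kSaveRefsOnly is just an example input):

    // Sketch: querying the new class API instead of the old free functions.
    #include "arch/arm64/callee_save_frame_arm64.h"

    namespace art {

    constexpr size_t kRefsOnlyFrameSize =
        arm64::Arm64CalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsOnly);
    constexpr size_t kRefsOnlyReturnPcOffset =
        arm64::Arm64CalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsOnly);
    // The return PC occupies the top pointer-sized slot of the frame, so the new
    // GetReturnPcOffset() is simply the frame size minus one ARM64 pointer.
    static_assert(kRefsOnlyReturnPcOffset == kRefsOnlyFrameSize - 8u,
                  "Return PC is expected in the top slot of the callee-save frame");

    }  // namespace art
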
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 42c9a84..d0f61c9 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -52,6 +52,8 @@
     // Check to see if this is an expected variant.
     static const char* arm64_known_variants[] = {
         "cortex-a35",
+        "cortex-a55",
+        "cortex-a75",
         "exynos-m1",
         "exynos-m2",
         "exynos-m3",
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 7fd39b6..b946f4f 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -64,6 +64,26 @@
   EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
   EXPECT_STREQ("-a53", kryo_features->GetFeatureString().c_str());
   EXPECT_EQ(kryo_features->AsBitmap(), 0U);
+
+  std::unique_ptr<const InstructionSetFeatures> cortex_a55_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a55", &error_msg));
+  ASSERT_TRUE(cortex_a55_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(cortex_a55_features->GetInstructionSet(), InstructionSet::kArm64);
+  EXPECT_TRUE(cortex_a55_features->Equals(cortex_a55_features.get()));
+  EXPECT_TRUE(cortex_a55_features->Equals(cortex_a35_features.get()));
+  EXPECT_FALSE(cortex_a55_features->Equals(cortex_a57_features.get()));
+  EXPECT_STREQ("-a53", cortex_a55_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a55_features->AsBitmap(), 0U);
+
+  std::unique_ptr<const InstructionSetFeatures> cortex_a75_features(
+      InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a75", &error_msg));
+  ASSERT_TRUE(cortex_a75_features.get() != nullptr) << error_msg;
+  EXPECT_EQ(cortex_a75_features->GetInstructionSet(), InstructionSet::kArm64);
+  EXPECT_TRUE(cortex_a75_features->Equals(cortex_a75_features.get()));
+  EXPECT_TRUE(cortex_a75_features->Equals(cortex_a35_features.get()));
+  EXPECT_FALSE(cortex_a75_features->Equals(cortex_a57_features.get()));
+  EXPECT_STREQ("-a53", cortex_a75_features->GetFeatureString().c_str());
+  EXPECT_EQ(cortex_a75_features->AsBitmap(), 0U);
 }
 
 }  // namespace art
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 9919e98..14d0cc7 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1151,45 +1151,36 @@
      */
     .extern artLockObjectFromCode
 ENTRY art_quick_lock_object
-    cbz    w0, .Lslow_lock
-    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
+    ldr    w1, [xSELF, #THREAD_ID_OFFSET]
+    cbz    w0, art_quick_lock_object_no_inline
+                                      // Exclusive load/store has no immediate anymore.
+    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
 .Lretry_lock:
-    ldr    w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
-    ldaxr  w1, [x4]                   // acquire needed only in most common case
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
-    cbnz   w3, .Lnot_unlocked         // already thin locked
-    // unlocked case - x1: original lock word that's zero except for the read barrier bits.
-    orr    x2, x1, x2                 // x2 holds thread id with count of 0 with preserved read barrier bits
-    stxr   w3, w2, [x4]
-    cbnz   w3, .Llock_stxr_fail       // store failed, retry
+    ldaxr  w2, [x4]                   // Acquire needed only in most common case.
+    eor    w3, w2, w1                 // Prepare the value to store if unlocked
+                                      //   (thread id, count of 0 and preserved read barrier bits),
+                                      // or prepare to compare thread id for recursive lock check
+                                      //   (lock_word.ThreadId() ^ self->ThreadId()).
+    tst    w2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // Test the non-gc bits.
+    b.ne   .Lnot_unlocked             // Check if unlocked.
+    // unlocked case - store w3: original lock word plus thread id, preserved read barrier bits.
+    stxr   w2, w3, [x4]
+    cbnz   w2, .Lretry_lock           // If the store failed, retry.
     ret
-.Lnot_unlocked:  // x1: original lock word
-    lsr    w3, w1, LOCK_WORD_STATE_SHIFT
-    cbnz   w3, .Lslow_lock            // if either of the top two bits are set, go slow path
-    eor    w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
-    uxth   w2, w2                     // zero top 16 bits
-    cbnz   w2, .Lslow_lock            // lock word and self thread id's match -> recursive lock
-                                      // else contention, go to slow path
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits.
-    add    w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count in lock word placing in w2 to check overflow
-    lsr    w3, w2, #LOCK_WORD_GC_STATE_SHIFT     // if the first gc state bit is set, we overflowed.
-    cbnz   w3, .Lslow_lock            // if we overflow the count go slow path
-    add    w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // increment count for real
-    stxr   w3, w2, [x4]
-    cbnz   w3, .Llock_stxr_fail       // store failed, retry
+.Lnot_unlocked:  // w2: original lock word, w1: thread id, w3: w2 ^ w1
+                                      // Check lock word state and thread id together,
+    tst    w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+    b.ne   art_quick_lock_object_no_inline
+    add    w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // Increment the recursive lock count.
+    tst    w3, #LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED  // Test the new thin lock count.
+    b.eq   art_quick_lock_object_no_inline  // Zero as the new count indicates overflow, go slow path.
+    stxr   w2, w3, [x4]
+    cbnz   w2, .Lretry_lock           // If the store failed, retry.
     ret
-.Llock_stxr_fail:
-    b      .Lretry_lock               // retry
-.Lslow_lock:
-    SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case we block
-    mov    x1, xSELF                  // pass Thread::Current
-    bl     artLockObjectFromCode      // (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_W0_IS_ZERO_OR_DELIVER
 END art_quick_lock_object
 
 ENTRY art_quick_lock_object_no_inline
+    // This is also the slow path for art_quick_lock_object.
     SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case we block
     mov    x1, xSELF                  // pass Thread::Current
     bl     artLockObjectFromCode      // (Object* obj, Thread*)
@@ -1206,54 +1197,46 @@
      */
     .extern artUnlockObjectFromCode
 ENTRY art_quick_unlock_object
-    cbz    x0, .Lslow_unlock
-    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET  // exclusive load/store has no immediate anymore
+    ldr    w1, [xSELF, #THREAD_ID_OFFSET]
+    cbz    x0, art_quick_unlock_object_no_inline
+                                      // Exclusive load/store has no immediate anymore.
+    add    x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
 .Lretry_unlock:
 #ifndef USE_READ_BARRIER
-    ldr    w1, [x4]
+    ldr    w2, [x4]
 #else
-    ldxr   w1, [x4]                   // Need to use atomic instructions for read barrier
+    ldxr   w2, [x4]                   // Need to use atomic instructions for read barrier.
 #endif
-    lsr    w2, w1, LOCK_WORD_STATE_SHIFT
-    cbnz   w2, .Lslow_unlock          // if either of the top two bits are set, go slow path
-    ldr    w2, [xSELF, #THREAD_ID_OFFSET]
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
-    eor    w3, w3, w2                 // lock_word.ThreadId() ^ self->ThreadId()
-    uxth   w3, w3                     // zero top 16 bits
-    cbnz   w3, .Lslow_unlock          // do lock word and self thread id's match?
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // zero the gc bits
-    cmp    w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
-    bpl    .Lrecursive_thin_unlock
-    // transition to unlocked
-    and    w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED  // w3: zero except for the preserved read barrier bits
+    eor    w3, w2, w1                 // Prepare the value to store if simply locked
+                                      //   (mostly 0s, and preserved read barrier bits),
+                                      // or prepare to compare thread id for recursive lock check
+                                      //   (lock_word.ThreadId() ^ self->ThreadId()).
+    tst    w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED  // Test the non-gc bits.
+    b.ne   .Lnot_simply_locked        // Locked recursively or by other thread?
+    // Transition to unlocked.
 #ifndef USE_READ_BARRIER
     stlr   w3, [x4]
 #else
-    stlxr  w2, w3, [x4]               // Need to use atomic instructions for read barrier
-    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
+    stlxr  w2, w3, [x4]               // Need to use atomic instructions for read barrier.
+    cbnz   w2, .Lretry_unlock         // If the store failed, retry.
 #endif
     ret
-.Lrecursive_thin_unlock:  // w1: original lock word
-    sub    w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
+.Lnot_simply_locked:
+                                      // Check lock word state and thread id together,
+    tst    w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+    b.ne   art_quick_unlock_object_no_inline
+    sub    w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE  // decrement count
 #ifndef USE_READ_BARRIER
-    str    w1, [x4]
+    str    w3, [x4]
 #else
-    stxr   w2, w1, [x4]               // Need to use atomic instructions for read barrier
-    cbnz   w2, .Lunlock_stxr_fail     // store failed, retry
+    stxr   w2, w3, [x4]               // Need to use atomic instructions for read barrier.
+    cbnz   w2, .Lretry_unlock         // If the store failed, retry.
 #endif
     ret
-.Lunlock_stxr_fail:
-    b      .Lretry_unlock             // retry
-.Lslow_unlock:
-    SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case exception allocation triggers GC
-    mov    x1, xSELF                  // pass Thread::Current
-    bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    REFRESH_MARKING_REGISTER
-    RETURN_IF_W0_IS_ZERO_OR_DELIVER
 END art_quick_unlock_object
 
 ENTRY art_quick_unlock_object_no_inline
+    // This is also the slow path for art_quick_unlock_object.
     SETUP_SAVE_REFS_ONLY_FRAME        // save callee saves in case exception allocation triggers GC
     mov    x1, xSELF                  // pass Thread::Current
     bl     artUnlockObjectFromCode    // (Object* obj, Thread*)
@@ -1580,6 +1563,7 @@
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
 
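
The rewritten art_quick_lock_object fast path folds the "unlocked", "inflated lock", and "owned by another thread" checks into one XOR plus two bit tests. Below is a C++ sketch of the same decision, assuming the usual lock word layout (2 state bits, 2 gc bits, 12 count bits, 16 owner bits); the constant names and values are illustrative, not taken from this change:

    #include <cstdint>
    #include <optional>

    namespace {
    constexpr uint32_t kStateMaskShifted          = 0xC0000000u;  // Top two bits: lock word state.
    constexpr uint32_t kGcStateMaskShiftedToggled = 0xCFFFFFFFu;  // Everything except the gc bits.
    constexpr uint32_t kThinLockOwnerMaskShifted  = 0x0000FFFFu;  // Owning thread id.
    constexpr uint32_t kThinLockCountOne          = 0x00010000u;  // One recursive acquisition.
    constexpr uint32_t kThinLockCountMaskShifted  = 0x0FFF0000u;  // Recursion count field.
    }  // namespace

    // Returns the new lock word to store, or nullopt when the slow path must be taken.
    std::optional<uint32_t> TryThinLock(uint32_t lock_word, uint32_t thread_id) {
      uint32_t xored = lock_word ^ thread_id;  // New word if unlocked, owner check otherwise.
      if ((lock_word & kGcStateMaskShiftedToggled) == 0u) {
        return xored;  // Unlocked: thread id with count 0, gc bits preserved.
      }
      if ((xored & (kStateMaskShifted | kThinLockOwnerMaskShifted)) != 0u) {
        return std::nullopt;  // Inflated lock, or thin lock held by another thread.
      }
      uint32_t incremented = lock_word + kThinLockCountOne;  // Recursive acquisition.
      if ((incremented & kThinLockCountMaskShifted) == 0u) {
        return std::nullopt;  // Count wrapped to zero: overflow, go slow.
      }
      return incremented;
    }
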
diff --git a/runtime/arch/code_offset.h b/runtime/arch/code_offset.h
index 8e8dde4..f0c6d22 100644
--- a/runtime/arch/code_offset.h
+++ b/runtime/arch/code_offset.h
@@ -21,9 +21,9 @@
 
 #include <android-base/logging.h>
 
+#include "arch/instruction_set.h"
 #include "base/bit_utils.h"
 #include "base/macros.h"
-#include "instruction_set.h"
 
 namespace art {
 
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index 5f1a507..c31c927 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -21,8 +21,8 @@
 #include <ostream>
 #include <vector>
 
+#include "arch/instruction_set.h"
 #include "base/macros.h"
-#include "instruction_set.h"
 
 namespace art {
 
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/callee_save_frame_mips.h
similarity index 66%
rename from runtime/arch/mips/quick_method_frame_info_mips.h
rename to runtime/arch/mips/callee_save_frame_mips.h
index 8c86252..6e88d08 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/callee_save_frame_mips.h
@@ -14,13 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#ifndef ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
+#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_mips.h"
 
@@ -80,37 +81,56 @@
     (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
     (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
 
-constexpr uint32_t MipsCalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
-}
+class MipsCalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
+  }
 
-constexpr uint32_t MipsCalleeSaveFPSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
-}
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
+  }
 
-constexpr uint32_t MipsCalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(MipsCalleeSaveFPSpills(type))   /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
-}
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type))   /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
+  }
 
-constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(MipsCalleeSaveFrameSize(type),
-                              MipsCalleeSaveCoreSpills(type),
-                              MipsCalleeSaveFPSpills(type));
-}
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMipsPointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMipsPointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kMipsPointerSize);
+  }
+};
 
 }  // namespace mips
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#endif  // ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 58b9d48..5d6e410 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -203,6 +203,8 @@
   static_assert(!IsDirectEntrypoint(kQuickInitializeType), "Non-direct C stub marked direct.");
   qpoints->pResolveString = art_quick_resolve_string;
   static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
+  qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
+  static_assert(!IsDirectEntrypoint(kQuickResolveMethodHandle), "Non-direct C stub marked direct.");
   qpoints->pResolveMethodType = art_quick_resolve_method_type;
   static_assert(!IsDirectEntrypoint(kQuickResolveMethodType), "Non-direct C stub marked direct.");
 
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index d5a9b15..7c8ac28 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -17,13 +17,13 @@
 #include <sys/ucontext.h>
 #include "fault_handler.h"
 
+#include "arch/mips/callee_save_frame_mips.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
 #include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
-#include "quick_method_frame_info_mips.h"
 #include "registers_mips.h"
 #include "thread-current-inl.h"
 
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 104a4ca..c367ea6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -2056,6 +2056,12 @@
 .endm
 
     /*
+     * Entry from managed code to resolve a method handle. On entry, A0 holds the method handle
+     * index. On success the MethodHandle is returned, otherwise an exception is raised.
+     */
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
+
+    /*
      * Entry from managed code to resolve a method type. On entry, A0 holds the method type index.
      * On success the MethodType is returned, otherwise an exception is raised.
      */
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/callee_save_frame_mips64.h
similarity index 61%
rename from runtime/arch/mips64/quick_method_frame_info_mips64.h
rename to runtime/arch/mips64/callee_save_frame_mips64.h
index 520f631..59529a0 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/callee_save_frame_mips64.h
@@ -14,13 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#ifndef ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
+#define ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
 
 #include "arch/instruction_set.h"
 #include "base/bit_utils.h"
 #include "base/callee_save_type.h"
 #include "base/enums.h"
+#include "base/globals.h"
 #include "quick/quick_method_frame_info.h"
 #include "registers_mips64.h"
 
@@ -71,37 +72,56 @@
     (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
     (1 << art::mips64::F30) | (1 << art::mips64::F31);
 
-constexpr uint32_t Mips64CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
-}
+class Mips64CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
+  }
 
-constexpr uint32_t Mips64CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kMips64CalleeSaveFpRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
-}
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kMips64CalleeSaveFpRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
+  }
 
-constexpr uint32_t Mips64CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(Mips64CalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(Mips64CalleeSaveFpSpills(type))   /* fprs */ +
-                  + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
-}
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type))   /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
+  }
 
-constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(Mips64CalleeSaveFrameSize(type),
-                              Mips64CalleeSaveCoreSpills(type),
-                              Mips64CalleeSaveFpSpills(type));
-}
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMips64PointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMips64PointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kMips64PointerSize);
+  }
+};
 
 }  // namespace mips64
 }  // namespace art
 
-#endif  // ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#endif  // ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 695da47..85f3528 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -18,13 +18,13 @@
 
 #include <sys/ucontext.h>
 
+#include "arch/mips64/callee_save_frame_mips64.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
 #include "base/globals.h"
 #include "base/hex_dump.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
-#include "quick_method_frame_info_mips64.h"
 #include "registers_mips64.h"
 #include "thread-current-inl.h"
 
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 1e94e07..1f4f174 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1956,6 +1956,12 @@
 .endm
 
     /*
+     * Entry from managed code to resolve a method handle. On entry, A0 holds the method handle
+     * index. On success the MethodHandle is returned, otherwise an exception is raised.
+     */
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
+
+    /*
      * Entry from managed code to resolve a method type. On entry, A0 holds the method type index.
      * On success the MethodType is returned, otherwise an exception is raised.
      */
diff --git a/runtime/arch/x86/callee_save_frame_x86.h b/runtime/arch/x86/callee_save_frame_x86.h
new file mode 100644
index 0000000..f336f43
--- /dev/null
+++ b/runtime/arch/x86/callee_save_frame_x86.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
+#define ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86.h"
+
+namespace art {
+namespace x86 {
+
+static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
+    (1 << art::x86::kNumberOfCpuRegisters);  // Fake return address callee save.
+static constexpr uint32_t kX86CalleeSaveRefSpills =
+    (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
+static constexpr uint32_t kX86CalleeSaveArgSpills =
+    (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+static constexpr uint32_t kX86CalleeSaveEverythingSpills =
+    (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
+static constexpr uint32_t kX86CalleeSaveFpArgSpills =
+    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+    (1 << art::x86::XMM2) | (1 << art::x86::XMM3);
+static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
+    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+    (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
+    (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
+    (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
+
+class X86CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+           (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    2 * POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
+  }
+
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            2 * POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kX86PointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kX86PointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kX86PointerSize);
+  }
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
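
For a concrete figure, the spill masks above yield the following kSaveRefsAndArgs frame size on x86, assuming 16-byte stack alignment (a sketch, not part of this change):

    //   gprs: fake return address + {EBP, ESI, EDI} + {ECX, EDX, EBX}  -> 7 slots
    //   fprs: {XMM0..XMM3}, each occupying two 4-byte slots            -> 8 slots
    //   plus one slot for the ArtMethod*                               -> 16 slots
    //   16 slots * 4 bytes = 64 bytes, already 16-byte aligned.
    #include "arch/x86/callee_save_frame_x86.h"

    static_assert(
        art::x86::X86CalleeSaveFrame::GetFrameSize(art::CalleeSaveType::kSaveRefsAndArgs) == 64u,
        "Unexpected kSaveRefsAndArgs frame size on x86");
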
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 0ae6914..b89d45f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1281,6 +1281,7 @@
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
 
@@ -1291,7 +1292,7 @@
     jz   .Lslow_lock
 .Lretry_lock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx  // ecx := lock word
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx         // test the 2 high bits.
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx  // test the 2 high bits.
     jne  .Lslow_lock                      // slow path if either of the two high bits are set.
     movl %ecx, %edx                       // save lock word (edx) to keep read barrier bits.
     andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx  // zero the gc bits.
@@ -1361,7 +1362,7 @@
 .Lretry_unlock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx  // ecx := lock word
     movl %fs:THREAD_ID_OFFSET, %edx       // edx := thread id
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
     jnz  .Lslow_unlock                    // lock word contains a monitor
     cmpw %cx, %dx                         // does the thread id match?
     jne  .Lslow_unlock
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
deleted file mode 100644
index 9a66333..0000000
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
-#define ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_x86.h"
-
-namespace art {
-namespace x86 {
-
-enum XMM {
-  XMM0 = 0,
-  XMM1 = 1,
-  XMM2 = 2,
-  XMM3 = 3,
-  XMM4 = 4,
-  XMM5 = 5,
-  XMM6 = 6,
-  XMM7 = 7,
-};
-
-static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
-    (1 << art::x86::kNumberOfCpuRegisters);  // Fake return address callee save.
-static constexpr uint32_t kX86CalleeSaveRefSpills =
-    (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
-static constexpr uint32_t kX86CalleeSaveArgSpills =
-    (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-static constexpr uint32_t kX86CalleeSaveEverythingSpills =
-    (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-
-static constexpr uint32_t kX86CalleeSaveFpArgSpills =
-    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
-    (1 << art::x86::XMM2) | (1 << art::x86::XMM3);
-static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
-    (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
-    (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
-    (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
-    (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
-
-constexpr uint32_t X86CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t X86CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
-         (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t X86CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
-                  2 * POPCOUNT(X86CalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(X86CalleeSaveFrameSize(type),
-                              X86CalleeSaveCoreSpills(type),
-                              X86CalleeSaveFpSpills(type));
-}
-
-}  // namespace x86
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
diff --git a/runtime/arch/x86/registers_x86.h b/runtime/arch/x86/registers_x86.h
index 5a5d226..d3b959f 100644
--- a/runtime/arch/x86/registers_x86.h
+++ b/runtime/arch/x86/registers_x86.h
@@ -42,6 +42,20 @@
 };
 std::ostream& operator<<(std::ostream& os, const Register& rhs);
 
+enum XmmRegister {
+  XMM0 = 0,
+  XMM1 = 1,
+  XMM2 = 2,
+  XMM3 = 3,
+  XMM4 = 4,
+  XMM5 = 5,
+  XMM6 = 6,
+  XMM7 = 7,
+  kNumberOfXmmRegisters = 8,
+  kNoXmmRegister = -1  // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
+
 }  // namespace x86
 }  // namespace art
 
diff --git a/runtime/arch/x86_64/callee_save_frame_x86_64.h b/runtime/arch/x86_64/callee_save_frame_x86_64.h
new file mode 100644
index 0000000..228a902
--- /dev/null
+++ b/runtime/arch/x86_64/callee_save_frame_x86_64.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86_64.h"
+
+namespace art {
+namespace x86_64 {
+
+static constexpr uint32_t kX86_64CalleeSaveAlwaysSpills =
+    (1 << art::x86_64::kNumberOfCpuRegisters);  // Fake return address callee save.
+static constexpr uint32_t kX86_64CalleeSaveRefSpills =
+    (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
+    (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
+static constexpr uint32_t kX86_64CalleeSaveArgSpills =
+    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
+    (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
+static constexpr uint32_t kX86_64CalleeSaveEverythingSpills =
+    (1 << art::x86_64::RAX) | (1 << art::x86_64::RCX) | (1 << art::x86_64::RDX) |
+    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDI) | (1 << art::x86_64::R8) |
+    (1 << art::x86_64::R9) | (1 << art::x86_64::R10) | (1 << art::x86_64::R11);
+
+static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
+    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
+    (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
+static constexpr uint32_t kX86_64CalleeSaveFpSpills =
+    (1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
+    (1 << art::x86_64::XMM14) | (1 << art::x86_64::XMM15);
+static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
+    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) |
+    (1 << art::x86_64::XMM2) | (1 << art::x86_64::XMM3) |
+    (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7) |
+    (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
+    (1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
+
+class X86_64CalleeSaveFrame {
+ public:
+  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return kX86_64CalleeSaveFpSpills |
+        (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+        (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
+  }
+
+  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+                    POPCOUNT(GetFpSpills(type)) /* fprs */ +
+                    1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
+  }
+
+  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+  }
+
+  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           (POPCOUNT(GetCoreSpills(type)) +
+            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kX86_64PointerSize);
+  }
+
+  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) -
+           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kX86_64PointerSize);
+  }
+
+  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+    type = GetCanonicalCalleeSaveType(type);
+    return GetFrameSize(type) - static_cast<size_t>(kX86_64PointerSize);
+  }
+};
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 8fef802..c179033 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1301,6 +1301,7 @@
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_type, artInitializeTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
 ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
 
@@ -1311,7 +1312,7 @@
     jz   .Lslow_lock
 .Lretry_lock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx  // ecx := lock word.
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx         // Test the 2 high bits.
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx  // Test the 2 high bits.
     jne  .Lslow_lock                      // Slow path if either of the two high bits are set.
     movl %ecx, %edx                       // save lock word (edx) to keep read barrier bits.
     andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx  // zero the gc bits.
@@ -1361,7 +1362,7 @@
 .Lretry_unlock:
     movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx  // ecx := lock word
     movl %gs:THREAD_ID_OFFSET, %edx       // edx := thread id
-    test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+    test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
     jnz  .Lslow_unlock                    // lock word contains a monitor
     cmpw %cx, %dx                         // does the thread id match?
     jne  .Lslow_unlock
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
deleted file mode 100644
index ebf976e..0000000
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
-#define ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_x86_64.h"
-
-namespace art {
-namespace x86_64 {
-
-static constexpr uint32_t kX86_64CalleeSaveAlwaysSpills =
-    (1 << art::x86_64::kNumberOfCpuRegisters);  // Fake return address callee save.
-static constexpr uint32_t kX86_64CalleeSaveRefSpills =
-    (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
-    (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
-static constexpr uint32_t kX86_64CalleeSaveArgSpills =
-    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
-    (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
-static constexpr uint32_t kX86_64CalleeSaveEverythingSpills =
-    (1 << art::x86_64::RAX) | (1 << art::x86_64::RCX) | (1 << art::x86_64::RDX) |
-    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDI) | (1 << art::x86_64::R8) |
-    (1 << art::x86_64::R9) | (1 << art::x86_64::R10) | (1 << art::x86_64::R11);
-
-static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
-    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
-    (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
-    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
-static constexpr uint32_t kX86_64CalleeSaveFpSpills =
-    (1 << art::x86_64::XMM12) | (1 << art::x86_64::XMM13) |
-    (1 << art::x86_64::XMM14) | (1 << art::x86_64::XMM15);
-static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
-    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) |
-    (1 << art::x86_64::XMM2) | (1 << art::x86_64::XMM3) |
-    (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
-    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7) |
-    (1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
-    (1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
-
-constexpr uint32_t X86_64CalleeSaveCoreSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t X86_64CalleeSaveFpSpills(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return kX86_64CalleeSaveFpSpills |
-      (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
-      (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t X86_64CalleeSaveFrameSize(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
-                  POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
-                  1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
-  type = GetCanonicalCalleeSaveType(type);
-  return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
-                              X86_64CalleeSaveCoreSpills(type),
-                              X86_64CalleeSaveFpSpills(type));
-}
-
-}  // namespace x86_64
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 87fcb20..608e33c 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -710,6 +710,23 @@
   return GetOatMethodQuickCode(runtime->GetClassLinker()->GetImagePointerSize()) != nullptr;
 }
 
+void ArtMethod::SetNotIntrinsic() {
+  if (!IsIntrinsic()) {
+    return;
+  }
+
+  // Query the hidden API access flags of the intrinsic.
+  HiddenApiAccessFlags::ApiList intrinsic_api_list = GetHiddenApiAccessFlags();
+
+  // Clear intrinsic-related access flags.
+  ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
+
+  // Re-apply hidden API access flags now that the method is not an intrinsic.
+  SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(GetAccessFlags(), intrinsic_api_list));
+  DCHECK_EQ(GetHiddenApiAccessFlags(), intrinsic_api_list);
+}
+
+
 void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
   memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
          Size(image_pointer_size));
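
Note on the SetNotIntrinsic() change above: the out-of-line version preserves the hidden-API access bits that share storage with the intrinsic ordinal instead of simply wiping them. A minimal standalone sketch of that flag dance follows; the constants are illustrative placeholders, not ART's real kAcc values.

#include <cassert>
#include <cstdint>

// Illustrative flag layout only; the real constants live in ART's modifiers.h.
constexpr uint32_t kFakeIntrinsic     = 0x80000000u;
constexpr uint32_t kFakeIntrinsicBits = 0x7ff00000u;  // ordinal bits reused for hidden-API info
constexpr uint32_t kFakePublic        = 0x00000001u;

uint32_t ClearIntrinsicKeepingHiddenApi(uint32_t access_flags, uint32_t hidden_api_bits) {
  // Mirror the pattern in ArtMethod::SetNotIntrinsic(): drop the intrinsic
  // marker and ordinal, then re-encode the previously queried hidden-API bits.
  access_flags &= ~(kFakeIntrinsic | kFakeIntrinsicBits);
  return access_flags | hidden_api_bits;
}

int main() {
  const uint32_t hidden_api_bits = 0x00100000u;                 // pretend "greylist" encoding
  uint32_t flags = kFakePublic | kFakeIntrinsic | (42u << 21);  // intrinsic with ordinal 42
  flags = ClearIntrinsicKeepingHiddenApi(flags, hidden_api_bits);
  assert((flags & kFakeIntrinsic) == 0u);
  assert((flags & kFakePublic) != 0u);
  assert((flags & hidden_api_bits) == hidden_api_bits);
  return 0;
}
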
diff --git a/runtime/art_method.h b/runtime/art_method.h
index acaa4a6..012d706 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -194,9 +194,7 @@
     return (GetAccessFlags() & kAccIntrinsicBits) >> kAccFlagsShift;
   }
 
-  void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
-  }
+  void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsCopied() {
     static_assert((kAccCopied & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 705e1ff..70ff40d 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -73,7 +73,7 @@
 
 // Offset of field Thread::tlsPtr_.mterp_current_ibase.
 #define THREAD_CURRENT_IBASE_OFFSET \
-    (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 163) * __SIZEOF_POINTER__)
+    (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 164) * __SIZEOF_POINTER__)
 ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
             art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
 // Offset of field Thread::tlsPtr_.mterp_default_ibase.
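
The 163 -> 164 bump above matches the addition of the pResolveMethodHandle quick entrypoint elsewhere in this change: the entrypoint table is embedded in Thread::tlsPtr_ ahead of mterp_current_ibase, so one extra pointer-sized slot presumably shifts this offset by exactly sizeof(void*). A tiny compile-time sanity check of that arithmetic (the base offset is a placeholder; only the delta matters):

#include <cstddef>

// Placeholder for the real THREAD_LOCAL_OBJECTS_OFFSET; only the delta is checked.
constexpr size_t kFakeThreadLocalObjectsOffset = 0;

constexpr size_t MterpCurrentIBaseOffset(size_t preceding_pointers) {
  return kFakeThreadLocalObjectsOffset + sizeof(size_t) +
         (1 + preceding_pointers) * sizeof(void*);
}

// Going from 163 to 164 preceding pointers moves the field by one pointer.
static_assert(MterpCurrentIBaseOffset(164) - MterpCurrentIBaseOffset(163) == sizeof(void*),
              "one new pointer-sized entrypoint slot");

int main() { return 0; }
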
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index e2ad7fd..6917899 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -64,20 +64,19 @@
   void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
-    CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+    CodeInfo code_info(GetCurrentOatQuickMethodHeader());
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
     CodeItemDataAccessor accessor(m->DexInstructionData());
     uint16_t number_of_dex_registers = accessor.RegistersSize();
     DexRegisterMap dex_register_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
-    uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
-    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+    uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     for (int i = 0; i < number_of_references; ++i) {
       int reg = registers[i];
       CHECK_LT(reg, accessor.RegistersSize());
       DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
-          reg, number_of_dex_registers, code_info, encoding);
+          reg, number_of_dex_registers, code_info);
       switch (location.GetKind()) {
         case DexRegisterLocation::Kind::kNone:
           // Not set, should not be a reference.
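
The rewrite above reflects the CodeInfo/StackMap API losing its explicit CodeInfoEncoding parameter: the encoding is now owned by the CodeInfo object itself, so callers no longer thread it through every accessor. A standalone before/after toy of that shape of refactoring (types and fields are invented for illustration):

#include <cstdint>
#include <vector>

struct ToyEncoding { uint32_t stride; };

// Old shape: every accessor needs the encoding threaded through by the caller.
struct ToyOldMap {
  std::vector<uint32_t> data;
  uint32_t Get(const ToyEncoding& enc, uint32_t i) const { return data[i * enc.stride]; }
};

// New shape: the object carries its encoding, so call sites shrink.
struct ToyNewMap {
  ToyEncoding enc;
  std::vector<uint32_t> data;
  uint32_t Get(uint32_t i) const { return data[i * enc.stride]; }
};

int main() {
  const ToyEncoding enc{1};
  const ToyOldMap old_map{{10, 20, 30}};
  const ToyNewMap new_map{enc, {10, 20, 30}};
  return (old_map.Get(enc, 2) == new_map.Get(2)) ? 0 : 1;
}
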
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4141a37..b88aa5e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -828,9 +828,24 @@
   return true;
 }
 
+static void CreateStringInitBindings(Thread* self, ClassLinker* class_linker)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Find String.<init> -> StringFactory bindings.
+  ObjPtr<mirror::Class> string_factory_class =
+      class_linker->FindSystemClass(self, "Ljava/lang/StringFactory;");
+  CHECK(string_factory_class != nullptr);
+  ObjPtr<mirror::Class> string_class =
+      class_linker->GetClassRoot(ClassLinker::ClassRoot::kJavaLangString);
+  WellKnownClasses::InitStringInit(string_class, string_factory_class);
+  // Update the primordial thread.
+  self->InitStringEntryPoints();
+}
+
 void ClassLinker::FinishInit(Thread* self) {
   VLOG(startup) << "ClassLinker::FinishInit entering";
 
+  CreateStringInitBindings(self, this);
+
   // Let the heap know some key offsets into java.lang.ref instances
   // Note: we hard code the field indexes here rather than using FindInstanceField
   // as the types of the field can't be resolved prior to the runtime being
@@ -4850,6 +4865,9 @@
       const uint32_t field_idx = field->GetDexFieldIndex();
       ArtField* resolved_field = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
       if (resolved_field == nullptr) {
+        // Populating cache of a dex file which defines `klass` should always be allowed.
+        DCHECK_EQ(hiddenapi::GetMemberAction(
+            field, class_loader.Get(), dex_cache.Get(), hiddenapi::kNone), hiddenapi::kAllow);
         dex_cache->SetResolvedField(field_idx, field, image_pointer_size_);
       } else {
         DCHECK_EQ(field, resolved_field);
@@ -8053,26 +8071,8 @@
     return nullptr;
   }
   DCHECK(klass->IsResolved());
-  Thread* self = is_static ? Thread::Current() : nullptr;
 
-  // First try to find a field declared directly by `klass` by the field index.
-  ArtField* resolved_field = is_static
-      ? mirror::Class::FindStaticField(self, klass, dex_cache, field_idx)
-      : klass->FindInstanceField(dex_cache, field_idx);
-
-  if (resolved_field == nullptr) {
-    // If not found in `klass` by field index, search the class hierarchy using the name and type.
-    const char* name = dex_file.GetFieldName(field_id);
-    const char* type = dex_file.GetFieldTypeDescriptor(field_id);
-    resolved_field = is_static
-        ? mirror::Class::FindStaticField(self, klass, name, type)
-        : klass->FindInstanceField(name, type);
-  }
-
-  if (resolved_field != nullptr) {
-    dex_cache->SetResolvedField(field_idx, resolved_field, image_pointer_size_);
-  }
-  return resolved_field;
+  return FindResolvedField(klass, dex_cache, class_loader, field_idx, is_static);
 }
 
 ArtField* ClassLinker::ResolveField(uint32_t field_idx,
@@ -8087,39 +8087,18 @@
   }
   const DexFile& dex_file = *dex_cache->GetDexFile();
   const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
-  Thread* const self = Thread::Current();
   ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
   if (klass == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
 
-  if (is_static) {
-    resolved = mirror::Class::FindStaticField(self, klass, dex_cache.Get(), field_idx);
-  } else {
-    resolved = klass->FindInstanceField(dex_cache.Get(), field_idx);
-  }
-
+  resolved = FindResolvedField(klass, dex_cache.Get(), class_loader.Get(), field_idx, is_static);
   if (resolved == nullptr) {
     const char* name = dex_file.GetFieldName(field_id);
     const char* type = dex_file.GetFieldTypeDescriptor(field_id);
-    if (is_static) {
-      resolved = mirror::Class::FindStaticField(self, klass, name, type);
-    } else {
-      resolved = klass->FindInstanceField(name, type);
-    }
-  }
-
-  if (resolved == nullptr ||
-      hiddenapi::GetMemberAction(
-          resolved, class_loader.Get(), dex_cache.Get(), hiddenapi::kLinking) == hiddenapi::kDeny) {
-    const char* name = dex_file.GetFieldName(field_id);
-    const char* type = dex_file.GetFieldTypeDescriptor(field_id);
     ThrowNoSuchFieldError(is_static ? "static " : "instance ", klass, type, name);
-    return nullptr;
   }
-
-  dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
   return resolved;
 }
 
@@ -8134,32 +8113,83 @@
   }
   const DexFile& dex_file = *dex_cache->GetDexFile();
   const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
-  Thread* self = Thread::Current();
   ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
   if (klass == nullptr) {
     DCHECK(Thread::Current()->IsExceptionPending());
     return nullptr;
   }
 
-  StringPiece name(dex_file.GetFieldName(field_id));
-  StringPiece type(dex_file.GetFieldTypeDescriptor(field_id));
-  resolved = mirror::Class::FindField(self, klass, name, type);
-  if (resolved != nullptr &&
-      hiddenapi::GetMemberAction(
-          resolved, class_loader.Get(), dex_cache.Get(), hiddenapi::kLinking) == hiddenapi::kDeny) {
-    resolved = nullptr;
-  }
-  if (resolved != nullptr) {
-    dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
-  } else {
+  resolved = FindResolvedFieldJLS(klass, dex_cache.Get(), class_loader.Get(), field_idx);
+  if (resolved == nullptr) {
+    const char* name = dex_file.GetFieldName(field_id);
+    const char* type = dex_file.GetFieldTypeDescriptor(field_id);
     ThrowNoSuchFieldError("", klass, type, name);
   }
   return resolved;
 }
 
+ArtField* ClassLinker::FindResolvedField(ObjPtr<mirror::Class> klass,
+                                         ObjPtr<mirror::DexCache> dex_cache,
+                                         ObjPtr<mirror::ClassLoader> class_loader,
+                                         uint32_t field_idx,
+                                         bool is_static) {
+  ArtField* resolved = nullptr;
+  Thread* self = is_static ? Thread::Current() : nullptr;
+  const DexFile& dex_file = *dex_cache->GetDexFile();
+
+  resolved = is_static ? mirror::Class::FindStaticField(self, klass, dex_cache, field_idx)
+                       : klass->FindInstanceField(dex_cache, field_idx);
+
+  if (resolved == nullptr) {
+    const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+    const char* name = dex_file.GetFieldName(field_id);
+    const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+    resolved = is_static ? mirror::Class::FindStaticField(self, klass, name, type)
+                         : klass->FindInstanceField(name, type);
+  }
+
+  if (resolved != nullptr &&
+      hiddenapi::GetMemberAction(
+          resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
+    resolved = nullptr;
+  }
+
+  if (resolved != nullptr) {
+    dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
+  }
+
+  return resolved;
+}
+
+ArtField* ClassLinker::FindResolvedFieldJLS(ObjPtr<mirror::Class> klass,
+                                            ObjPtr<mirror::DexCache> dex_cache,
+                                            ObjPtr<mirror::ClassLoader> class_loader,
+                                            uint32_t field_idx) {
+  ArtField* resolved = nullptr;
+  Thread* self = Thread::Current();
+  const DexFile& dex_file = *dex_cache->GetDexFile();
+  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+
+  const char* name = dex_file.GetFieldName(field_id);
+  const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+  resolved = mirror::Class::FindField(self, klass, name, type);
+
+  if (resolved != nullptr &&
+      hiddenapi::GetMemberAction(
+          resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
+    resolved = nullptr;
+  }
+
+  if (resolved != nullptr) {
+    dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
+  }
+
+  return resolved;
+}
+
 ObjPtr<mirror::MethodType> ClassLinker::ResolveMethodType(
     Thread* self,
-    uint32_t proto_idx,
+    dex::ProtoIndex proto_idx,
     Handle<mirror::DexCache> dex_cache,
     Handle<mirror::ClassLoader> class_loader) {
   DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
@@ -8221,7 +8251,7 @@
 }
 
 ObjPtr<mirror::MethodType> ClassLinker::ResolveMethodType(Thread* self,
-                                                          uint32_t proto_idx,
+                                                          dex::ProtoIndex proto_idx,
                                                           ArtMethod* referrer) {
   StackHandleScope<2> hs(self);
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index e935d1d..52ecf82 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -392,17 +392,38 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
+  // Find a field with a given ID from the DexFile associated with the given DexCache
+  // and ClassLoader, storing the result in DexCache. The declaring class is assumed
+  // to have been already resolved into `klass`. The `is_static` argument is used to
+  // determine if we are resolving a static or non-static field.
+  ArtField* FindResolvedField(ObjPtr<mirror::Class> klass,
+                              ObjPtr<mirror::DexCache> dex_cache,
+                              ObjPtr<mirror::ClassLoader> class_loader,
+                              uint32_t field_idx,
+                              bool is_static)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Find a field with a given ID from the DexFile associated with the given DexCache
+  // and ClassLoader, storing the result in DexCache. The declaring class is assumed
+  // to have been already resolved into `klass`. No is_static argument is provided
+  // so that Java field resolution semantics are followed.
+  ArtField* FindResolvedFieldJLS(ObjPtr<mirror::Class> klass,
+                                 ObjPtr<mirror::DexCache> dex_cache,
+                                 ObjPtr<mirror::ClassLoader> class_loader,
+                                 uint32_t field_idx)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Resolve a method type with a given ID from the DexFile associated with a given DexCache
   // and ClassLoader, storing the result in the DexCache.
   ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
-                                               uint32_t proto_idx,
+                                               dex::ProtoIndex proto_idx,
                                                Handle<mirror::DexCache> dex_cache,
                                                Handle<mirror::ClassLoader> class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
   ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
-                                               uint32_t proto_idx,
+                                               dex::ProtoIndex proto_idx,
                                                ArtMethod* referrer)
       REQUIRES_SHARED(Locks::mutator_lock_);
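
The two new helpers declared above differ only in lookup strategy: FindResolvedField honours the caller's static/instance intent, while FindResolvedFieldJLS searches purely by name and type as the Java language rules require. A toy standalone model of that difference (not ART's real data structures):

#include <optional>
#include <string>
#include <vector>

struct ToyField { std::string name; std::string type; bool is_static; };

// Dex-driven lookup (FindResolvedField-style): the caller states up front
// whether a static or an instance field is wanted; only matching kinds qualify.
std::optional<ToyField> FindDexStyle(const std::vector<ToyField>& fields,
                                     const std::string& name,
                                     const std::string& type,
                                     bool want_static) {
  for (const ToyField& f : fields) {
    if (f.is_static == want_static && f.name == name && f.type == type) return f;
  }
  return std::nullopt;
}

// JLS-driven lookup (FindResolvedFieldJLS-style): no static/instance hint,
// the first field matching by name and type wins.
std::optional<ToyField> FindJlsStyle(const std::vector<ToyField>& fields,
                                     const std::string& name,
                                     const std::string& type) {
  for (const ToyField& f : fields) {
    if (f.name == name && f.type == type) return f;
  }
  return std::nullopt;
}

int main() {
  std::vector<ToyField> fields = {{"count", "I", /*is_static=*/true}};
  // A dex-style instance lookup misses the static field; the JLS-style lookup still finds it.
  bool dex_hit = FindDexStyle(fields, "count", "I", /*want_static=*/false).has_value();
  bool jls_hit = FindJlsStyle(fields, "count", "I").has_value();
  return (!dex_hit && jls_hit) ? 0 : 1;
}
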
 
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 37e074d..9c2a40b 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -29,6 +29,7 @@
 #include "instrumentation.h"
 #include "interpreter/shadow_frame.h"
 #include "interpreter/unstarted_runtime.h"
+#include "jvalue-inl.h"
 #include "mirror/class.h"
 #include "mirror/object.h"
 #include "obj_ptr-inl.h"
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 8fd95ed..657a78b 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -880,11 +880,14 @@
 
 void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> expected_type,
                                    ObjPtr<mirror::MethodType> actual_type) {
-  ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
-                 nullptr,
-                 StringPrintf("Expected %s but was %s",
-                              expected_type->PrettyDescriptor().c_str(),
-                              actual_type->PrettyDescriptor().c_str()).c_str());
+  ThrowWrongMethodTypeException(expected_type->PrettyDescriptor(), actual_type->PrettyDescriptor());
+}
+
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+                                   const std::string& actual_descriptor) {
+  std::ostringstream msg;
+  msg << "Expected " << expected_descriptor << " but was " << actual_descriptor;
+  ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",  nullptr, msg.str().c_str());
 }
 
 }  // namespace art
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 29a056e..6acff6f 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -274,6 +274,10 @@
                                    ObjPtr<mirror::MethodType> callsite_type)
     REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
 
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+                                   const std::string& actual_descriptor)
+    REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_COMMON_THROWS_H_
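
The new string-based overload presumably exists so call sites that already hold pretty-descriptor strings can raise the error without materializing MethodType objects. A trivial standalone sketch of the message it builds (the descriptors below are only example values):

#include <iostream>
#include <sstream>
#include <string>

std::string WrongMethodTypeMessage(const std::string& expected_descriptor,
                                   const std::string& actual_descriptor) {
  std::ostringstream msg;
  msg << "Expected " << expected_descriptor << " but was " << actual_descriptor;
  return msg.str();
}

int main() {
  std::cout << WrongMethodTypeMessage("(int)void", "(java.lang.String)void") << '\n';
  return 0;
}
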
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index c399b1c..95b42d2 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -29,6 +29,7 @@
 #include "mirror/field.h"
 #include "mirror/method.h"
 #include "oat_file.h"
+#include "obj_ptr-inl.h"
 #include "reflection.h"
 #include "thread.h"
 #include "well_known_classes.h"
@@ -116,9 +117,9 @@
   DISALLOW_COPY_AND_ASSIGN(ClassData);
 };
 
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
-                                       Handle<mirror::Class> annotation_class,
-                                       const uint8_t** annotation)
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+                                              Handle<mirror::Class> annotation_class,
+                                              const uint8_t** annotation)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) {
@@ -333,7 +334,7 @@
   return dex_file.GetClassAnnotationSet(annotations_dir);
 }
 
-mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
+ObjPtr<mirror::Object> ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   uint32_t type_index = DecodeUnsignedLeb128(annotation);
   uint32_t size = DecodeUnsignedLeb128(annotation);
@@ -355,13 +356,13 @@
   }
 
   ObjPtr<mirror::Class> annotation_member_class =
-      soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember).Ptr();
-  mirror::Class* annotation_member_array_class =
+      soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember);
+  ObjPtr<mirror::Class> annotation_member_array_class =
       class_linker->FindArrayClass(self, &annotation_member_class);
   if (annotation_member_array_class == nullptr) {
     return nullptr;
   }
-  mirror::ObjectArray<mirror::Object>* element_array = nullptr;
+  ObjPtr<mirror::ObjectArray<mirror::Object>> element_array = nullptr;
   if (size > 0) {
     element_array =
         mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
@@ -373,7 +374,7 @@
 
   Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array));
   for (uint32_t i = 0; i < size; ++i) {
-    mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation);
+    ObjPtr<mirror::Object> new_member = CreateAnnotationMember(klass, annotation_class, annotation);
     if (new_member == nullptr) {
       return nullptr;
     }
@@ -605,7 +606,7 @@
             return false;
           }
           if (!component_type->IsPrimitive()) {
-            mirror::Object* obj = new_annotation_value.value_.GetL();
+            ObjPtr<mirror::Object> obj = new_annotation_value.value_.GetL();
             new_array->AsObjectArray<mirror::Object>()->
                 SetWithoutChecks<kTransactionActive>(i, obj);
           } else {
@@ -682,20 +683,20 @@
   *annotation_ptr = annotation;
 
   if (result_style == DexFile::kAllObjects && primitive_type != Primitive::kPrimVoid) {
-    element_object = BoxPrimitive(primitive_type, annotation_value->value_).Ptr();
+    element_object = BoxPrimitive(primitive_type, annotation_value->value_);
     set_object = true;
   }
 
   if (set_object) {
-    annotation_value->value_.SetL(element_object.Ptr());
+    annotation_value->value_.SetL(element_object);
   }
 
   return true;
 }
 
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
-                                       Handle<mirror::Class> annotation_class,
-                                       const uint8_t** annotation) {
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+                                              Handle<mirror::Class> annotation_class,
+                                              const uint8_t** annotation) {
   const DexFile& dex_file = klass.GetDexFile();
   Thread* self = Thread::Current();
   ScopedObjectAccessUnchecked soa(self);
@@ -799,7 +800,7 @@
   return nullptr;
 }
 
-mirror::Object* GetAnnotationObjectFromAnnotationSet(
+ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(
     const ClassData& klass,
     const DexFile::AnnotationSetItem* annotation_set,
     uint32_t visibility,
@@ -814,11 +815,11 @@
   return ProcessEncodedAnnotation(klass, &annotation);
 }
 
-mirror::Object* GetAnnotationValue(const ClassData& klass,
-                                   const DexFile::AnnotationItem* annotation_item,
-                                   const char* annotation_name,
-                                   Handle<mirror::Class> array_class,
-                                   uint32_t expected_type)
+ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
+                                          const DexFile::AnnotationItem* annotation_item,
+                                          const char* annotation_name,
+                                          Handle<mirror::Class> array_class,
+                                          uint32_t expected_type)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
   const uint8_t* annotation =
@@ -864,7 +865,7 @@
   if (string_array_class == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj =
+  ObjPtr<mirror::Object> obj =
       GetAnnotationValue(klass, annotation_item, "value", string_array_class,
                          DexFile::kDexAnnotationArray);
   if (obj == nullptr) {
@@ -873,8 +874,9 @@
   return obj->AsObjectArray<mirror::String>();
 }
 
-mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
-                                                   const DexFile::AnnotationSetItem* annotation_set)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(
+    const ClassData& klass,
+    const DexFile::AnnotationSetItem* annotation_set)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const DexFile& dex_file = klass.GetDexFile();
   StackHandleScope<1> hs(Thread::Current());
@@ -890,7 +892,7 @@
   if (class_array_class == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj =
+  ObjPtr<mirror::Object> obj =
       GetAnnotationValue(klass, annotation_item, "value", class_array_class,
                          DexFile::kDexAnnotationArray);
   if (obj == nullptr) {
@@ -899,7 +901,7 @@
   return obj->AsObjectArray<mirror::Class>();
 }
 
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSet(
     const ClassData& klass,
     const DexFile::AnnotationSetItem* annotation_set,
     uint32_t visibility)
@@ -930,7 +932,7 @@
       continue;
     }
     const uint8_t* annotation = annotation_item->annotation_;
-    mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
+    ObjPtr<mirror::Object> annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
     if (annotation_obj != nullptr) {
       result->SetWithoutChecks<false>(dest_index, annotation_obj);
       ++dest_index;
@@ -943,21 +945,21 @@
     return result.Get();
   }
 
-  mirror::ObjectArray<mirror::Object>* trimmed_result =
+  ObjPtr<mirror::ObjectArray<mirror::Object>> trimmed_result =
       mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
   if (trimmed_result == nullptr) {
     return nullptr;
   }
 
   for (uint32_t i = 0; i < dest_index; ++i) {
-    mirror::Object* obj = result->GetWithoutChecks(i);
+    ObjPtr<mirror::Object> obj = result->GetWithoutChecks(i);
     trimmed_result->SetWithoutChecks<false>(i, obj);
   }
 
   return trimmed_result;
 }
 
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSetRefList(
     const ClassData& klass,
     const DexFile::AnnotationSetRefList* set_ref_list,
     uint32_t size)
@@ -968,7 +970,7 @@
   StackHandleScope<1> hs(self);
   ObjPtr<mirror::Class> annotation_array_class =
       soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
-  mirror::Class* annotation_array_array_class =
+  ObjPtr<mirror::Class> annotation_array_array_class =
       Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
   if (annotation_array_array_class == nullptr) {
     return nullptr;
@@ -982,8 +984,9 @@
   for (uint32_t index = 0; index < size; ++index) {
     const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
     const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
-    mirror::Object* annotation_set = ProcessAnnotationSet(klass, set_item,
-                                                          DexFile::kDexVisibilityRuntime);
+    ObjPtr<mirror::Object> annotation_set = ProcessAnnotationSet(klass,
+                                                                 set_item,
+                                                                 DexFile::kDexVisibilityRuntime);
     if (annotation_set == nullptr) {
       return nullptr;
     }
@@ -995,7 +998,8 @@
 
 namespace annotations {
 
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+                                             Handle<mirror::Class> annotation_class) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1008,7 +1012,7 @@
                                               annotation_class);
 }
 
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
   StackHandleScope<1> hs(Thread::Current());
   const ClassData field_class(hs, field);
@@ -1037,7 +1041,7 @@
   return annotation_item != nullptr;
 }
 
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method) {
   const ClassData klass(method);
   const DexFile* dex_file = &klass.GetDexFile();
   const DexFile::AnnotationsDirectoryItem* annotations_dir =
@@ -1081,7 +1085,8 @@
   return annotation_value.value_.GetL();
 }
 
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+                                              Handle<mirror::Class> annotation_class) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1090,14 +1095,14 @@
                                               DexFile::kDexVisibilityRuntime, annotation_class);
 }
 
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   return ProcessAnnotationSet(ClassData(method),
                               annotation_set,
                               DexFile::kDexVisibilityRuntime);
 }
 
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method) {
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
   if (annotation_set == nullptr) {
     return nullptr;
@@ -1105,7 +1110,7 @@
   return GetThrowsValue(ClassData(method), annotation_set);
 }
 
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method) {
   const DexFile* dex_file = method->GetDexFile();
   const DexFile::ParameterAnnotationsItem* parameter_annotations =
       FindAnnotationsItemForMethod(method);
@@ -1136,9 +1141,9 @@
   return set_ref_list->size_;
 }
 
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
-                                                uint32_t parameter_idx,
-                                                Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+                                                       uint32_t parameter_idx,
+                                                       Handle<mirror::Class> annotation_class) {
   const DexFile* dex_file = method->GetDexFile();
   const DexFile::ParameterAnnotationsItem* parameter_annotations =
       FindAnnotationsItemForMethod(method);
@@ -1307,8 +1312,8 @@
   return access_flags;
 }
 
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
-                                      Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
+                                             Handle<mirror::Class> annotation_class) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1320,13 +1325,13 @@
                                               annotation_class);
 }
 
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
 }
 
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1345,7 +1350,7 @@
   if (class_array_class == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj =
+  ObjPtr<mirror::Object> obj =
       GetAnnotationValue(data, annotation_item, "value", class_array_class,
                          DexFile::kDexAnnotationArray);
   if (obj == nullptr) {
@@ -1354,7 +1359,7 @@
   return obj->AsObjectArray<mirror::Class>();
 }
 
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1366,17 +1371,19 @@
   if (annotation_item == nullptr) {
     return nullptr;
   }
-  mirror::Object* obj = GetAnnotationValue(data, annotation_item, "value",
-                                           ScopedNullHandle<mirror::Class>(),
-                                           DexFile::kDexAnnotationType);
+  ObjPtr<mirror::Object> obj = GetAnnotationValue(data,
+                                                  annotation_item,
+                                                  "value",
+                                                  ScopedNullHandle<mirror::Class>(),
+                                                  DexFile::kDexAnnotationType);
   if (obj == nullptr) {
     return nullptr;
   }
   return obj->AsClass();
 }
 
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
-  mirror::Class* declaring_class = GetDeclaringClass(klass);
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass) {
+  ObjPtr<mirror::Class> declaring_class = GetDeclaringClass(klass);
   if (declaring_class != nullptr) {
     return declaring_class;
   }
@@ -1420,7 +1427,7 @@
   return method->GetDeclaringClass();
 }
 
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
@@ -1438,7 +1445,7 @@
       DexFile::kDexAnnotationMethod);
 }
 
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) {
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name) {
   ClassData data(klass);
   const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
   if (annotation_set == nullptr) {
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index 4bb0d75..9645a7f 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -22,6 +22,7 @@
 #include "handle.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object_array.h"
+#include "obj_ptr.h"
 
 namespace art {
 
@@ -35,9 +36,10 @@
 namespace annotations {
 
 // Field annotations.
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+                                             Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field)
     REQUIRES_SHARED(Locks::mutator_lock_);
 mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field)
     REQUIRES_SHARED(Locks::mutator_lock_);
@@ -45,21 +47,22 @@
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 // Method annotations.
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method)
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+                                              Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
 uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
-                                                uint32_t parameter_idx,
-                                                Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+                                                       uint32_t parameter_idx,
+                                                       Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
 bool GetParametersMetadataForMethod(ArtMethod* method,
                                     MutableHandle<mirror::ObjectArray<mirror::String>>* names,
@@ -85,20 +88,20 @@
                                               uint32_t method_index);
 
 // Class annotations.
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
                                       Handle<mirror::Class> annotation_class)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass)
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass)
     REQUIRES_SHARED(Locks::mutator_lock_);
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name)
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name)
     REQUIRES_SHARED(Locks::mutator_lock_);
 bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags)
     REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 719b4af..026b5da 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1073,6 +1073,29 @@
   return true;
 }
 
+static InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
+  switch (e_machine) {
+    case EM_ARM:
+      return InstructionSet::kArm;
+    case EM_AARCH64:
+      return InstructionSet::kArm64;
+    case EM_386:
+      return InstructionSet::kX86;
+    case EM_X86_64:
+      return InstructionSet::kX86_64;
+    case EM_MIPS: {
+      if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
+          (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
+        return InstructionSet::kMips;
+      } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
+        return InstructionSet::kMips64;
+      }
+      break;
+    }
+  }
+  return InstructionSet::kNone;
+}
+
 template <typename ElfTypes>
 bool ElfFileImpl<ElfTypes>::Load(File* file,
                                  bool executable,
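
GetInstructionSetFromELF() above maps an ELF header's e_machine (plus e_flags for MIPS) onto ART's InstructionSet. A standalone command-line sketch of the same decision, assuming a glibc <elf.h> new enough to define the MIPS R6 arch flags and, for brevity, a 64-bit ELF header layout:

#include <elf.h>
#include <cstdint>
#include <cstdio>
#include <fstream>

const char* IsaNameFromElf(uint16_t e_machine, uint32_t e_flags) {
  switch (e_machine) {
    case EM_ARM:     return "arm";
    case EM_AARCH64: return "arm64";
    case EM_386:     return "x86";
    case EM_X86_64:  return "x86_64";
    case EM_MIPS:
      if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
          (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
        return "mips";
      } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
        return "mips64";
      }
      return "none";
    default:
      return "none";
  }
}

int main(int argc, char** argv) {
  if (argc < 2) {
    std::fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
    return 1;
  }
  std::ifstream in(argv[1], std::ios::binary);
  Elf64_Ehdr ehdr;  // 64-bit layout assumed; e_flags sits at a different offset in Elf32_Ehdr.
  if (!in.read(reinterpret_cast<char*>(&ehdr), sizeof(ehdr))) {
    return 1;
  }
  std::printf("%s\n", IsaNameFromElf(ehdr.e_machine, ehdr.e_flags));
  return 0;
}
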
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index d4e7492..f6b1c73 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -47,7 +47,6 @@
 inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
                                     const MethodInfo& method_info,
                                     const InlineInfo& inline_info,
-                                    const InlineInfoEncoding& encoding,
                                     uint8_t inlining_depth)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(!outer_method->IsObsolete());
@@ -57,12 +56,12 @@
   // suspended while executing it.
   ScopedAssertNoThreadSuspension sants(__FUNCTION__);
 
-  if (inline_info.EncodesArtMethodAtDepth(encoding, inlining_depth)) {
-    return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
+  if (inline_info.EncodesArtMethodAtDepth(inlining_depth)) {
+    return inline_info.GetArtMethodAtDepth(inlining_depth);
   }
 
-  uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth);
-  if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
+  uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, inlining_depth);
+  if (inline_info.GetDexPcAtDepth(inlining_depth) == static_cast<uint32_t>(-1)) {
     // "charAt" special case. It is the only non-leaf method we inline across dex files.
     ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
     DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
@@ -73,9 +72,9 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtMethod* method = outer_method;
   for (uint32_t depth = 0, end = inlining_depth + 1u; depth != end; ++depth) {
-    DCHECK(!inline_info.EncodesArtMethodAtDepth(encoding, depth));
-    DCHECK_NE(inline_info.GetDexPcAtDepth(encoding, depth), static_cast<uint32_t>(-1));
-    method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, depth);
+    DCHECK(!inline_info.EncodesArtMethodAtDepth(depth));
+    DCHECK_NE(inline_info.GetDexPcAtDepth(depth), static_cast<uint32_t>(-1));
+    method_index = inline_info.GetMethodIndexAtDepth(method_info, depth);
     ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
                                                                    method->GetDexCache(),
                                                                    method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index df184bc..7f9b385 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -180,10 +180,10 @@
     ArtMethod** sp, CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
 
-  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+  const size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
   auto** caller_sp = reinterpret_cast<ArtMethod**>(
       reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
-  const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+  const size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
   uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
       (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
   ArtMethod* outer_method = *caller_sp;
@@ -201,18 +201,16 @@
       DCHECK(current_code != nullptr);
       DCHECK(current_code->IsOptimized());
       uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
-      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+      CodeInfo code_info(current_code);
       MethodInfo method_info = current_code->GetOptimizedMethodInfo();
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
-      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(stack_map.IsValid());
-      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+      if (stack_map.HasInlineInfo()) {
+        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
         caller = GetResolvedMethod(outer_method,
                                    method_info,
                                    inline_info,
-                                   encoding.inline_info.encoding,
-                                   inline_info.GetDepth(encoding.inline_info.encoding) - 1);
+                                   inline_info.GetDepth() - 1);
       }
     }
     if (kIsDebugBuild && do_caller_check) {
@@ -260,8 +258,15 @@
   return DoGetCalleeSaveMethodOuterCallerAndPc(sp, type).first;
 }
 
+ObjPtr<mirror::MethodHandle> ResolveMethodHandleFromCode(ArtMethod* referrer,
+                                                         uint32_t method_handle_idx) {
+  Thread::PoisonObjectPointersIfDebug();
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  return class_linker->ResolveMethodHandle(Thread::Current(), method_handle_idx, referrer);
+}
+
 ObjPtr<mirror::MethodType> ResolveMethodTypeFromCode(ArtMethod* referrer,
-                                                     uint32_t proto_idx) {
+                                                     dex::ProtoIndex proto_idx) {
   Thread::PoisonObjectPointersIfDebug();
   ObjPtr<mirror::MethodType> method_type =
       referrer->GetDexCache()->GetResolvedMethodType(proto_idx);
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 203ff3d..e33de9c 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -34,6 +34,7 @@
 namespace mirror {
 class Array;
 class Class;
+class MethodHandle;
 class MethodType;
 class Object;
 class String;
@@ -152,7 +153,12 @@
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
-ObjPtr<mirror::MethodType> ResolveMethodTypeFromCode(ArtMethod* referrer, uint32_t proto_idx)
+ObjPtr<mirror::MethodHandle> ResolveMethodHandleFromCode(ArtMethod* referrer,
+                                                         uint32_t method_handle_idx)
+    REQUIRES_SHARED(Locks::mutator_lock_)
+    REQUIRES(!Roles::uninterruptible_);
+
+ObjPtr<mirror::MethodType> ResolveMethodTypeFromCode(ArtMethod* referrer, dex::ProtoIndex proto_idx)
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index ef27ca3..6f1bbaa 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -21,16 +21,17 @@
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/mutex.h"
+#include "quick/quick_method_frame_info.h"
 #include "thread-inl.h"
 
 // Specific frame size code is in architecture-specific files. We include this to compile-time
 // specialize the code.
-#include "arch/arm/quick_method_frame_info_arm.h"
-#include "arch/arm64/quick_method_frame_info_arm64.h"
-#include "arch/mips/quick_method_frame_info_mips.h"
-#include "arch/mips64/quick_method_frame_info_mips64.h"
-#include "arch/x86/quick_method_frame_info_x86.h"
-#include "arch/x86_64/quick_method_frame_info_x86_64.h"
+#include "arch/arm/callee_save_frame_arm.h"
+#include "arch/arm64/callee_save_frame_arm64.h"
+#include "arch/mips/callee_save_frame_mips.h"
+#include "arch/mips64/callee_save_frame_mips64.h"
+#include "arch/x86/callee_save_frame_x86.h"
+#include "arch/x86_64/callee_save_frame_x86_64.h"
 
 namespace art {
 class ArtMethod;
@@ -67,57 +68,28 @@
   bool exit_check_;
 };
 
-static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, CalleeSaveType type) {
-  switch (isa) {
-    case InstructionSet::kArm:
-    case InstructionSet::kThumb2:
-      return arm::ArmCalleeSaveFrameSize(type);
-    case InstructionSet::kArm64:
-      return arm64::Arm64CalleeSaveFrameSize(type);
-    case InstructionSet::kMips:
-      return mips::MipsCalleeSaveFrameSize(type);
-    case InstructionSet::kMips64:
-      return mips64::Mips64CalleeSaveFrameSize(type);
-    case InstructionSet::kX86:
-      return x86::X86CalleeSaveFrameSize(type);
-    case InstructionSet::kX86_64:
-      return x86_64::X86_64CalleeSaveFrameSize(type);
-    case InstructionSet::kNone:
-      LOG(FATAL) << "kNone has no frame size";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Unknown ISA " << isa;
-  UNREACHABLE();
-}
+namespace detail_ {
 
-// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
-  switch (isa) {
-    case InstructionSet::kArm:
-    case InstructionSet::kThumb2:
-      return kArmPointerSize;
-    case InstructionSet::kArm64:
-      return kArm64PointerSize;
-    case InstructionSet::kMips:
-      return kMipsPointerSize;
-    case InstructionSet::kMips64:
-      return kMips64PointerSize;
-    case InstructionSet::kX86:
-      return kX86PointerSize;
-    case InstructionSet::kX86_64:
-      return kX86_64PointerSize;
-    case InstructionSet::kNone:
-      LOG(FATAL) << "kNone has no pointer size";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Unknown ISA " << isa;
-  UNREACHABLE();
-}
+template <InstructionSet>
+struct CSFSelector;  // No definition for unspecialized callee save frame selector.
 
-// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa, CalleeSaveType type) {
-  return GetCalleeSaveFrameSize(isa, type) - static_cast<size_t>(GetConstExprPointerSize(isa));
-}
+// Note: kThumb2 is never the kRuntimeISA.
+template <>
+struct CSFSelector<InstructionSet::kArm> { using type = arm::ArmCalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kArm64> { using type = arm64::Arm64CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kMips> { using type = mips::MipsCalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kMips64> { using type = mips64::Mips64CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kX86> { using type = x86::X86CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64CalleeSaveFrame; };
+
+}  // namespace detail_
+
+using RuntimeCalleeSaveFrame = detail_::CSFSelector<kRuntimeISA>::type;
 
 }  // namespace art
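
The detail_::CSFSelector trait above replaces the old runtime switch over InstructionSet with compile-time selection: only the specialization for kRuntimeISA is ever instantiated, so frame sizes and offsets become constant expressions. A self-contained sketch of the same selector pattern, with made-up frame types and sizes:

#include <cstddef>

enum class ToyIsa { kArm, kX86_64 };

struct ToyArmFrame    { static constexpr size_t GetFrameSize() { return 112; } };
struct ToyX86_64Frame { static constexpr size_t GetFrameSize() { return 208; } };

template <ToyIsa> struct ToyFrameSelector;  // No definition for unsupported ISAs.
template <> struct ToyFrameSelector<ToyIsa::kArm>    { using type = ToyArmFrame; };
template <> struct ToyFrameSelector<ToyIsa::kX86_64> { using type = ToyX86_64Frame; };

constexpr ToyIsa kToyRuntimeIsa = ToyIsa::kX86_64;
using ToyRuntimeFrame = ToyFrameSelector<kToyRuntimeIsa>::type;

// Resolved entirely at compile time; an unsupported ISA fails to compile
// instead of hitting a LOG(FATAL) at runtime.
static_assert(ToyRuntimeFrame::GetFrameSize() == 208, "selected at compile time");

int main() { return 0; }
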
 
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index d934a53..1804d9e 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -37,6 +37,7 @@
 extern "C" void* art_quick_initialize_static_storage(uint32_t);
 extern "C" void* art_quick_initialize_type(uint32_t);
 extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t);
+extern "C" void* art_quick_resolve_method_handle(uint32_t);
 extern "C" void* art_quick_resolve_method_type(uint32_t);
 extern "C" void* art_quick_resolve_string(uint32_t);
 
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index a4572f6..3f66045 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -37,6 +37,7 @@
   qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
   qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
   qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
   qpoints->pResolveMethodType = art_quick_resolve_method_type;
   qpoints->pResolveString = art_quick_resolve_string;
 
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 09cbfff..fa536c7 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -183,13 +183,24 @@
   return result.Ptr();
 }
 
+extern "C" mirror::MethodHandle* artResolveMethodHandleFromCode(uint32_t method_handle_idx,
+                                                                Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedQuickEntrypointChecks sqec(self);
+  auto caller_and_outer =
+      GetCalleeSaveMethodCallerAndOuterMethod(self, CalleeSaveType::kSaveEverything);
+  ArtMethod* caller = caller_and_outer.caller;
+  ObjPtr<mirror::MethodHandle> result = ResolveMethodHandleFromCode(caller, method_handle_idx);
+  return result.Ptr();
+}
+
 extern "C" mirror::MethodType* artResolveMethodTypeFromCode(uint32_t proto_idx, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
                                                                   CalleeSaveType::kSaveEverything);
   ArtMethod* caller = caller_and_outer.caller;
-  ObjPtr<mirror::MethodType> result = ResolveMethodTypeFromCode(caller, proto_idx);
+  ObjPtr<mirror::MethodType> result = ResolveMethodTypeFromCode(caller, dex::ProtoIndex(proto_idx));
   return result.Ptr();
 }
 
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 39dcd39..3a8faca 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -38,6 +38,7 @@
   V(InitializeStaticStorage, void*, uint32_t) \
   V(InitializeTypeAndVerifyAccess, void*, uint32_t) \
   V(InitializeType, void*, uint32_t) \
+  V(ResolveMethodHandle, void*, uint32_t) \
   V(ResolveMethodType, void*, uint32_t) \
   V(ResolveString, void*, uint32_t) \
 \
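
For context, the list above is an X-macro: each V(name, return-type, arg-type) row is expanded once to declare the entrypoint field and again wherever the table is initialized, which is why ResolveMethodHandle is a one-line addition here. A self-contained sketch of the pattern with invented entrypoint names:

#include <cstdint>
#include <cstdio>

#define TOY_ENTRYPOINT_LIST(V) \
  V(ResolveThing, void*, uint32_t) \
  V(DoOtherThing, void*, uint32_t)

struct ToyEntryPoints {
#define TOY_DECLARE_FIELD(name, ret, arg) ret (*p##name)(arg);
  TOY_ENTRYPOINT_LIST(TOY_DECLARE_FIELD)
#undef TOY_DECLARE_FIELD
};

void* StubResolveThing(uint32_t idx) { std::printf("resolve %u\n", idx); return nullptr; }
void* StubDoOtherThing(uint32_t idx) { std::printf("other %u\n", idx); return nullptr; }

int main() {
  ToyEntryPoints points;
  // Reuse the same list to wire every slot; adding a row updates both expansions.
#define TOY_INIT_FIELD(name, ret, arg) points.p##name = Stub##name;
  TOY_ENTRYPOINT_LIST(TOY_INIT_FIELD)
#undef TOY_INIT_FIELD
  points.pResolveThing(7u);
  return 0;
}
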
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 39429c5..ff85f47 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -26,6 +26,7 @@
 #include "dex/dex_instruction-inl.h"
 #include "dex/method_reference.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "imt_conflict_table.h"
@@ -42,6 +43,7 @@
 #include "mirror/method_handle_impl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/var_handle.h"
 #include "oat_file.h"
 #include "oat_quick_method_header.h"
 #include "quick_exception_handler.h"
@@ -49,6 +51,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "thread-inl.h"
+#include "var_handles.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -59,7 +62,16 @@
   static constexpr size_t kBytesStackArgLocation = 4;
   // Frame size in bytes of a callee-save frame for RefsAndArgs.
   static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
-      GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs);
+      RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+  // Offset of first GPR arg.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+      RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+  // Offset of first FPR arg.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+      RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+  // Offset of return address.
+  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
 #if defined(__arm__)
   // The callee save frame is pointed to by SP.
   // | argN       |  |
@@ -87,12 +99,6 @@
   static constexpr size_t kNumQuickGprArgs = 3;
   static constexpr size_t kNumQuickFprArgs = 16;
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
-      arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
-      arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
-      arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -125,15 +131,6 @@
   static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
-  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
-      arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
-  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
-      arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
-  // Offset of return address.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
-      arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -177,9 +174,6 @@
                                                   // passed only in even numbered registers and each
                                                   // double occupies two registers.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -221,9 +215,6 @@
   static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = true;
 
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24;  // Offset of first FPR arg (F13).
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80;  // Offset of first GPR arg (A1).
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -254,9 +245,6 @@
   static constexpr size_t kNumQuickGprArgs = 3;  // 3 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 4;  // 4 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
@@ -296,9 +284,6 @@
   static constexpr size_t kNumQuickGprArgs = 5;  // 5 arguments passed in GPRs.
   static constexpr size_t kNumQuickFprArgs = 8;  // 8 arguments passed in FPRs.
   static constexpr bool kGprFprLockstep = false;
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16;  // Offset of first FPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8;  // Offset of first GPR arg.
-  static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8;  // Offset of return address.
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     switch (gpr_index) {
       case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
@@ -345,8 +330,8 @@
 
   static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
-    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
-                                                            CalleeSaveType::kSaveRefsAndArgs);
+    constexpr size_t callee_frame_size =
+        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -354,16 +339,14 @@
     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
 
     if (current_code->IsOptimized()) {
-      CodeInfo code_info = current_code->GetOptimizedCodeInfo();
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
-      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
+      CodeInfo code_info(current_code);
+      StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
       DCHECK(stack_map.IsValid());
-      if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-        return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
-                                           inline_info.GetDepth(encoding.inline_info.encoding)-1);
+      if (stack_map.HasInlineInfo()) {
+        InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+        return inline_info.GetDexPcAtDepth(inline_info.GetDepth()-1);
       } else {
-        return stack_map.GetDexPc(encoding.stack_map.encoding);
+        return stack_map.GetDexPc();
       }
     } else {
       return current_code->ToDexPc(*caller_sp, outer_pc);
@@ -373,8 +356,8 @@
   static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
-    const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
-                                                            CalleeSaveType::kSaveRefsAndArgs);
+    constexpr size_t callee_frame_size =
+        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -383,13 +366,12 @@
       return false;
     }
     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
-    CodeInfo code_info = current_code->GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    CodeInfo code_info(current_code);
     MethodInfo method_info = current_code->GetOptimizedMethodInfo();
-    InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
+    InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset));
     if (invoke.IsValid()) {
-      *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
-      *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
+      *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType());
+      *dex_method_index = invoke.GetMethodIndex(method_info);
       return true;
     }
     return false;
@@ -398,8 +380,9 @@
   // For the given quick ref and args quick frame, return the caller's PC.
   static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK((*sp)->IsCalleeSaveMethod());
-    uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
-    return *reinterpret_cast<uintptr_t*>(lr);
+    uint8_t* return_address_spill =
+        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
+    return *reinterpret_cast<uintptr_t*>(return_address_spill);
   }
 
   QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
@@ -1157,8 +1140,8 @@
   CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
                                      << self->GetException()->Dump();
   // Compute address of return PC and sanity check that it currently holds 0.
-  size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA,
-                                                        CalleeSaveType::kSaveEverything);
+  constexpr size_t return_pc_offset =
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
   uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
                                                       return_pc_offset);
   CHECK_EQ(*return_pc, 0U);
@@ -1210,10 +1193,10 @@
   constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
   CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
 
-  const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+  constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
   auto** caller_sp = reinterpret_cast<ArtMethod**>(
       reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
-  const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+  constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
   uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
       (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
   ArtMethod* outer_method = *caller_sp;
@@ -1228,12 +1211,11 @@
   CHECK(current_code != nullptr);
   CHECK(current_code->IsOptimized());
   uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
-  CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+  CodeInfo code_info(current_code);
   MethodInfo method_info = current_code->GetOptimizedMethodInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   CHECK(stack_map.IsValid());
-  uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
+  uint32_t dex_pc = stack_map.GetDexPc();
 
   // Log the outer method and its associated dex file and class table pointer which can be used
   // to find out if the inlined methods were defined by other dex file(s) or class loader(s).
@@ -1247,20 +1229,17 @@
   LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(outer_method, dex_pc);
 
   ArtMethod* caller = outer_method;
-  if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
-    const InlineInfoEncoding& inline_info_encoding = encoding.inline_info.encoding;
-    size_t depth = inline_info.GetDepth(inline_info_encoding);
+  if (stack_map.HasInlineInfo()) {
+    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+    size_t depth = inline_info.GetDepth();
     for (size_t d = 0; d < depth; ++d) {
       const char* tag = "";
-      dex_pc = inline_info.GetDexPcAtDepth(inline_info_encoding, d);
-      if (inline_info.EncodesArtMethodAtDepth(inline_info_encoding, d)) {
+      dex_pc = inline_info.GetDexPcAtDepth(d);
+      if (inline_info.EncodesArtMethodAtDepth(d)) {
         tag = "encoded ";
-        caller = inline_info.GetArtMethodAtDepth(inline_info_encoding, d);
+        caller = inline_info.GetArtMethodAtDepth(d);
       } else {
-        uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info_encoding,
-                                                                  method_info,
-                                                                  d);
+        uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, d);
         if (dex_pc == static_cast<uint32_t>(-1)) {
           tag = "special ";
           CHECK_EQ(d + 1u, depth);
@@ -2766,7 +2745,7 @@
   const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
   DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
          inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
-  const uint32_t proto_idx = inst.VRegH();
+  const dex::ProtoIndex proto_idx(inst.VRegH());
   const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
   const size_t shorty_length = strlen(shorty);
   static const bool kMethodIsStatic = false;  // invoke() and invokeExact() are not static.
@@ -2789,13 +2768,6 @@
     return static_cast<uintptr_t>('V');
   }
 
-  // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996).
-  DCHECK_EQ(resolved_method->GetDeclaringClass(),
-            WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle));
-
-  Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
-      ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
-
   Handle<mirror::MethodType> method_type(
       hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
 
@@ -2835,24 +2807,43 @@
   // Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in
   // consecutive order.
   RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
-  bool isExact = (jni::EncodeArtMethod(resolved_method) ==
-                  WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
+  Intrinsics intrinsic = static_cast<Intrinsics>(resolved_method->GetIntrinsic());
   bool success = false;
-  if (isExact) {
-    success = MethodHandleInvokeExact(self,
+  if (resolved_method->GetDeclaringClass() == mirror::MethodHandle::StaticClass()) {
+    Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
+        ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
+    if (intrinsic == Intrinsics::kMethodHandleInvokeExact) {
+      success = MethodHandleInvokeExact(self,
+                                        *shadow_frame,
+                                        method_handle,
+                                        method_type,
+                                        &operands,
+                                        result);
+    } else {
+      DCHECK_EQ(static_cast<uint32_t>(intrinsic),
+                static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
+      success = MethodHandleInvoke(self,
+                                   *shadow_frame,
+                                   method_handle,
+                                   method_type,
+                                   &operands,
+                                   result);
+    }
+  } else {
+    DCHECK_EQ(mirror::VarHandle::StaticClass(), resolved_method->GetDeclaringClass());
+    Handle<mirror::VarHandle> var_handle(hs.NewHandle(
+        ObjPtr<mirror::VarHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
+    mirror::VarHandle::AccessMode access_mode =
+        mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic);
+    success = VarHandleInvokeAccessor(self,
                                       *shadow_frame,
-                                      method_handle,
+                                      var_handle,
                                       method_type,
+                                      access_mode,
                                       &operands,
                                       result);
-  } else {
-    success = MethodHandleInvoke(self,
-                                 *shadow_frame,
-                                 method_handle,
-                                 method_type,
-                                 &operands,
-                                 result);
   }
+
   DCHECK(success || self->IsExceptionPending());
 
   // Pop transition record.
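
Illustrative sketch (not part of the patch): the trampoline changes above drop the per-architecture `kQuickCalleeSaveFrame_RefAndArgs_*` constants in favor of constexpr queries such as `RuntimeCalleeSaveFrame::GetFrameSize` and `GetReturnPcOffset`. The example below is a hand-rolled, simplified stand-in for that pattern; the layout (one method slot, spilled GPRs, return PC last) and the spill mask are assumptions for the example, not ART's actual frame layout:

```cpp
// Hypothetical constexpr frame-layout helper: derive frame size and the
// return-PC offset from a spill mask instead of hard-coding per-ISA numbers.
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr size_t kGprSize = 8;          // bytes per spilled GPR (assumed)
constexpr size_t kFrameAlignment = 16;  // stack alignment (assumed)

constexpr size_t PopCount(uint32_t mask) {
  size_t n = 0;
  while (mask != 0) {
    n += mask & 1u;
    mask >>= 1;
  }
  return n;
}

constexpr size_t RoundUp(size_t x, size_t align) {
  return (x + align - 1) / align * align;
}

// Frame = method pointer slot + spilled GPRs + return PC, rounded up.
constexpr size_t FrameSize(uint32_t core_spill_mask) {
  return RoundUp(/*method*/ 8 + PopCount(core_spill_mask) * kGprSize + /*return pc*/ 8,
                 kFrameAlignment);
}

// Under this assumed layout the return PC sits in the last slot of the frame.
constexpr size_t ReturnPcOffset(uint32_t core_spill_mask) {
  return FrameSize(core_spill_mask) - 8;
}

int main() {
  constexpr uint32_t kSaveRefsAndArgsSpills = 0b0000111111110000;  // hypothetical mask
  static_assert(FrameSize(kSaveRefsAndArgsSpills) % kFrameAlignment == 0, "aligned");
  std::cout << "frame size: " << FrameSize(kSaveRefsAndArgsSpills)
            << ", return pc offset: " << ReturnPcOffset(kSaveRefsAndArgsSpills) << "\n";
  return 0;
}
```

Because everything is constexpr, the callers shown above can use the results in `static constexpr` members and `constexpr size_t` locals, which is exactly what the replaced `const size_t` variables could not do.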
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 77b3132..89694e3 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -54,15 +54,6 @@
     return save_method;
   }
 
-  static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
-      NO_THREAD_SAFETY_ANALYSIS {
-    ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
-    QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
-    EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
-        << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
-        << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
-  }
-
   static void CheckPCOffset(InstructionSet isa, CalleeSaveType type, size_t pc_offset)
       NO_THREAD_SAFETY_ANALYSIS {
     ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
@@ -74,79 +65,36 @@
   }
 };
 
-// Note: these tests are all runtime tests. They let the Runtime create the corresponding ArtMethod
-// and check against it. Technically we know and expect certain values, but the Runtime code is
-// not constexpr, so we cannot make this compile-time checks (and I want the Runtime code tested).
-
-// This test ensures that kQuickCalleeSaveFrame_RefAndArgs_FrameSize is correct.
-TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
-  // We have to use a define here as the callee_save_frame.h functions are constexpr.
-#define CHECK_FRAME_SIZE(isa)                                                        \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveRefsAndArgs,                                   \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsAndArgs));     \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveRefsOnly,                                      \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsOnly));        \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveAllCalleeSaves,                                \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveAllCalleeSaves));  \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveEverything,                                    \
-                 GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveEverything));      \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveEverythingForClinit,                           \
-                 GetCalleeSaveFrameSize(isa,                                         \
-                                        CalleeSaveType::kSaveEverythingForClinit));  \
-  CheckFrameSize(isa,                                                                \
-                 CalleeSaveType::kSaveEverythingForSuspendCheck,                     \
-                 GetCalleeSaveFrameSize(                                             \
-                     isa, CalleeSaveType::kSaveEverythingForSuspendCheck))
-
-  CHECK_FRAME_SIZE(InstructionSet::kArm);
-  CHECK_FRAME_SIZE(InstructionSet::kArm64);
-  CHECK_FRAME_SIZE(InstructionSet::kMips);
-  CHECK_FRAME_SIZE(InstructionSet::kMips64);
-  CHECK_FRAME_SIZE(InstructionSet::kX86);
-  CHECK_FRAME_SIZE(InstructionSet::kX86_64);
-}
-
-// This test ensures that GetConstExprPointerSize is correct with respect to
-// GetInstructionSetPointerSize.
-TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm),
-            GetConstExprPointerSize(InstructionSet::kArm));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm64),
-            GetConstExprPointerSize(InstructionSet::kArm64));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips),
-            GetConstExprPointerSize(InstructionSet::kMips));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips64),
-            GetConstExprPointerSize(InstructionSet::kMips64));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86),
-            GetConstExprPointerSize(InstructionSet::kX86));
-  EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86_64),
-            GetConstExprPointerSize(InstructionSet::kX86_64));
-}
-
 // This test ensures that the constexpr specialization of the return PC offset computation in
 // GetCalleeSavePCOffset is correct.
 TEST_F(QuickTrampolineEntrypointsTest, ReturnPC) {
   // Ensure that the computation in callee_save_frame.h is correct.
   // Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
   // sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverything,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveEverything));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForClinit,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForClinit));
-  CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForSuspendCheck,
-                GetCalleeSaveReturnPcOffset(kRuntimeISA,
-                                            CalleeSaveType::kSaveEverythingForSuspendCheck));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveRefsAndArgs,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveRefsOnly,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsOnly));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveAllCalleeSaves,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveAllCalleeSaves));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveEverything,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveEverythingForClinit,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverythingForClinit));
+  CheckPCOffset(
+      kRuntimeISA,
+      CalleeSaveType::kSaveEverythingForSuspendCheck,
+      RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverythingForSuspendCheck));
 }
 
 }  // namespace art
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index b0689f6..1337cd5 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -183,7 +183,8 @@
                          sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveMethodType, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveMethodHandle, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveMethodHandle, pResolveMethodType, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveMethodType, pResolveString, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, sizeof(void*));
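
Illustrative sketch (not part of the patch): the test above checks that inserting `pResolveMethodHandle` keeps consecutive entry-point slots exactly `sizeof(void*)` apart. A self-contained, hypothetical version of that kind of layout check, using a made-up struct rather than ART's real `QuickEntryPoints`:

```cpp
// Hypothetical layout check: consecutive function-pointer members must be
// sizeof(void*) apart so assembly stubs that index the table by fixed
// offsets stay in sync with the C++ declaration.
#include <cstddef>
#include <iostream>

struct QuickEntryPointsSketch {
  void* pInitializeType;
  void* pResolveMethodHandle;  // newly inserted slot
  void* pResolveMethodType;
  void* pResolveString;
};

static_assert(offsetof(QuickEntryPointsSketch, pResolveMethodHandle) -
                  offsetof(QuickEntryPointsSketch, pInitializeType) == sizeof(void*),
              "pResolveMethodHandle must directly follow pInitializeType");
static_assert(offsetof(QuickEntryPointsSketch, pResolveMethodType) -
                  offsetof(QuickEntryPointsSketch, pResolveMethodHandle) == sizeof(void*),
              "pResolveMethodType must directly follow pResolveMethodHandle");

int main() {
  std::cout << "entry point table size: " << sizeof(QuickEntryPointsSketch) << " bytes\n";
  return 0;
}
```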
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 46630db..464c2b7 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -90,16 +90,24 @@
 DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
 #define LOCK_WORD_STATE_SHIFT 30
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
-#define LOCK_WORD_STATE_MASK 0xc0000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
+#define LOCK_WORD_STATE_MASK_SHIFTED 0xc0000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
 #define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
 #define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
 #define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SIZE 12
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SIZE), (static_cast<int32_t>(art::LockWord::kThinLockCountSize)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SHIFT 16
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SHIFT), (static_cast<int32_t>(art::LockWord::kThinLockCountShift)))
+#define LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED 0xfff0000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockCountMaskShifted)))
+#define LOCK_WORD_THIN_LOCK_COUNT_ONE 0x10000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<uint32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED 0xffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockOwnerMaskShifted)))
 #define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
 #define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
@@ -110,6 +118,8 @@
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
 #define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
 DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
+#define LOCK_WORD_GC_STATE_SIZE 2
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SIZE), (static_cast<int32_t>(art::LockWord::kGCStateSize)))
 #define LOCK_WORD_GC_STATE_SHIFT 28
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
 #define LOCK_WORD_MARK_BIT_SHIFT 29
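
Illustrative sketch (not part of the patch): the new lock-word constants above follow directly from the bit layout implied by the shifts and sizes in this header (owner bits 0..15, thin-lock count bits 16..27, GC state bits 28..29, state bits 30..31). The short program below just reproduces that arithmetic as compile-time checks; it mirrors the numbers, not ART's `LockWord` class:

```cpp
// Check that the shifted masks follow from the shift/size constants.
#include <cstdint>
#include <iostream>

constexpr uint32_t kThinLockOwnerSize = 16;
constexpr uint32_t kThinLockCountSize = 12;
constexpr uint32_t kThinLockCountShift = kThinLockOwnerSize;                   // 16
constexpr uint32_t kGCStateShift = kThinLockCountShift + kThinLockCountSize;   // 28
constexpr uint32_t kGCStateSize = 2;
constexpr uint32_t kStateShift = kGCStateShift + kGCStateSize;                 // 30

constexpr uint32_t MaskShifted(uint32_t size, uint32_t shift) {
  return ((1u << size) - 1u) << shift;
}

static_assert(MaskShifted(kThinLockOwnerSize, 0) == 0xffffu, "owner mask");
static_assert(MaskShifted(kThinLockCountSize, kThinLockCountShift) == 0x0fff0000u, "count mask");
static_assert((1u << kThinLockCountShift) == 0x10000u, "count one");
static_assert(MaskShifted(kGCStateSize, kGCStateShift) == 0x30000000u, "gc state mask");
static_assert(MaskShifted(2, kStateShift) == 0xc0000000u, "state mask");

int main() {
  std::cout << std::hex
            << "count mask: 0x" << MaskShifted(kThinLockCountSize, kThinLockCountShift)
            << ", count one: 0x" << (1u << kThinLockCountShift) << "\n";
  return 0;
}
```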
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index 9445ae0..e41d1d3 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#include <metricslogger/metrics_logger.h>
-
 #include "hidden_api.h"
 
 #include <nativehelper/scoped_local_ref.h>
@@ -24,11 +22,14 @@
 #include "thread-current-inl.h"
 #include "well_known_classes.h"
 
+#ifdef ART_TARGET_ANDROID
+#include <metricslogger/metrics_logger.h>
 using android::metricslogger::ComplexEventLogger;
 using android::metricslogger::ACTION_HIDDEN_API_ACCESSED;
 using android::metricslogger::FIELD_HIDDEN_API_ACCESS_METHOD;
 using android::metricslogger::FIELD_HIDDEN_API_ACCESS_DENIED;
 using android::metricslogger::FIELD_HIDDEN_API_SIGNATURE;
+#endif
 
 namespace art {
 namespace hiddenapi {
@@ -39,7 +40,7 @@
 // Note that when flipping this flag, you must also update the expectations of test 674-hiddenapi
 // as it affects whether or not we warn for light grey APIs that have been added to the exemptions
 // list.
-static constexpr bool kLogAllAccesses = true;
+static constexpr bool kLogAllAccesses = false;
 
 static inline std::ostream& operator<<(std::ostream& os, AccessMethod value) {
   switch (value) {
@@ -137,6 +138,7 @@
   LOG(WARNING) << "Accessing hidden " << (type_ == kField ? "field " : "method ")
                << Dumpable<MemberSignature>(*this) << " (" << list << ", " << access_method << ")";
 }
+#ifdef ART_TARGET_ANDROID
 // Convert an AccessMethod enum to a value for logging from the proto enum.
 // This method may look odd (the enum values are currently the same), but it
 // prevents coupling the internal enum to the proto enum (which should never
@@ -156,12 +158,15 @@
       DCHECK(false);
   }
 }
+#endif
 
 void MemberSignature::LogAccessToEventLog(AccessMethod access_method, Action action_taken) {
-  if (access_method == kLinking) {
+#ifdef ART_TARGET_ANDROID
+  if (access_method == kLinking || access_method == kNone) {
     // Linking warnings come from static analysis/compilation of the bytecode
     // and can contain false positives (i.e. code that is never run). We choose
     // not to log these in the event log.
+    // None does not correspond to actual access, so should also be ignored.
     return;
   }
   ComplexEventLogger log_maker(ACTION_HIDDEN_API_ACCESSED);
@@ -173,6 +178,10 @@
   Dump(signature_str);
   log_maker.AddTaggedData(FIELD_HIDDEN_API_SIGNATURE, signature_str.str());
   log_maker.Record();
+#else
+  UNUSED(access_method);
+  UNUSED(action_taken);
+#endif
 }
 
 static ALWAYS_INLINE bool CanUpdateMemberAccessFlags(ArtField*) {
@@ -210,7 +219,8 @@
   // - for non-debuggable apps, there is no distinction between light grey & whitelisted APIs.
   // - we want to avoid the overhead of checking for exemptions for light greylisted APIs whenever
   //   possible.
-  if (kLogAllAccesses || action == kDeny || runtime->IsJavaDebuggable()) {
+  const bool shouldWarn = kLogAllAccesses || runtime->IsJavaDebuggable();
+  if (shouldWarn || action == kDeny) {
     if (member_signature.IsExempted(runtime->GetHiddenApiExemptions())) {
       action = kAllow;
       // Avoid re-examining the exemption list next time.
@@ -227,7 +237,7 @@
     }
   }
 
-  if (kIsTargetBuild) {
+  if (kIsTargetBuild && !kIsTargetLinux) {
     uint32_t eventLogSampleRate = runtime->GetHiddenApiEventLogSampleRate();
     // Assert that RAND_MAX is big enough, to ensure sampling below works as expected.
     static_assert(RAND_MAX >= 0xffff, "RAND_MAX too small");
@@ -251,7 +261,8 @@
     MaybeWhitelistMember(runtime, member);
 
     // If this action requires a UI warning, set the appropriate flag.
-    if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) {
+    if (shouldWarn &&
+        (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag())) {
       runtime->SetPendingHiddenApiWarning(true);
     }
   }
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 8e21fd3..580224e 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -95,6 +95,22 @@
   }
 }
 
+class ScopedHiddenApiEnforcementPolicySetting {
+ public:
+  explicit ScopedHiddenApiEnforcementPolicySetting(EnforcementPolicy new_policy)
+      : initial_policy_(Runtime::Current()->GetHiddenApiEnforcementPolicy()) {
+    Runtime::Current()->SetHiddenApiEnforcementPolicy(new_policy);
+  }
+
+  ~ScopedHiddenApiEnforcementPolicySetting() {
+    Runtime::Current()->SetHiddenApiEnforcementPolicy(initial_policy_);
+  }
+
+ private:
+  const EnforcementPolicy initial_policy_;
+  DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiEnforcementPolicySetting);
+};
+
 // Implementation details. DO NOT ACCESS DIRECTLY.
 namespace detail {
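
Illustrative sketch (not part of the patch): `ScopedHiddenApiEnforcementPolicySetting`, added above, is the standard RAII "scoped setting" pattern: capture the current value in the constructor, install the override, and restore the original in the destructor so the override cannot leak past the enclosing scope. A generic, self-contained version with a hypothetical `Config` singleton in place of `Runtime`:

```cpp
// Hypothetical RAII scoped-setting helper, mirroring the pattern above.
#include <iostream>

enum class EnforcementPolicy { kNoChecks, kJustWarn, kEnabled };

class Config {
 public:
  static Config& Current() { static Config config; return config; }
  EnforcementPolicy GetPolicy() const { return policy_; }
  void SetPolicy(EnforcementPolicy p) { policy_ = p; }
 private:
  EnforcementPolicy policy_ = EnforcementPolicy::kEnabled;
};

class ScopedPolicySetting {
 public:
  explicit ScopedPolicySetting(EnforcementPolicy new_policy)
      : initial_policy_(Config::Current().GetPolicy()) {
    Config::Current().SetPolicy(new_policy);
  }
  ~ScopedPolicySetting() { Config::Current().SetPolicy(initial_policy_); }
  ScopedPolicySetting(const ScopedPolicySetting&) = delete;
  ScopedPolicySetting& operator=(const ScopedPolicySetting&) = delete;
 private:
  const EnforcementPolicy initial_policy_;
};

int main() {
  {
    ScopedPolicySetting no_checks(EnforcementPolicy::kNoChecks);
    // Checks are relaxed only inside this scope (useful in tests).
    std::cout << "inside: " << static_cast<int>(Config::Current().GetPolicy()) << "\n";
  }
  std::cout << "after:  " << static_cast<int>(Config::Current().GetPolicy()) << "\n";
  return 0;
}
```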
 
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 8a85ee4..5a50ec5 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -24,7 +24,7 @@
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "intrinsics_enum.h"
 #include "jit/jit.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
 #include "method_handles-inl.h"
 #include "method_handles.h"
 #include "mirror/array-inl.h"
@@ -37,6 +37,7 @@
 #include "stack.h"
 #include "thread-inl.h"
 #include "transaction.h"
+#include "var_handles.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -626,7 +627,8 @@
 
   // The vRegH value gives the index of the proto_id associated with this
   // signature polymorphic call site.
-  const uint32_t callsite_proto_id = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+  const uint16_t vRegH = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+  const dex::ProtoIndex callsite_proto_id(vRegH);
 
   // Call through to the classlinker and ask it to resolve the static type associated
   // with the callsite. This information is stored in the dex cache so it's
@@ -724,38 +726,6 @@
   }
 }
 
-static bool DoVarHandleInvokeChecked(Thread* self,
-                                     Handle<mirror::VarHandle> var_handle,
-                                     Handle<mirror::MethodType> callsite_type,
-                                     mirror::VarHandle::AccessMode access_mode,
-                                     ShadowFrame& shadow_frame,
-                                     InstructionOperands* operands,
-                                     JValue* result)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // TODO(oth): GetMethodTypeForAccessMode() allocates a MethodType()
-  // which is only required if we need to convert argument and/or
-  // return types.
-  StackHandleScope<1> hs(self);
-  Handle<mirror::MethodType> accessor_type(hs.NewHandle(
-      var_handle->GetMethodTypeForAccessMode(self, access_mode)));
-  const size_t num_vregs = accessor_type->NumberOfVRegs();
-  const int num_params = accessor_type->GetPTypes()->GetLength();
-  ShadowFrameAllocaUniquePtr accessor_frame =
-      CREATE_SHADOW_FRAME(num_vregs, nullptr, shadow_frame.GetMethod(), shadow_frame.GetDexPC());
-  ShadowFrameGetter getter(shadow_frame, operands);
-  static const uint32_t kFirstDestinationReg = 0;
-  ShadowFrameSetter setter(accessor_frame.get(), kFirstDestinationReg);
-  if (!PerformConversions(self, callsite_type, accessor_type, &getter, &setter, num_params)) {
-    return false;
-  }
-  RangeInstructionOperands accessor_operands(kFirstDestinationReg,
-                                             kFirstDestinationReg + num_vregs);
-  if (!var_handle->Access(access_mode, accessor_frame.get(), &accessor_operands, result)) {
-    return false;
-  }
-  return ConvertReturnValue(callsite_type, accessor_type, result);
-}
-
 static bool DoVarHandleInvokeCommon(Thread* self,
                                     ShadowFrame& shadow_frame,
                                     const Instruction* inst,
@@ -768,59 +738,43 @@
     return false;
   }
 
-  bool is_var_args = inst->HasVarArgs();
-  const uint32_t vRegC = is_var_args ? inst->VRegC_45cc() : inst->VRegC_4rcc();
-  ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(vRegC));
-  if (receiver.IsNull()) {
-    ThrowNullPointerExceptionFromDexPC();
-    return false;
-  }
-
   StackHandleScope<2> hs(self);
-  Handle<mirror::VarHandle> var_handle(hs.NewHandle(down_cast<mirror::VarHandle*>(receiver.Ptr())));
-  if (!var_handle->IsAccessModeSupported(access_mode)) {
-    ThrowUnsupportedOperationException();
-    return false;
-  }
-
-  const uint32_t vRegH = is_var_args ? inst->VRegH_45cc() : inst->VRegH_4rcc();
+  bool is_var_args = inst->HasVarArgs();
+  const uint16_t vRegH = is_var_args ? inst->VRegH_45cc() : inst->VRegH_4rcc();
   ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
   Handle<mirror::MethodType> callsite_type(hs.NewHandle(
-      class_linker->ResolveMethodType(self, vRegH, shadow_frame.GetMethod())));
+      class_linker->ResolveMethodType(self, dex::ProtoIndex(vRegH), shadow_frame.GetMethod())));
   // This implies we couldn't resolve one or more types in this VarHandle.
   if (UNLIKELY(callsite_type == nullptr)) {
     CHECK(self->IsExceptionPending());
     return false;
   }
 
-  if (!var_handle->IsMethodTypeCompatible(access_mode, callsite_type.Get())) {
-    ThrowWrongMethodTypeException(var_handle->GetMethodTypeForAccessMode(self, access_mode),
-                                  callsite_type.Get());
-    return false;
-  }
-
+  const uint32_t vRegC = is_var_args ? inst->VRegC_45cc() : inst->VRegC_4rcc();
+  ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(vRegC));
+  Handle<mirror::VarHandle> var_handle(hs.NewHandle(down_cast<mirror::VarHandle*>(receiver.Ptr())));
   if (is_var_args) {
     uint32_t args[Instruction::kMaxVarArgRegs];
     inst->GetVarArgs(args, inst_data);
     VarArgsInstructionOperands all_operands(args, inst->VRegA_45cc());
     NoReceiverInstructionOperands operands(&all_operands);
-    return DoVarHandleInvokeChecked(self,
-                                    var_handle,
-                                    callsite_type,
-                                    access_mode,
-                                    shadow_frame,
-                                    &operands,
-                                    result);
+    return VarHandleInvokeAccessor(self,
+                                   shadow_frame,
+                                   var_handle,
+                                   callsite_type,
+                                   access_mode,
+                                   &operands,
+                                   result);
   } else {
     RangeInstructionOperands all_operands(inst->VRegC_4rcc(), inst->VRegA_4rcc());
     NoReceiverInstructionOperands operands(&all_operands);
-    return DoVarHandleInvokeChecked(self,
-                                    var_handle,
-                                    callsite_type,
-                                    access_mode,
-                                    shadow_frame,
-                                    &operands,
-                                    result);
+    return VarHandleInvokeAccessor(self,
+                                   shadow_frame,
+                                   var_handle,
+                                   callsite_type,
+                                   access_mode,
+                                   &operands,
+                                   result);
   }
 }
 
@@ -965,9 +919,10 @@
       StackHandleScope<2> hs(self);
       Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
       Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
-      uint32_t index = static_cast<uint32_t>(encoded_value->GetI());
+      dex::ProtoIndex proto_idx(encoded_value->GetC());
       ClassLinker* cl = Runtime::Current()->GetClassLinker();
-      ObjPtr<mirror::MethodType> o = cl->ResolveMethodType(self, index, dex_cache, class_loader);
+      ObjPtr<mirror::MethodType> o =
+          cl->ResolveMethodType(self, proto_idx, dex_cache, class_loader);
       if (UNLIKELY(o.IsNull())) {
         DCHECK(self->IsExceptionPending());
         return false;
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0818e06..67a0349 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -217,7 +217,7 @@
 }
 
 static inline ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
-                                                           uint32_t method_type_index,
+                                                           dex::ProtoIndex method_type_index,
                                                            ArtMethod* referrer)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 283885e..5c7838c 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -565,7 +565,7 @@
         PREAMBLE();
         ClassLinker* cl = Runtime::Current()->GetClassLinker();
         ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self,
-                                                              inst->VRegB_21c(),
+                                                              dex::ProtoIndex(inst->VRegB_21c()),
                                                               shadow_frame.GetMethod());
         if (UNLIKELY(mt == nullptr)) {
           HANDLE_PENDING_EXCEPTION();
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 2a9ef2c..1b39a74 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -408,7 +408,8 @@
                                        ShadowFrame* shadow_frame,
                                        Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::MethodType> mt = ResolveMethodType(self, index, shadow_frame->GetMethod());
+  ObjPtr<mirror::MethodType> mt =
+      ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
   if (UNLIKELY(mt == nullptr)) {
     return true;
   }
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 791ebf0..0e429a6 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -510,7 +510,7 @@
     result->SetZ(false);
     return;
   }
-  mirror::String* class_name = nullptr;
+  ObjPtr<mirror::String> class_name = nullptr;
   if (!annotations::GetInnerClass(klass, &class_name)) {
     result->SetZ(false);
     return;
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 0684b46..5d4b9e8 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -46,8 +46,6 @@
 namespace jit {
 
 static constexpr bool kEnableOnStackReplacement = true;
-// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
-static constexpr int kJitPoolThreadPthreadPriority = 9;
 
 // Different compilation threshold constants. These can be overridden on the command line.
 static constexpr size_t kJitDefaultCompileThreshold           = 10000;  // Non-debug default.
@@ -80,6 +78,8 @@
       options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
   jit_options->profile_saver_options_ =
       options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
+  jit_options->thread_pool_pthread_priority_ =
+      options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
 
   if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
     jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
@@ -167,21 +167,14 @@
   cumulative_timings_.AddLogger(logger);
 }
 
-Jit::Jit() : dump_info_on_shutdown_(false),
-             cumulative_timings_("JIT timings"),
-             memory_use_("Memory used for compilation", 16),
-             lock_("JIT memory use lock"),
-             use_jit_compilation_(true),
-             hot_method_threshold_(0),
-             warm_method_threshold_(0),
-             osr_method_threshold_(0),
-             priority_thread_weight_(0),
-             invoke_transition_weight_(0) {}
+Jit::Jit(JitOptions* options) : options_(options),
+                                cumulative_timings_("JIT timings"),
+                                memory_use_("Memory used for compilation", 16),
+                                lock_("JIT memory use lock") {}
 
 Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
   DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
-  std::unique_ptr<Jit> jit(new Jit);
-  jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
+  std::unique_ptr<Jit> jit(new Jit(options));
   if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
     return nullptr;
   }
@@ -195,8 +188,6 @@
   if (jit->GetCodeCache() == nullptr) {
     return nullptr;
   }
-  jit->use_jit_compilation_ = options->UseJitCompilation();
-  jit->profile_saver_options_ = options->GetProfileSaverOptions();
   VLOG(jit) << "JIT created with initial_capacity="
       << PrettySize(options->GetCodeCacheInitialCapacity())
       << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
@@ -204,12 +195,6 @@
       << ", profile_saver_options=" << options->GetProfileSaverOptions();
 
 
-  jit->hot_method_threshold_ = options->GetCompileThreshold();
-  jit->warm_method_threshold_ = options->GetWarmupThreshold();
-  jit->osr_method_threshold_ = options->GetOsrThreshold();
-  jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
-  jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();
-
   jit->CreateThreadPool();
 
   // Notify native debugger about the classes already loaded before the creation of the jit.
@@ -330,7 +315,7 @@
   constexpr bool kJitPoolNeedsPeers = true;
   thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
 
-  thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
+  thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
   Start();
 }
 
@@ -360,8 +345,8 @@
 
 void Jit::StartProfileSaver(const std::string& filename,
                             const std::vector<std::string>& code_paths) {
-  if (profile_saver_options_.IsEnabled()) {
-    ProfileSaver::Start(profile_saver_options_,
+  if (options_->GetSaveProfilingInfo()) {
+    ProfileSaver::Start(options_->GetProfileSaverOptions(),
                         filename,
                         code_cache_.get(),
                         code_paths);
@@ -369,8 +354,8 @@
 }
 
 void Jit::StopProfileSaver() {
-  if (profile_saver_options_.IsEnabled() && ProfileSaver::IsStarted()) {
-    ProfileSaver::Stop(dump_info_on_shutdown_);
+  if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
+    ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
   }
 }
 
@@ -383,8 +368,8 @@
 }
 
 Jit::~Jit() {
-  DCHECK(!profile_saver_options_.IsEnabled() || !ProfileSaver::IsStarted());
-  if (dump_info_on_shutdown_) {
+  DCHECK(!options_->GetSaveProfilingInfo() || !ProfileSaver::IsStarted());
+  if (options_->DumpJitInfoOnShutdown()) {
     DumpInfo(LOG_STREAM(INFO));
     Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
   }
@@ -488,11 +473,10 @@
       return false;
     }
 
-    CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    CodeInfo code_info(osr_method);
 
     // Find stack map starting at the target dex_pc.
-    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
+    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
     if (!stack_map.IsValid()) {
       // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
       // hope that the next branch has one.
@@ -509,7 +493,7 @@
     // We found a stack map, now fill the frame with dex register values from the interpreter's
     // shadow frame.
     DexRegisterMap vreg_map =
-        code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+        code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
 
     frame_size = osr_method->GetFrameSizeInBytes();
 
@@ -531,7 +515,7 @@
     } else {
       for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
         DexRegisterLocation::Kind location =
-            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+            vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
         if (location == DexRegisterLocation::Kind::kNone) {
           // Dex register is dead or uninitialized.
           continue;
@@ -547,15 +531,14 @@
         int32_t vreg_value = shadow_frame->GetVReg(vreg);
         int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                              number_of_vregs,
-                                                             code_info,
-                                                             encoding);
+                                                             code_info);
         DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
         DCHECK_GT(slot_offset, 0);
         (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
       }
     }
 
-    native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
+    native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
         osr_method->GetEntryPoint();
     VLOG(jit) << "Jumping to "
               << method_name
@@ -671,25 +654,25 @@
   if (IgnoreSamplesForMethod(method)) {
     return;
   }
-  if (hot_method_threshold_ == 0) {
+  if (HotMethodThreshold() == 0) {
     // Tests might request JIT on first use (compiled synchronously in the interpreter).
     return;
   }
   DCHECK(thread_pool_ != nullptr);
-  DCHECK_GT(warm_method_threshold_, 0);
-  DCHECK_GT(hot_method_threshold_, warm_method_threshold_);
-  DCHECK_GT(osr_method_threshold_, hot_method_threshold_);
-  DCHECK_GE(priority_thread_weight_, 1);
-  DCHECK_LE(priority_thread_weight_, hot_method_threshold_);
+  DCHECK_GT(WarmMethodThreshold(), 0);
+  DCHECK_GT(HotMethodThreshold(), WarmMethodThreshold());
+  DCHECK_GT(OSRMethodThreshold(), HotMethodThreshold());
+  DCHECK_GE(PriorityThreadWeight(), 1);
+  DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());
 
-  int32_t starting_count = method->GetCounter();
+  uint16_t starting_count = method->GetCounter();
   if (Jit::ShouldUsePriorityThreadWeight(self)) {
-    count *= priority_thread_weight_;
+    count *= PriorityThreadWeight();
   }
-  int32_t new_count = starting_count + count;   // int32 here to avoid wrap-around;
+  uint32_t new_count = starting_count + count;
   // Note: Native methods have no "warm" state or profiling info.
-  if (LIKELY(!method->IsNative()) && starting_count < warm_method_threshold_) {
-    if ((new_count >= warm_method_threshold_) &&
+  if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
+    if ((new_count >= WarmMethodThreshold()) &&
         (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
       bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
       if (success) {
@@ -710,23 +693,23 @@
       }
     }
     // Avoid jumping more than one state at a time.
-    new_count = std::min(new_count, hot_method_threshold_ - 1);
-  } else if (use_jit_compilation_) {
-    if (starting_count < hot_method_threshold_) {
-      if ((new_count >= hot_method_threshold_) &&
+    new_count = std::min(new_count, static_cast<uint32_t>(HotMethodThreshold() - 1));
+  } else if (UseJitCompilation()) {
+    if (starting_count < HotMethodThreshold()) {
+      if ((new_count >= HotMethodThreshold()) &&
           !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
         thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
       }
       // Avoid jumping more than one state at a time.
-      new_count = std::min(new_count, osr_method_threshold_ - 1);
-    } else if (starting_count < osr_method_threshold_) {
+      new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
+    } else if (starting_count < OSRMethodThreshold()) {
       if (!with_backedges) {
         // If the samples don't contain any back edge, we don't increment the hotness.
         return;
       }
       DCHECK(!method->IsNative());  // No back edges reported for native methods.
-      if ((new_count >= osr_method_threshold_) &&  !code_cache_->IsOsrCompiled(method)) {
+      if ((new_count >= OSRMethodThreshold()) &&  !code_cache_->IsOsrCompiled(method)) {
         DCHECK(thread_pool_ != nullptr);
         thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
       }
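
Illustrative sketch (not part of the patch): the `AddSamples` hunk above keeps the same tiering logic while reading thresholds from `JitOptions` and switching the counter to unsigned arithmetic. A condensed, hypothetical model of that logic, with made-up threshold values and the tier actions reduced to prints:

```cpp
// Condensed model of threshold-based JIT tiering: scale samples for priority
// threads, trigger the next tier when a threshold is crossed, and clamp the
// counter so a method never skips a tier in one step.
#include <algorithm>
#include <cstdint>
#include <iostream>

constexpr uint16_t kWarmThreshold = 100;    // allocate profiling info (hypothetical)
constexpr uint16_t kHotThreshold = 1000;    // queue a regular compilation (hypothetical)
constexpr uint16_t kOsrThreshold = 2000;    // queue an OSR compilation (hypothetical)
constexpr uint16_t kPriorityWeight = 10;

uint32_t AddSamples(uint16_t counter, uint16_t samples, bool priority_thread) {
  uint32_t count = samples;
  if (priority_thread) {
    count *= kPriorityWeight;
  }
  uint32_t new_count = counter + count;
  if (counter < kWarmThreshold) {
    if (new_count >= kWarmThreshold) {
      std::cout << "-> create profiling info\n";
    }
    // Avoid jumping more than one state at a time.
    new_count = std::min(new_count, static_cast<uint32_t>(kHotThreshold - 1));
  } else if (counter < kHotThreshold) {
    if (new_count >= kHotThreshold) {
      std::cout << "-> enqueue JIT compilation\n";
    }
    new_count = std::min(new_count, static_cast<uint32_t>(kOsrThreshold - 1));
  } else if (counter < kOsrThreshold && new_count >= kOsrThreshold) {
    std::cout << "-> enqueue OSR compilation\n";
  }
  return new_count;
}

int main() {
  uint16_t counter = 0;
  for (int i = 0; i < 30; ++i) {
    counter = static_cast<uint16_t>(AddSamples(counter, 50, /*priority_thread=*/ i % 3 == 0));
  }
  std::cout << "final counter: " << counter << "\n";
  return 0;
}
```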
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 4b8b891..edaf348 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -44,6 +44,110 @@
 
 static constexpr int16_t kJitCheckForOSR = -1;
 static constexpr int16_t kJitHotnessDisabled = -2;
+// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
+// See android/os/Process.java.
+static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
+
+class JitOptions {
+ public:
+  static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
+
+  uint16_t GetCompileThreshold() const {
+    return compile_threshold_;
+  }
+
+  uint16_t GetWarmupThreshold() const {
+    return warmup_threshold_;
+  }
+
+  uint16_t GetOsrThreshold() const {
+    return osr_threshold_;
+  }
+
+  uint16_t GetPriorityThreadWeight() const {
+    return priority_thread_weight_;
+  }
+
+  uint16_t GetInvokeTransitionWeight() const {
+    return invoke_transition_weight_;
+  }
+
+  size_t GetCodeCacheInitialCapacity() const {
+    return code_cache_initial_capacity_;
+  }
+
+  size_t GetCodeCacheMaxCapacity() const {
+    return code_cache_max_capacity_;
+  }
+
+  bool DumpJitInfoOnShutdown() const {
+    return dump_info_on_shutdown_;
+  }
+
+  const ProfileSaverOptions& GetProfileSaverOptions() const {
+    return profile_saver_options_;
+  }
+
+  bool GetSaveProfilingInfo() const {
+    return profile_saver_options_.IsEnabled();
+  }
+
+  int GetThreadPoolPthreadPriority() const {
+    return thread_pool_pthread_priority_;
+  }
+
+  bool UseJitCompilation() const {
+    return use_jit_compilation_;
+  }
+
+  void SetUseJitCompilation(bool b) {
+    use_jit_compilation_ = b;
+  }
+
+  void SetSaveProfilingInfo(bool save_profiling_info) {
+    profile_saver_options_.SetEnabled(save_profiling_info);
+  }
+
+  void SetWaitForJitNotificationsToSaveProfile(bool value) {
+    profile_saver_options_.SetWaitForJitNotificationsToSave(value);
+  }
+
+  void SetProfileAOTCode(bool value) {
+    profile_saver_options_.SetProfileAOTCode(value);
+  }
+
+  void SetJitAtFirstUse() {
+    use_jit_compilation_ = true;
+    compile_threshold_ = 0;
+  }
+
+ private:
+  bool use_jit_compilation_;
+  size_t code_cache_initial_capacity_;
+  size_t code_cache_max_capacity_;
+  uint16_t compile_threshold_;
+  uint16_t warmup_threshold_;
+  uint16_t osr_threshold_;
+  uint16_t priority_thread_weight_;
+  uint16_t invoke_transition_weight_;
+  bool dump_info_on_shutdown_;
+  int thread_pool_pthread_priority_;
+  ProfileSaverOptions profile_saver_options_;
+
+  JitOptions()
+      : use_jit_compilation_(false),
+        code_cache_initial_capacity_(0),
+        code_cache_max_capacity_(0),
+        compile_threshold_(0),
+        warmup_threshold_(0),
+        osr_threshold_(0),
+        priority_thread_weight_(0),
+        invoke_transition_weight_(0),
+        dump_info_on_shutdown_(false),
+        thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority) {}
+
+  DISALLOW_COPY_AND_ASSIGN(JitOptions);
+};
 
 class Jit {
  public:
@@ -77,29 +181,29 @@
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  size_t OSRMethodThreshold() const {
-    return osr_method_threshold_;
+  uint16_t OSRMethodThreshold() const {
+    return options_->GetOsrThreshold();
   }
 
-  size_t HotMethodThreshold() const {
-    return hot_method_threshold_;
+  uint16_t HotMethodThreshold() const {
+    return options_->GetCompileThreshold();
   }
 
-  size_t WarmMethodThreshold() const {
-    return warm_method_threshold_;
+  uint16_t WarmMethodThreshold() const {
+    return options_->GetWarmupThreshold();
   }
 
   uint16_t PriorityThreadWeight() const {
-    return priority_thread_weight_;
+    return options_->GetPriorityThreadWeight();
   }
 
   // Returns false if we only need to save profile information and not compile methods.
   bool UseJitCompilation() const {
-    return use_jit_compilation_;
+    return options_->UseJitCompilation();
   }
 
   bool GetSaveProfilingInfo() const {
-    return profile_saver_options_.IsEnabled();
+    return options_->GetSaveProfilingInfo();
   }
 
   // Wait until there is no more pending compilation tasks.
@@ -120,12 +224,12 @@
 
   void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    AddSamples(self, caller, invoke_transition_weight_, false);
+    AddSamples(self, caller, options_->GetInvokeTransitionWeight(), false);
   }
 
   void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    AddSamples(self, callee, invoke_transition_weight_, false);
+    AddSamples(self, callee, options_->GetInvokeTransitionWeight(), false);
   }
 
   // Starts the profile saver if the config options allow profile recording.
@@ -177,7 +281,7 @@
   void Start();
 
  private:
-  Jit();
+  explicit Jit(JitOptions* options);
 
   static bool LoadCompiler(std::string* error_msg);
 
@@ -189,107 +293,22 @@
   static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
   static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
 
+  // We make this static to simplify the interaction with libart-compiler.so.
+  static bool generate_debug_info_;
+
+  const JitOptions* const options_;
+
+  std::unique_ptr<jit::JitCodeCache> code_cache_;
+  std::unique_ptr<ThreadPool> thread_pool_;
+
   // Performance monitoring.
-  bool dump_info_on_shutdown_;
   CumulativeLogger cumulative_timings_;
   Histogram<uint64_t> memory_use_ GUARDED_BY(lock_);
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
-  std::unique_ptr<jit::JitCodeCache> code_cache_;
-
-  bool use_jit_compilation_;
-  ProfileSaverOptions profile_saver_options_;
-  static bool generate_debug_info_;
-  uint16_t hot_method_threshold_;
-  uint16_t warm_method_threshold_;
-  uint16_t osr_method_threshold_;
-  uint16_t priority_thread_weight_;
-  uint16_t invoke_transition_weight_;
-  std::unique_ptr<ThreadPool> thread_pool_;
-
   DISALLOW_COPY_AND_ASSIGN(Jit);
 };
 
-class JitOptions {
- public:
-  static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
-  size_t GetCompileThreshold() const {
-    return compile_threshold_;
-  }
-  size_t GetWarmupThreshold() const {
-    return warmup_threshold_;
-  }
-  size_t GetOsrThreshold() const {
-    return osr_threshold_;
-  }
-  uint16_t GetPriorityThreadWeight() const {
-    return priority_thread_weight_;
-  }
-  size_t GetInvokeTransitionWeight() const {
-    return invoke_transition_weight_;
-  }
-  size_t GetCodeCacheInitialCapacity() const {
-    return code_cache_initial_capacity_;
-  }
-  size_t GetCodeCacheMaxCapacity() const {
-    return code_cache_max_capacity_;
-  }
-  bool DumpJitInfoOnShutdown() const {
-    return dump_info_on_shutdown_;
-  }
-  const ProfileSaverOptions& GetProfileSaverOptions() const {
-    return profile_saver_options_;
-  }
-  bool GetSaveProfilingInfo() const {
-    return profile_saver_options_.IsEnabled();
-  }
-  bool UseJitCompilation() const {
-    return use_jit_compilation_;
-  }
-  void SetUseJitCompilation(bool b) {
-    use_jit_compilation_ = b;
-  }
-  void SetSaveProfilingInfo(bool save_profiling_info) {
-    profile_saver_options_.SetEnabled(save_profiling_info);
-  }
-  void SetWaitForJitNotificationsToSaveProfile(bool value) {
-    profile_saver_options_.SetWaitForJitNotificationsToSave(value);
-  }
-  void SetProfileAOTCode(bool value) {
-    profile_saver_options_.SetProfileAOTCode(value);
-  }
-
-  void SetJitAtFirstUse() {
-    use_jit_compilation_ = true;
-    compile_threshold_ = 0;
-  }
-
- private:
-  bool use_jit_compilation_;
-  size_t code_cache_initial_capacity_;
-  size_t code_cache_max_capacity_;
-  size_t compile_threshold_;
-  size_t warmup_threshold_;
-  size_t osr_threshold_;
-  uint16_t priority_thread_weight_;
-  size_t invoke_transition_weight_;
-  bool dump_info_on_shutdown_;
-  ProfileSaverOptions profile_saver_options_;
-
-  JitOptions()
-      : use_jit_compilation_(false),
-        code_cache_initial_capacity_(0),
-        code_cache_max_capacity_(0),
-        compile_threshold_(0),
-        warmup_threshold_(0),
-        osr_threshold_(0),
-        priority_thread_weight_(0),
-        invoke_transition_weight_(0),
-        dump_info_on_shutdown_(false) {}
-
-  DISALLOW_COPY_AND_ASSIGN(JitOptions);
-};
-
 // Helper class to stop the JIT for a given scope. This will wait for the JIT to quiesce.
 class ScopedJitSuspend {
  public:
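
The JitOptions consolidation above replaces a set of loose fields on Jit with a single const JitOptions* const options_ member, so the JIT reads its configuration through an immutable handle. A minimal sketch of that ownership pattern with stand-in types (Options and Engine are hypothetical names, not the real JitOptions and Jit):

#include <cstdint>

struct Options {                         // stand-in for JitOptions
  uint16_t compile_threshold = 10000;    // hypothetical default
  uint16_t GetCompileThreshold() const { return compile_threshold; }
};

class Engine {                           // stand-in for Jit
 public:
  explicit Engine(const Options* options) : options_(options) {}
  uint16_t HotMethodThreshold() const { return options_->GetCompileThreshold(); }
  // options_->compile_threshold = 0;    // would not compile: the pointee is const
  // options_ = nullptr;                 // would not compile: the pointer itself is const
 private:
  const Options* const options_;
};

int main() {
  Options opts;
  Engine engine(&opts);
  return engine.HotMethodThreshold() == 10000 ? 0 : 1;
}
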
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 249a8b0..d8aa00c 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -563,7 +563,7 @@
   }
 }
 
-void JitCodeCache::FreeCode(const void* code_ptr) {
+void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
   // Notify native debugger that we are about to remove the code.
   // It does nothing if we are not using native debugger.
@@ -590,7 +590,7 @@
   MutexLock mu(Thread::Current(), lock_);
   ScopedCodeCacheWrite scc(this);
   for (const OatQuickMethodHeader* method_header : method_headers) {
-    FreeCode(method_header->GetCode());
+    FreeCodeAndData(method_header->GetCode());
   }
 }
 
@@ -916,7 +916,7 @@
       in_cache = true;
       if (it->second.GetMethods().empty()) {
         if (release_memory) {
-          FreeCode(it->second.GetCode());
+          FreeCodeAndData(it->second.GetCode());
         }
         jni_stubs_map_.erase(it);
       } else {
@@ -928,7 +928,7 @@
       if (it->second == method) {
         in_cache = true;
         if (release_memory) {
-          FreeCode(it->first);
+          FreeCodeAndData(it->first);
         }
         it = method_code_map_.erase(it);
       } else {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index b10f57e..958e8e8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -317,8 +317,8 @@
       REQUIRES(lock_)
       REQUIRES(Locks::mutator_lock_);
 
-  // Free in the mspace allocations for `code_ptr`.
-  void FreeCode(const void* code_ptr) REQUIRES(lock_);
+  // Free code and data allocations for `code_ptr`.
+  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
 
   // Number of bytes allocated in the code cache.
   size_t CodeCacheSizeLocked() REQUIRES(lock_);
@@ -357,10 +357,10 @@
       REQUIRES(lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void FreeCode(uint8_t* code) REQUIRES(lock_);
   uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
-  void FreeData(uint8_t* data) REQUIRES(lock_);
+  void FreeCode(uint8_t* code) REQUIRES(lock_);
   uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
+  void FreeData(uint8_t* data) REQUIRES(lock_);
 
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
diff --git a/runtime/jvalue-inl.h b/runtime/jvalue-inl.h
index 25e34b2..5bd4f17 100644
--- a/runtime/jvalue-inl.h
+++ b/runtime/jvalue-inl.h
@@ -19,7 +19,7 @@
 
 #include "jvalue.h"
 
-#include "obj_ptr.h"
+#include "obj_ptr-inl.h"
 
 namespace art {
 
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 266abcf..b42d995 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -33,7 +33,7 @@
   // We default initialize JValue instances to all-zeros.
   JValue() : j(0) {}
 
-  template<typename T> static JValue FromPrimitive(T v);
+  template<typename T> ALWAYS_INLINE static JValue FromPrimitive(T v);
 
   int8_t GetB() const { return b; }
   void SetB(int8_t new_b) {
@@ -62,6 +62,7 @@
   mirror::Object* GetL() const REQUIRES_SHARED(Locks::mutator_lock_) {
     return l;
   }
+  ALWAYS_INLINE
   void SetL(ObjPtr<mirror::Object> new_l) REQUIRES_SHARED(Locks::mutator_lock_);
 
   int16_t GetS() const { return s; }
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 09d856f..ce7fe34 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -75,16 +75,18 @@
     // Remaining bits are the recursive lock count.
     kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
         kMarkBitStateSize,
-    // Thin lock bits. Owner in lowest bits.
 
+    // Thin lock bits. Owner in lowest bits.
     kThinLockOwnerShift = 0,
     kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
+    kThinLockOwnerMaskShifted = kThinLockOwnerMask << kThinLockOwnerShift,
     kThinLockMaxOwner = kThinLockOwnerMask,
     // Count in higher bits.
     kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
     kThinLockCountMask = (1 << kThinLockCountSize) - 1,
     kThinLockMaxCount = kThinLockCountMask,
     kThinLockCountOne = 1 << kThinLockCountShift,  // == 65536 (0x10000)
+    kThinLockCountMaskShifted = kThinLockCountMask << kThinLockCountShift,
 
     // State in the highest bits.
     kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift +
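
The two new *MaskShifted constants can be recomputed by hand: kThinLockCountOne == 0x10000 implies a 16-bit owner field at shift 0, so the owner mask is already in place, while the count mask moves up by 16 bits. The sketch below redoes the arithmetic with local constants; the state, read-barrier and mark-bit sizes are assumptions for the example, not taken from this diff:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kOwnerSize = 16;            // follows from kThinLockCountOne == 0x10000
constexpr uint32_t kStateSize = 2;             // assumed for illustration
constexpr uint32_t kReadBarrierStateSize = 1;  // assumed for illustration
constexpr uint32_t kMarkBitStateSize = 1;      // assumed for illustration
constexpr uint32_t kCountSize =
    32 - kOwnerSize - kStateSize - kReadBarrierStateSize - kMarkBitStateSize;

constexpr uint32_t kOwnerShift = 0;
constexpr uint32_t kOwnerMask = (1u << kOwnerSize) - 1;
constexpr uint32_t kOwnerMaskShifted = kOwnerMask << kOwnerShift;  // 0x0000ffff
constexpr uint32_t kCountShift = kOwnerSize + kOwnerShift;         // 16
constexpr uint32_t kCountMask = (1u << kCountSize) - 1;
constexpr uint32_t kCountMaskShifted = kCountMask << kCountShift;  // 0x0fff0000 with the sizes above

int main() {
  std::printf("owner: 0x%08x  count: 0x%08x\n", kOwnerMaskShifted, kCountMaskShifted);
}

A pre-shifted mask lets callers test or extract a field with a single AND against the raw 32-bit lock word instead of shifting first.
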
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
index 41c8384..00a8c00 100644
--- a/runtime/method_handles-inl.h
+++ b/runtime/method_handles-inl.h
@@ -22,7 +22,7 @@
 #include "common_throws.h"
 #include "dex/dex_instruction.h"
 #include "interpreter/interpreter_common.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
 #include "mirror/class.h"
 #include "mirror/method_type.h"
 #include "mirror/object.h"
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 64ab789..1d45aae 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -20,7 +20,6 @@
 
 #include "common_dex_operations.h"
 #include "jvalue-inl.h"
-#include "jvalue.h"
 #include "mirror/emulated_stack_frame.h"
 #include "mirror/method_handle_impl-inl.h"
 #include "mirror/method_type.h"
diff --git a/runtime/method_info.h b/runtime/method_info.h
index b00ddc6..6f74678 100644
--- a/runtime/method_info.h
+++ b/runtime/method_info.h
@@ -21,7 +21,7 @@
 
 #include "base/leb128.h"
 #include "base/macros.h"
-#include "base/memory_region.h"
+#include "base/bit_memory_region.h"
 
 namespace art {
 
@@ -35,8 +35,8 @@
   explicit MethodInfo(const uint8_t* ptr) {
     if (ptr != nullptr) {
       num_method_indices_ = DecodeUnsignedLeb128(&ptr);
-      region_ = MemoryRegion(const_cast<uint8_t*>(ptr),
-                             num_method_indices_ * sizeof(MethodIndexType));
+      region_ = BitMemoryRegion(
+          MemoryRegion(const_cast<uint8_t*>(ptr), num_method_indices_ * sizeof(MethodIndexType)));
     }
   }
 
@@ -44,7 +44,7 @@
   MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
     DCHECK(ptr != nullptr);
     ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
-    region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType));
+    region_ = BitMemoryRegion(MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType)));
   }
 
   static size_t ComputeSize(size_t num_method_indices) {
@@ -71,7 +71,7 @@
 
  private:
   size_t num_method_indices_ = 0u;
-  MemoryRegion region_;
+  BitMemoryRegion region_;
 };
 
 }  // namespace art
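
The MethodInfo blob above is an unsigned-LEB128 count followed by the packed method indices; the constructor skips the count before wrapping the remainder in a BitMemoryRegion. A self-contained sketch of the size computation, using a local LEB128 helper and assuming 32-bit method indices (both are illustrative assumptions, not the real ART helpers):

#include <cstddef>
#include <cstdint>

// Bytes used by an unsigned LEB128 encoding of value (7 payload bits per byte).
size_t UnsignedLeb128Size(uint32_t value) {
  size_t size = 1;
  while (value >= 0x80) {
    value >>= 7;
    ++size;
  }
  return size;
}

// Total bytes for a MethodInfo-style blob: the encoded count plus the index table.
size_t ComputeMethodInfoSize(size_t num_method_indices) {
  return UnsignedLeb128Size(static_cast<uint32_t>(num_method_indices)) +
         num_method_indices * sizeof(uint32_t);  // assumes MethodIndexType is uint32_t
}

int main() {
  return ComputeMethodInfoSize(3) == 1 + 3 * sizeof(uint32_t) ? 0 : 1;  // 13 bytes
}
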
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 7a4876c..72f1443 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -127,23 +127,23 @@
   }
 }
 
-inline uint32_t DexCache::MethodTypeSlotIndex(uint32_t proto_idx) {
+inline uint32_t DexCache::MethodTypeSlotIndex(dex::ProtoIndex proto_idx) {
   DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
-  DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
-  const uint32_t slot_idx = proto_idx % kDexCacheMethodTypeCacheSize;
+  DCHECK_LT(proto_idx.index_, GetDexFile()->NumProtoIds());
+  const uint32_t slot_idx = proto_idx.index_ % kDexCacheMethodTypeCacheSize;
   DCHECK_LT(slot_idx, NumResolvedMethodTypes());
   return slot_idx;
 }
 
-inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
+inline MethodType* DexCache::GetResolvedMethodType(dex::ProtoIndex proto_idx) {
   return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
-      std::memory_order_relaxed).GetObjectForIndex(proto_idx);
+      std::memory_order_relaxed).GetObjectForIndex(proto_idx.index_);
 }
 
-inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+inline void DexCache::SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved) {
   DCHECK(resolved != nullptr);
   GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
-      MethodTypeDexCachePair(resolved, proto_idx), std::memory_order_relaxed);
+      MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
   // TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
   Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
 }
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index d940964..9aff9ec 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -307,9 +307,9 @@
   ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+  MethodType* GetResolvedMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved)
+  void SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -432,7 +432,7 @@
   uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
   uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
-  uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+  uint32_t MethodTypeSlotIndex(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   void Init(const DexFile* dex_file,
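
The move from a raw uint32_t to dex::ProtoIndex in the method-type cache API is a type-safety change: a proto index can no longer be passed where a method, field, or type index is expected, and the call sites above unwrap it explicitly via .index_. A minimal sketch of the wrapper idea with hypothetical local types (not the real dex::ProtoIndex definition):

#include <cstdint>

struct ProtoIndex {                       // hypothetical stand-in for dex::ProtoIndex
  explicit ProtoIndex(uint32_t index) : index_(index) {}
  uint32_t index_;
};

struct MethodIndex {                      // a different, incompatible index type
  explicit MethodIndex(uint32_t index) : index_(index) {}
  uint32_t index_;
};

// Mirrors the shape of MethodTypeSlotIndex(): unwrap explicitly, then hash into the cache.
uint32_t ProtoSlot(ProtoIndex proto_idx, uint32_t cache_size) {
  return proto_idx.index_ % cache_size;
}

int main() {
  ProtoIndex proto(7);
  uint32_t slot = ProtoSlot(proto, 1024);   // OK
  // ProtoSlot(MethodIndex(7), 1024);       // would not compile: distinct types
  // ProtoSlot(7, 1024);                    // would not compile: constructor is explicit
  return slot == 7 ? 0 : 1;
}
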
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index d2bff2c..97e0ce6 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -169,9 +169,9 @@
 
   for (size_t i = 0; i < dex_file.NumProtoIds(); ++i) {
     const MethodTypeDexCachePair pair = method_types_cache[i].load(std::memory_order_relaxed);
-    if (pair.index == method1_id.proto_idx_) {
+    if (dex::ProtoIndex(pair.index) == method1_id.proto_idx_) {
       ASSERT_EQ(method1_type.Get(), pair.object.Read());
-    } else if (pair.index == method2_id.proto_idx_) {
+    } else if (dex::ProtoIndex(pair.index) == method2_id.proto_idx_) {
       ASSERT_EQ(method2_type.Get(), pair.object.Read());
     } else {
       ASSERT_TRUE(false);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index b309f59..44c819a 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1425,21 +1425,24 @@
   return GetField32(AccessModesBitMaskOffset());
 }
 
-bool VarHandle::IsMethodTypeCompatible(AccessMode access_mode, MethodType* method_type) {
-  StackHandleScope<3> hs(Thread::Current());
-  Handle<Class> mt_rtype(hs.NewHandle(method_type->GetRType()));
-  Handle<VarHandle> vh(hs.NewHandle(this));
-  Handle<Class> var_type(hs.NewHandle(vh->GetVarType()));
+VarHandle::MatchKind VarHandle::GetMethodTypeMatchForAccessMode(AccessMode access_mode,
+                                                                MethodType* method_type) {
+  MatchKind match = MatchKind::kExact;
+
+  ObjPtr<VarHandle> vh = this;
+  ObjPtr<Class> var_type = vh->GetVarType();
+  ObjPtr<Class> mt_rtype = method_type->GetRType();
   AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
 
-  // Check return type first.
-  if (mt_rtype->GetPrimitiveType() == Primitive::Type::kPrimVoid) {
-    // The result of the operation will be discarded. The return type
-    // of the VarHandle is immaterial.
-  } else {
-    ObjPtr<Class> vh_rtype(GetReturnType(access_mode_template, var_type.Get()));
-    if (!IsReturnTypeConvertible(vh_rtype, mt_rtype.Get())) {
-      return false;
+  // Check return type first. If the method type's return type is void, the result of
+  // the operation is discarded and the return type of the VarHandle is immaterial.
+  if (mt_rtype->GetPrimitiveType() != Primitive::Type::kPrimVoid) {
+    ObjPtr<Class> vh_rtype = GetReturnType(access_mode_template, var_type.Ptr());
+    if (vh_rtype != mt_rtype) {
+      if (!IsReturnTypeConvertible(vh_rtype, mt_rtype)) {
+        return MatchKind::kNone;
+      }
+      match = MatchKind::kWithConversions;
     }
   }
 
@@ -1447,21 +1450,25 @@
   ObjPtr<Class> vh_ptypes[VarHandle::kMaxAccessorParameters];
   const int32_t vh_ptypes_count = BuildParameterArray(vh_ptypes,
                                                       access_mode_template,
-                                                      var_type.Get(),
+                                                      var_type,
                                                       GetCoordinateType0(),
                                                       GetCoordinateType1());
   if (vh_ptypes_count != method_type->GetPTypes()->GetLength()) {
-    return false;
+    return MatchKind::kNone;
   }
 
   // Check the parameter types are compatible.
   ObjPtr<ObjectArray<Class>> mt_ptypes = method_type->GetPTypes();
   for (int32_t i = 0; i < vh_ptypes_count; ++i) {
-    if (!IsParameterTypeConvertible(mt_ptypes->Get(i), vh_ptypes[i])) {
-      return false;
+    if (mt_ptypes->Get(i) == vh_ptypes[i]) {
+      continue;
     }
+    if (!IsParameterTypeConvertible(mt_ptypes->Get(i), vh_ptypes[i])) {
+      return MatchKind::kNone;
+    }
+    match = MatchKind::kWithConversions;
   }
-  return true;
+  return match;
 }
 
 bool VarHandle::IsInvokerMethodTypeCompatible(AccessMode access_mode,
@@ -1508,7 +1515,7 @@
 MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self,
                                                   ObjPtr<VarHandle> var_handle,
                                                   AccessMode access_mode) {
-  // This is a static as the var_handle might be moved by the GC during it's execution.
+  // This is a static method as the var_handle might be moved by the GC during its execution.
   AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
 
   StackHandleScope<3> hs(self);
@@ -1538,9 +1545,40 @@
   return GetMethodTypeForAccessMode(self, this, access_mode);
 }
 
+std::string VarHandle::PrettyDescriptorForAccessMode(AccessMode access_mode) {
+  // Effect MethodType::PrettyDescriptor() without first creating a method type.
+  std::ostringstream oss;
+  oss << '(';
+
+  AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
+  ObjPtr<Class> var_type = GetVarType();
+  ObjPtr<Class> ctypes[2] = { GetCoordinateType0(), GetCoordinateType1() };
+  const int32_t ptypes_count = GetNumberOfParameters(access_mode_template, ctypes[0], ctypes[1]);
+  int32_t ptypes_done = 0;
+  for (ObjPtr<Class> ctype : ctypes) {
+    if (!ctype.IsNull()) {
+      if (ptypes_done != 0) {
+        oss << ", ";
+      }
+      oss << ctype->PrettyDescriptor();
+      ptypes_done++;
+    }
+  }
+  while (ptypes_done != ptypes_count) {
+    if (ptypes_done != 0) {
+      oss << ", ";
+    }
+    oss << var_type->PrettyDescriptor();
+    ptypes_done++;
+  }
+  ObjPtr<Class> rtype = GetReturnType(access_mode_template, var_type);
+  oss << ')' << rtype->PrettyDescriptor();
+  return oss.str();
+}
+
 bool VarHandle::Access(AccessMode access_mode,
                        ShadowFrame* shadow_frame,
-                       InstructionOperands* operands,
+                       const InstructionOperands* const operands,
                        JValue* result) {
   Class* klass = GetClass();
   if (klass == FieldVarHandle::StaticClass()) {
@@ -1671,7 +1709,7 @@
 
 bool FieldVarHandle::Access(AccessMode access_mode,
                             ShadowFrame* shadow_frame,
-                            InstructionOperands* operands,
+                            const InstructionOperands* const operands,
                             JValue* result) {
   ShadowFrameGetter getter(*shadow_frame, operands);
   ArtField* field = GetField();
@@ -1743,7 +1781,7 @@
 
 bool ArrayElementVarHandle::Access(AccessMode access_mode,
                                    ShadowFrame* shadow_frame,
-                                   InstructionOperands* operands,
+                                   const InstructionOperands* const operands,
                                    JValue* result) {
   ShadowFrameGetter getter(*shadow_frame, operands);
 
@@ -1856,7 +1894,7 @@
 
 bool ByteArrayViewVarHandle::Access(AccessMode access_mode,
                                     ShadowFrame* shadow_frame,
-                                    InstructionOperands* operands,
+                                    const InstructionOperands* const operands,
                                     JValue* result) {
   ShadowFrameGetter getter(*shadow_frame, operands);
 
@@ -1965,7 +2003,7 @@
 
 bool ByteBufferViewVarHandle::Access(AccessMode access_mode,
                                      ShadowFrame* shadow_frame,
-                                     InstructionOperands* operands,
+                                     const InstructionOperands* const operands,
                                      JValue* result) {
   ShadowFrameGetter getter(*shadow_frame, operands);
 
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index d46d900..5186d43 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -99,14 +99,16 @@
     return (GetAccessModesBitMask() & (1u << static_cast<uint32_t>(accessMode))) != 0;
   }
 
-  // Returns true if the MethodType specified is compatible with the
-  // method type associated with the specified AccessMode. The
-  // supplied MethodType is assumed to be from the point of invocation
-  // so it is valid for the supplied MethodType to have a void return
-  // value when the return value for the AccessMode is non-void. This
-  // corresponds to the result of the accessor being discarded.
-  bool IsMethodTypeCompatible(AccessMode access_mode, MethodType* method_type)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  enum MatchKind : uint8_t {
+    kNone,
+    kWithConversions,
+    kExact
+  };
+
+  // Returns the kind of match between the exact method type for 'access_mode' and the
+  // provided 'method_type'.
+  MatchKind GetMethodTypeMatchForAccessMode(AccessMode access_mode, MethodType* method_type)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the MethodType specified is compatible with the
   // specified access_mode if the first parameter of method_type is
@@ -122,9 +124,14 @@
   MethodType* GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Returns a string representing the descriptor of the MethodType associated with
+  // this AccessMode.
+  std::string PrettyDescriptorForAccessMode(AccessMode access_mode)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   bool Access(AccessMode access_mode,
               ShadowFrame* shadow_frame,
-              InstructionOperands* operands,
+              const InstructionOperands* const operands,
               JValue* result)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -192,7 +199,7 @@
  public:
   bool Access(AccessMode access_mode,
               ShadowFrame* shadow_frame,
-              InstructionOperands* operands,
+              const InstructionOperands* const operands,
               JValue* result)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -225,7 +232,7 @@
  public:
     bool Access(AccessMode access_mode,
                 ShadowFrame* shadow_frame,
-                InstructionOperands* operands,
+                const InstructionOperands* const operands,
                 JValue* result)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -248,7 +255,7 @@
  public:
   bool Access(AccessMode access_mode,
               ShadowFrame* shadow_frame,
-              InstructionOperands* operands,
+              const InstructionOperands* const operands,
               JValue* result)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -281,7 +288,7 @@
  public:
   bool Access(AccessMode access_mode,
               ShadowFrame* shadow_frame,
-              InstructionOperands* operands,
+              const InstructionOperands* const operands,
               JValue* result)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/mirror/var_handle_test.cc b/runtime/mirror/var_handle_test.cc
index d9fa07f..005aba3 100644
--- a/runtime/mirror/var_handle_test.cc
+++ b/runtime/mirror/var_handle_test.cc
@@ -246,6 +246,47 @@
   return MethodType::Create(self, rtype, ptypes);
 }
 
+static bool AccessModeMatch(VarHandle* vh,
+                            VarHandle::AccessMode access_mode,
+                            MethodType* method_type,
+                            VarHandle::MatchKind expected_match)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return vh->GetMethodTypeMatchForAccessMode(access_mode, method_type) == expected_match;
+}
+
+template <typename VH>
+static bool AccessModeExactMatch(Handle<VH> vh,
+                                 VarHandle::AccessMode access_mode,
+                                 const char* descriptor)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return AccessModeMatch(vh.Get(),
+                         access_mode,
+                         MethodTypeOf(descriptor),
+                         VarHandle::MatchKind::kExact);
+}
+
+template <typename VH>
+static bool AccessModeWithConversionsMatch(Handle<VH> vh,
+                                          VarHandle::AccessMode access_mode,
+                                          const char* descriptor)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return AccessModeMatch(vh.Get(),
+                         access_mode,
+                         MethodTypeOf(descriptor),
+                         VarHandle::MatchKind::kWithConversions);
+}
+
+template <typename VH>
+static bool AccessModeNoMatch(Handle<VH> vh,
+                              VarHandle::AccessMode access_mode,
+                              const char* descriptor)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return AccessModeMatch(vh.Get(),
+                         access_mode,
+                         MethodTypeOf(descriptor),
+                         VarHandle::MatchKind::kNone);
+}
+
 TEST_F(VarHandleTest, InstanceFieldVarHandle) {
   Thread * const self = Thread::Current();
   ScopedObjectAccess soa(self);
@@ -296,47 +337,53 @@
   // Check compatibility - "Get" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)I")));
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;)I"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;)V"));
+    EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;)D"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)Z"));
   }
 
   // Check compatibility - "Set" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;I)V"));
+    EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;S)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndSet" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode,
-                                             MethodTypeOf("(Ljava/lang/Integer;II)I")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)Z"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)V"));
+    EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;II)Ljava/lang/Boolean;"));
+    EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;IB)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;II)I"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndExchange" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)I")));
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(IIII)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)I"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)V"));
+    EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;II)J"));
+    EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;BS)F"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(IIII)V"));
   }
 
   // Check compatibility - "GetAndUpdate" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)I")));
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)S")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;I)I"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)S"));
   }
 
   // Check synthesized method types match expected forms.
@@ -430,48 +477,47 @@
   // Check compatibility - "Get" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()I")));
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "()I"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "()V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)Z"));
   }
 
   // Check compatibility - "Set" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(F)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(F)V"));
   }
 
   // Check compatibility - "CompareAndSet" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode,
-                                             MethodTypeOf("(II)Ljava/lang/String;")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(II)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)Ljava/lang/String;"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndExchange" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)I")));
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(ID)I")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)S")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(IIJ)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(II)I"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(II)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(ID)I"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)S"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(IIJ)V"));
   }
 
   // Check compatibility - "GetAndUpdate" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)I")));
-    EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)V")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)Z")));
-    EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(I)I"));
+    EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)V"));
   }
 
   // Check synthesized method types match expected forms.
@@ -594,50 +640,46 @@
   // Check compatibility - "Get" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Ljava/lang/String;")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;Ljava/lang/String;)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;I)Ljava/lang/String;"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;Ljava/lang/String;)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)Z"));
   }
 
   // Check compatibility - "Set" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndSet" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
-    EXPECT_TRUE(
-        vh->IsMethodTypeCompatible(
-            access_mode,
-            MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
-                                             MethodTypeOf("([Ljava/lang/String;III)I")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;III)I"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndExchange" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Ljava/lang/String;")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;II)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Ljava/lang/String;"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;II)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(III)V"));
   }
 
   // Check compatibility - "GetAndUpdate" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(II)V"));
   }
 
   // Check synthesized method types match expected forms.
@@ -747,50 +789,46 @@
   // Check compatibility - "Get" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)C")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BC)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BI)C"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BI)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BC)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)Z"));
   }
 
   // Check compatibility - "Set" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BIC)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BI)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BI)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndSet" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
-    EXPECT_TRUE(
-        vh->IsMethodTypeCompatible(
-            access_mode,
-            MethodTypeOf("([BICC)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
-                                             MethodTypeOf("([BIII)I")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BICC)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BIII)I"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BI)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndExchange" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BICC)C")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BICC)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BII)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BICC)C"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BICC)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BII)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(III)V"));
   }
 
   // Check compatibility - "GetAndUpdate" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)C")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BIC)C"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BIC)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BIC)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(II)V"));
   }
 
   // Check synthesized method types match expected forms.
@@ -900,50 +938,46 @@
   // Check compatibility - "Get" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)D")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;D)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)D"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;D)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)Z"));
   }
 
   // Check compatibility - "Set" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndSet" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
-    EXPECT_TRUE(
-        vh->IsMethodTypeCompatible(
-            access_mode,
-            MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
-                                             MethodTypeOf("(Ljava/nio/ByteBuffer;IDI)D")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDD)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDI)D"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
   }
 
   // Check compatibility - "CompareAndExchange" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)D")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;II)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDD)D"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDD)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;II)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(III)V"));
   }
 
   // Check compatibility - "GetAndUpdate" pattern
   {
     const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)D")));
-    EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)Z")));
-    EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)D"));
+    EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)V"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)Z"));
+    EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(II)V"));
   }
 
   // Check synthesized method types match expected forms.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 7f7b524..6c82019 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -185,6 +185,10 @@
   return Runtime::Current()->IsNativeDebuggable();
 }
 
+static jboolean VMRuntime_isJavaDebuggable(JNIEnv*, jobject) {
+  return Runtime::Current()->IsJavaDebuggable();
+}
+
 static jobjectArray VMRuntime_properties(JNIEnv* env, jobject) {
   DCHECK(WellKnownClasses::java_lang_String != nullptr);
 
@@ -702,6 +706,7 @@
   NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
   FAST_NATIVE_METHOD(VMRuntime, isDebuggerActive, "()Z"),
   FAST_NATIVE_METHOD(VMRuntime, isNativeDebuggable, "()Z"),
+  NATIVE_METHOD(VMRuntime, isJavaDebuggable, "()Z"),
   NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
   FAST_NATIVE_METHOD(VMRuntime, newNonMovableArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
   FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 68024cd..9f595b1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -648,7 +648,7 @@
     // Return an empty array instead of a null pointer.
     ObjPtr<mirror::Class>  annotation_array_class =
         soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
-    mirror::ObjectArray<mirror::Object>* empty_array =
+    ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
         mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
                                                    annotation_array_class.Ptr(),
                                                    0);
@@ -661,7 +661,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  mirror::ObjectArray<mirror::Class>* classes = nullptr;
+  ObjPtr<mirror::ObjectArray<mirror::Class>> classes = nullptr;
   if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
     classes = annotations::GetDeclaredClasses(klass);
   }
@@ -738,7 +738,7 @@
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
-  mirror::String* class_name = nullptr;
+  ObjPtr<mirror::String> class_name = nullptr;
   if (!annotations::GetInnerClass(klass, &class_name)) {
     return nullptr;
   }
@@ -763,7 +763,7 @@
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
-  mirror::String* class_name = nullptr;
+  ObjPtr<mirror::String> class_name = nullptr;
   if (!annotations::GetInnerClass(klass, &class_name)) {
     return false;
   }
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index a5d6c97..13a8d28 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -38,7 +38,7 @@
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
       ->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  mirror::ObjectArray<mirror::Class>* result_array =
+  ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
       annotations::GetExceptionTypesForMethod(method);
   if (result_array == nullptr) {
     // Return an empty array instead of a null pointer.
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index e0afbee..2559984 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -27,6 +27,7 @@
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_annotations.h"
 #include "jni/jni_internal.h"
+#include "jvalue-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/field-inl.h"
 #include "native_util.h"
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2503b3c..52e0494 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -62,7 +62,7 @@
         klass->GetProxyThrows()->Get(throws_index);
     return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
   } else {
-    mirror::ObjectArray<mirror::Class>* result_array =
+    ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
         annotations::GetExceptionTypesForMethod(method);
     if (result_array == nullptr) {
       // Return an empty array instead of a null pointer
diff --git a/runtime/oat.h b/runtime/oat.h
index 9a58ded..7b8f71a 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  // Last oat version changed reason: compiler support const-method-type
-  static constexpr uint8_t kOatVersion[] = { '1', '4', '2', '\0' };
+  // Last oat version changed reason: Refactor stackmap encoding.
+  static constexpr uint8_t kOatVersion[] = { '1', '4', '4', '\0' };
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 371678d..ffbc26c 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -211,12 +211,12 @@
     return nullptr;
   }
 
+  ret->PreSetup(elf_filename);
+
   if (!ret->LoadVdex(vdex_filename, writable, low_4gb, error_msg)) {
     return nullptr;
   }
 
-  ret->PreSetup(elf_filename);
-
   if (!ret->Setup(zip_fd, abs_dex_location, error_msg)) {
     return nullptr;
   }
@@ -252,12 +252,12 @@
     return nullptr;
   }
 
+  ret->PreSetup(oat_location);
+
   if (!ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
     return nullptr;
   }
 
-  ret->PreSetup(oat_location);
-
   if (!ret->Setup(zip_fd, abs_dex_location, error_msg)) {
     return nullptr;
   }
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 241102e..6c869ca 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -36,6 +36,7 @@
 #include "exec_utils.h"
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
+#include "hidden_api.h"
 #include "image.h"
 #include "oat.h"
 #include "runtime.h"
@@ -823,6 +824,11 @@
     argv.push_back("--compiler-filter=verify-none");
   }
 
+  if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
+    argv.push_back("--runtime-arg");
+    argv.push_back("-Xhidden-api-checks");
+  }
+
   if (runtime->MustRelocateIfPossible()) {
     argv.push_back("--runtime-arg");
     argv.push_back("-Xrelocate");
@@ -860,6 +866,13 @@
   CHECK(oat_filename != nullptr);
   CHECK(error_msg != nullptr);
 
+  // If ANDROID_DATA is not set, return false instead of aborting.
+  // This can occur for preopt when using a class loader context.
+  if (GetAndroidDataSafe(error_msg) == nullptr) {
+    *error_msg = "GetAndroidDataSafe failed: " + *error_msg;
+    return false;
+  }
+
   std::string cache_dir = GetDalvikCache(GetInstructionSetString(isa));
   if (cache_dir.empty()) {
     *error_msg = "Dalvik cache directory does not exist";
@@ -1218,8 +1231,8 @@
   }
 
 
-  bool result = context->VerifyClassLoaderContextMatch(file->GetClassLoaderContext()) ==
-      ClassLoaderContext::VerificationResult::kVerifies;
+  const bool result = context->VerifyClassLoaderContextMatch(file->GetClassLoaderContext()) !=
+      ClassLoaderContext::VerificationResult::kMismatch;
   if (!result) {
     VLOG(oat) << "ClassLoaderContext check failed. Context was "
               << file->GetClassLoaderContext()
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 99bc0b2..0b3c61d 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -33,6 +33,7 @@
 #include "class_loader_context.h"
 #include "common_runtime_test.h"
 #include "dexopt_test.h"
+#include "hidden_api.h"
 #include "oat_file.h"
 #include "oat_file_manager.h"
 #include "scoped_thread_state_change-inl.h"
@@ -43,6 +44,8 @@
 static const std::string kSpecialSharedLibrary = "&";  // NOLINT [runtime/string] [4]
 static ClassLoaderContext* kSpecialSharedLibraryContext = nullptr;
 
+static constexpr char kDex2oatCmdLineHiddenApiArg[] = " --runtime-arg -Xhidden-api-checks";
+
 class OatFileAssistantTest : public DexoptTest {
  public:
   void VerifyOptimizationStatus(const std::string& file,
@@ -1413,6 +1416,46 @@
       oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
 }
 
+TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiDisabled) {
+  hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+      hiddenapi::EnforcementPolicy::kNoChecks);
+
+  std::string dex_location = GetScratchDir() + "/TestDexHiddenApiDisabled.jar";
+  Copy(GetDexSrc1(), dex_location);
+
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+  std::string error_msg;
+  int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+
+  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+  EXPECT_NE(nullptr, oat_file.get());
+
+  const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
+  EXPECT_NE(nullptr, cmd_line);
+  EXPECT_EQ(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
+}
+
+TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiEnabled) {
+  hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+      hiddenapi::EnforcementPolicy::kBlacklistOnly);
+
+  std::string dex_location = GetScratchDir() + "/TestDexHiddenApiEnabled.jar";
+  Copy(GetDexSrc1(), dex_location);
+
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+  std::string error_msg;
+  int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+  EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+
+  std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+  EXPECT_NE(nullptr, oat_file.get());
+
+  const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
+  EXPECT_NE(nullptr, cmd_line);
+  EXPECT_NE(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
+}
+
 TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
   std::string dex_location = GetScratchDir() + "/TestDex.jar";
   std::string context_location = GetScratchDir() + "/ContextDex.jar";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 98238e5..aed6bc5 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -19,6 +19,7 @@
 #include "art_method.h"
 #include "dex/dex_file_types.h"
 #include "scoped_thread_state_change-inl.h"
+#include "stack_map.h"
 #include "thread.h"
 
 namespace art {
@@ -42,11 +43,10 @@
   const void* entry_point = GetEntryPoint();
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
   if (IsOptimized()) {
-    CodeInfo code_info = GetOptimizedCodeInfo();
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+    CodeInfo code_info(this);
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
     if (stack_map.IsValid()) {
-      return stack_map.GetDexPc(encoding.stack_map.encoding);
+      return stack_map.GetDexPc();
     }
   } else {
     DCHECK(method->IsNative());
@@ -71,18 +71,17 @@
   DCHECK(!method->IsNative());
   DCHECK(IsOptimized());
   // Search for the dex-to-pc mapping in stack maps.
-  CodeInfo code_info = GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  CodeInfo code_info(this);
 
   // All stack maps are stored in the same CodeItem section, safepoint stack
   // maps first, then catch stack maps. We use `is_for_catch_handler` to select
   // the order of iteration.
   StackMap stack_map =
-      LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
-                                   : code_info.GetStackMapForDexPc(dex_pc, encoding);
+      LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc)
+                                   : code_info.GetStackMapForDexPc(dex_pc);
   if (stack_map.IsValid()) {
     return reinterpret_cast<uintptr_t>(entry_point) +
-           stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA);
+           stack_map.GetNativePcOffset(kRuntimeISA);
   }
   if (abort_on_failure) {
     ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index f0966b7..d6762d6 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -22,7 +22,6 @@
 #include "base/utils.h"
 #include "method_info.h"
 #include "quick/quick_method_frame_info.h"
-#include "stack_map.h"
 
 namespace art {
 
@@ -75,10 +74,6 @@
     return code_ - vmap_table_offset_;
   }
 
-  CodeInfo GetOptimizedCodeInfo() const {
-    return CodeInfo(GetOptimizedCodeInfoPtr());
-  }
-
   const void* GetOptimizedMethodInfoPtr() const {
     DCHECK(IsOptimized());
     return reinterpret_cast<const void*>(code_ - method_info_offset_);
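
With GetOptimizedCodeInfo() removed from the method header, callers now build a CodeInfo directly and the stackmap accessors no longer thread a CodeInfoEncoding through every call. A minimal sketch of the post-refactoring lookup path, using only calls that appear in the hunks above; the helper function and its name are illustrative, not part of the patch.

#include "dex/dex_file_types.h"
#include "oat_quick_method_header.h"
#include "stack_map.h"

namespace art {

// Illustrative helper (not in the patch): map a native PC inside optimized
// code back to its dex PC with the refactored stackmap API.
static uint32_t DexPcForNativePc(const OatQuickMethodHeader* header, uintptr_t pc) {
  const uint32_t native_pc_offset = header->NativeQuickPcOffset(pc);
  CodeInfo code_info(header);  // replaces GetOptimizedCodeInfo() + ExtractEncoding()
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  return stack_map.IsValid() ? stack_map.GetDexPc() : dex::kDexNoIndex;
}

}  // namespace art
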
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 3aa481a..7383d47 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -194,6 +194,9 @@
       .Define("-Xjittransitionweight:_")
           .WithType<unsigned int>()
           .IntoKey(M::JITInvokeTransitionWeight)
+      .Define("-Xjitpthreadpriority:_")
+          .WithType<int>()
+          .IntoKey(M::JITPoolThreadPthreadPriority)
       .Define("-Xjitsaveprofilinginfo")
           .WithType<ProfileSaverOptions>()
           .AppendValues()
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 077aa33..c555fca 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -224,30 +224,29 @@
 
   CodeItemDataAccessor accessor(handler_method_->DexInstructionData());
   const size_t number_of_vregs = accessor.RegistersSize();
-  CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  CodeInfo code_info(handler_method_header_);
 
   // Find stack map of the catch block.
-  StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
+  StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc());
   DCHECK(catch_stack_map.IsValid());
   DexRegisterMap catch_vreg_map =
-      code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+      code_info.GetDexRegisterMapOf(catch_stack_map, number_of_vregs);
   if (!catch_vreg_map.IsValid()) {
     return;
   }
 
   // Find stack map of the throwing instruction.
   StackMap throw_stack_map =
-      code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset(), encoding);
+      code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset());
   DCHECK(throw_stack_map.IsValid());
   DexRegisterMap throw_vreg_map =
-      code_info.GetDexRegisterMapOf(throw_stack_map, encoding, number_of_vregs);
+      code_info.GetDexRegisterMapOf(throw_stack_map, number_of_vregs);
   DCHECK(throw_vreg_map.IsValid());
 
   // Copy values between them.
   for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
     DexRegisterLocation::Kind catch_location =
-        catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+        catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
     if (catch_location == DexRegisterLocation::Kind::kNone) {
       continue;
     }
@@ -257,8 +256,7 @@
     uint32_t vreg_value;
     VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg,
                                                                    number_of_vregs,
-                                                                   code_info,
-                                                                   encoding));
+                                                                   code_info));
     bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
                                                    vreg,
                                                    vreg_kind,
@@ -271,8 +269,7 @@
     // Copy value to the catch phi's stack slot.
     int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg,
                                                                number_of_vregs,
-                                                               code_info,
-                                                               encoding);
+                                                               code_info);
     ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame();
     uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset;
     uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address);
@@ -404,20 +401,18 @@
                                       const bool* updated_vregs)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-    CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+    CodeInfo code_info(method_header);
     uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
-    CodeInfoEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
     CodeItemDataAccessor accessor(m->DexInstructionData());
     const size_t number_of_vregs = accessor.RegistersSize();
-    uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
-    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+    uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     DexRegisterMap vreg_map = IsInInlinedFrame()
         ? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
-                                             code_info.GetInlineInfoOf(stack_map, encoding),
-                                             encoding,
+                                             code_info.GetInlineInfoOf(stack_map),
                                              number_of_vregs)
-        : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+        : code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
 
     if (!vreg_map.IsValid()) {
       return;
@@ -430,7 +425,7 @@
       }
 
       DexRegisterLocation::Kind location =
-          vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+          vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
       static constexpr uint32_t kDeadValue = 0xEBADDE09;
       uint32_t value = kDeadValue;
       bool is_reference = false;
@@ -439,12 +434,11 @@
         case DexRegisterLocation::Kind::kInStack: {
           const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
                                                                 number_of_vregs,
-                                                                code_info,
-                                                                encoding);
+                                                                code_info);
           const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
           value = *reinterpret_cast<const uint32_t*>(addr);
           uint32_t bit = (offset >> 2);
-          if (bit < encoding.stack_mask.encoding.BitSize() && stack_mask.LoadBit(bit)) {
+          if (bit < code_info.GetNumberOfStackMaskBits() && stack_mask.LoadBit(bit)) {
             is_reference = true;
           }
           break;
@@ -453,7 +447,7 @@
         case DexRegisterLocation::Kind::kInRegisterHigh:
         case DexRegisterLocation::Kind::kInFpuRegister:
         case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
-          uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+          uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info);
           bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
           CHECK(result);
           if (location == DexRegisterLocation::Kind::kInRegister) {
@@ -464,7 +458,7 @@
           break;
         }
         case DexRegisterLocation::Kind::kConstant: {
-          value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+          value = vreg_map.GetConstant(vreg, number_of_vregs, code_info);
           if (value == 0) {
             // Make it a reference for extra safety.
             is_reference = true;
@@ -479,8 +473,7 @@
               << "Unexpected location kind "
               << vreg_map.GetLocationInternalKind(vreg,
                                                   number_of_vregs,
-                                                  code_info,
-                                                  encoding);
+                                                  code_info);
           UNREACHABLE();
         }
       }
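
The same encoding-argument removal applies to DexRegisterMap: accessors that previously took (vreg, number_of_vregs, code_info, encoding) now take only (vreg, number_of_vregs, code_info). A short sketch of the trimmed signatures, grounded in the catch/throw vreg-copying hunks above; the helper below is illustrative and not part of the patch.

#include "stack_map.h"

namespace art {

// Illustrative helper: return the stack-slot offset (in bytes) of one dex
// register, or false if the register does not live on the stack.
static bool GetVRegStackOffset(const DexRegisterMap& vreg_map,
                               const CodeInfo& code_info,
                               uint16_t vreg,
                               uint16_t number_of_vregs,
                               int32_t* offset) {
  DexRegisterLocation::Kind kind =
      vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
  if (kind != DexRegisterLocation::Kind::kInStack) {
    return false;
  }
  *offset = vreg_map.GetStackOffsetInBytes(vreg, number_of_vregs, code_info);
  return true;
}

}  // namespace art
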
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index dfa4b3d..66eba1e 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -25,6 +25,7 @@
 #include "indirect_reference_table-inl.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
+#include "jvalue-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/executable.h"
 #include "mirror/object_array-inl.h"
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 4584351..374591e 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -19,8 +19,10 @@
 
 #include "runtime.h"
 
+#include "arch/instruction_set.h"
 #include "art_method.h"
 #include "base/callee_save_type.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "gc_root-inl.h"
 #include "obj_ptr-inl.h"
 
@@ -38,21 +40,22 @@
 
 inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method) {
   DCHECK(method != nullptr);
+  DCHECK_EQ(instruction_set_, kRuntimeISA);
   // Cannot be imt-conflict-method or resolution-method.
   DCHECK_NE(method, GetImtConflictMethod());
   DCHECK_NE(method, GetResolutionMethod());
   // Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
   if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsAndArgs)) {
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   } else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveAllCalleeSaves)) {
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
   } else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsOnly)) {
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
   } else {
     DCHECK(method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverything) ||
            method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverythingForClinit) ||
            method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverythingForSuspendCheck));
-    return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveEverything);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveEverything);
   }
 }
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b8775b8..1402749 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -39,18 +39,12 @@
 #include "android-base/strings.h"
 
 #include "aot_class_linker.h"
-#include "arch/arm/quick_method_frame_info_arm.h"
 #include "arch/arm/registers_arm.h"
-#include "arch/arm64/quick_method_frame_info_arm64.h"
 #include "arch/arm64/registers_arm64.h"
 #include "arch/instruction_set_features.h"
-#include "arch/mips/quick_method_frame_info_mips.h"
 #include "arch/mips/registers_mips.h"
-#include "arch/mips64/quick_method_frame_info_mips64.h"
 #include "arch/mips64/registers_mips64.h"
-#include "arch/x86/quick_method_frame_info_x86.h"
 #include "arch/x86/registers_x86.h"
-#include "arch/x86_64/quick_method_frame_info_x86_64.h"
 #include "arch/x86_64/registers_x86_64.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
@@ -1198,7 +1192,7 @@
   // As is, we're encoding some logic here about which specific policy to use, which would be better
   // controlled by the framework.
   hidden_api_policy_ = do_hidden_api_checks
-      ? hiddenapi::EnforcementPolicy::kBlacklistOnly
+      ? hiddenapi::EnforcementPolicy::kDarkGreyAndBlackList
       : hiddenapi::EnforcementPolicy::kNoChecks;
 
   no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
@@ -2203,38 +2197,21 @@
 
 void Runtime::SetInstructionSet(InstructionSet instruction_set) {
   instruction_set_ = instruction_set;
-  if ((instruction_set_ == InstructionSet::kThumb2) || (instruction_set_ == InstructionSet::kArm)) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kMips) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kMips64) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kX86) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kX86_64) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
-    }
-  } else if (instruction_set_ == InstructionSet::kArm64) {
-    for (int i = 0; i != kCalleeSaveSize; ++i) {
-      CalleeSaveType type = static_cast<CalleeSaveType>(i);
-      callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
-    }
-  } else {
-    UNIMPLEMENTED(FATAL) << instruction_set_;
+  switch (instruction_set) {
+    case InstructionSet::kThumb2:
+      // kThumb2 is the same as kArm, use the canonical value.
+      instruction_set_ = InstructionSet::kArm;
+      break;
+    case InstructionSet::kArm:
+    case InstructionSet::kArm64:
+    case InstructionSet::kMips:
+    case InstructionSet::kMips64:
+    case InstructionSet::kX86:
+    case InstructionSet::kX86_64:
+      break;
+    default:
+      UNIMPLEMENTED(FATAL) << instruction_set_;
+      UNREACHABLE();
   }
 }
 
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 953acbb..10f72e7 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -399,10 +399,6 @@
   ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
-    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
-  }
-
   QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -831,7 +827,6 @@
   GcRoot<mirror::Object> sentinel_;
 
   InstructionSet instruction_set_;
-  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
 
   CompilerCallbacks* compiler_callbacks_;
   bool is_zygote_;
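
With callee_save_method_frame_infos_ and GetCalleeSaveMethodFrameInfo() removed from Runtime, frame layouts are no longer cached per runtime instance; they come from RuntimeCalleeSaveFrame (entrypoints/quick/callee_save_frame.h) for the compile-time ISA, and the stack.cc hunk further down even evaluates one as a constant expression. A minimal sketch under that assumption:

#include "base/callee_save_type.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "quick/quick_method_frame_info.h"

namespace art {

// Frame info for the current ISA's SaveRefsAndArgs callee-save method,
// resolved at compile time instead of being read from a per-Runtime array.
constexpr QuickMethodFrameInfo kRefsAndArgsFrameInfo =
    RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);

}  // namespace art
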
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 427385d..e647423 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -77,6 +77,7 @@
 RUNTIME_OPTIONS_KEY (unsigned int,        JITOsrThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITPriorityThreadWeight)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITInvokeTransitionWeight)
+RUNTIME_OPTIONS_KEY (int,                 JITPoolThreadPthreadPriority,   jit::kJitPoolThreadPthreadDefaultPriority)
 RUNTIME_OPTIONS_KEY (MemoryKiB,           JITCodeCacheInitialCapacity,    jit::JitCodeCache::kInitialCapacity)
 RUNTIME_OPTIONS_KEY (MemoryKiB,           JITCodeCacheMaxCapacity,        jit::JitCodeCache::kMaxCapacity)
 RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 229238e..7d1cb5c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -25,6 +25,7 @@
 #include "base/hex_dump.h"
 #include "dex/dex_file_types.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space-inl.h"
@@ -75,15 +76,14 @@
   }
 }
 
-static InlineInfo GetCurrentInlineInfo(const OatQuickMethodHeader* method_header,
+static InlineInfo GetCurrentInlineInfo(CodeInfo& code_info,
+                                       const OatQuickMethodHeader* method_header,
                                        uintptr_t cur_quick_frame_pc)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
-  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   DCHECK(stack_map.IsValid());
-  return code_info.GetInlineInfoOf(stack_map, encoding);
+  return code_info.GetInlineInfoOf(stack_map);
 }
 
 ArtMethod* StackVisitor::GetMethod() const {
@@ -92,16 +92,16 @@
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
       size_t depth_in_stack_map = current_inlining_depth_ - 1;
-      InlineInfo inline_info = GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(),
-                                                    cur_quick_frame_pc_);
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
+      CodeInfo code_info(method_header);
+      InlineInfo inline_info = GetCurrentInlineInfo(code_info,
+                                                    method_header,
+                                                    cur_quick_frame_pc_);
       MethodInfo method_info = method_header->GetOptimizedMethodInfo();
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
       return GetResolvedMethod(*GetCurrentQuickFrame(),
                                method_info,
                                inline_info,
-                               encoding.inline_info.encoding,
                                depth_in_stack_map);
     } else {
       return *cur_quick_frame_;
@@ -115,11 +115,11 @@
     return cur_shadow_frame_->GetDexPC();
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
-      size_t depth_in_stack_map = current_inlining_depth_ - 1;
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
-      return GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), cur_quick_frame_pc_).
-          GetDexPcAtDepth(encoding.inline_info.encoding, depth_in_stack_map);
+      CodeInfo code_info(method_header);
+      size_t depth_in_stack_map = current_inlining_depth_ - 1;
+      return GetCurrentInlineInfo(code_info, method_header, cur_quick_frame_pc_).
+          GetDexPcAtDepth(depth_in_stack_map);
     } else if (cur_oat_quick_method_header_ == nullptr) {
       return dex::kDexNoIndex;
     } else {
@@ -229,32 +229,29 @@
   uint16_t number_of_dex_registers = accessor.RegistersSize();
   DCHECK_LT(vreg, number_of_dex_registers);
   const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  CodeInfo code_info(method_header);
 
   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   DCHECK(stack_map.IsValid());
   size_t depth_in_stack_map = current_inlining_depth_ - 1;
 
   DexRegisterMap dex_register_map = IsInInlinedFrame()
       ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
-                                           code_info.GetInlineInfoOf(stack_map, encoding),
-                                           encoding,
+                                           code_info.GetInlineInfoOf(stack_map),
                                            number_of_dex_registers)
-      : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+      : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
 
   if (!dex_register_map.IsValid()) {
     return false;
   }
   DexRegisterLocation::Kind location_kind =
-      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
+      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
   switch (location_kind) {
     case DexRegisterLocation::Kind::kInStack: {
       const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
                                                                     number_of_dex_registers,
-                                                                    code_info,
-                                                                    encoding);
+                                                                    code_info);
       const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
       *val = *reinterpret_cast<const uint32_t*>(addr);
       return true;
@@ -264,11 +261,11 @@
     case DexRegisterLocation::Kind::kInFpuRegister:
     case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
       uint32_t reg =
-          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
+          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
       return GetRegisterIfAccessible(reg, kind, val);
     }
     case DexRegisterLocation::Kind::kConstant:
-      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
+      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
       return true;
     case DexRegisterLocation::Kind::kNone:
       return false;
@@ -277,8 +274,7 @@
           << "Unexpected location kind "
           << dex_register_map.GetLocationInternalKind(vreg,
                                                       number_of_dex_registers,
-                                                      code_info,
-                                                      encoding);
+                                                      code_info);
       UNREACHABLE();
   }
 }
@@ -718,7 +714,7 @@
   Runtime* runtime = Runtime::Current();
 
   if (method->IsAbstract()) {
-    return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   }
 
   // This goes before IsProxyMethod since runtime methods have a null declaring class.
@@ -732,7 +728,7 @@
     // compiled method without any stubs. Therefore the method must have a OatQuickMethodHeader.
     DCHECK(!method->IsDirect() && !method->IsConstructor())
         << "Constructors of proxy classes must have a OatQuickMethodHeader";
-    return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+    return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   }
 
   // The only remaining case is if the method is native and uses the generic JNI stub,
@@ -751,8 +747,8 @@
   // Generic JNI frame.
   uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
   size_t scope_size = HandleScope::SizeOf(handle_refs);
-  QuickMethodFrameInfo callee_info =
-      runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+  constexpr QuickMethodFrameInfo callee_info =
+      RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
 
   // Callee saves + handle scope + method ref + alignment
   // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
@@ -830,15 +826,14 @@
         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
             && (cur_oat_quick_method_header_ != nullptr)
             && cur_oat_quick_method_header_->IsOptimized()) {
-          CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
-          CodeInfoEncoding encoding = code_info.ExtractEncoding();
+          CodeInfo code_info(cur_oat_quick_method_header_);
           uint32_t native_pc_offset =
               cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
-          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
-          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
-            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+          if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
+            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
             DCHECK_EQ(current_inlining_depth_, 0u);
-            for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
+            for (current_inlining_depth_ = inline_info.GetDepth();
                  current_inlining_depth_ != 0;
                  --current_inlining_depth_) {
               bool should_continue = VisitFrame();
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 9c7b687..2b7e8dd 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -25,8 +25,6 @@
 namespace art {
 
 constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex;
-constexpr uint32_t StackMap::kNoDexRegisterMap;
-constexpr uint32_t StackMap::kNoInlineInfo;
 
 std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind& kind) {
   using Kind = DexRegisterLocation::Kind;
@@ -56,27 +54,25 @@
 DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(
     uint16_t dex_register_number,
     uint16_t number_of_dex_registers,
-    const CodeInfo& code_info,
-    const CodeInfoEncoding& enc) const {
+    const CodeInfo& code_info) const {
   DexRegisterLocationCatalog dex_register_location_catalog =
-      code_info.GetDexRegisterLocationCatalog(enc);
+      code_info.GetDexRegisterLocationCatalog();
   size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
       dex_register_number,
       number_of_dex_registers,
-      code_info.GetNumberOfLocationCatalogEntries(enc));
+      code_info.GetNumberOfLocationCatalogEntries());
   return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
 }
 
 DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
                                                            uint16_t number_of_dex_registers,
-                                                           const CodeInfo& code_info,
-                                                           const CodeInfoEncoding& enc) const {
+                                                           const CodeInfo& code_info) const {
   DexRegisterLocationCatalog dex_register_location_catalog =
-      code_info.GetDexRegisterLocationCatalog(enc);
+      code_info.GetDexRegisterLocationCatalog();
   size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
       dex_register_number,
       number_of_dex_registers,
-      code_info.GetNumberOfLocationCatalogEntries(enc));
+      code_info.GetNumberOfLocationCatalogEntries());
   return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
 }
 
@@ -90,27 +86,28 @@
      << " (" << location.GetValue() << ")" << suffix << '\n';
 }
 
-void StackMapEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void StackMap::DumpEncoding(const BitTable<6>& table,
+                            VariableIndentationOutputStream* vios) {
   vios->Stream()
       << "StackMapEncoding"
-      << " (native_pc_bit_offset=" << static_cast<uint32_t>(kNativePcBitOffset)
-      << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
-      << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
-      << ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_)
-      << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_index_bit_offset_)
-      << ", stack_mask_index_bit_offset=" << static_cast<uint32_t>(stack_mask_index_bit_offset_)
-      << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+      << " (NativePcOffsetBits=" << table.NumColumnBits(kNativePcOffset)
+      << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+      << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
+      << ", InlineInfoIndexBits=" << table.NumColumnBits(kInlineInfoIndex)
+      << ", RegisterMaskIndexBits=" << table.NumColumnBits(kRegisterMaskIndex)
+      << ", StackMaskIndexBits=" << table.NumColumnBits(kStackMaskIndex)
       << ")\n";
 }
 
-void InlineInfoEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void InlineInfo::DumpEncoding(const BitTable<5>& table,
+                              VariableIndentationOutputStream* vios) {
   vios->Stream()
       << "InlineInfoEncoding"
-      << " (method_index_bit_offset=" << static_cast<uint32_t>(kMethodIndexBitOffset)
-      << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
-      << ", extra_data_bit_offset=" << static_cast<uint32_t>(extra_data_bit_offset_)
-      << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
-      << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+      << " (IsLastBits=" << table.NumColumnBits(kIsLast)
+      << ", MethodIndexIdxBits=" << table.NumColumnBits(kMethodIndexIdx)
+      << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+      << ", ExtraDataBits=" << table.NumColumnBits(kExtraData)
+      << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
       << ")\n";
 }
 
@@ -120,26 +117,24 @@
                     bool dump_stack_maps,
                     InstructionSet instruction_set,
                     const MethodInfo& method_info) const {
-  CodeInfoEncoding encoding = ExtractEncoding();
-  size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
+  size_t number_of_stack_maps = GetNumberOfStackMaps();
   vios->Stream()
       << "Optimized CodeInfo (number_of_dex_registers=" << number_of_dex_registers
       << ", number_of_stack_maps=" << number_of_stack_maps
       << ")\n";
   ScopedIndentation indent1(vios);
-  encoding.stack_map.encoding.Dump(vios);
-  if (HasInlineInfo(encoding)) {
-    encoding.inline_info.encoding.Dump(vios);
+  StackMap::DumpEncoding(stack_maps_, vios);
+  if (HasInlineInfo()) {
+    InlineInfo::DumpEncoding(inline_infos_, vios);
   }
   // Display the Dex register location catalog.
-  GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
+  GetDexRegisterLocationCatalog().Dump(vios, *this);
   // Display stack maps along with (live) Dex register maps.
   if (dump_stack_maps) {
     for (size_t i = 0; i < number_of_stack_maps; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
+      StackMap stack_map = GetStackMapAt(i);
       stack_map.Dump(vios,
                      *this,
-                     encoding,
                      method_info,
                      code_offset,
                      number_of_dex_registers,
@@ -153,9 +148,8 @@
 
 void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
                                       const CodeInfo& code_info) {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
-  size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
+  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
+  size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize();
   vios->Stream()
       << "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
       << ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n";
@@ -169,8 +163,7 @@
 void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
                           const CodeInfo& code_info,
                           uint16_t number_of_dex_registers) const {
-  CodeInfoEncoding encoding = code_info.ExtractEncoding();
-  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+  size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
   // TODO: Display the bit mask of live Dex registers.
   for (size_t j = 0; j < number_of_dex_registers; ++j) {
     if (IsDexRegisterLive(j)) {
@@ -178,8 +171,7 @@
           j, number_of_dex_registers, number_of_location_catalog_entries);
       DexRegisterLocation location = GetDexRegisterLocation(j,
                                                             number_of_dex_registers,
-                                                            code_info,
-                                                            encoding);
+                                                            code_info);
       ScopedIndentation indent1(vios);
       DumpRegisterMapping(
           vios->Stream(), j, location, "v",
@@ -190,38 +182,35 @@
 
 void StackMap::Dump(VariableIndentationOutputStream* vios,
                     const CodeInfo& code_info,
-                    const CodeInfoEncoding& encoding,
                     const MethodInfo& method_info,
                     uint32_t code_offset,
                     uint16_t number_of_dex_registers,
                     InstructionSet instruction_set,
                     const std::string& header_suffix) const {
-  StackMapEncoding stack_map_encoding = encoding.stack_map.encoding;
-  const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
+  const uint32_t pc_offset = GetNativePcOffset(instruction_set);
   vios->Stream()
       << "StackMap" << header_suffix
       << std::hex
       << " [native_pc=0x" << code_offset + pc_offset << "]"
-      << " [entry_size=0x" << encoding.stack_map.encoding.BitSize() << " bits]"
-      << " (dex_pc=0x" << GetDexPc(stack_map_encoding)
+      << " (dex_pc=0x" << GetDexPc()
       << ", native_pc_offset=0x" << pc_offset
-      << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
-      << ", inline_info_offset=0x" << GetInlineInfoIndex(stack_map_encoding)
-      << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
+      << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset()
+      << ", inline_info_offset=0x" << GetInlineInfoIndex()
+      << ", register_mask=0x" << code_info.GetRegisterMaskOf(*this)
       << std::dec
       << ", stack_mask=0b";
-  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
-  for (size_t i = 0, e = encoding.stack_mask.encoding.BitSize(); i < e; ++i) {
+  BitMemoryRegion stack_mask = code_info.GetStackMaskOf(*this);
+  for (size_t i = 0, e = code_info.GetNumberOfStackMaskBits(); i < e; ++i) {
     vios->Stream() << stack_mask.LoadBit(e - i - 1);
   }
   vios->Stream() << ")\n";
-  if (HasDexRegisterMap(stack_map_encoding)) {
+  if (HasDexRegisterMap()) {
     DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
-        *this, encoding, number_of_dex_registers);
+        *this, number_of_dex_registers);
     dex_register_map.Dump(vios, code_info, number_of_dex_registers);
   }
-  if (HasInlineInfo(stack_map_encoding)) {
-    InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding);
+  if (HasInlineInfo()) {
+    InlineInfo inline_info = code_info.GetInlineInfoOf(*this);
     // We do not know the length of the dex register maps of inlined frames
     // at this level, so we just pass null to `InlineInfo::Dump` to tell
     // it not to look at these maps.
@@ -233,29 +222,27 @@
                       const CodeInfo& code_info,
                       const MethodInfo& method_info,
                       uint16_t number_of_dex_registers[]) const {
-  InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
   vios->Stream() << "InlineInfo with depth "
-                 << static_cast<uint32_t>(GetDepth(inline_info_encoding))
+                 << static_cast<uint32_t>(GetDepth())
                  << "\n";
 
-  for (size_t i = 0; i < GetDepth(inline_info_encoding); ++i) {
+  for (size_t i = 0; i < GetDepth(); ++i) {
     vios->Stream()
         << " At depth " << i
         << std::hex
-        << " (dex_pc=0x" << GetDexPcAtDepth(inline_info_encoding, i);
-    if (EncodesArtMethodAtDepth(inline_info_encoding, i)) {
+        << " (dex_pc=0x" << GetDexPcAtDepth(i);
+    if (EncodesArtMethodAtDepth(i)) {
       ScopedObjectAccess soa(Thread::Current());
-      vios->Stream() << ", method=" << GetArtMethodAtDepth(inline_info_encoding, i)->PrettyMethod();
+      vios->Stream() << ", method=" << GetArtMethodAtDepth(i)->PrettyMethod();
     } else {
       vios->Stream()
           << std::dec
-          << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i);
+          << ", method_index=" << GetMethodIndexAtDepth(method_info, i);
     }
     vios->Stream() << ")\n";
-    if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) {
       DexRegisterMap dex_register_map =
-          code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
+          code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]);
       ScopedIndentation indent1(vios);
       dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]);
     }
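
The dump changes above reflect the new layout: a StackMap is now a row accessor over a bit-packed BitTable, per-field bit widths are queried from the table via NumColumnBits() rather than stored as offsets, and absent fields are signalled by the kNoValue sentinel in place of the removed kNoDexRegisterMap/kNoInlineInfo constants. A short sketch of walking stack maps with the accessors from the stack_map.h hunks below; the loop itself is illustrative.

#include "stack_map.h"

namespace art {

// Illustrative walk (not in the patch) over all stack maps of a CodeInfo,
// using the BitTable-backed accessors.
static void VisitStackMaps(const CodeInfo& code_info) {
  for (size_t i = 0, n = code_info.GetNumberOfStackMaps(); i < n; ++i) {
    StackMap stack_map = code_info.GetStackMapAt(i);
    uint32_t dex_pc = stack_map.GetDexPc();
    uint32_t depth = 0;
    if (stack_map.HasInlineInfo()) {  // i.e. GetInlineInfoIndex() != kNoValue
      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
      depth = inline_info.GetDepth();
    }
    // Real callers would act on dex_pc/depth; this sketch only exercises the API.
    static_cast<void>(dex_pc);
    static_cast<void>(depth);
  }
}

}  // namespace art
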
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 3839764..91cecf0 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -21,12 +21,14 @@
 
 #include "arch/code_offset.h"
 #include "base/bit_memory_region.h"
+#include "base/bit_table.h"
 #include "base/bit_utils.h"
 #include "base/bit_vector.h"
 #include "base/leb128.h"
 #include "base/memory_region.h"
 #include "dex/dex_file_types.h"
 #include "method_info.h"
+#include "oat_quick_method_header.h"
 
 namespace art {
 
@@ -37,13 +39,8 @@
 // (signed) values.
 static constexpr ssize_t kFrameSlotSize = 4;
 
-// Size of Dex virtual registers.
-static constexpr size_t kVRegSize = 4;
-
 class ArtMethod;
 class CodeInfo;
-class StackMapEncoding;
-struct CodeInfoEncoding;
 
 /**
  * Classes in the following file are wrapper on stack map information backed
@@ -452,35 +449,31 @@
   explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
   DexRegisterMap() {}
 
-  bool IsValid() const { return region_.pointer() != nullptr; }
+  bool IsValid() const { return region_.IsValid(); }
 
   // Get the surface kind of Dex register `dex_register_number`.
   DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
                                             uint16_t number_of_dex_registers,
-                                            const CodeInfo& code_info,
-                                            const CodeInfoEncoding& enc) const {
+                                            const CodeInfo& code_info) const {
     return DexRegisterLocation::ConvertToSurfaceKind(
-        GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info, enc));
+        GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info));
   }
 
   // Get the internal kind of Dex register `dex_register_number`.
   DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
                                                     uint16_t number_of_dex_registers,
-                                                    const CodeInfo& code_info,
-                                                    const CodeInfoEncoding& enc) const;
+                                                    const CodeInfo& code_info) const;
 
   // Get the Dex register location `dex_register_number`.
   DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
                                              uint16_t number_of_dex_registers,
-                                             const CodeInfo& code_info,
-                                             const CodeInfoEncoding& enc) const;
+                                             const CodeInfo& code_info) const;
 
   int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
                                 uint16_t number_of_dex_registers,
-                                const CodeInfo& code_info,
-                                const CodeInfoEncoding& enc) const {
+                                const CodeInfo& code_info) const {
     DexRegisterLocation location =
-        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
     DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
     // GetDexRegisterLocation returns the offset in bytes.
     return location.GetValue();
@@ -488,20 +481,18 @@
 
   int32_t GetConstant(uint16_t dex_register_number,
                       uint16_t number_of_dex_registers,
-                      const CodeInfo& code_info,
-                      const CodeInfoEncoding& enc) const {
+                      const CodeInfo& code_info) const {
     DexRegisterLocation location =
-        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
     DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant);
     return location.GetValue();
   }
 
   int32_t GetMachineRegister(uint16_t dex_register_number,
                              uint16_t number_of_dex_registers,
-                             const CodeInfo& code_info,
-                             const CodeInfoEncoding& enc) const {
+                             const CodeInfo& code_info) const {
     DexRegisterLocation location =
-        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+        GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
     DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
            location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
            location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
@@ -627,7 +618,7 @@
 
   // Return the size of the DexRegisterMap object, in bytes.
   size_t Size() const {
-    return region_.size();
+    return BitsToBytesRoundUp(region_.size_in_bits());
   }
 
   void Dump(VariableIndentationOutputStream* vios,
@@ -650,143 +641,12 @@
 
   static constexpr int kFixedSize = 0;
 
-  MemoryRegion region_;
+  BitMemoryRegion region_;
 
   friend class CodeInfo;
   friend class StackMapStream;
 };
 
-// Represents bit range of bit-packed integer field.
-// We reuse the idea from ULEB128p1 to support encoding of -1 (aka 0xFFFFFFFF).
-// If min_value is set to -1, we implicitly subtract one from any loaded value,
-// and add one to any stored value. This is generalized to any negative values.
-// In other words, min_value acts as a base and the stored value is added to it.
-struct FieldEncoding {
-  FieldEncoding(size_t start_offset, size_t end_offset, int32_t min_value = 0)
-      : start_offset_(start_offset), end_offset_(end_offset), min_value_(min_value) {
-    DCHECK_LE(start_offset_, end_offset_);
-    DCHECK_LE(BitSize(), 32u);
-  }
-
-  ALWAYS_INLINE size_t BitSize() const { return end_offset_ - start_offset_; }
-
-  template <typename Region>
-  ALWAYS_INLINE int32_t Load(const Region& region) const {
-    DCHECK_LE(end_offset_, region.size_in_bits());
-    return static_cast<int32_t>(region.LoadBits(start_offset_, BitSize())) + min_value_;
-  }
-
-  template <typename Region>
-  ALWAYS_INLINE void Store(Region region, int32_t value) const {
-    region.StoreBits(start_offset_, value - min_value_, BitSize());
-    DCHECK_EQ(Load(region), value);
-  }
-
- private:
-  size_t start_offset_;
-  size_t end_offset_;
-  int32_t min_value_;
-};
-
-class StackMapEncoding {
- public:
-  StackMapEncoding()
-      : dex_pc_bit_offset_(0),
-        dex_register_map_bit_offset_(0),
-        inline_info_bit_offset_(0),
-        register_mask_index_bit_offset_(0),
-        stack_mask_index_bit_offset_(0),
-        total_bit_size_(0) {}
-
-  // Set stack map bit layout based on given sizes.
-  // Returns the size of stack map in bits.
-  size_t SetFromSizes(size_t native_pc_max,
-                      size_t dex_pc_max,
-                      size_t dex_register_map_size,
-                      size_t number_of_inline_info,
-                      size_t number_of_register_masks,
-                      size_t number_of_stack_masks) {
-    total_bit_size_ = 0;
-    DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(native_pc_max);
-
-    dex_pc_bit_offset_ = total_bit_size_;
-    // Note: We're not encoding the dex pc if there is none. That's the case
-    // for an intrinsified native method, such as String.charAt().
-    if (dex_pc_max != dex::kDexNoIndex) {
-      total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
-    }
-
-    // We also need +1 for kNoDexRegisterMap, but since the size is strictly
-    // greater than any offset we might try to encode, we already implicitly have it.
-    dex_register_map_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
-
-    // We also need +1 for kNoInlineInfo, but since the inline_info_size is strictly
-    // greater than the offset we might try to encode, we already implicitly have it.
-    // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
-    inline_info_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(number_of_inline_info);
-
-    register_mask_index_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
-
-    stack_mask_index_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(number_of_stack_masks);
-
-    return total_bit_size_;
-  }
-
-  ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
-    return FieldEncoding(kNativePcBitOffset, dex_pc_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
-    return FieldEncoding(dex_pc_bit_offset_, dex_register_map_bit_offset_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
-    return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetInlineInfoEncoding() const {
-    return FieldEncoding(inline_info_bit_offset_,
-                         register_mask_index_bit_offset_,
-                         -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetRegisterMaskIndexEncoding() const {
-    return FieldEncoding(register_mask_index_bit_offset_, stack_mask_index_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetStackMaskIndexEncoding() const {
-    return FieldEncoding(stack_mask_index_bit_offset_, total_bit_size_);
-  }
-  ALWAYS_INLINE size_t BitSize() const {
-    return total_bit_size_;
-  }
-
-  // Encode the encoding into the vector.
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    static_assert(alignof(StackMapEncoding) == 1, "Should not require alignment");
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
-    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
-  }
-
-  // Decode the encoding from a pointer, updates the pointer.
-  void Decode(const uint8_t** ptr) {
-    *this = *reinterpret_cast<const StackMapEncoding*>(*ptr);
-    *ptr += sizeof(*this);
-  }
-
-  void Dump(VariableIndentationOutputStream* vios) const;
-
- private:
-  static constexpr size_t kNativePcBitOffset = 0;
-  uint8_t dex_pc_bit_offset_;
-  uint8_t dex_register_map_bit_offset_;
-  uint8_t inline_info_bit_offset_;
-  uint8_t register_mask_index_bit_offset_;
-  uint8_t stack_mask_index_bit_offset_;
-  uint8_t total_bit_size_;
-};
-
 /**
  * A Stack Map holds compilation information for a specific PC necessary for:
  * - Mapping it to a dex PC,
@@ -794,248 +654,101 @@
  * - Knowing which registers hold objects,
  * - Knowing the inlining information,
  * - Knowing the values of dex registers.
- *
- * The information is of the form:
- *
- *   [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_index, register_mask_index,
- *   stack_mask_index].
  */
-class StackMap {
+class StackMap : public BitTable<6>::Accessor {
  public:
-  StackMap() {}
-  explicit StackMap(BitMemoryRegion region) : region_(region) {}
+  enum Field {
+    kNativePcOffset,
+    kDexPc,
+    kDexRegisterMapOffset,
+    kInlineInfoIndex,
+    kRegisterMaskIndex,
+    kStackMaskIndex,
+    kCount,
+  };
 
-  ALWAYS_INLINE bool IsValid() const { return region_.pointer() != nullptr; }
+  StackMap() : BitTable<kCount>::Accessor(nullptr, -1) {}
+  StackMap(const BitTable<kCount>* table, uint32_t row)
+    : BitTable<kCount>::Accessor(table, row) {}
 
-  ALWAYS_INLINE uint32_t GetDexPc(const StackMapEncoding& encoding) const {
-    return encoding.GetDexPcEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
-    encoding.GetDexPcEncoding().Store(region_, dex_pc);
-  }
-
-  ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding,
-                                           InstructionSet instruction_set) const {
-    CodeOffset offset(
-        CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+  ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+    CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
     return offset.Uint32Value(instruction_set);
   }
 
-  ALWAYS_INLINE void SetNativePcCodeOffset(const StackMapEncoding& encoding,
-                                           CodeOffset native_pc_offset) {
-    encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
-  }
+  uint32_t GetDexPc() const { return Get<kDexPc>(); }
 
-  ALWAYS_INLINE uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
-    return encoding.GetDexRegisterMapEncoding().Load(region_);
-  }
+  uint32_t GetDexRegisterMapOffset() const { return Get<kDexRegisterMapOffset>(); }
+  bool HasDexRegisterMap() const { return GetDexRegisterMapOffset() != kNoValue; }
 
-  ALWAYS_INLINE void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
-    encoding.GetDexRegisterMapEncoding().Store(region_, offset);
-  }
+  uint32_t GetInlineInfoIndex() const { return Get<kInlineInfoIndex>(); }
+  bool HasInlineInfo() const { return GetInlineInfoIndex() != kNoValue; }
 
-  ALWAYS_INLINE uint32_t GetInlineInfoIndex(const StackMapEncoding& encoding) const {
-    return encoding.GetInlineInfoEncoding().Load(region_);
-  }
+  uint32_t GetRegisterMaskIndex() const { return Get<kRegisterMaskIndex>(); }
 
-  ALWAYS_INLINE void SetInlineInfoIndex(const StackMapEncoding& encoding, uint32_t index) {
-    encoding.GetInlineInfoEncoding().Store(region_, index);
-  }
+  uint32_t GetStackMaskIndex() const { return Get<kStackMaskIndex>(); }
 
-  ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
-    return encoding.GetRegisterMaskIndexEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetRegisterMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
-    encoding.GetRegisterMaskIndexEncoding().Store(region_, mask);
-  }
-
-  ALWAYS_INLINE uint32_t GetStackMaskIndex(const StackMapEncoding& encoding) const {
-    return encoding.GetStackMaskIndexEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetStackMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
-    encoding.GetStackMaskIndexEncoding().Store(region_, mask);
-  }
-
-  ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
-    return GetDexRegisterMapOffset(encoding) != kNoDexRegisterMap;
-  }
-
-  ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
-    return GetInlineInfoIndex(encoding) != kNoInlineInfo;
-  }
-
-  ALWAYS_INLINE bool Equals(const StackMap& other) const {
-    return region_.pointer() == other.region_.pointer() &&
-           region_.size() == other.region_.size() &&
-           region_.BitOffset() == other.region_.BitOffset();
-  }
-
+  static void DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios);
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& code_info,
-            const CodeInfoEncoding& encoding,
             const MethodInfo& method_info,
             uint32_t code_offset,
             uint16_t number_of_dex_registers,
             InstructionSet instruction_set,
             const std::string& header_suffix = "") const;
-
-  // Special (invalid) offset for the DexRegisterMapOffset field meaning
-  // that there is no Dex register map for this stack map.
-  static constexpr uint32_t kNoDexRegisterMap = -1;
-
-  // Special (invalid) offset for the InlineDescriptorOffset field meaning
-  // that there is no inline info for this stack map.
-  static constexpr uint32_t kNoInlineInfo = -1;
-
- private:
-  static constexpr int kFixedSize = 0;
-
-  BitMemoryRegion region_;
-
-  friend class StackMapStream;
-};
-
-class InlineInfoEncoding {
- public:
-  void SetFromSizes(size_t method_index_idx_max,
-                    size_t dex_pc_max,
-                    size_t extra_data_max,
-                    size_t dex_register_map_size) {
-    total_bit_size_ = kMethodIndexBitOffset;
-    total_bit_size_ += MinimumBitsToStore(method_index_idx_max);
-
-    dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
-    // Note: We're not encoding the dex pc if there is none. That's the case
-    // for an intrinsified native method, such as String.charAt().
-    if (dex_pc_max != dex::kDexNoIndex) {
-      total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
-    }
-
-    extra_data_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(extra_data_max);
-
-    // We also need +1 for kNoDexRegisterMap, but since the size is strictly
-    // greater than any offset we might try to encode, we already implicitly have it.
-    dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const {
-    return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
-    return FieldEncoding(dex_pc_bit_offset_, extra_data_bit_offset_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE FieldEncoding GetExtraDataEncoding() const {
-    return FieldEncoding(extra_data_bit_offset_, dex_register_map_bit_offset_);
-  }
-  ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
-    return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
-  }
-  ALWAYS_INLINE size_t BitSize() const {
-    return total_bit_size_;
-  }
-
-  void Dump(VariableIndentationOutputStream* vios) const;
-
-  // Encode the encoding into the vector.
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    static_assert(alignof(InlineInfoEncoding) == 1, "Should not require alignment");
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
-    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
-  }
-
-  // Decode the encoding from a pointer, updates the pointer.
-  void Decode(const uint8_t** ptr) {
-    *this = *reinterpret_cast<const InlineInfoEncoding*>(*ptr);
-    *ptr += sizeof(*this);
-  }
-
- private:
-  static constexpr uint8_t kIsLastBitOffset = 0;
-  static constexpr uint8_t kMethodIndexBitOffset = 1;
-  uint8_t dex_pc_bit_offset_;
-  uint8_t extra_data_bit_offset_;
-  uint8_t dex_register_map_bit_offset_;
-  uint8_t total_bit_size_;
 };
 
 /**
- * Inline information for a specific PC. The information is of the form:
- *
- *   [is_last,
- *    method_index (or ArtMethod high bits),
- *    dex_pc,
- *    extra_data (ArtMethod low bits or 1),
- *    dex_register_map_offset]+.
+ * Inline information for a specific PC.
+ * The row referenced from the StackMap holds information at depth 0.
+ * Following rows hold information for further depths.
  */
-class InlineInfo {
+class InlineInfo : public BitTable<5>::Accessor {
  public:
-  explicit InlineInfo(BitMemoryRegion region) : region_(region) {}
+  enum Field {
+    kIsLast,  // Determines if there are further rows for further depths.
+    kMethodIndexIdx,  // Method index or ArtMethod high bits.
+    kDexPc,
+    kExtraData,  // ArtMethod low bits or 1.
+    kDexRegisterMapOffset,
+    kCount,
+  };
+  static constexpr uint32_t kLast = -1;
+  static constexpr uint32_t kMore = 0;
 
-  ALWAYS_INLINE uint32_t GetDepth(const InlineInfoEncoding& encoding) const {
+  InlineInfo(const BitTable<kCount>* table, uint32_t row)
+    : BitTable<kCount>::Accessor(table, row) {}
+
+  ALWAYS_INLINE InlineInfo AtDepth(uint32_t depth) const {
+    return InlineInfo(table_, this->row_ + depth);
+  }
+
+  uint32_t GetDepth() const {
     size_t depth = 0;
-    while (!GetRegionAtDepth(encoding, depth++).LoadBit(0)) { }  // Check is_last bit.
+    while (AtDepth(depth++).Get<kIsLast>() == kMore) { }
     return depth;
   }
 
-  ALWAYS_INLINE void SetDepth(const InlineInfoEncoding& encoding, uint32_t depth) {
-    DCHECK_GT(depth, 0u);
-    for (size_t d = 0; d < depth; ++d) {
-      GetRegionAtDepth(encoding, d).StoreBit(0, d == depth - 1);  // Set is_last bit.
-    }
+  uint32_t GetMethodIndexIdxAtDepth(uint32_t depth) const {
+    DCHECK(!EncodesArtMethodAtDepth(depth));
+    return AtDepth(depth).Get<kMethodIndexIdx>();
   }
 
-  ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
-                                                  uint32_t depth) const {
-    DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
-    return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth));
+  uint32_t GetMethodIndexAtDepth(const MethodInfo& method_info, uint32_t depth) const {
+    return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(depth));
   }
 
-  ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
-                                              uint32_t depth,
-                                              uint32_t index) {
-    encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+  uint32_t GetDexPcAtDepth(uint32_t depth) const {
+    return AtDepth(depth).Get<kDexPc>();
   }
 
-
-  ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
-                                               const MethodInfo& method_info,
-                                               uint32_t depth) const {
-    return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth));
+  bool EncodesArtMethodAtDepth(uint32_t depth) const {
+    return (AtDepth(depth).Get<kExtraData>() & 1) == 0;
   }
 
-  ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding,
-                                         uint32_t depth) const {
-    return encoding.GetDexPcEncoding().Load(GetRegionAtDepth(encoding, depth));
-  }
-
-  ALWAYS_INLINE void SetDexPcAtDepth(const InlineInfoEncoding& encoding,
-                                     uint32_t depth,
-                                     uint32_t dex_pc) {
-    encoding.GetDexPcEncoding().Store(GetRegionAtDepth(encoding, depth), dex_pc);
-  }
-
-  ALWAYS_INLINE bool EncodesArtMethodAtDepth(const InlineInfoEncoding& encoding,
-                                             uint32_t depth) const {
-    return (encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth)) & 1) == 0;
-  }
-
-  ALWAYS_INLINE void SetExtraDataAtDepth(const InlineInfoEncoding& encoding,
-                                         uint32_t depth,
-                                         uint32_t extra_data) {
-    encoding.GetExtraDataEncoding().Store(GetRegionAtDepth(encoding, depth), extra_data);
-  }
-
-  ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
-                                               uint32_t depth) const {
-    uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
-    uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load(
-        GetRegionAtDepth(encoding, depth));
+  ArtMethod* GetArtMethodAtDepth(uint32_t depth) const {
+    uint32_t low_bits = AtDepth(depth).Get<kExtraData>();
+    uint32_t high_bits = AtDepth(depth).Get<kMethodIndexIdx>();
     if (high_bits == 0) {
       return reinterpret_cast<ArtMethod*>(low_bits);
     } else {
@@ -1045,411 +758,132 @@
     }
   }
 
-  ALWAYS_INLINE uint32_t GetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
-                                                        uint32_t depth) const {
-    return encoding.GetDexRegisterMapEncoding().Load(GetRegionAtDepth(encoding, depth));
+  uint32_t GetDexRegisterMapOffsetAtDepth(uint32_t depth) const {
+    return AtDepth(depth).Get<kDexRegisterMapOffset>();
   }
 
-  ALWAYS_INLINE void SetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
-                                                    uint32_t depth,
-                                                    uint32_t offset) {
-    encoding.GetDexRegisterMapEncoding().Store(GetRegionAtDepth(encoding, depth), offset);
+  bool HasDexRegisterMapAtDepth(uint32_t depth) const {
+    return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoValue;
   }
 
-  ALWAYS_INLINE bool HasDexRegisterMapAtDepth(const InlineInfoEncoding& encoding,
-                                              uint32_t depth) const {
-    return GetDexRegisterMapOffsetAtDepth(encoding, depth) != StackMap::kNoDexRegisterMap;
-  }
-
+  static void DumpEncoding(const BitTable<5>& table, VariableIndentationOutputStream* vios);
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& info,
             const MethodInfo& method_info,
             uint16_t* number_of_dex_registers) const;
-
- private:
-  ALWAYS_INLINE BitMemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
-                                                 uint32_t depth) const {
-    size_t entry_size = encoding.BitSize();
-    DCHECK_GT(entry_size, 0u);
-    return region_.Subregion(depth * entry_size, entry_size);
-  }
-
-  BitMemoryRegion region_;
 };
 
-// Bit sized region encoding, may be more than 255 bits.
-class BitRegionEncoding {
+class InvokeInfo : public BitTable<3>::Accessor {
  public:
-  uint32_t num_bits = 0;
+  enum Field {
+    kNativePcOffset,
+    kInvokeType,
+    kMethodIndexIdx,
+    kCount,
+  };
 
-  ALWAYS_INLINE size_t BitSize() const {
-    return num_bits;
-  }
+  InvokeInfo(const BitTable<kCount>* table, uint32_t row)
+    : BitTable<kCount>::Accessor(table, row) {}
 
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, num_bits);  // Use leb in case num_bits is greater than 255.
-  }
-
-  void Decode(const uint8_t** ptr) {
-    num_bits = DecodeUnsignedLeb128(ptr);
-  }
-};
-
-// A table of bit sized encodings.
-template <typename Encoding>
-struct BitEncodingTable {
-  static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
-  // How the encoding is laid out (serialized).
-  Encoding encoding;
-
-  // Number of entries in the table (serialized).
-  size_t num_entries;
-
-  // Bit offset for the base of the table (computed).
-  size_t bit_offset = kInvalidOffset;
-
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, num_entries);
-    encoding.Encode(dest);
-  }
-
-  ALWAYS_INLINE void Decode(const uint8_t** ptr) {
-    num_entries = DecodeUnsignedLeb128(ptr);
-    encoding.Decode(ptr);
-  }
-
-  // Set the bit offset in the table and adds the space used by the table to offset.
-  void UpdateBitOffset(size_t* offset) {
-    DCHECK(offset != nullptr);
-    bit_offset = *offset;
-    *offset += encoding.BitSize() * num_entries;
-  }
-
-  // Return the bit region for the map at index i.
-  ALWAYS_INLINE BitMemoryRegion BitRegion(MemoryRegion region, size_t index) const {
-    DCHECK_NE(bit_offset, kInvalidOffset) << "Invalid table offset";
-    DCHECK_LT(index, num_entries);
-    const size_t map_size = encoding.BitSize();
-    return BitMemoryRegion(region, bit_offset + index * map_size, map_size);
-  }
-};
-
-// A byte sized table of possible variable sized encodings.
-struct ByteSizedTable {
-  static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
-
-  // Number of entries in the table (serialized).
-  size_t num_entries = 0;
-
-  // Number of bytes of the table (serialized).
-  size_t num_bytes;
-
-  // Bit offset for the base of the table (computed).
-  size_t byte_offset = kInvalidOffset;
-
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    EncodeUnsignedLeb128(dest, num_entries);
-    EncodeUnsignedLeb128(dest, num_bytes);
-  }
-
-  ALWAYS_INLINE void Decode(const uint8_t** ptr) {
-    num_entries = DecodeUnsignedLeb128(ptr);
-    num_bytes = DecodeUnsignedLeb128(ptr);
-  }
-
-  // Set the bit offset of the table. Adds the total bit size of the table to offset.
-  void UpdateBitOffset(size_t* offset) {
-    DCHECK(offset != nullptr);
-    DCHECK_ALIGNED(*offset, kBitsPerByte);
-    byte_offset = *offset / kBitsPerByte;
-    *offset += num_bytes * kBitsPerByte;
-  }
-};
-
-// Format is [native pc, invoke type, method index].
-class InvokeInfoEncoding {
- public:
-  void SetFromSizes(size_t native_pc_max,
-                    size_t invoke_type_max,
-                    size_t method_index_max) {
-    total_bit_size_ = 0;
-    DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
-    total_bit_size_ += MinimumBitsToStore(native_pc_max);
-    invoke_type_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(invoke_type_max);
-    method_index_bit_offset_ = total_bit_size_;
-    total_bit_size_ += MinimumBitsToStore(method_index_max);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
-    return FieldEncoding(kNativePcBitOffset, invoke_type_bit_offset_);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetInvokeTypeEncoding() const {
-    return FieldEncoding(invoke_type_bit_offset_, method_index_bit_offset_);
-  }
-
-  ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const {
-    return FieldEncoding(method_index_bit_offset_, total_bit_size_);
-  }
-
-  ALWAYS_INLINE size_t BitSize() const {
-    return total_bit_size_;
-  }
-
-  template<typename Vector>
-  void Encode(Vector* dest) const {
-    static_assert(alignof(InvokeInfoEncoding) == 1, "Should not require alignment");
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
-    dest->insert(dest->end(), ptr, ptr + sizeof(*this));
-  }
-
-  void Decode(const uint8_t** ptr) {
-    *this = *reinterpret_cast<const InvokeInfoEncoding*>(*ptr);
-    *ptr += sizeof(*this);
-  }
-
- private:
-  static constexpr uint8_t kNativePcBitOffset = 0;
-  uint8_t invoke_type_bit_offset_;
-  uint8_t method_index_bit_offset_;
-  uint8_t total_bit_size_;
-};
-
-class InvokeInfo {
- public:
-  explicit InvokeInfo(BitMemoryRegion region) : region_(region) {}
-
-  ALWAYS_INLINE uint32_t GetNativePcOffset(const InvokeInfoEncoding& encoding,
-                                           InstructionSet instruction_set) const {
-    CodeOffset offset(
-        CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+  ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+    CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
     return offset.Uint32Value(instruction_set);
   }
 
-  ALWAYS_INLINE void SetNativePcCodeOffset(const InvokeInfoEncoding& encoding,
-                                           CodeOffset native_pc_offset) {
-    encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
+  uint32_t GetInvokeType() const { return Get<kInvokeType>(); }
+
+  uint32_t GetMethodIndexIdx() const { return Get<kMethodIndexIdx>(); }
+
+  uint32_t GetMethodIndex(MethodInfo method_info) const {
+    return method_info.GetMethodIndex(GetMethodIndexIdx());
   }
-
-  ALWAYS_INLINE uint32_t GetInvokeType(const InvokeInfoEncoding& encoding) const {
-    return encoding.GetInvokeTypeEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetInvokeType(const InvokeInfoEncoding& encoding, uint32_t invoke_type) {
-    encoding.GetInvokeTypeEncoding().Store(region_, invoke_type);
-  }
-
-  ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const {
-    return encoding.GetMethodIndexEncoding().Load(region_);
-  }
-
-  ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding,
-                                       uint32_t method_index_idx) {
-    encoding.GetMethodIndexEncoding().Store(region_, method_index_idx);
-  }
-
-  ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding,
-                                        MethodInfo method_info) const {
-    return method_info.GetMethodIndex(GetMethodIndexIdx(encoding));
-  }
-
-  bool IsValid() const { return region_.pointer() != nullptr; }
-
- private:
-  BitMemoryRegion region_;
-};
-
-// Most of the fields are encoded as ULEB128 to save space.
-struct CodeInfoEncoding {
-  using SizeType = uint32_t;
-
-  static constexpr SizeType kInvalidSize = std::numeric_limits<SizeType>::max();
-
-  // Byte sized tables go first to avoid unnecessary alignment bits.
-  ByteSizedTable dex_register_map;
-  ByteSizedTable location_catalog;
-  BitEncodingTable<StackMapEncoding> stack_map;
-  BitEncodingTable<BitRegionEncoding> register_mask;
-  BitEncodingTable<BitRegionEncoding> stack_mask;
-  BitEncodingTable<InvokeInfoEncoding> invoke_info;
-  BitEncodingTable<InlineInfoEncoding> inline_info;
-
-  CodeInfoEncoding() {}
-
-  explicit CodeInfoEncoding(const void* data) {
-    const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
-    dex_register_map.Decode(&ptr);
-    location_catalog.Decode(&ptr);
-    stack_map.Decode(&ptr);
-    register_mask.Decode(&ptr);
-    stack_mask.Decode(&ptr);
-    invoke_info.Decode(&ptr);
-    if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      inline_info.Decode(&ptr);
-    } else {
-      inline_info = BitEncodingTable<InlineInfoEncoding>();
-    }
-    cache_header_size =
-        dchecked_integral_cast<SizeType>(ptr - reinterpret_cast<const uint8_t*>(data));
-    ComputeTableOffsets();
-  }
-
-  // Compress is not const since it calculates cache_header_size. This is used by PrepareForFillIn.
-  template<typename Vector>
-  void Compress(Vector* dest) {
-    dex_register_map.Encode(dest);
-    location_catalog.Encode(dest);
-    stack_map.Encode(dest);
-    register_mask.Encode(dest);
-    stack_mask.Encode(dest);
-    invoke_info.Encode(dest);
-    if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
-      inline_info.Encode(dest);
-    }
-    cache_header_size = dest->size();
-  }
-
-  ALWAYS_INLINE void ComputeTableOffsets() {
-    // Skip the header.
-    size_t bit_offset = HeaderSize() * kBitsPerByte;
-    // The byte tables must be aligned so they must go first.
-    dex_register_map.UpdateBitOffset(&bit_offset);
-    location_catalog.UpdateBitOffset(&bit_offset);
-    // Other tables don't require alignment.
-    stack_map.UpdateBitOffset(&bit_offset);
-    register_mask.UpdateBitOffset(&bit_offset);
-    stack_mask.UpdateBitOffset(&bit_offset);
-    invoke_info.UpdateBitOffset(&bit_offset);
-    inline_info.UpdateBitOffset(&bit_offset);
-    cache_non_header_size = RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte - HeaderSize();
-  }
-
-  ALWAYS_INLINE size_t HeaderSize() const {
-    DCHECK_NE(cache_header_size, kInvalidSize) << "Uninitialized";
-    return cache_header_size;
-  }
-
-  ALWAYS_INLINE size_t NonHeaderSize() const {
-    DCHECK_NE(cache_non_header_size, kInvalidSize) << "Uninitialized";
-    return cache_non_header_size;
-  }
-
- private:
-  // Computed fields (not serialized).
-  // Header size in bytes, cached to avoid needing to re-decoding the encoding in HeaderSize.
-  SizeType cache_header_size = kInvalidSize;
-  // Non header size in bytes, cached to avoid needing to re-decoding the encoding in NonHeaderSize.
-  SizeType cache_non_header_size = kInvalidSize;
 };
 
 /**
  * Wrapper around all compiler information collected for a method.
  * The information is of the form:
  *
- *   [CodeInfoEncoding, DexRegisterMap+, DexLocationCatalog+, StackMap+, RegisterMask+, StackMask+,
- *    InlineInfo*]
+ *   [non_header_size, DexRegisterMaps, DexLocationCatalog, BitTable<StackMap>,
+ *    BitTable<InvokeInfo>, BitTable<InlineInfo>, BitTable<RegisterMask>, StackMasks]
  *
- * where CodeInfoEncoding is of the form:
- *
- *   [ByteSizedTable(dex_register_map), ByteSizedTable(location_catalog),
- *    BitEncodingTable<StackMapEncoding>, BitEncodingTable<BitRegionEncoding>,
- *    BitEncodingTable<BitRegionEncoding>, BitEncodingTable<InlineInfoEncoding>]
  */
 class CodeInfo {
  public:
-  explicit CodeInfo(MemoryRegion region) : region_(region) {
-  }
-
   explicit CodeInfo(const void* data) {
-    CodeInfoEncoding encoding = CodeInfoEncoding(data);
-    region_ = MemoryRegion(const_cast<void*>(data),
-                           encoding.HeaderSize() + encoding.NonHeaderSize());
+    Decode(reinterpret_cast<const uint8_t*>(data));
   }
 
-  CodeInfoEncoding ExtractEncoding() const {
-    CodeInfoEncoding encoding(region_.begin());
-    AssertValidStackMap(encoding);
-    return encoding;
+  explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
+    DCHECK_EQ(size_, region.size());
   }
 
-  bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0;
+  explicit CodeInfo(const OatQuickMethodHeader* header)
+    : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
   }
 
-  DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
-    return DexRegisterLocationCatalog(region_.Subregion(encoding.location_catalog.byte_offset,
-                                                        encoding.location_catalog.num_bytes));
+  size_t Size() const {
+    return size_;
   }
 
-  ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_mask.encoding.BitSize();
+  bool HasInlineInfo() const {
+    return stack_maps_.NumColumnBits(StackMap::kInlineInfoIndex) != 0;
   }
 
-  ALWAYS_INLINE StackMap GetStackMapAt(size_t index, const CodeInfoEncoding& encoding) const {
-    return StackMap(encoding.stack_map.BitRegion(region_, index));
+  DexRegisterLocationCatalog GetDexRegisterLocationCatalog() const {
+    return DexRegisterLocationCatalog(location_catalog_);
   }
 
-  BitMemoryRegion GetStackMask(size_t index, const CodeInfoEncoding& encoding) const {
-    return encoding.stack_mask.BitRegion(region_, index);
+  ALWAYS_INLINE size_t GetNumberOfStackMaskBits() const {
+    return stack_mask_bits_;
   }
 
-  BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
-                                 const StackMap& stack_map) const {
-    return GetStackMask(stack_map.GetStackMaskIndex(encoding.stack_map.encoding), encoding);
+  ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const {
+    return StackMap(&stack_maps_, index);
   }
 
-  BitMemoryRegion GetRegisterMask(size_t index, const CodeInfoEncoding& encoding) const {
-    return encoding.register_mask.BitRegion(region_, index);
+  BitMemoryRegion GetStackMask(size_t index) const {
+    return stack_masks_.Subregion(index * stack_mask_bits_, stack_mask_bits_);
   }
 
-  uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
-    size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map.encoding);
-    return GetRegisterMask(index, encoding).LoadBits(0u, encoding.register_mask.encoding.BitSize());
+  BitMemoryRegion GetStackMaskOf(const StackMap& stack_map) const {
+    return GetStackMask(stack_map.GetStackMaskIndex());
   }
 
-  uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
-    return encoding.location_catalog.num_entries;
+  uint32_t GetRegisterMaskOf(const StackMap& stack_map) const {
+    return register_masks_.Get(stack_map.GetRegisterMaskIndex());
   }
 
-  uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
-    return encoding.location_catalog.num_bytes;
+  uint32_t GetNumberOfLocationCatalogEntries() const {
+    return location_catalog_entries_;
   }
 
-  uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map.num_entries;
+  uint32_t GetDexRegisterLocationCatalogSize() const {
+    return location_catalog_.size();
   }
 
-  // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
-  ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
-    return encoding.stack_map.encoding.BitSize() * GetNumberOfStackMaps(encoding);
+  uint32_t GetNumberOfStackMaps() const {
+    return stack_maps_.NumRows();
   }
 
-  InvokeInfo GetInvokeInfo(const CodeInfoEncoding& encoding, size_t index) const {
-    return InvokeInfo(encoding.invoke_info.BitRegion(region_, index));
+  InvokeInfo GetInvokeInfo(size_t index) const {
+    return InvokeInfo(&invoke_infos_, index);
   }
 
   DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
-                                     const CodeInfoEncoding& encoding,
                                      size_t number_of_dex_registers) const {
-    if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+    if (!stack_map.HasDexRegisterMap()) {
       return DexRegisterMap();
     }
-    const uint32_t offset = encoding.dex_register_map.byte_offset +
-        stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding);
-    size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
-    return DexRegisterMap(region_.Subregion(offset, size));
+    const uint32_t offset = stack_map.GetDexRegisterMapOffset();
+    size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+    return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
   }
 
-  size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
-                                uint32_t number_of_dex_registers) const {
+  size_t GetDexRegisterMapsSize(uint32_t number_of_dex_registers) const {
     size_t total = 0;
-    for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      DexRegisterMap map(GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap stack_map = GetStackMapAt(i);
+      DexRegisterMap map(GetDexRegisterMapOf(stack_map, number_of_dex_registers));
       total += map.Size();
     }
     return total;
@@ -1458,38 +892,30 @@
   // Return the `DexRegisterMap` pointed by `inline_info` at depth `depth`.
   DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
                                           InlineInfo inline_info,
-                                          const CodeInfoEncoding& encoding,
                                           uint32_t number_of_dex_registers) const {
-    if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, depth)) {
+    if (!inline_info.HasDexRegisterMapAtDepth(depth)) {
       return DexRegisterMap();
     } else {
-      uint32_t offset = encoding.dex_register_map.byte_offset +
-          inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, depth);
-      size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
-      return DexRegisterMap(region_.Subregion(offset, size));
+      uint32_t offset = inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+      size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+      return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
     }
   }
 
-  InlineInfo GetInlineInfo(size_t index, const CodeInfoEncoding& encoding) const {
-    // Since we do not know the depth, we just return the whole remaining map. The caller may
-    // access the inline info for arbitrary depths. To return the precise inline info we would need
-    // to count the depth before returning.
-    // TODO: Clean this up.
-    const size_t bit_offset = encoding.inline_info.bit_offset +
-        index * encoding.inline_info.encoding.BitSize();
-    return InlineInfo(BitMemoryRegion(region_, bit_offset, region_.size_in_bits() - bit_offset));
+  InlineInfo GetInlineInfo(size_t index) const {
+    return InlineInfo(&inline_infos_, index);
   }
 
-  InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
-    DCHECK(stack_map.HasInlineInfo(encoding.stack_map.encoding));
-    uint32_t index = stack_map.GetInlineInfoIndex(encoding.stack_map.encoding);
-    return GetInlineInfo(index, encoding);
+  InlineInfo GetInlineInfoOf(StackMap stack_map) const {
+    DCHECK(stack_map.HasInlineInfo());
+    uint32_t index = stack_map.GetInlineInfoIndex();
+    return GetInlineInfo(index);
   }
 
-  StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
-    for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+  StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap stack_map = GetStackMapAt(i);
+      if (stack_map.GetDexPc() == dex_pc) {
         return stack_map;
       }
     }
@@ -1498,40 +924,39 @@
 
   // Searches the stack map list backwards because catch stack maps are stored
   // at the end.
-  StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
-    for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
-      StackMap stack_map = GetStackMapAt(i - 1, encoding);
-      if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+  StackMap GetCatchStackMapForDexPc(uint32_t dex_pc) const {
+    for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
+      StackMap stack_map = GetStackMapAt(i - 1);
+      if (stack_map.GetDexPc() == dex_pc) {
         return stack_map;
       }
     }
     return StackMap();
   }
 
-  StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
-    size_t e = GetNumberOfStackMaps(encoding);
+  StackMap GetOsrStackMapForDexPc(uint32_t dex_pc) const {
+    size_t e = GetNumberOfStackMaps();
     if (e == 0) {
       // There cannot be OSR stack map if there is no stack map.
       return StackMap();
     }
     // Walk over all stack maps. If two consecutive stack maps are identical, then we
     // have found a stack map suitable for OSR.
-    const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
     for (size_t i = 0; i < e - 1; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
-        StackMap other = GetStackMapAt(i + 1, encoding);
-        if (other.GetDexPc(stack_map_encoding) == dex_pc &&
-            other.GetNativePcOffset(stack_map_encoding, kRuntimeISA) ==
-                stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA)) {
-          DCHECK_EQ(other.GetDexRegisterMapOffset(stack_map_encoding),
-                    stack_map.GetDexRegisterMapOffset(stack_map_encoding));
-          DCHECK(!stack_map.HasInlineInfo(stack_map_encoding));
+      StackMap stack_map = GetStackMapAt(i);
+      if (stack_map.GetDexPc() == dex_pc) {
+        StackMap other = GetStackMapAt(i + 1);
+        if (other.GetDexPc() == dex_pc &&
+            other.GetNativePcOffset(kRuntimeISA) ==
+                stack_map.GetNativePcOffset(kRuntimeISA)) {
+          DCHECK_EQ(other.GetDexRegisterMapOffset(),
+                    stack_map.GetDexRegisterMapOffset());
+          DCHECK(!stack_map.HasInlineInfo());
           if (i < e - 2) {
             // Make sure there are not three identical stack maps following each other.
             DCHECK_NE(
-                stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA),
-                GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding, kRuntimeISA));
+                stack_map.GetNativePcOffset(kRuntimeISA),
+                GetStackMapAt(i + 2).GetNativePcOffset(kRuntimeISA));
           }
           return stack_map;
         }
@@ -1540,30 +965,27 @@
     return StackMap();
   }
 
-  StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
-                                        const CodeInfoEncoding& encoding) const {
+  StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
     // TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
     //       maps are not. If we knew that the method does not have try/catch,
     //       we could do binary search.
-    for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i, encoding);
-      if (stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) ==
-          native_pc_offset) {
+    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+      StackMap stack_map = GetStackMapAt(i);
+      if (stack_map.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
         return stack_map;
       }
     }
     return StackMap();
   }
 
-  InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset,
-                                            const CodeInfoEncoding& encoding) {
-    for (size_t index = 0; index < encoding.invoke_info.num_entries; index++) {
-      InvokeInfo item = GetInvokeInfo(encoding, index);
-      if (item.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA) == native_pc_offset) {
+  InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset) {
+    for (size_t index = 0; index < invoke_infos_.NumRows(); index++) {
+      InvokeInfo item = GetInvokeInfo(index);
+      if (item.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
         return item;
       }
     }
-    return InvokeInfo(BitMemoryRegion());
+    return InvokeInfo(&invoke_infos_, -1);
   }
 
   // Dump this CodeInfo object on `os`.  `code_offset` is the (absolute)
@@ -1578,23 +1000,10 @@
             InstructionSet instruction_set,
             const MethodInfo& method_info) const;
 
-  // Check that the code info has valid stack map and abort if it does not.
-  void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
-    if (region_.size() != 0 && region_.size_in_bits() < GetStackMapsSizeInBits(encoding)) {
-      LOG(FATAL) << region_.size() << "\n"
-                 << encoding.HeaderSize() << "\n"
-                 << encoding.NonHeaderSize() << "\n"
-                 << encoding.location_catalog.num_entries << "\n"
-                 << encoding.stack_map.num_entries << "\n"
-                 << encoding.stack_map.encoding.BitSize();
-    }
-  }
-
  private:
   // Compute the size of the Dex register map associated to the stack map at
   // `dex_register_map_offset_in_code_info`.
-  size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
-                                     uint32_t dex_register_map_offset_in_code_info,
+  size_t ComputeDexRegisterMapSizeOf(uint32_t dex_register_map_offset,
                                      uint16_t number_of_dex_registers) const {
     // Offset where the actual mapping data starts within art::DexRegisterMap.
     size_t location_mapping_data_offset_in_dex_register_map =
@@ -1602,12 +1011,12 @@
     // Create a temporary art::DexRegisterMap to be able to call
     // art::DexRegisterMap::GetNumberOfLiveDexRegisters and
     DexRegisterMap dex_register_map_without_locations(
-        MemoryRegion(region_.Subregion(dex_register_map_offset_in_code_info,
-                                       location_mapping_data_offset_in_dex_register_map)));
+        MemoryRegion(dex_register_maps_.Subregion(dex_register_map_offset,
+                                        location_mapping_data_offset_in_dex_register_map)));
     size_t number_of_live_dex_registers =
         dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
     size_t location_mapping_data_size_in_bits =
-        DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries(encoding))
+        DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries())
         * number_of_live_dex_registers;
     size_t location_mapping_data_size_in_bytes =
         RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -1616,37 +1025,42 @@
     return dex_register_map_size;
   }
 
-  // Compute the size of a Dex register location catalog starting at offset `origin`
-  // in `region_` and containing `number_of_dex_locations` entries.
-  size_t ComputeDexRegisterLocationCatalogSize(uint32_t origin,
-                                               uint32_t number_of_dex_locations) const {
-    // TODO: Ideally, we would like to use art::DexRegisterLocationCatalog::Size or
-    // art::DexRegisterLocationCatalog::FindLocationOffset, but the
-    // DexRegisterLocationCatalog is not yet built.  Try to factor common code.
-    size_t offset = origin + DexRegisterLocationCatalog::kFixedSize;
-
-    // Skip the first `number_of_dex_locations - 1` entries.
-    for (uint16_t i = 0; i < number_of_dex_locations; ++i) {
-      // Read the first next byte and inspect its first 3 bits to decide
-      // whether it is a short or a large location.
-      DexRegisterLocationCatalog::ShortLocation first_byte =
-          region_.LoadUnaligned<DexRegisterLocationCatalog::ShortLocation>(offset);
-      DexRegisterLocation::Kind kind =
-          DexRegisterLocationCatalog::ExtractKindFromShortLocation(first_byte);
-      if (DexRegisterLocation::IsShortLocationKind(kind)) {
-        // Short location.  Skip the current byte.
-        offset += DexRegisterLocationCatalog::SingleShortEntrySize();
-      } else {
-        // Large location.  Skip the 5 next bytes.
-        offset += DexRegisterLocationCatalog::SingleLargeEntrySize();
-      }
-    }
-    size_t size = offset - origin;
-    return size;
+  MemoryRegion DecodeMemoryRegion(MemoryRegion& region, size_t* bit_offset) {
+    size_t length = DecodeVarintBits(BitMemoryRegion(region), bit_offset);
+    size_t offset = BitsToBytesRoundUp(*bit_offset);
+    *bit_offset = (offset + length) * kBitsPerByte;
+    return region.Subregion(offset, length);
   }
 
-  MemoryRegion region_;
-  friend class StackMapStream;
+  void Decode(const uint8_t* data) {
+    size_t non_header_size = DecodeUnsignedLeb128(&data);
+    MemoryRegion region(const_cast<uint8_t*>(data), non_header_size);
+    BitMemoryRegion bit_region(region);
+    size_t bit_offset = 0;
+    size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
+    dex_register_maps_ = DecodeMemoryRegion(region, &bit_offset);
+    location_catalog_entries_ = DecodeVarintBits(bit_region, &bit_offset);
+    location_catalog_ = DecodeMemoryRegion(region, &bit_offset);
+    stack_maps_.Decode(bit_region, &bit_offset);
+    invoke_infos_.Decode(bit_region, &bit_offset);
+    inline_infos_.Decode(bit_region, &bit_offset);
+    register_masks_.Decode(bit_region, &bit_offset);
+    stack_mask_bits_ = DecodeVarintBits(bit_region, &bit_offset);
+    stack_masks_ = bit_region.Subregion(bit_offset, non_header_size * kBitsPerByte - bit_offset);
+  }
+
+  size_t size_;
+  MemoryRegion dex_register_maps_;
+  uint32_t location_catalog_entries_;
+  MemoryRegion location_catalog_;
+  BitTable<StackMap::Field::kCount> stack_maps_;
+  BitTable<InvokeInfo::Field::kCount> invoke_infos_;
+  BitTable<InlineInfo::Field::kCount> inline_infos_;
+  BitTable<1> register_masks_;
+  uint32_t stack_mask_bits_ = 0;
+  BitMemoryRegion stack_masks_;
+
+  friend class OatDumper;
 };
 
 #undef ELEMENT_BYTE_OFFSET_AFTER
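
Taken together, the stack_map.h changes replace the explicit CodeInfoEncoding plumbing with self-describing BitTables: a consumer constructs one CodeInfo and queries it directly. A minimal sketch of the new read path (not part of the patch; the helper CountDexFramesAtPc and its parameters are hypothetical, while every CodeInfo/StackMap/InlineInfo call is one declared above):

#include "oat_quick_method_header.h"
#include "stack_map.h"

namespace art {

// Hypothetical helper: count the dex-level frames visible at a native PC,
// i.e. the outer method plus any inlined frames recorded for that stack map.
inline size_t CountDexFramesAtPc(const OatQuickMethodHeader* method_header,
                                 uint32_t native_pc_offset) {
  CodeInfo code_info(method_header);  // decodes all BitTables once, up front
  StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
  if (!map.IsValid()) {
    return 0u;  // no safepoint recorded at this PC
  }
  size_t frames = 1u;
  if (map.HasInlineInfo()) {
    // GetDepth() walks successive InlineInfo rows until kIsLast is set.
    frames += code_info.GetInlineInfoOf(map).GetDepth();
  }
  return frames;
}

}  // namespace art
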
diff --git a/runtime/thread.cc b/runtime/thread.cc
index eada24d..81ed722 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3559,16 +3559,15 @@
       StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
           reinterpret_cast<uintptr_t>(cur_quick_frame));
       uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
-      CodeInfo code_info = method_header->GetOptimizedCodeInfo();
-      CodeInfoEncoding encoding = code_info.ExtractEncoding();
-      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+      CodeInfo code_info(method_header);
+      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(map.IsValid());
 
-      T vreg_info(m, code_info, encoding, map, visitor_);
+      T vreg_info(m, code_info, map, visitor_);
 
       // Visit stack entries that hold pointers.
-      const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding);
-      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map);
+      const size_t number_of_bits = code_info.GetNumberOfStackMaskBits();
+      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
       for (size_t i = 0; i < number_of_bits; ++i) {
         if (stack_mask.LoadBit(i)) {
           StackReference<mirror::Object>* ref_addr = vreg_base + i;
@@ -3583,7 +3582,7 @@
         }
       }
       // Visit callee-save registers that hold pointers.
-      uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
+      uint32_t register_mask = code_info.GetRegisterMaskOf(map);
       for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
         if (register_mask & (1 << i)) {
           mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
@@ -3631,7 +3630,6 @@
     struct UndefinedVRegInfo {
       UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
                         const CodeInfo& code_info ATTRIBUTE_UNUSED,
-                        const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
                         const StackMap& map ATTRIBUTE_UNUSED,
                         RootVisitor& _visitor)
           : visitor(_visitor) {
@@ -3662,14 +3660,11 @@
     struct StackMapVRegInfo {
       StackMapVRegInfo(ArtMethod* method,
                        const CodeInfo& _code_info,
-                       const CodeInfoEncoding& _encoding,
                        const StackMap& map,
                        RootVisitor& _visitor)
           : number_of_dex_registers(method->DexInstructionData().RegistersSize()),
             code_info(_code_info),
-            encoding(_encoding),
             dex_register_map(code_info.GetDexRegisterMapOf(map,
-                                                           encoding,
                                                            number_of_dex_registers)),
             visitor(_visitor) {
       }
@@ -3684,7 +3679,7 @@
         bool found = false;
         for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
           DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
-              dex_reg, number_of_dex_registers, code_info, encoding);
+              dex_reg, number_of_dex_registers, code_info);
           if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
             visitor(ref, dex_reg, stack_visitor);
             found = true;
@@ -3718,7 +3713,6 @@
 
       size_t number_of_dex_registers;
       const CodeInfo& code_info;
-      const CodeInfoEncoding& encoding;
       DexRegisterMap dex_register_map;
       RootVisitor& visitor;
     };
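
The same simplification applies to the vreg lookup above: with the encoding parameter gone, finding the dex register that maps to a given location is a plain scan over the DexRegisterMap. A hedged sketch of that pattern (the helper name and out-parameter are illustrative; the CodeInfo, DexRegisterMap and DexRegisterLocation calls are the ones used in StackMapVRegInfo):

// Hypothetical helper mirroring the scan in StackMapVRegInfo after the change.
static bool FindDexRegisterWithLocation(const CodeInfo& code_info,
                                        const StackMap& map,
                                        uint16_t number_of_dex_registers,
                                        DexRegisterLocation::Kind kind,
                                        size_t value,
                                        /*out*/ size_t* found_dex_reg) {
  if (!map.HasDexRegisterMap()) {
    return false;  // stack map without live dex registers
  }
  DexRegisterMap dex_register_map =
      code_info.GetDexRegisterMapOf(map, number_of_dex_registers);
  for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
    DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
        dex_reg, number_of_dex_registers, code_info);
    if (location.GetKind() == kind &&
        static_cast<size_t>(location.GetValue()) == value) {
      *found_dex_reg = dex_reg;
      return true;
    }
  }
  return false;
}
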
diff --git a/runtime/var_handles.cc b/runtime/var_handles.cc
new file mode 100644
index 0000000..f08742f
--- /dev/null
+++ b/runtime/var_handles.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "var_handles.h"
+
+#include "common_throws.h"
+#include "dex/dex_instruction.h"
+#include "handle.h"
+#include "method_handles-inl.h"
+#include "mirror/method_type.h"
+#include "mirror/var_handle.h"
+
+namespace art {
+
+namespace {
+
+bool VarHandleInvokeAccessorWithConversions(Thread* self,
+                                            ShadowFrame& shadow_frame,
+                                            Handle<mirror::VarHandle> var_handle,
+                                            Handle<mirror::MethodType> callsite_type,
+                                            const mirror::VarHandle::AccessMode access_mode,
+                                            const InstructionOperands* const operands,
+                                            JValue* result)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  StackHandleScope<1> hs(self);
+  Handle<mirror::MethodType> accessor_type(hs.NewHandle(
+      var_handle->GetMethodTypeForAccessMode(self, access_mode)));
+  const size_t num_vregs = accessor_type->NumberOfVRegs();
+  const int num_params = accessor_type->GetPTypes()->GetLength();
+  ShadowFrameAllocaUniquePtr accessor_frame =
+      CREATE_SHADOW_FRAME(num_vregs, nullptr, shadow_frame.GetMethod(), shadow_frame.GetDexPC());
+  ShadowFrameGetter getter(shadow_frame, operands);
+  static const uint32_t kFirstDestinationReg = 0;
+  ShadowFrameSetter setter(accessor_frame.get(), kFirstDestinationReg);
+  if (!PerformConversions(self, callsite_type, accessor_type, &getter, &setter, num_params)) {
+    return false;
+  }
+  RangeInstructionOperands accessor_operands(kFirstDestinationReg,
+                                             kFirstDestinationReg + num_vregs);
+  if (!var_handle->Access(access_mode, accessor_frame.get(), &accessor_operands, result)) {
+    return false;
+  }
+  return ConvertReturnValue(callsite_type, accessor_type, result);
+}
+
+}  // namespace
+
+bool VarHandleInvokeAccessor(Thread* self,
+                             ShadowFrame& shadow_frame,
+                             Handle<mirror::VarHandle> var_handle,
+                             Handle<mirror::MethodType> callsite_type,
+                             const mirror::VarHandle::AccessMode access_mode,
+                             const InstructionOperands* const operands,
+                             JValue* result) {
+  if (var_handle.IsNull()) {
+    ThrowNullPointerExceptionFromDexPC();
+    return false;
+  }
+
+  if (!var_handle->IsAccessModeSupported(access_mode)) {
+    ThrowUnsupportedOperationException();
+    return false;
+  }
+
+  mirror::VarHandle::MatchKind match_kind =
+      var_handle->GetMethodTypeMatchForAccessMode(access_mode, callsite_type.Get());
+  if (LIKELY(match_kind == mirror::VarHandle::MatchKind::kExact)) {
+    return var_handle->Access(access_mode, &shadow_frame, operands, result);
+  } else if (match_kind == mirror::VarHandle::MatchKind::kWithConversions) {
+    return VarHandleInvokeAccessorWithConversions(self,
+                                                  shadow_frame,
+                                                  var_handle,
+                                                  callsite_type,
+                                                  access_mode,
+                                                  operands,
+                                                  result);
+  } else {
+    DCHECK_EQ(match_kind, mirror::VarHandle::MatchKind::kNone);
+    ThrowWrongMethodTypeException(var_handle->PrettyDescriptorForAccessMode(access_mode),
+                                  callsite_type->PrettyDescriptor());
+    return false;
+  }
+}
+
+}  // namespace art
diff --git a/runtime/var_handles.h b/runtime/var_handles.h
new file mode 100644
index 0000000..2ff8405
--- /dev/null
+++ b/runtime/var_handles.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VAR_HANDLES_H_
+#define ART_RUNTIME_VAR_HANDLES_H_
+
+#include "mirror/var_handle.h"
+
+namespace art {
+
+bool VarHandleInvokeAccessor(Thread* self,
+                             ShadowFrame& shadow_frame,
+                             Handle<mirror::VarHandle> var_handle,
+                             Handle<mirror::MethodType> callsite_type,
+                             const mirror::VarHandle::AccessMode access_mode,
+                             const InstructionOperands* const operands,
+                             JValue* result)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_VAR_HANDLES_H_
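
For context, a hedged sketch of a caller for the declaration above (the wrapper, its name, and the DCHECK are assumptions for illustration; only the VarHandleInvokeAccessor signature comes from this header):

#include "var_handles.h"

namespace art {

// Hypothetical interpreter-side wrapper: the helper performs the null and
// unsupported-access-mode checks and throws on failure, so a caller only
// forwards its arguments and propagates the boolean result.
static bool DoVarHandleInvoke(Thread* self,
                              ShadowFrame& shadow_frame,
                              Handle<mirror::VarHandle> var_handle,
                              Handle<mirror::MethodType> callsite_type,
                              mirror::VarHandle::AccessMode access_mode,
                              const InstructionOperands* operands,
                              JValue* result)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  bool success = VarHandleInvokeAccessor(self, shadow_frame, var_handle,
                                         callsite_type, access_mode, operands,
                                         result);
  DCHECK(success || self->IsExceptionPending());
  return success;
}

}  // namespace art
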
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5cd6091..91cec23 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2287,8 +2287,6 @@
     case Instruction::CONST_METHOD_HANDLE:
       work_line_->SetRegisterType<LockOp::kClear>(
           this, inst->VRegA_21c(), reg_types_.JavaLangInvokeMethodHandle());
-      // TODO: add compiler support for const-method-{handle,type} (b/66890674)
-      Fail(VERIFY_ERROR_FORCE_INTERPRETER);
       break;
     case Instruction::CONST_METHOD_TYPE:
       work_line_->SetRegisterType<LockOp::kClear>(
@@ -3088,7 +3086,8 @@
         DCHECK(HasFailures());
         break;
       }
-      const uint32_t proto_idx = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+      const uint16_t vRegH = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+      const dex::ProtoIndex proto_idx(vRegH);
       const char* return_descriptor =
           dex_file_->GetReturnTypeDescriptor(dex_file_->GetProtoId(proto_idx));
       const RegType& return_type =
@@ -3119,7 +3118,7 @@
       CallSiteArrayValueIterator it(*dex_file_, dex_file_->GetCallSiteId(call_site_idx));
       it.Next();  // Skip to name.
       it.Next();  // Skip to method type of the method handle
-      const uint32_t proto_idx = static_cast<uint32_t>(it.GetJavaValue().i);
+      const dex::ProtoIndex proto_idx(it.GetJavaValue().c);
       const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
       DexFileParameterIterator param_it(*dex_file_, proto_id);
       // Treat method as static as it has yet to be determined.
@@ -4192,7 +4191,8 @@
 
   if (UNLIKELY(method_type == METHOD_POLYMORPHIC)) {
     // Process the signature of the calling site that is invoking the method handle.
-    DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(inst->VRegH()));
+    dex::ProtoIndex proto_idx(inst->VRegH());
+    DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(proto_idx));
     return VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, res_method);
   } else {
     // Process the target method's signature.
@@ -4210,8 +4210,6 @@
     expected_return_descriptor = mirror::MethodHandle::GetReturnTypeDescriptor(method_name);
   } else if (klass == mirror::VarHandle::StaticClass()) {
     expected_return_descriptor = mirror::VarHandle::GetReturnTypeDescriptor(method_name);
-    // TODO: add compiler support for VarHandle accessor methods (b/71781600)
-    Fail(VERIFY_ERROR_FORCE_INTERPRETER);
   } else {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD)
         << "Signature polymorphic method in unsuppported class: " << klass->PrettyDescriptor();
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index b79334a..f7cdf39 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -23,6 +23,8 @@
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
 
+#include "base/enums.h"
+#include "class_linker.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "hidden_api.h"
 #include "jni/jni_internal.h"
@@ -30,6 +32,7 @@
 #include "mirror/throwable.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "obj_ptr-inl.h"
+#include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 
@@ -231,19 +234,28 @@
   V(java_lang_String_init_StringBuilder, "(Ljava/lang/StringBuilder;)V", newStringFromStringBuilder, "newStringFromStringBuilder", "(Ljava/lang/StringBuilder;)Ljava/lang/String;", NewStringFromStringBuilder) \
 
 #define STATIC_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, ...) \
-    static ArtMethod* init_runtime_name; \
-    static ArtMethod* new_runtime_name;
+    static ArtMethod* init_runtime_name = nullptr; \
+    static ArtMethod* new_runtime_name = nullptr;
     STRING_INIT_LIST(STATIC_STRING_INIT)
 #undef STATIC_STRING_INIT
 
-void WellKnownClasses::InitStringInit(JNIEnv* env) {
-  ScopedObjectAccess soa(Thread::Current());
-  #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name,             \
-                           new_java_name, new_signature, ...)                               \
-      init_runtime_name = jni::DecodeArtMethod(                                             \
-          CacheMethod(env, java_lang_String, false, "<init>", init_signature));             \
-      new_runtime_name = jni::DecodeArtMethod(                                              \
-          CacheMethod(env, java_lang_StringFactory, true, new_java_name, new_signature));
+void WellKnownClasses::InitStringInit(ObjPtr<mirror::Class> string_class,
+                                      ObjPtr<mirror::Class> string_builder_class) {
+  PointerSize p_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+  auto find_method = [p_size](ObjPtr<mirror::Class> klass,
+                              const char* name,
+                              const char* sig,
+                              bool expect_static) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ArtMethod* ret = klass->FindClassMethod(name, sig, p_size);
+    CHECK(ret != nullptr);
+    CHECK_EQ(expect_static, ret->IsStatic());
+    return ret;
+  };
+
+  #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name,                  \
+                           new_java_name, new_signature, ...)                                    \
+      init_runtime_name = find_method(string_class, "<init>", init_signature, false);            \
+      new_runtime_name = find_method(string_builder_class, new_java_name, new_signature, true);
       STRING_INIT_LIST(LOAD_STRING_INIT)
   #undef LOAD_STRING_INIT
 }
@@ -252,6 +264,7 @@
   QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
   #define SET_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name,              \
                           new_java_name, new_signature, entry_point_name)                   \
+      DCHECK(!Runtime::Current()->IsStarted() || (new_runtime_name) != nullptr);            \
       qpoints->p ## entry_point_name = reinterpret_cast<void(*)()>(new_runtime_name);
       STRING_INIT_LIST(SET_ENTRY_POINT)
   #undef SET_ENTRY_POINT
@@ -260,7 +273,9 @@
 ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) {
   #define TO_STRING_FACTORY(init_runtime_name, init_signature, new_runtime_name,            \
                             new_java_name, new_signature, entry_point_name)                 \
+      DCHECK((init_runtime_name) != nullptr);                                               \
       if (string_init == (init_runtime_name)) {                                             \
+        DCHECK((new_runtime_name) != nullptr);                                              \
         return (new_runtime_name);                                                          \
       }
       STRING_INIT_LIST(TO_STRING_FACTORY)
@@ -282,26 +297,9 @@
 }
 #undef STRING_INIT_LIST
 
-class ScopedHiddenApiExemption {
- public:
-  explicit ScopedHiddenApiExemption(Runtime* runtime)
-      : runtime_(runtime),
-        initial_policy_(runtime_->GetHiddenApiEnforcementPolicy()) {
-    runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kNoChecks);
-  }
-
-  ~ScopedHiddenApiExemption() {
-    runtime_->SetHiddenApiEnforcementPolicy(initial_policy_);
-  }
-
- private:
-  Runtime* runtime_;
-  const hiddenapi::EnforcementPolicy initial_policy_;
-  DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiExemption);
-};
-
 void WellKnownClasses::Init(JNIEnv* env) {
-  ScopedHiddenApiExemption hiddenapi_exemption(Runtime::Current());
+  hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+      hiddenapi::EnforcementPolicy::kNoChecks);
 
   dalvik_annotation_optimization_CriticalNative =
       CacheClass(env, "dalvik/annotation/optimization/CriticalNative");
@@ -427,9 +425,6 @@
   java_lang_Integer_valueOf = CachePrimitiveBoxingMethod(env, 'I', "java/lang/Integer");
   java_lang_Long_valueOf = CachePrimitiveBoxingMethod(env, 'J', "java/lang/Long");
   java_lang_Short_valueOf = CachePrimitiveBoxingMethod(env, 'S', "java/lang/Short");
-
-  InitStringInit(env);
-  Thread::Current()->InitStringEntryPoints();
 }
 
 void WellKnownClasses::LateInit(JNIEnv* env) {
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 25c07b2..c06e4a7 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -40,6 +40,9 @@
 
   static void Clear();
 
+  static void InitStringInit(ObjPtr<mirror::Class> string_class,
+                             ObjPtr<mirror::Class> string_builder_class)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   static ArtMethod* StringInitToStringFactory(ArtMethod* method);
   static uint32_t StringInitToEntryPoint(ArtMethod* method);
 
@@ -168,9 +171,6 @@
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_length;
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_offset;
   static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_type;
-
- private:
-  static void InitStringInit(JNIEnv* env);
 };
 
 }  // namespace art
diff --git a/simulator/Android.bp b/simulator/Android.bp
index 74b5a90..8690426 100644
--- a/simulator/Android.bp
+++ b/simulator/Android.bp
@@ -44,6 +44,7 @@
     defaults: ["libart_simulator_defaults"],
     shared_libs: [
         "libart",
+        "libartbase",
         "libvixl-arm64",
     ],
 }
@@ -56,6 +57,7 @@
     ],
     shared_libs: [
         "libartd",
+        "libartbased",
         "libvixld-arm64",
     ],
 }
@@ -80,6 +82,7 @@
     name: "libart-simulator-container",
     defaults: ["libart_simulator_container_defaults"],
     shared_libs: [
+        "libartbase",
         "libart",
     ],
 }
@@ -91,6 +94,7 @@
         "libart_simulator_container_defaults",
     ],
     shared_libs: [
+        "libartbased",
         "libartd",
     ],
 }
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 6a9bf61..2b57824 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -33,7 +33,7 @@
 14 (class java.lang.Short)
 [java.lang.String(int,int,char[]), public java.lang.String(), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder)]
 [private final int java.lang.String.count, private int java.lang.String.hash, private static final java.io.ObjectStreamField[] java.lang.String.serialPersistentFields, private static final long java.lang.String.serialVersionUID, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
-[native void java.lang.String.getCharsNoCheck(int,int,char[],int), private boolean java.lang.String.nonSyncContentEquals(java.lang.AbstractStringBuilder), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native java.lang.String java.lang.String.doReplace(char,char), private native java.lang.String java.lang.String.fastSubstring(int,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String 
java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.CharSequence[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.Iterable), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int), void java.lang.String.getChars(char[],int)]
+[native void java.lang.String.getCharsNoCheck(int,int,char[],int), private boolean java.lang.String.nonSyncContentEquals(java.lang.AbstractStringBuilder), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native java.lang.String java.lang.String.doReplace(char,char), private native java.lang.String java.lang.String.fastSubstring(int,int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), private static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public 
native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.CharSequence[]), public static java.lang.String java.lang.String.join(java.lang.CharSequence,java.lang.Iterable), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(char[],int,int,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(char[],int,int,java.lang.String,int), void java.lang.String.getChars(char[],int)]
 []
 [interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
 0
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index ebde3bf..93c1538 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -1137,6 +1137,126 @@
 
   static Object[] sArray;
 
+  /// CHECK-START: int Main.testLocalArrayMerge1(boolean) load_store_elimination (before)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const0>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge1(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG:                 Return [<<Const1>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge1(boolean) load_store_elimination (after)
+  /// CHECK-NOT:                 NewArray
+  /// CHECK-NOT:                 ArraySet
+  /// CHECK-NOT:                 ArrayGet
+  private static int testLocalArrayMerge1(boolean x) {
+    // The explicit store can be removed right away
+    // since it is equivalent to the default.
+    int[] a = { 0 };
+    // The diamond pattern stores/load can be replaced
+    // by the direct value.
+    if (x) {
+      a[0] = 1;
+    } else {
+      a[0] = 1;
+    }
+    return a[0];
+  }
+
+  /// CHECK-START: int Main.testLocalArrayMerge2(boolean) load_store_elimination (before)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+  /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const2>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const3>>]
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge2(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge2(boolean) load_store_elimination (after)
+  /// CHECK-DAG:                 ArraySet
+  /// CHECK-DAG:                 ArraySet
+  /// CHECK-NOT:                 ArraySet
+  private static int testLocalArrayMerge2(boolean x) {
+    // The explicit store can be removed eventually even
+    // though it is not equivalent to the default.
+    int[] a = { 1 };
+    // The diamond pattern stores/load remain.
+    if (x) {
+      a[0] = 2;
+    } else {
+      a[0] = 3;
+    }
+    return a[0];
+  }
+
+  /// CHECK-START: int Main.testLocalArrayMerge3(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const2>>]
+  /// CHECK-DAG: <<Get:i\d+>>    ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG:                 Return [<<Get>>]
+  private static int testLocalArrayMerge3(boolean x) {
+    // All stores/load remain.
+    int[] a = { 1 };
+    if (x) {
+      a[0] = 2;
+    }
+    return a[0];
+  }
+
+  /// CHECK-START: int Main.testLocalArrayMerge4(boolean) load_store_elimination (before)
+  /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<A:l\d+>>      NewArray
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const0>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG:                 ArraySet [<<A>>,<<Const0>>,<<Const1>>]
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG: <<Get2:a\d+>>   ArrayGet [<<A>>,<<Const0>>]
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get1>>,<<Get2>>]
+  /// CHECK-DAG:                 Return [<<Add>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge4(boolean) load_store_elimination (after)
+  /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+  /// CHECK-DAG: <<Cnv1:b\d+>>   TypeConversion [<<Const1>>]
+  /// CHECK-DAG: <<Cnv2:a\d+>>   TypeConversion [<<Const1>>]
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Cnv1>>,<<Cnv2>>]
+  /// CHECK-DAG:                 Return [<<Add>>]
+  //
+  /// CHECK-START: int Main.testLocalArrayMerge4(boolean) load_store_elimination (after)
+  /// CHECK-NOT:                 NewArray
+  /// CHECK-NOT:                 ArraySet
+  /// CHECK-NOT:                 ArrayGet
+  private static int testLocalArrayMerge4(boolean x) {
+    byte[] a = { 0 };
+    if (x) {
+      a[0] = 1;
+    } else {
+      a[0] = 1;
+    }
+    // Differently typed (signed vs unsigned),
+    // but same reference.
+    return a[0] + (a[0] & 0xff);
+  }
+
   static void assertIntEquals(int result, int expected) {
     if (expected != result) {
       throw new Error("Expected: " + expected + ", found: " + result);
@@ -1271,6 +1391,15 @@
     assertIntEquals(testclass2.i, 55);
 
     assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
+
+    assertIntEquals(testLocalArrayMerge1(true), 1);
+    assertIntEquals(testLocalArrayMerge1(false), 1);
+    assertIntEquals(testLocalArrayMerge2(true), 2);
+    assertIntEquals(testLocalArrayMerge2(false), 3);
+    assertIntEquals(testLocalArrayMerge3(true), 2);
+    assertIntEquals(testLocalArrayMerge3(false), 1);
+    assertIntEquals(testLocalArrayMerge4(true), 2);
+    assertIntEquals(testLocalArrayMerge4(false), 2);
   }
 
   static boolean sFlag;
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index adafb78..8898c48 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -133,17 +133,8 @@
 
 .end method
 
-# Test that the compiler does not assume that the first argument of String.<init>
-# is a NewInstance by inserting an irreducible loop between them (b/26676472).
-
-# We verify the type of the input instruction (Phi) in debuggable mode, because
-# it is eliminated by later stages of SsaBuilder otherwise.
-
-## CHECK-START-DEBUGGABLE: java.lang.String TestCase.thisNotNewInstance1(byte[], boolean) register (after)
-## CHECK-DAG:                   InvokeStaticOrDirect env:[[<<Phi:l\d+>>,{{.*]]}}
-## CHECK-DAG:     <<Phi>>       Phi
-
-.method public static thisNotNewInstance1([BZ)Ljava/lang/String;
+# Test #1 for irreducible loops and String.<init>.
+.method public static irreducibleLoopAndStringInit1([BZ)Ljava/lang/String;
    .registers 5
 
    new-instance v0, Ljava/lang/String;
@@ -164,11 +155,8 @@
 
 .end method
 
-## CHECK-START-DEBUGGABLE: java.lang.String TestCase.thisNotNewInstance2(byte[], boolean) register (after)
-## CHECK-DAG:                   InvokeStaticOrDirect env:[[<<Phi:l\d+>>,{{.*]]}}
-## CHECK-DAG:     <<Phi>>       Phi
-
-.method public static thisNotNewInstance2([BZ)Ljava/lang/String;
+# Test #2 for irreducible loops and String.<init>.
+.method public static irreducibleLoopAndStringInit2([BZ)Ljava/lang/String;
    .registers 5
 
    new-instance v0, Ljava/lang/String;
@@ -188,3 +176,26 @@
    return-object v0
 
 .end method
+
+# Test #3 for irreducible loops and an aliased String.<init> receiver.
+.method public static irreducibleLoopAndStringInit3([BZ)Ljava/lang/String;
+   .registers 5
+
+   new-instance v0, Ljava/lang/String;
+   move-object v2, v0
+
+   # Irreducible loop
+   if-eqz p1, :loop_entry
+   :loop_header
+   const v1, 0x1
+   xor-int p1, p1, v1
+   :loop_entry
+   if-eqz p1, :string_init
+   goto :loop_header
+
+   :string_init
+   const-string v1, "UTF8"
+   invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+   return-object v2
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index 78cb37a..d38b7f4 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -65,14 +65,21 @@
     }
 
     {
-      Method m = c.getMethod("thisNotNewInstance1", byte[].class, boolean.class);
+      Method m = c.getMethod("irreducibleLoopAndStringInit1", byte[].class, boolean.class);
       String result = (String) m.invoke(null, new Object[] { testData, true });
       assertEqual(testString, result);
       result = (String) m.invoke(null, new Object[] { testData, false });
       assertEqual(testString, result);
     }
     {
-      Method m = c.getMethod("thisNotNewInstance2", byte[].class, boolean.class);
+      Method m = c.getMethod("irreducibleLoopAndStringInit2", byte[].class, boolean.class);
+      String result = (String) m.invoke(null, new Object[] { testData, true });
+      assertEqual(testString, result);
+      result = (String) m.invoke(null, new Object[] { testData, false });
+      assertEqual(testString, result);
+    }
+    {
+      Method m = c.getMethod("irreducibleLoopAndStringInit3", byte[].class, boolean.class);
       String result = (String) m.invoke(null, new Object[] { testData, true });
       assertEqual(testString, result);
       result = (String) m.invoke(null, new Object[] { testData, false });
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index e2b8aa0..7c1507f 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -48,9 +48,8 @@
     }
   }
 
-  CodeInfo info = header->GetOptimizedCodeInfo();
-  CodeInfoEncoding encoding = info.ExtractEncoding();
-  CHECK(info.HasInlineInfo(encoding));
+  CodeInfo info(header);
+  CHECK(info.HasInlineInfo());
 }
 
 static void allocate_profiling_info(jclass cls, const char* method_name) {
diff --git a/test/586-checker-null-array-get/src/Main.java b/test/586-checker-null-array-get/src/Main.java
index de9429f..ebe91cf 100644
--- a/test/586-checker-null-array-get/src/Main.java
+++ b/test/586-checker-null-array-get/src/Main.java
@@ -107,9 +107,8 @@
   /// CHECK-DAG: <<GetJ3:j\d+>>      ArrayGet [<<CheckJ>>,{{i\d+}}]
   public static void bar() {
     // We create multiple accesses that will lead the bounds check
-    // elimination pass to add a HDeoptimize. Not having the bounds check helped
-    // the load store elimination think it could merge two ArrayGet with different
-    // types.
+    // elimination pass to add a HDeoptimize. Not having the bounds check
+    // would make the ArrayGets look identical were it not for the type!
     String[] array = (String[])getNull();
     objectField = array[0];
     objectField = array[1];
diff --git a/test/674-hiddenapi/src-ex/ChildClass.java b/test/674-hiddenapi/src-ex/ChildClass.java
index 0349e8f..db3ba6d 100644
--- a/test/674-hiddenapi/src-ex/ChildClass.java
+++ b/test/674-hiddenapi/src-ex/ChildClass.java
@@ -87,16 +87,21 @@
     ChildClass.everythingWhitelisted = everythingWhitelisted;
 
     boolean isSameBoot = (isParentInBoot == isChildInBoot);
+    boolean isDebuggable = VMRuntime.getRuntime().isJavaDebuggable();
 
     // Run meaningful combinations of access flags.
     for (Hiddenness hiddenness : Hiddenness.values()) {
       final Behaviour expected;
-      if (isSameBoot || hiddenness == Hiddenness.Whitelist || everythingWhitelisted) {
+      // Warnings are now disabled whenever access is granted, even for
+      // greylisted APIs. This is the behaviour for release builds.
+      if (isSameBoot || everythingWhitelisted || hiddenness == Hiddenness.Whitelist) {
         expected = Behaviour.Granted;
       } else if (hiddenness == Hiddenness.Blacklist) {
         expected = Behaviour.Denied;
-      } else {
+      } else if (isDebuggable) {
         expected = Behaviour.Warning;
+      } else {
+        expected = Behaviour.Granted;
       }
 
       for (boolean isStatic : booleanValues) {
diff --git a/test/677-fsi2/expected.txt b/test/677-fsi2/expected.txt
new file mode 100644
index 0000000..de00847
--- /dev/null
+++ b/test/677-fsi2/expected.txt
@@ -0,0 +1,4 @@
+Run default
+Hello World
+Run without dex2oat
+Hello World
diff --git a/test/677-fsi2/info.txt b/test/677-fsi2/info.txt
new file mode 100644
index 0000000..ed0a0f2
--- /dev/null
+++ b/test/677-fsi2/info.txt
@@ -0,0 +1 @@
+Test that -Xonly-use-system-oat-files works.
diff --git a/test/677-fsi2/run b/test/677-fsi2/run
new file mode 100644
index 0000000..039a6a7
--- /dev/null
+++ b/test/677-fsi2/run
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "Run default"
+${RUN} $@ --runtime-option -Xonly-use-system-oat-files
+return_status1=$?
+
+echo "Run without dex2oat"
+${RUN} $@ --no-dex2oat --runtime-option -Xonly-use-system-oat-files
+return_status2=$?
+
+(exit $return_status1) && (exit $return_status2)
diff --git a/test/677-fsi2/src/Main.java b/test/677-fsi2/src/Main.java
new file mode 100644
index 0000000..834075f
--- /dev/null
+++ b/test/677-fsi2/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    System.out.println("Hello World");
+  }
+}
diff --git a/test/712-varhandle-invocations/src/SampleValues.java b/test/712-varhandle-invocations/src/SampleValues.java
new file mode 100644
index 0000000..79f4f19
--- /dev/null
+++ b/test/712-varhandle-invocations/src/SampleValues.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Sample values for use in VarHandle tests. These are here to avoid repeated boxing, which
+ * makes gcstress tests run slowly. */
+public class SampleValues {
+    public static final boolean[] PRIMITIVE_BOOLEANS = new boolean[] {true, false};
+
+    public static final Boolean[] BOOLEANS = new Boolean[] {true, false};
+
+    public static final byte[] PRIMITIVE_BYTES =
+            new byte[] {(byte) -128, (byte) -61, (byte) 7, (byte) 127, (byte) 33};
+
+    public static final Byte[] BYTES =
+            new Byte[] {(byte) -128, (byte) -61, (byte) 7, (byte) 127, (byte) 33};
+
+    public static final short[] PRIMITIVE_SHORTS =
+            new short[] {(short) -32768, (short) -384, (short) 32767, (short) 0xaa55};
+
+    public static final Short[] SHORTS =
+            new Short[] {(short) -32768, (short) -384, (short) 32767, (short) 0xaa55};
+
+    public static final char[] PRIMITIVE_CHARS =
+            new char[] {'A', '#', '$', 'Z', 't', 'c'};
+
+    public static final Character[] CHARACTERS =
+            new Character[] {'A', '#', '$', 'Z', 't', 'c'};
+
+    public static final int[] PRIMITIVE_INTS =
+            new int[] {-0x01234567, 0x7f6e5d4c, 0x12345678, 0x10215220, 42};
+
+    public static final Integer[] INTEGERS =
+            new Integer[] {-0x01234567, 0x7f6e5d4c, 0x12345678, 0x10215220, 42};
+
+    public static final long[] PRIMITIVE_LONGS =
+            new long[] {-0x0123456789abcdefl, 0x789abcdef0123456l, 0xfedcba9876543210l};
+
+    public static final Long[] LONGS =
+            new Long[] {-0x0123456789abcdefl, 0x789abcdef0123456l, 0xfedcba9876543210l};
+
+    public static final float[] PRIMITIVE_FLOATS =
+            new float[] {-7.77e23f, 1.234e-17f, 3.40e36f, -8.888e3f, 4.442e11f};
+
+    public static final Float[] FLOATS =
+            new Float[] {-7.77e23f, 1.234e-17f, 3.40e36f, -8.888e3f, 4.442e11f};
+
+    public static final double[] PRIMITIVE_DOUBLES =
+            new double[] {-1.0e-200, 1.11e200, 3.141, 1.1111, 6.022e23, 6.626e-34};
+
+    public static final Double[] DOUBLES =
+            new Double[] {-1.0e-200, 1.11e200, 3.141, 1.1111, 6.022e23, 6.626e-34};
+
+    public static boolean get_boolean(int index) {
+        return PRIMITIVE_BOOLEANS[index];
+    }
+
+    public static Boolean get_Boolean(int index) {
+        return BOOLEANS[index];
+    }
+
+    public static byte get_byte(int index) {
+        return PRIMITIVE_BYTES[index];
+    }
+
+    public static Byte get_Byte(int index) {
+        return BYTES[index];
+    }
+
+    public static short get_short(int index) {
+        return PRIMITIVE_SHORTS[index];
+    }
+
+    public static Short get_Short(int index) {
+        return SHORTS[index];
+    }
+
+    public static char get_char(int index) {
+        return PRIMITIVE_CHARS[index];
+    }
+
+    public static Character get_Character(int index) {
+        return CHARACTERS[index];
+    }
+
+    public static int get_int(int index) {
+        return PRIMITIVE_INTS[index];
+    }
+
+    public static Integer get_Integer(int index) {
+        return INTEGERS[index];
+    }
+
+    public static long get_long(int index) {
+        return PRIMITIVE_LONGS[index];
+    }
+
+    public static Long get_Long(int index) {
+        return LONGS[index];
+    }
+
+    public static float get_float(int index) {
+        return PRIMITIVE_FLOATS[index];
+    }
+
+    public static Float get_Float(int index) {
+        return FLOATS[index];
+    }
+
+    public static double get_double(int index) {
+        return PRIMITIVE_DOUBLES[index];
+    }
+
+    public static Double get_Double(int index) {
+        return DOUBLES[index];
+    }
+}
+
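
As a usage note (illustrative only, not part of the patch): the generated VarHandle tests are expected to fetch their boxed arguments from this class by index instead of boxing fresh literals on every invocation. A hypothetical consumer would look roughly like this:

    public class SampleValuesUsage {
        public static void main(String[] args) {
            // Reusing the pre-boxed values avoids allocating a new wrapper on
            // every accessor invocation, which is what slows down gcstress runs.
            for (int i = 0; i < SampleValues.PRIMITIVE_INTS.length; ++i) {
                int primitive = SampleValues.get_int(i);
                Integer boxed = SampleValues.get_Integer(i);
                System.out.println(primitive + " == " + boxed);
            }
        }
    }

This mirrors the SampleValues.get_{type}(i) calls emitted by generate_java.py further below.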
diff --git a/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java b/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java
index bc64c0c..5a69b54 100644
--- a/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java
+++ b/test/712-varhandle-invocations/src/VarHandleUnitTestCollector.java
@@ -19,30 +19,52 @@
 // Results collector for VarHandle Unit tests
 public final class VarHandleUnitTestCollector {
     private final PrintStream out = System.out;
+    private final boolean verbose = false;
 
     private int numberOfSuccesses;
     private int numberOfSkips;
     private int numberOfFailures;
+    private int consecutiveResults = 0;
+    private String current;
+    private long startMillis;
 
     public void start(String testName) {
-        out.print(testName);
-        out.print("...");
+        out.append(testName)
+                .append("...");
+        consecutiveResults = 0;
+        current = testName;
+        startMillis = System.currentTimeMillis();
+    }
+
+    private void printStatus(String status) {
+        out.print(status);
+        if (verbose) {
+            out.print('[');
+            out.print(System.currentTimeMillis() - startMillis);
+            out.print(']');
+        }
+        out.println();
     }
 
     public void skip() {
         numberOfSkips += 1;
-        out.println("SKIP");
+        printStatus("SKIP");
+        consecutiveResults++;
     }
 
     public void success() {
         numberOfSuccesses += 1;
-        out.println("OK");
+        printStatus("OK");
+        if (consecutiveResults++ > 1) {
+            throw new AssertionError("Oops: " + consecutiveResults);
+        }
     }
 
     public void fail(String errorMessage) {
         numberOfFailures += 1;
-        out.println("FAIL");
+        printStatus("FAIL");
         out.print(errorMessage);
+        consecutiveResults++;
     }
 
     public void printSummary() {
diff --git a/test/712-varhandle-invocations/util-src/generate_java.py b/test/712-varhandle-invocations/util-src/generate_java.py
index 9520b53..f535b40 100644
--- a/test/712-varhandle-invocations/util-src/generate_java.py
+++ b/test/712-varhandle-invocations/util-src/generate_java.py
@@ -757,7 +757,9 @@
 """)
     with io.StringIO() as body_text:
         compatible_types = types_that_widen_to(var_type)
-        for value_type in VALUE_TYPES:
+        incompatible_types = { RANDOM.choice(list(VALUE_TYPES - compatible_types)) }
+        test_types = compatible_types | incompatible_types
+        for value_type in test_types:
             print("try {", file=body_text)
             return_type = accessor.get_return_type(var_type)
             if return_type:
@@ -765,7 +767,7 @@
             print("vh.{0}(this".format(accessor.method_name), end="", file=body_text)
             num_args = accessor.get_number_of_var_type_arguments()
             for i in range(0, num_args):
-                print(", {0}({1})".format(value_type.boxing_method(), value_type.examples[i]), end="", file=body_text)
+                print(", SampleValues.get_{0}({1})".format(value_type.boxed_type, i), end="", file=body_text)
             print(");", file=body_text)
             if value_type in compatible_types:
                 print("   assertTrue(vh.isAccessModeSupported(VarHandle.AccessMode.{0}));".format(accessor.access_mode),
@@ -817,7 +819,9 @@
     with io.StringIO() as body_text:
         return_type = accessor.get_return_type(var_type)
         compatible_types = { return_type }
-        for value_type in VALUE_TYPES:
+        incompatible_types = { RANDOM.choice(list(VALUE_TYPES - compatible_types)) }
+        test_types = compatible_types | incompatible_types
+        for value_type in test_types:
             print("try {", file=body_text)
             print("{0} result = ({0}) ".format(value_type.boxed_type), end="", file=body_text)
             print("vh.{0}(this".format(accessor.method_name), end="", file=body_text)
diff --git a/test/979-const-method-handle/expected.txt b/test/979-const-method-handle/expected.txt
index 2d169b9..bbaaedb 100644
--- a/test/979-const-method-handle/expected.txt
+++ b/test/979-const-method-handle/expected.txt
@@ -5,4 +5,5 @@
 Hello World! And Hello Zorba
 name is HoverFly
 2.718281828459045
+repeatConstMethodHandle()
 Attempting to set Math.E raised IAE
diff --git a/test/979-const-method-handle/src/Main.java b/test/979-const-method-handle/src/Main.java
index 79be913..427ca7a 100644
--- a/test/979-const-method-handle/src/Main.java
+++ b/test/979-const-method-handle/src/Main.java
@@ -20,12 +20,25 @@
 import java.lang.invoke.MethodType;
 
 class Main {
+    /**
+     * Number of iterations run to attempt to trigger JIT compilation. These tests run on ART and
+     * the RI, so they iterate rather than using the ART-only native method ensureJitCompiled().
+     */
+    private static final int ITERATIONS_FOR_JIT = 12000;
+
+    /** A static field updated by method handle getters and setters. */
     private static String name = "default";
 
     private static void unreachable() {
         throw new Error("Unreachable");
     }
 
+    private static void assertEquals(Object expected, Object actual) {
+        if (!expected.equals(actual)) {
+            throw new AssertionError("Assertion failure: " + expected + " != " + actual);
+        }
+    }
+
     private static class LocalClass {
         public LocalClass() {}
 
@@ -52,15 +65,9 @@
         System.out.print("repeatConstMethodType0(");
         System.out.print(expected);
         System.out.println(")");
-        for (int i = 0; i < 12000; ++i) {
+        for (int i = 0; i < ITERATIONS_FOR_JIT; ++i) {
             MethodType actual = methodType0();
-            if (!actual.equals(expected)) {
-                System.out.print("Expected: ");
-                System.out.println(expected);
-                System.out.print("Actual: ");
-                System.out.println(actual);
-                unreachable();
-            }
+            assertEquals(expected, actual);
         }
     }
 
@@ -68,15 +75,9 @@
         System.out.print("repeatConstMethodType1(");
         System.out.print(expected);
         System.out.println(")");
-        for (int i = 0; i < 12000; ++i) {
+        for (int i = 0; i < ITERATIONS_FOR_JIT; ++i) {
             MethodType actual = methodType1();
-            if (!actual.equals(expected)) {
-                System.out.print("Expected: ");
-                System.out.println(expected);
-                System.out.print("Actual: ");
-                System.out.println(actual);
-                unreachable();
-            }
+            assertEquals(expected, actual);
         }
     }
 
@@ -107,6 +108,16 @@
 
     @ConstantMethodHandle(
             kind = ConstantMethodHandle.STATIC_GET,
+            owner = "Main",
+            fieldOrMethodName = "name",
+            descriptor = "Ljava/lang/String;")
+    private static MethodHandle getNameHandle() {
+        unreachable();
+        return null;
+    }
+
+    @ConstantMethodHandle(
+            kind = ConstantMethodHandle.STATIC_GET,
             owner = "java/lang/Math",
             fieldOrMethodName = "E",
             descriptor = "D")
@@ -125,6 +136,18 @@
         return null;
     }
 
+    private static void repeatConstMethodHandle() throws Throwable {
+        System.out.println("repeatConstMethodHandle()");
+        String[] values = {"A", "B", "C"};
+        for (int i = 0; i < ITERATIONS_FOR_JIT; ++i) {
+            String value = values[i % values.length];
+            setNameHandle().invoke(value);
+            String actual = (String) getNameHandle().invokeExact();
+            assertEquals(value, actual);
+            assertEquals(value, name);
+        }
+    }
+
     public static void main(String[] args) throws Throwable {
         System.out.println(methodType0());
         repeatConstMethodType0(
@@ -136,6 +159,7 @@
         System.out.print("name is ");
         System.out.println(name);
         System.out.println(getMathE().invoke());
+        repeatConstMethodHandle();
         try {
             putMathE().invokeExact(Math.PI);
             unreachable();
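
For readers unfamiliar with the @ConstantMethodHandle annotations used above: a STATIC_GET/STATIC_PUT constant method handle behaves like a field getter/setter handle built at runtime. A rough stand-alone equivalent using only the public MethodHandles API is sketched below (illustrative only; NameHandleSketch is a hypothetical class, not part of the test):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;

    public class NameHandleSketch {
        private static String name = "default";

        public static void main(String[] args) throws Throwable {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            MethodHandle setter =
                    lookup.findStaticSetter(NameHandleSketch.class, "name", String.class);
            MethodHandle getter =
                    lookup.findStaticGetter(NameHandleSketch.class, "name", String.class);
            setter.invoke("A");                            // like setNameHandle().invoke("A")
            String value = (String) getter.invokeExact();  // like getNameHandle().invokeExact()
            System.out.println(value);                     // prints "A"
        }
    }

The test's repeatConstMethodHandle() loop exercises the same get/set pairing often enough to give the JIT a chance to compile the const-method-handle bytecodes.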
diff --git a/test/988-method-trace/expected.txt b/test/988-method-trace/expected.txt
index 7f64e23..6e16722 100644
--- a/test/988-method-trace/expected.txt
+++ b/test/988-method-trace/expected.txt
@@ -130,8 +130,10 @@
 ....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
 ...<= public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
 ...=> public java.lang.String java.lang.StringBuilder.toString()
-....=> static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[])
-....<= static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[]) -> <class java.lang.String: Bad argument: -19 < 0>
+....=> public static java.lang.String java.lang.StringFactory.newStringFromChars(char[],int,int)
+.....=> static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[])
+.....<= static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[]) -> <class java.lang.String: Bad argument: -19 < 0>
+....<= public static java.lang.String java.lang.StringFactory.newStringFromChars(char[],int,int) -> <class java.lang.String: Bad argument: -19 < 0>
 ...<= public java.lang.String java.lang.StringBuilder.toString() -> <class java.lang.String: Bad argument: -19 < 0>
 ...=> public java.lang.Error(java.lang.String)
 ....=> public java.lang.Throwable(java.lang.String)
@@ -231,8 +233,10 @@
 ....<= public java.lang.AbstractStringBuilder java.lang.AbstractStringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
 ...<= public java.lang.StringBuilder java.lang.StringBuilder.append(java.lang.String) -> <class java.lang.StringBuilder: Bad argument: -19 < 0>
 ...=> public java.lang.String java.lang.StringBuilder.toString()
-....=> static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[])
-....<= static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[]) -> <class java.lang.String: Bad argument: -19 < 0>
+....=> public static java.lang.String java.lang.StringFactory.newStringFromChars(char[],int,int)
+.....=> static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[])
+.....<= static java.lang.String java.lang.StringFactory.newStringFromChars(int,int,char[]) -> <class java.lang.String: Bad argument: -19 < 0>
+....<= public static java.lang.String java.lang.StringFactory.newStringFromChars(char[],int,int) -> <class java.lang.String: Bad argument: -19 < 0>
 ...<= public java.lang.String java.lang.StringBuilder.toString() -> <class java.lang.String: Bad argument: -19 < 0>
 ...=> public java.lang.Error(java.lang.String)
 ....=> public java.lang.Throwable(java.lang.String)
diff --git a/test/999-redefine-hiddenapi/api-blacklist.txt b/test/999-redefine-hiddenapi/api-blacklist.txt
new file mode 100644
index 0000000..63e37aa
--- /dev/null
+++ b/test/999-redefine-hiddenapi/api-blacklist.txt
@@ -0,0 +1,2 @@
+Lart/Test999;->foo()V
+Lart/Test999;->bar:I
diff --git a/test/999-redefine-hiddenapi/build b/test/999-redefine-hiddenapi/build
new file mode 100644
index 0000000..f4b029f
--- /dev/null
+++ b/test/999-redefine-hiddenapi/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+USE_HIDDENAPI=true ./default-build "$@"
diff --git a/test/999-redefine-hiddenapi/expected.txt b/test/999-redefine-hiddenapi/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/999-redefine-hiddenapi/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/999-redefine-hiddenapi/info.txt b/test/999-redefine-hiddenapi/info.txt
new file mode 100644
index 0000000..87bc30c
--- /dev/null
+++ b/test/999-redefine-hiddenapi/info.txt
@@ -0,0 +1 @@
+Tests that JVMTI class redefinition does not strip away hidden API access flags.
diff --git a/test/999-redefine-hiddenapi/run b/test/999-redefine-hiddenapi/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/999-redefine-hiddenapi/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/999-redefine-hiddenapi/src-ex/Test999.java b/test/999-redefine-hiddenapi/src-ex/Test999.java
new file mode 100644
index 0000000..97495c5
--- /dev/null
+++ b/test/999-redefine-hiddenapi/src-ex/Test999.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Test999 {
+  public void foo() {
+    System.out.println("hello");
+  }
+
+  public int bar = 42;
+}
diff --git a/test/999-redefine-hiddenapi/src-redefine/art/Test999.java b/test/999-redefine-hiddenapi/src-redefine/art/Test999.java
new file mode 100644
index 0000000..c1b838c
--- /dev/null
+++ b/test/999-redefine-hiddenapi/src-redefine/art/Test999.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Test999 {
+  public void foo() {
+    System.out.println("Goodbye");
+  }
+
+  public int bar = 64;
+}
diff --git a/test/999-redefine-hiddenapi/src-redefine/gen.sh b/test/999-redefine-hiddenapi/src-redefine/gen.sh
new file mode 100755
index 0000000..6948cbb
--- /dev/null
+++ b/test/999-redefine-hiddenapi/src-redefine/gen.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TMP=`mktemp -d`
+
+CLASS "art/Test999"
+
+(cd "$TMP" && javac -d "${TMP}" "$DIR/${CLASS}.java" && d8 --output . "$TMP/${CLASS}.class")
+
+echo '  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode('
+base64 "${TMP}/${CLASS}.class" | sed -E 's/^/    "/' | sed ':a;N;$!ba;s/\n/" +\n/g' | sed -E '$ s/$/");/'
+echo '  private static final byte[] DEX_BYTES = Base64.getDecoder().decode('
+base64 "${TMP}/classes.dex" | sed -E 's/^/    "/' | sed ':a;N;$!ba;s/\n/" +\n/g' | sed -E '$ s/$/");/'
+
+rm -rf "$TMP"
diff --git a/test/999-redefine-hiddenapi/src/Main.java b/test/999-redefine-hiddenapi/src/Main.java
new file mode 100644
index 0000000..c6365ac
--- /dev/null
+++ b/test/999-redefine-hiddenapi/src/Main.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.lang.reflect.Method;
+import java.util.Base64;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+
+    // Run the initialization routine. This will enable hidden API checks in
+    // the runtime, in case they are not enabled by default.
+    init();
+
+    // Load the '-ex' APK and attach it to the boot class path.
+    appendToBootClassLoader(DEX_EXTRA);
+
+    // Find the test class in boot class loader and verify that its members are hidden.
+    Class<?> klass = Class.forName("art.Test999", true, BOOT_CLASS_LOADER);
+    assertMethodIsHidden(klass, "before redefinition");
+    assertFieldIsHidden(klass, "before redefinition");
+
+    // Redefine the class using JVMTI.
+    art.Redefinition.setTestConfiguration(art.Redefinition.Config.COMMON_REDEFINE);
+    art.Redefinition.doCommonClassRedefinition(klass, CLASS_BYTES, DEX_BYTES);
+
+    // Verify that the class members are still hidden.
+    assertMethodIsHidden(klass, "after redefinition");
+    assertFieldIsHidden(klass, "after redefinition");
+  }
+
+  private static void assertMethodIsHidden(Class<?> klass, String msg) throws Exception {
+    try {
+      klass.getDeclaredMethod("foo");
+      // Unexpected. Should have thrown NoSuchMethodException.
+      throw new Exception("Method should not be accessible " + msg);
+    } catch (NoSuchMethodException ex) {
+      // Expected.
+    }
+  }
+
+  private static void assertFieldIsHidden(Class<?> klass, String msg) throws Exception {
+    try {
+      klass.getDeclaredField("bar");
+      // Unexpected. Should have thrown NoSuchFieldException.
+      throw new Exception("Field should not be accessible " + msg);
+    } catch (NoSuchFieldException ex) {
+      // Expected.
+    }
+  }
+
+  private static final String DEX_EXTRA =
+      new File(System.getenv("DEX_LOCATION"), "999-redefine-hiddenapi-ex.jar").getAbsolutePath();
+
+  private static ClassLoader BOOT_CLASS_LOADER = Object.class.getClassLoader();
+
+  // Native functions. Note that these are implemented in 674-hiddenapi/hiddenapi.cc.
+  private static native void appendToBootClassLoader(String dexPath);
+  private static native void init();
+
+  /**
+   * base64 encoded class/dex file for
+   *
+   * public class Test999 {
+   *   public void foo() {
+   *     System.out.println("Goodbye");
+   *   }
+   *
+   *   public int bar = 64;
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+    "yv66vgAAADUAIAoABwARCQAGABIJABMAFAgAFQoAFgAXBwAYBwAZAQADYmFyAQABSQEABjxpbml0" +
+    "PgEAAygpVgEABENvZGUBAA9MaW5lTnVtYmVyVGFibGUBAANmb28BAApTb3VyY2VGaWxlAQAMVGVz" +
+    "dDk5OS5qYXZhDAAKAAsMAAgACQcAGgwAGwAcAQAHR29vZGJ5ZQcAHQwAHgAfAQALYXJ0L1Rlc3Q5" +
+    "OTkBABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lv" +
+    "L1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xh" +
+    "bmcvU3RyaW5nOylWACEABgAHAAAAAQABAAgACQAAAAIAAQAKAAsAAQAMAAAAJwACAAEAAAALKrcA" +
+    "ASoQQLUAArEAAAABAA0AAAAKAAIAAAATAAQAGAABAA4ACwABAAwAAAAlAAIAAQAAAAmyAAMSBLYA" +
+    "BbEAAAABAA0AAAAKAAIAAAAVAAgAFgABAA8AAAACABA=");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQD0dZ+IWxOi+cJDSWjfTnUerlZj1Lll3ONIAwAAcAAAAHhWNBIAAAAAAAAAAJwCAAAQ" +
+    "AAAAcAAAAAcAAACwAAAAAgAAAMwAAAACAAAA5AAAAAQAAAD0AAAAAQAAABQBAAAUAgAANAEAAIYB" +
+    "AACOAQAAlwEAAJoBAACpAQAAwAEAANQBAADoAQAA/AEAAAoCAAANAgAAEQIAABYCAAAbAgAAIAIA" +
+    "ACkCAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAACAAQAA" +
+    "AQAAAAsAAAAFAAIADQAAAAEAAAAAAAAAAQAAAAwAAAACAAEADgAAAAMAAAAAAAAAAQAAAAEAAAAD" +
+    "AAAAAAAAAAgAAAAAAAAAhwIAAAAAAAACAAEAAQAAAHQBAAAIAAAAcBADAAEAEwBAAFkQAAAOAAMA" +
+    "AQACAAAAeQEAAAgAAABiAAEAGgEBAG4gAgAQAA4AEwAOQAAVAA54AAAAAQAAAAQABjxpbml0PgAH" +
+    "R29vZGJ5ZQABSQANTGFydC9UZXN0OTk5OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9s" +
+    "YW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AAxUZXN0" +
+    "OTk5LmphdmEAAVYAAlZMAANiYXIAA2ZvbwADb3V0AAdwcmludGxuAFx+fkQ4eyJtaW4tYXBpIjox" +
+    "LCJzaGEtMSI6IjU2YzJlMzBmNTIzM2I4NDRmZjZkZGQ4N2ZiNTNkMzRmYjE3MjM3ZGYiLCJ2ZXJz" +
+    "aW9uIjoidjEuMi4xNS1kZXYifQAAAQEBAAEAgYAEtAIBAdQCAAAAAAAOAAAAAAAAAAEAAAAAAAAA" +
+    "AQAAABAAAABwAAAAAgAAAAcAAACwAAAAAwAAAAIAAADMAAAABAAAAAIAAADkAAAABQAAAAQAAAD0" +
+    "AAAABgAAAAEAAAAUAQAAASAAAAIAAAA0AQAAAyAAAAIAAAB0AQAAARAAAAEAAACAAQAAAiAAABAA" +
+    "AACGAQAAACAAAAEAAACHAgAAAxAAAAEAAACYAgAAABAAAAEAAACcAgAA");
+}
diff --git a/test/999-redefine-hiddenapi/src/art/Redefinition.java b/test/999-redefine-hiddenapi/src/art/Redefinition.java
new file mode 100644
index 0000000..1eec70b
--- /dev/null
+++ b/test/999-redefine-hiddenapi/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static final class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+}
diff --git a/test/Android.bp b/test/Android.bp
index 76189f6..7909bf8 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -64,6 +64,7 @@
         "libart-gtest",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
 
         "libbase",
         "libicuuc",
@@ -117,6 +118,7 @@
         "libartd-compiler",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
     ],
     static_libs: [
         "libgtest",
@@ -156,6 +158,7 @@
         "libartd-compiler",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
         "libbase",
         "libbacktrace",
     ],
@@ -187,6 +190,7 @@
         "libart",
         "libdexfile",
         "libprofile",
+        "libartbase",
     ],
 }
 
@@ -201,6 +205,7 @@
         "libartd",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
     ],
 }
 
@@ -322,6 +327,7 @@
         "libart",
         "libdexfile",
         "libprofile",
+        "libartbase",
     ],
 }
 
@@ -335,6 +341,7 @@
         "libartd",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
     ],
 }
 
@@ -370,22 +377,27 @@
 }
 
 art_cc_defaults {
-    name: "libtistress-defaults",
+    name: "libtistress-srcs",
     defaults: ["libartagent-defaults"],
     srcs: [
         "ti-stress/stress.cc",
     ],
+    header_libs: ["libopenjdkjvmti_headers"],
+}
+
+art_cc_defaults {
+    name: "libtistress-defaults",
+    defaults: ["libtistress-srcs"],
     shared_libs: [
         "libbase",
         "slicer",
     ],
-    header_libs: ["libopenjdkjvmti_headers"],
 }
 
 art_cc_test_library {
     name: "libtistress",
     defaults: ["libtistress-defaults"],
-    shared_libs: ["libart"],
+    shared_libs: ["libartbase"],
 }
 
 art_cc_test_library {
@@ -394,7 +406,30 @@
         "art_debug_defaults",
         "libtistress-defaults",
     ],
-    shared_libs: ["libartd"],
+    shared_libs: ["libartbased"],
+}
+
+art_cc_defaults {
+    name: "libtistress-static-defaults",
+    defaults: ["libtistress-srcs"],
+    static_libs: art_static_dependencies + [
+        "slicer",
+    ],
+}
+
+art_cc_test_library {
+    name: "libtistresss",
+    defaults: ["libtistress-static-defaults"],
+    static_libs: ["libartbase"],
+}
+
+art_cc_test_library {
+    name: "libtistressds",
+    defaults: [
+        "art_debug_defaults",
+        "libtistress-static-defaults"
+    ],
+    static_libs: ["libartbased"],
 }
 
 cc_defaults {
@@ -468,6 +503,7 @@
         "libart",
         "libdexfile",
         "libprofile",
+        "libartbase",
     ],
 }
 
@@ -481,6 +517,7 @@
         "libartd",
         "libdexfiled",
         "libprofiled",
+        "libartbased",
     ],
 }
 
diff --git a/test/etc/default-build b/test/etc/default-build
index 8bb898c..c61de0a 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -561,6 +561,11 @@
 if [[ -d classes-ex ]] && [ ${NEED_DEX} = "true" ]; then
   make_dex classes-ex
 
+  # Apply hiddenapi on the dex files if the test has API list file(s).
+  if [ ${USE_HIDDENAPI} = "true" -a ${HAS_HIDDENAPI_SPEC} = "true" ]; then
+    make_hiddenapi classes-ex.dex
+  fi
+
   # quick shuffle so that the stored name is "classes.dex"
   mv classes.dex classes-1.dex
   mv classes-ex.dex classes.dex
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index fad8011..1ba433e 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -861,9 +861,9 @@
     # System libraries needed by libarttestd.so
     PUBLIC_LIBS=libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
     if [ "$TEST_IS_NDEBUG" = "y" ]; then
-      PUBLIC_LIBS=$PUBLIC_LIBS:libart.so:libdexfile.so:libprofile.so
+      PUBLIC_LIBS=$PUBLIC_LIBS:libart.so:libdexfile.so:libprofile.so:libartbase.so
     else
-      PUBLIC_LIBS=$PUBLIC_LIBS:libartd.so:libdexfiled.so:libprofiled.so
+      PUBLIC_LIBS=$PUBLIC_LIBS:libartd.so:libdexfiled.so:libprofiled.so:libartbased.so
     fi
 
     # Create a script with the command. The command can get longer than the longest
diff --git a/test/knownfailures.json b/test/knownfailures.json
index f473a99..493582f 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -454,7 +454,8 @@
             "674-hiddenapi",
             "649-vdex-duplicate-method",
             "804-class-extends-itself",
-            "921-hello-failure"
+            "921-hello-failure",
+            "999-redefine-hiddenapi"
         ],
         "description": [
             "Tests that use illegal dex files or otherwise break dexter assumptions"
@@ -471,7 +472,8 @@
             "629-vdex-speed",
             "647-jni-get-field-id",
             "674-hiddenapi",
-            "944-transform-classloaders"
+            "944-transform-classloaders",
+            "999-redefine-hiddenapi"
         ],
         "description": [
             "Tests that use custom class loaders or other features not supported ",
@@ -649,12 +651,6 @@
         "description": ["Requires zip, which isn't available on device"]
     },
     {
-        "tests": "712-varhandle-invocations",
-        "variant": "speed-profile & debug & gcstress & target",
-        "bug": "b/73275005",
-        "description": ["Time out"]
-    },
-    {
         "tests": ["1941-dispose-stress", "522-checker-regression-monitor-exit"],
         "variant": "jvm",
         "bug": "b/73888836",
@@ -876,7 +872,6 @@
           "667-jit-jni-stub",
           "667-out-of-bounds",
           "668-aiobe",
-          "674-hiddenapi",
           "674-hotness-compiled",
           "674-vdex-uncompress",
           "675-checker-unverified-method",
@@ -954,8 +949,11 @@
     },
     {
         "tests": ["616-cha-unloading",
+                  "674-hiddenapi",
+                  "677-fsi2",
                   "678-quickening",
-                  "679-locks"],
+                  "679-locks",
+                  "999-redefine-hiddenapi"],
         "variant": "jvm",
         "description": ["Doesn't run on RI."]
     },
@@ -981,5 +979,11 @@
                   "991-field-trace-2"],
         "variant": "gcstress & debug & target",
         "description": ["Test can time out on gcstress with debug"]
+    },
+    {
+        "tests": ["080-oom-throw"],
+        "variant": "jit",
+        "bug": "b/77567088",
+        "description": ["Test throws exception before or during OOME."]
     }
 ]
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index bbe7465..0eba742 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -25,7 +25,6 @@
 #include <jni.h>
 
 #include "base/utils.h"
-#include "exec_utils.h"
 #include "jvmti.h"
 
 #pragma clang diagnostic push
@@ -920,4 +919,8 @@
   return 0;
 }
 
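+// Late attachment (e.g. 'am attach-agent') reuses the normal Agent_OnLoad startup path.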
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {
+  return Agent_OnLoad(vm, options, reserved);
+}
+
 }  // namespace art
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index 39e57bd..23cc917 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -31,6 +31,7 @@
     include_dirs: [
         "art/libartbase",
         "art/libdexfile",
+        "art/libartbase",
         "art/runtime",
     ],
     srcs: ["main.cc"],
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
index 08d5885..977d1ca 100644
--- a/tools/cpp-define-generator/constant_lockword.def
+++ b/tools/cpp-define-generator/constant_lockword.def
@@ -23,23 +23,29 @@
 #define DEFINE_LOCK_WORD_EXPR(macro_name, type, constant_field_name) \
   DEFINE_EXPR(LOCK_WORD_ ## macro_name, type, art::LockWord::constant_field_name)
 
+// FIXME: The naming is inconsistent; the `_SHIFTED` suffix matching `Shifted` is sometimes missing.
 DEFINE_LOCK_WORD_EXPR(STATE_SHIFT,               int32_t,  kStateShift)
-DEFINE_LOCK_WORD_EXPR(STATE_MASK,                uint32_t, kStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(STATE_MASK_SHIFTED,        uint32_t, kStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_SHIFT,  int32_t,  kReadBarrierStateShift)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK,   uint32_t,  kReadBarrierStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK,   uint32_t, kReadBarrierStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE,       int32_t,  kThinLockCountOne)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SIZE,      int32_t,  kThinLockCountSize)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SHIFT,     int32_t,  kThinLockCountShift)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_MASK_SHIFTED, uint32_t, kThinLockCountMaskShifted)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE,       uint32_t, kThinLockCountOne)
+DEFINE_LOCK_WORD_EXPR(THIN_LOCK_OWNER_MASK_SHIFTED, uint32_t, kThinLockOwnerMaskShifted)
 
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS, uint32_t, kStateForwardingAddress)
+DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS,  uint32_t, kStateForwardingAddress)
 DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_OVERFLOW, uint32_t, kStateForwardingAddressOverflow)
 DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_SHIFT, uint32_t, kForwardingAddressShift)
 
-DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED,   uint32_t,  kGCStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED,     uint32_t,  kGCStateMaskShifted)
 DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT,   int32_t,  kGCStateShift)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_SIZE,             int32_t,  kGCStateSize)
+DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT,            int32_t,  kGCStateShift)
 
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT, int32_t, kMarkBitStateShift)
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED, uint32_t, kMarkBitStateMaskShifted)
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT,            int32_t,  kMarkBitStateShift)
+DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED,     uint32_t, kMarkBitStateMaskShifted)
 
 #undef DEFINE_LOCK_WORD_EXPR
 
diff --git a/tools/dexanalyze/Android.bp b/tools/dexanalyze/Android.bp
index 2754e64..a229d73 100644
--- a/tools/dexanalyze/Android.bp
+++ b/tools/dexanalyze/Android.bp
@@ -37,6 +37,7 @@
     defaults: ["dexanalyze-defaults"],
     shared_libs: [
         "libdexfile",
+        "libartbase",
         "libbase",
     ],
 }
diff --git a/tools/dexanalyze/dexanalyze.cc b/tools/dexanalyze/dexanalyze.cc
index a5f647c..46c4852 100644
--- a/tools/dexanalyze/dexanalyze.cc
+++ b/tools/dexanalyze/dexanalyze.cc
@@ -15,6 +15,7 @@
  */
 
 #include <cstdint>
+#include <iostream>
 #include <set>
 #include <sstream>
 
@@ -29,7 +30,19 @@
 namespace art {
 
 class DexAnalyze {
-  static const int kExitCodeUsageError = 1;
+  static constexpr int kExitCodeUsageError = 1;
+  static constexpr int kExitCodeFailedToOpenFile = 2;
+  static constexpr int kExitCodeFailedToOpenDex = 3;
+  static constexpr int kExitCodeFailedToProcessDex = 4;
+
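+  // Minimal logger that prints only the message text to stdout, ignoring severity,
+  // tag and source location.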
+  static void StdoutLogger(android::base::LogId,
+                           android::base::LogSeverity,
+                           const char*,
+                           const char*,
+                           unsigned int,
+                           const char* message) {
+    std::cout << message << std::endl;
+  }
 
   static int Usage(char** argv) {
     LOG(ERROR)
@@ -53,6 +66,8 @@
           run_all_experiments_ = true;
         } else if (arg == "-count-indices") {
           exp_count_indices_ = true;
+        } else if (arg == "-analyze-strings") {
+          exp_analyze_strings_ = true;
         } else if (arg == "-d") {
           dump_per_input_dex_ = true;
         } else if (!arg.empty() && arg[0] == '-') {
@@ -72,6 +87,7 @@
     bool run_dex_file_verifier_ = true;
     bool dump_per_input_dex_ = false;
     bool exp_count_indices_ = false;
+    bool exp_analyze_strings_ = false;
     bool run_all_experiments_ = false;
     std::vector<std::string> filenames_;
   };
@@ -82,29 +98,36 @@
       if (options->run_all_experiments_ || options->exp_count_indices_) {
         experiments_.emplace_back(new CountDexIndices);
       }
+      if (options->run_all_experiments_ || options->exp_analyze_strings_) {
+        experiments_.emplace_back(new AnalyzeStrings);
+      }
     }
 
     bool ProcessDexFile(const DexFile& dex_file) {
       for (std::unique_ptr<Experiment>& experiment : experiments_) {
         experiment->ProcessDexFile(dex_file);
       }
+      total_size_ += dex_file.Size();
       ++dex_count_;
       return true;
     }
 
     void Dump(std::ostream& os) {
       for (std::unique_ptr<Experiment>& experiment : experiments_) {
-        experiment->Dump(os);
+        experiment->Dump(os, total_size_);
       }
     }
 
     const Options* const options_;
     std::vector<std::unique_ptr<Experiment>> experiments_;
     size_t dex_count_ = 0;
+    uint64_t total_size_ = 0u;
   };
 
  public:
   static int Run(int argc, char** argv) {
+    android::base::SetLogger(StdoutLogger);
+
     Options options;
     int result = options.Parse(argc, argv);
     if (result != 0) {
@@ -115,10 +138,10 @@
     Analysis cumulative(&options);
     for (const std::string& filename : options.filenames_) {
       std::string content;
-      // TODO: once added, use an api to android::base to read a std::vector<uint8_t>.
+      // TODO: once added, use an API to android::base to read a std::vector<uint8_t>.
       if (!android::base::ReadFileToString(filename.c_str(), &content)) {
         LOG(ERROR) << "ReadFileToString failed for " + filename << std::endl;
-        continue;
+        return kExitCodeFailedToOpenFile;
       }
       std::vector<std::unique_ptr<const DexFile>> dex_files;
       const DexFileLoader dex_file_loader;
@@ -130,14 +153,14 @@
                                    &error_msg,
                                    &dex_files)) {
         LOG(ERROR) << "OpenAll failed for " + filename << " with " << error_msg << std::endl;
-        continue;
+        return kExitCodeFailedToOpenDex;
       }
       for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
         if (options.dump_per_input_dex_) {
           Analysis current(&options);
           if (!current.ProcessDexFile(*dex_file)) {
             LOG(ERROR) << "Failed to process " << filename << " with error " << error_msg;
-            continue;
+            return kExitCodeFailedToProcessDex;
           }
           LOG(INFO) << "Analysis for " << dex_file->GetLocation() << std::endl;
           current.Dump(LOG_STREAM(INFO));
@@ -154,7 +177,6 @@
 }  // namespace art
 
 int main(int argc, char** argv) {
-  android::base::SetLogger(android::base::StderrLogger);
   return art::DexAnalyze::Run(argc, argv);
 }
 
diff --git a/tools/dexanalyze/dexanalyze_experiments.cc b/tools/dexanalyze/dexanalyze_experiments.cc
index e1f119d..f9bf45d 100644
--- a/tools/dexanalyze/dexanalyze_experiments.cc
+++ b/tools/dexanalyze/dexanalyze_experiments.cc
@@ -15,12 +15,110 @@
  */
 
 #include "dexanalyze_experiments.h"
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <iostream>
+#include <map>
+#include <vector>
+
+#include "android-base/stringprintf.h"
+#include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_instruction-inl.h"
 #include "dex/standard_dex_file.h"
+#include "dex/utf-inl.h"
 
 namespace art {
 
+std::string Percent(uint64_t value, uint64_t max) {
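+  // Guard against division by zero when no data has been processed yet.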
+  if (max == 0) {
+    ++max;
+  }
+  return android::base::StringPrintf("%" PRId64 "(%.2f%%)",
+                                     value,
+                                     static_cast<double>(value * 100) / static_cast<double>(max));
+}
+
+static size_t PrefixLen(const std::string& a, const std::string& b) {
+  size_t len = 0;
+  for (; len < a.length() && len < b.length() && a[len] == b[len]; ++len) {}
+  return len;
+}
+
+void AnalyzeStrings::ProcessDexFile(const DexFile& dex_file) {
+  std::vector<std::string> strings;
+  for (size_t i = 0; i < dex_file.NumStringIds(); ++i) {
+    uint32_t length = 0;
+    const char* data = dex_file.StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
+    // Analyze if the string has any UTF16 chars.
+    bool have_wide_char = false;
+    const char* ptr = data;
+    for (size_t j = 0; j < length; ++j) {
+      // Keep GetUtf16FromUtf8() on the left so that `ptr` always advances.
+      have_wide_char = GetUtf16FromUtf8(&ptr) >= 0x100 || have_wide_char;
+    }
+    if (have_wide_char) {
+      wide_string_bytes_ += 2 * length;
+    } else {
+      ascii_string_bytes_ += length;
+    }
+    string_data_bytes_ += ptr - data;
+
+    strings.push_back(data);
+  }
+  // Note that the strings are probably already sorted.
+  std::sort(strings.begin(), strings.end());
+
+  // Tunable parameters.
+  static const size_t kMinPrefixLen = 3;
+  static const size_t kPrefixConstantCost = 5;
+  static const size_t kPrefixIndexCost = 2;
+
+  // Calculate total shared prefix.
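+  // With the strings sorted, each string's longest shared prefix is with one of its
+  // immediate neighbours, so only adjacent pairs need to be compared.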
+  std::vector<size_t> shared_len;
+  std::set<std::string> prefixes;
+  for (size_t i = 0; i < strings.size(); ++i) {
+    size_t best_len = 0;
+    if (i > 0) {
+      best_len = std::max(best_len, PrefixLen(strings[i], strings[i - 1]));
+    }
+    if (i < strings.size() - 1) {
+      best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
+    }
+    std::string prefix;
+    if (best_len >= kMinPrefixLen) {
+      prefix = strings[i].substr(0, best_len);
+      prefixes.insert(prefix);
+      total_prefix_savings_ += prefix.length();
+    }
+    total_prefix_index_cost_ += kPrefixIndexCost;
+  }
+  total_num_prefixes_ += prefixes.size();
+  for (const std::string& s : prefixes) {
+    // 4 bytes for an offset, one for length.
+    total_prefix_dict_ += s.length();
+    total_prefix_table_ += kPrefixConstantCost;
+  }
+}
+
+void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
+  os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
+  os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
+  os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
+
+  // Prefix based strings.
+  os << "Total shared prefix bytes " << Percent(total_prefix_savings_, total_size) << "\n";
+  os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
+  os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
+  os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
+  int64_t net_savings = total_prefix_savings_;
+  net_savings -= total_prefix_dict_;
+  net_savings -= total_prefix_table_;
+  net_savings -= total_prefix_index_cost_;
+  os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
+  os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
+}
+
 void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
   num_string_ids_ += dex_file.NumStringIds();
   num_method_ids_ += dex_file.NumMethodIds();
@@ -29,85 +127,79 @@
   num_class_defs_ += dex_file.NumClassDefs();
   for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs(); ++class_def_index) {
     const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
-    const uint8_t* class_data = dex_file.GetClassData(class_def);
-    if (class_data == nullptr) {
-      continue;
-    }
-    ClassDataItemIterator it(dex_file, class_data);
-    it.SkipAllFields();
+    ClassAccessor accessor(dex_file, class_def);
     std::set<size_t> unique_method_ids;
     std::set<size_t> unique_string_ids;
-    while (it.HasNextMethod()) {
-      const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
-      if (code_item != nullptr) {
-        CodeItemInstructionAccessor instructions(dex_file, code_item);
-        const uint16_t* code_ptr = instructions.Insns();
-        dex_code_bytes_ += instructions.InsnsSizeInCodeUnits() * sizeof(code_ptr[0]);
-        for (const DexInstructionPcPair& inst : instructions) {
-          switch (inst->Opcode()) {
-            case Instruction::CONST_STRING: {
-              const dex::StringIndex string_index(inst->VRegB_21c());
-              unique_string_ids.insert(string_index.index_);
-              ++num_string_ids_from_code_;
-              break;
-            }
-            case Instruction::CONST_STRING_JUMBO: {
-              const dex::StringIndex string_index(inst->VRegB_31c());
-              unique_string_ids.insert(string_index.index_);
-              ++num_string_ids_from_code_;
-              break;
-            }
-            // Invoke cases.
-            case Instruction::INVOKE_VIRTUAL:
-            case Instruction::INVOKE_VIRTUAL_RANGE: {
-              bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE);
-              uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
-              if (dex_file.GetMethodId(method_idx).class_idx_ == class_def.class_idx_) {
-                ++same_class_virtual_;
-              } else {
-                ++other_class_virtual_;
-                unique_method_ids.insert(method_idx);
-              }
-              break;
-            }
-            case Instruction::INVOKE_DIRECT:
-            case Instruction::INVOKE_DIRECT_RANGE: {
-              bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE);
-              uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-              if (dex_file.GetMethodId(method_idx).class_idx_ == class_def.class_idx_) {
-                ++same_class_direct_;
-              } else {
-                ++other_class_direct_;
-                unique_method_ids.insert(method_idx);
-              }
-              break;
-            }
-            case Instruction::INVOKE_STATIC:
-            case Instruction::INVOKE_STATIC_RANGE: {
-              bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE);
-              uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-              if (dex_file.GetMethodId(method_idx).class_idx_ == class_def.class_idx_) {
-                ++same_class_static_;
-              } else {
-                ++other_class_static_;
-                unique_method_ids.insert(method_idx);
-              }
-              break;
-            }
-            default:
-              break;
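+    // Walk only the methods of the class; fields have no code and are not inspected here.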
+    accessor.VisitMethods([&](const ClassAccessor::Method& method) {
+      const DexFile::CodeItem* code_item = accessor.GetCodeItem(method);
+      if (code_item == nullptr) {
+        return;
+      }
+      CodeItemInstructionAccessor instructions(dex_file, code_item);
+      const uint16_t* code_ptr = instructions.Insns();
+      dex_code_bytes_ += instructions.InsnsSizeInCodeUnits() * sizeof(code_ptr[0]);
+      for (const DexInstructionPcPair& inst : instructions) {
+        switch (inst->Opcode()) {
+          case Instruction::CONST_STRING: {
+            const dex::StringIndex string_index(inst->VRegB_21c());
+            unique_string_ids.insert(string_index.index_);
+            ++num_string_ids_from_code_;
+            break;
           }
+          case Instruction::CONST_STRING_JUMBO: {
+            const dex::StringIndex string_index(inst->VRegB_31c());
+            unique_string_ids.insert(string_index.index_);
+            ++num_string_ids_from_code_;
+            break;
+          }
+          // Invoke cases.
+          case Instruction::INVOKE_VIRTUAL:
+          case Instruction::INVOKE_VIRTUAL_RANGE: {
+            bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE);
+            uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+            if (dex_file.GetMethodId(method_idx).class_idx_ == class_def.class_idx_) {
+              ++same_class_virtual_;
+            } else {
+              ++other_class_virtual_;
+              unique_method_ids.insert(method_idx);
+            }
+            break;
+          }
+          case Instruction::INVOKE_DIRECT:
+          case Instruction::INVOKE_DIRECT_RANGE: {
+            bool is_range = (inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE);
+            uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+            if (dex_file.GetMethodId(method_idx).class_idx_ == class_def.class_idx_) {
+              ++same_class_direct_;
+            } else {
+              ++other_class_direct_;
+              unique_method_ids.insert(method_idx);
+            }
+            break;
+          }
+          case Instruction::INVOKE_STATIC:
+          case Instruction::INVOKE_STATIC_RANGE: {
+            bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE);
+            uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+            if (dex_file.GetMethodId(method_idx).class_idx_ == class_def.class_idx_) {
+              ++same_class_static_;
+            } else {
+              ++other_class_static_;
+              unique_method_ids.insert(method_idx);
+            }
+            break;
+          }
+          default:
+            break;
         }
       }
-      it.Next();
-    }
-    DCHECK(!it.HasNext());
+    });
     total_unique_method_idx_ += unique_method_ids.size();
     total_unique_string_ids_ += unique_string_ids.size();
   }
 }
 
-void CountDexIndices::Dump(std::ostream& os) const {
+void CountDexIndices::Dump(std::ostream& os, uint64_t total_size) const {
   os << "Num string ids: " << num_string_ids_ << "\n";
   os << "Num method ids: " << num_method_ids_ << "\n";
   os << "Num field ids: " << num_field_ids_ << "\n";
@@ -127,6 +219,7 @@
   os << "Same class invoke: " << same_class_total << "\n";
   os << "Other class invoke: " << other_class_total << "\n";
   os << "Invokes from code: " << (same_class_total + other_class_total) << "\n";
+  os << "Total dex size: " << total_size << "\n";
 }
 
 }  // namespace art
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 5d0f51b..0fb4d32 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -24,12 +24,31 @@
 
 class DexFile;
 
+std::string Percent(uint64_t value, uint64_t max);
+
 // An experiment is a stateful visitor that runs on dex files. Results are cumulative.
 class Experiment {
  public:
   virtual ~Experiment() {}
   virtual void ProcessDexFile(const DexFile& dex_file) = 0;
-  virtual void Dump(std::ostream& os) const = 0;
+  virtual void Dump(std::ostream& os, uint64_t total_size) const = 0;
+};
+
+// Analyze string data and strings accessed from code.
+class AnalyzeStrings : public Experiment {
+ public:
+  void ProcessDexFile(const DexFile& dex_file);
+  void Dump(std::ostream& os, uint64_t total_size) const;
+
+ private:
+  int64_t wide_string_bytes_ = 0u;
+  int64_t ascii_string_bytes_ = 0u;
+  int64_t string_data_bytes_ = 0u;
+  int64_t total_prefix_savings_ = 0u;
+  int64_t total_prefix_dict_ = 0u;
+  int64_t total_prefix_table_ = 0u;
+  int64_t total_prefix_index_cost_ = 0u;
+  int64_t total_num_prefixes_ = 0u;
 };
 
 // Count numbers of dex indices.
@@ -37,7 +56,7 @@
  public:
   void ProcessDexFile(const DexFile& dex_file);
 
-  void Dump(std::ostream& os) const;
+  void Dump(std::ostream& os, uint64_t total_size) const;
 
  private:
   // Total string ids loaded from dex code.
@@ -65,4 +84,3 @@
 }  // namespace art
 
 #endif  // ART_TOOLS_DEXANALYZE_DEXANALYZE_EXPERIMENTS_H_
-
diff --git a/tools/dexanalyze/dexanalyze_test.cc b/tools/dexanalyze/dexanalyze_test.cc
index c9b8f53..96be3f9 100644
--- a/tools/dexanalyze/dexanalyze_test.cc
+++ b/tools/dexanalyze/dexanalyze_test.cc
@@ -36,10 +36,22 @@
   }
 };
 
+TEST_F(DexAnalyzeTest, NoInputFileGiven) {
+  DexAnalyzeExec({ "-a" }, /*expect_success*/ false);
+}
+
+TEST_F(DexAnalyzeTest, CantOpenInput) {
+  DexAnalyzeExec({ "-a", "/non/existent/path" }, /*expect_success*/ false);
+}
+
 TEST_F(DexAnalyzeTest, TestAnalyzeMultidex) {
   DexAnalyzeExec({ "-a", GetTestDexFileName("MultiDex") }, /*expect_success*/ true);
 }
 
+TEST_F(DexAnalyzeTest, TestAnalyzeCoreDex) {
+  DexAnalyzeExec({ "-a", GetLibCoreDexFileNames()[0] }, /*expect_success*/ true);
+}
+
 TEST_F(DexAnalyzeTest, TestInvalidArg) {
   DexAnalyzeExec({ "-invalid-option" }, /*expect_success*/ false);
 }
diff --git a/tools/hiddenapi/Android.bp b/tools/hiddenapi/Android.bp
index af87d31..3b364b6 100644
--- a/tools/hiddenapi/Android.bp
+++ b/tools/hiddenapi/Android.bp
@@ -40,6 +40,7 @@
     shared_libs: [
         "libart",
         "libdexfile",
+        "libartbase",
     ],
 }
 
@@ -52,6 +53,7 @@
     shared_libs: [
         "libartd",
         "libdexfiled",
+        "libartbased",
     ],
 }
 
diff --git a/tools/public.libraries.buildbot.txt b/tools/public.libraries.buildbot.txt
index 6c81145..9b171a2 100644
--- a/tools/public.libraries.buildbot.txt
+++ b/tools/public.libraries.buildbot.txt
@@ -1,5 +1,7 @@
 libart.so
 libartd.so
+libartbase.so
+libartbased.so
 libdexfile.so
 libdexfiled.so
 libbacktrace.so
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index d376cad..eebc092 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -360,7 +360,9 @@
 if [[ $mode == "host" ]]; then
   pkill -9 -f /bin/dalvikvm
 else
-  adb shell pkill -9 -f /bin/dalvikvm
+  # Tests may run on older Android versions where pkill requires "-l SIGNAL"
+  # rather than "-SIGNAL".
+  adb shell pkill -l 9 -f /bin/dalvikvm
 fi
 echo "Done."
 
diff --git a/tools/ti-fast/Android.bp b/tools/ti-fast/Android.bp
new file mode 100644
index 0000000..fd867c9
--- /dev/null
+++ b/tools/ti-fast/Android.bp
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+cc_defaults {
+    name: "tifast-defaults",
+    host_supported: true,
+    srcs: ["tifast.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it must
+    // be the same ISA as the process it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    header_libs: [
+        "libopenjdkjvmti_headers",
+    ],
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+    symlink_preferred_arch: true,
+}
+
+art_cc_library {
+    name: "libtifast",
+    defaults: ["tifast-defaults"],
+}
+
+art_cc_library {
+    name: "libtifastd",
+    defaults: [
+        "art_debug_defaults",
+        "tifast-defaults",
+    ],
+}
diff --git a/tools/ti-fast/README.md b/tools/ti-fast/README.md
new file mode 100644
index 0000000..bc46882
--- /dev/null
+++ b/tools/ti-fast/README.md
@@ -0,0 +1,101 @@
+# tifast
+
+tifast is a JVMTI agent designed for profiling the performance impact of listening
+to various JVMTI events. It is called tifast since none of the event handlers do
+anything, meaning it can be considered speed-of-light.
+
+# Usage
+### Build
+>    `make libtifast`
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples below
+assume you want to use the 64-bit version.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+>     `[log,][EventName1[,EventName2[,...]]]`
+
+* If 'log' is the first argument, the event handlers will LOG(INFO) when they are
+  called. This choice is fixed when the agent is loaded; the no-log handlers have
+  no branches and just immediately return.
+
+* The event-names are the same names as are used in the jvmtiEventCallbacks
+  struct.
+
+* All required capabilities are automatically gained. No capabilities other than
+  those needed to listen for the events are gained.
+
+* Only events which do not require additional function calls to cause delivery
+  and are sent more than once are supported.
+
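+For example, a hypothetical invocation that logs two of the supported events would
+pass the agent the following argument string:
+
+>    `-agentpath:libtifast.so=log,MethodEntry,MonitorWait`
+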
+#### Supported events
+
+The following events may be listened for with this agent
+
+* `SingleStep`
+
+* `MethodEntry`
+
+* `MethodExit`
+
+* `NativeMethodBind`
+
+* `Exception`
+
+* `ExceptionCatch`
+
+* `ThreadStart`
+
+* `ThreadEnd`
+
+* `ClassLoad`
+
+* `ClassPrepare`
+
+* `ClassFileLoadHook`
+
+* `CompiledMethodLoad`
+
+* `CompiledMethodUnload`
+
+* `DynamicCodeGenerated`
+
+* `DataDumpRequest`
+
+* `MonitorContendedEnter`
+
+* `MonitorContendedEntered`
+
+* `MonitorWait`
+
+* `MonitorWaited`
+
+* `ResourceExhausted`
+
+* `VMObjectAlloc`
+
+* `GarbageCollectionStart`
+
+* `GarbageCollectionFinish`
+
+All other events cannot be listened for by this agent. Most of these missing
+events either require the use of other functions in order to be called
+(`FramePop`, `ObjectFree`, etc) or are only called once (`VMInit`, `VMDeath`,
+etc).
+
+#### ART
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libtifast.so=MethodEntry' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* Both `-Xplugin` and `-agentpath` need to be used; otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+>    `adb shell setenforce 0`
+>
+>    `adb push $ANDROID_PRODUCT_OUT/system/lib64/libtifast.so /data/local/tmp/`
+>
+>    `adb shell am start-activity --attach-agent /data/local/tmp/libtifast.so=MonitorWait,ClassPrepare some.debuggable.apps/.the.app.MainActivity`
+
+#### RI
+>    `java '-agentpath:libtifast.so=MethodEntry' -cp tmp/helloworld/classes helloworld`
diff --git a/tools/ti-fast/tifast.cc b/tools/ti-fast/tifast.cc
new file mode 100644
index 0000000..428304e
--- /dev/null
+++ b/tools/ti-fast/tifast.cc
@@ -0,0 +1,205 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+
+#include <atomic>
+#include <iostream>
+#include <istream>
+#include <iomanip>
+#include <jni.h>
+#include <jvmti.h>
+#include <memory>
+#include <string>
+#include <sstream>
+#include <vector>
+
+namespace tifast {
+
+#define EVENT(x) JVMTI_EVENT_ ## x
+
+namespace {
+
+static void AddCapsForEvent(jvmtiEvent event, jvmtiCapabilities* caps) {
+  switch (event) {
+#define DO_CASE(name, cap_name) \
+    case EVENT(name):           \
+      caps->cap_name = 1;       \
+      break
+    DO_CASE(SINGLE_STEP, can_generate_single_step_events);
+    DO_CASE(METHOD_ENTRY, can_generate_method_entry_events);
+    DO_CASE(METHOD_EXIT, can_generate_method_exit_events);
+    DO_CASE(NATIVE_METHOD_BIND, can_generate_native_method_bind_events);
+    DO_CASE(EXCEPTION, can_generate_exception_events);
+    DO_CASE(EXCEPTION_CATCH, can_generate_exception_events);
+    DO_CASE(COMPILED_METHOD_LOAD, can_generate_compiled_method_load_events);
+    DO_CASE(COMPILED_METHOD_UNLOAD, can_generate_compiled_method_load_events);
+    DO_CASE(MONITOR_CONTENDED_ENTER, can_generate_monitor_events);
+    DO_CASE(MONITOR_CONTENDED_ENTERED, can_generate_monitor_events);
+    DO_CASE(MONITOR_WAIT, can_generate_monitor_events);
+    DO_CASE(MONITOR_WAITED, can_generate_monitor_events);
+    DO_CASE(VM_OBJECT_ALLOC, can_generate_vm_object_alloc_events);
+    DO_CASE(GARBAGE_COLLECTION_START, can_generate_garbage_collection_events);
+    DO_CASE(GARBAGE_COLLECTION_FINISH, can_generate_garbage_collection_events);
+#undef DO_CASE
+    default: break;
+  }
+}
+
+// Setup for all supported events. Pass a macro fun(name, event_num, args) to expand per event.
+#define FOR_ALL_SUPPORTED_EVENTS(fun) \
+    fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation)) \
+    fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv*, JNIEnv*, jthread, jmethodID)) \
+    fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jboolean, jvalue)) \
+    fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, void*, void**)) \
+    fun(Exception, EVENT(EXCEPTION), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation, jobject, jmethodID, jlocation)) \
+    fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation, jobject)) \
+    fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv*, JNIEnv*, jthread)) \
+    fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv*, JNIEnv*, jthread)) \
+    fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv*, JNIEnv*, jthread, jclass)) \
+    fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv*, JNIEnv*, jthread, jclass)) \
+    fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv*, JNIEnv*, jclass, jobject, const char*, jobject, jint, const unsigned char*, jint*, unsigned char**)) \
+    fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv*, jmethodID, jint, const void*, jint, const jvmtiAddrLocationMap*, const void*)) \
+    fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv*, jmethodID, const void*)) \
+    fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv*, const char*, const void*, jint)) \
+    fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv*)) \
+    fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv*, JNIEnv*, jthread, jobject)) \
+    fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv*, JNIEnv*, jthread, jobject)) \
+    fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv*, JNIEnv*, jthread, jobject, jlong)) \
+    fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv*, JNIEnv*, jthread, jobject, jboolean)) \
+    fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv*, JNIEnv*, jint, const void*, const char*)) \
+    fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv*, JNIEnv*, jthread, jobject, jclass, jlong)) \
+    fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv*)) \
+    fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv*))
+
+#define GENERATE_EMPTY_FUNCTION(name, number, args) \
+    static void JNICALL empty ## name  args { }
+FOR_ALL_SUPPORTED_EVENTS(GENERATE_EMPTY_FUNCTION)
+#undef GENERATE_EMPTY_FUNCTION
+
+static jvmtiEventCallbacks kEmptyCallbacks {
+#define CREATE_EMPTY_EVENT_CALLBACKS(name, num, args) \
+    .name = empty ## name,
+  FOR_ALL_SUPPORTED_EVENTS(CREATE_EMPTY_EVENT_CALLBACKS)
+#undef CREATE_EMPTY_EVENT_CALLBACKS
+};
+
+#define GENERATE_LOG_FUNCTION(name, number, args) \
+    static void JNICALL log ## name  args { \
+      LOG(INFO) << "Got event " << #name ; \
+    }
+FOR_ALL_SUPPORTED_EVENTS(GENERATE_LOG_FUNCTION)
+#undef GENERATE_LOG_FUNCTION
+
+static jvmtiEventCallbacks kLogCallbacks {
+#define CREATE_LOG_EVENT_CALLBACK(name, num, args) \
+    .name = log ## name,
+  FOR_ALL_SUPPORTED_EVENTS(CREATE_LOG_EVENT_CALLBACK)
+#undef CREATE_LOG_EVENT_CALLBACK
+};
+
+static jvmtiEvent NameToEvent(const std::string& desired_name) {
+#define CHECK_NAME(name, event, args) \
+  if (desired_name == #name) { \
+    return event; \
+  }
+  FOR_ALL_SUPPORTED_EVENTS(CHECK_NAME);
+  LOG(FATAL) << "Unknown event " << desired_name;
+  __builtin_unreachable();
+#undef CHECK_NAME
+}
+
+#undef FOR_ALL_SUPPORTED_EVENTS
+static std::vector<jvmtiEvent> GetRequestedEventList(const std::string& args) {
+  std::vector<jvmtiEvent> res;
+  std::stringstream args_stream(args);
+  std::string item;
+  while (std::getline(args_stream, item, ',')) {
+    if (item == "") {
+      continue;
+    }
+    res.push_back(NameToEvent(item));
+  }
+  return res;
+}
+
+}  // namespace
+
+static jint AgentStart(JavaVM* vm,
+                       char* options,
+                       void* reserved ATTRIBUTE_UNUSED) {
+  jvmtiEnv* jvmti = nullptr;
+  jvmtiError error = JVMTI_ERROR_NONE;
+  {
+    jint res = 0;
+    res = vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_1);
+
+    if (res != JNI_OK || jvmti == nullptr) {
+      LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+      return JNI_ERR;
+    }
+  }
+  std::string args(options);
+  bool is_log = false;
+  if (args.compare(0, 3, "log") == 0) {
+    is_log = true;
+    args = args.substr(3);
+  }
+
+  std::vector<jvmtiEvent> events = GetRequestedEventList(args);
+
+  jvmtiCapabilities caps{};
+  for (jvmtiEvent e : events) {
+    AddCapsForEvent(e, &caps);
+  }
+  error = jvmti->AddCapabilities(&caps);
+  if (error != JVMTI_ERROR_NONE) {
+    LOG(ERROR) << "Unable to set caps";
+    return JNI_ERR;
+  }
+
+  if (is_log) {
+    error = jvmti->SetEventCallbacks(&kLogCallbacks, static_cast<jint>(sizeof(kLogCallbacks)));
+  } else {
+    error = jvmti->SetEventCallbacks(&kEmptyCallbacks, static_cast<jint>(sizeof(kEmptyCallbacks)));
+  }
+  if (error != JVMTI_ERROR_NONE) {
+    LOG(ERROR) << "Unable to set event callbacks.";
+    return JNI_ERR;
+  }
+  for (jvmtiEvent e : events) {
+    error = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+                                            e,
+                                            nullptr /* all threads */);
+    if (error != JVMTI_ERROR_NONE) {
+      LOG(ERROR) << "Unable to enable event " << e;
+      return JNI_ERR;
+    }
+  }
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* reserved) {
+  return AgentStart(vm, options, reserved);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
+  return AgentStart(jvm, options, reserved);
+}
+
+}  // namespace tifast
+
diff --git a/tools/tracefast-plugin/Android.bp b/tools/tracefast-plugin/Android.bp
new file mode 100644
index 0000000..1d7dd30
--- /dev/null
+++ b/tools/tracefast-plugin/Android.bp
@@ -0,0 +1,108 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+    name: "tracefast-defaults",
+    host_supported: true,
+    srcs: ["tracefast.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it must
+    // be the same ISA as the process it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    target: {
+        android: {
+            shared_libs: [
+                "libcutils",
+            ],
+        },
+        darwin: {
+            enabled: false,
+        },
+    },
+    header_libs: [
+        "libnativehelper_header_only",
+    ],
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+    symlink_preferred_arch: true,
+}
+
+cc_defaults {
+    name: "tracefast-interpreter-defaults",
+    defaults: ["tracefast-defaults"],
+    cflags: ["-DTRACEFAST_INTERPRETER=1"],
+}
+
+cc_defaults {
+    name: "tracefast-trampoline-defaults",
+    defaults: ["tracefast-defaults"],
+    cflags: ["-DTRACEFAST_TRAMPOLINE=1"],
+}
+
+art_cc_library {
+    name: "libtracefast-interpreter",
+    defaults: ["tracefast-interpreter-defaults"],
+    shared_libs: [
+        "libart",
+        "libartbase",
+    ],
+}
+
+art_cc_library {
+    name: "libtracefast-interpreterd",
+    defaults: [
+        "art_debug_defaults",
+        "tracefast-interpreter-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libartbased",
+    ],
+}
+
+art_cc_library {
+    name: "libtracefast-trampoline",
+    defaults: ["tracefast-trampoline-defaults"],
+    shared_libs: [
+        "libart",
+        "libartbase",
+    ],
+}
+
+art_cc_library {
+    name: "libtracefast-trampolined",
+    defaults: [
+        "art_debug_defaults",
+        "tracefast-trampoline-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libartbased",
+    ],
+}
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
new file mode 100644
index 0000000..ed6ac3d
--- /dev/null
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/scoped_gc_critical_section.h"
+#include "instrumentation.h"
+#include "runtime.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace tracefast {
+
+#if ((!defined(TRACEFAST_INTERPRETER) && !defined(TRACEFAST_TRAMPOLINE)) || \
+     (defined(TRACEFAST_INTERPRETER) && defined(TRACEFAST_TRAMPOLINE)))
+#error Must set one of TRACEFAST_TRAMPOLINE or TRACEFAST_INTERPRETER during build
+#endif
+
+
+#ifdef TRACEFAST_INTERPRETER
+static constexpr const char* kTracerInstrumentationKey = "tracefast_INTERPRETER";
+static constexpr bool kNeedsInterpreter = true;
+#else  // defined(TRACEFAST_TRAMPOLINE)
+static constexpr const char* kTracerInstrumentationKey = "tracefast_TRAMPOLINE";
+static constexpr bool kNeedsInterpreter = false;
+#endif  // TRACEFAST_INTERPRETER
+
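+// An instrumentation listener whose callbacks are all no-ops; registering it makes the
+// runtime dispatch instrumentation events without doing any per-event work.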
+class Tracer FINAL : public art::instrumentation::InstrumentationListener {
+ public:
+  Tracer() {}
+
+  void MethodEntered(art::Thread* thread ATTRIBUTE_UNUSED,
+                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                     art::ArtMethod* method ATTRIBUTE_UNUSED,
+                     uint32_t dex_pc ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    const art::JValue& return_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void DexPcMoved(art::Thread* thread ATTRIBUTE_UNUSED,
+                  art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                  art::ArtMethod* method ATTRIBUTE_UNUSED,
+                  uint32_t new_dex_pc ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void FieldRead(art::Thread* thread ATTRIBUTE_UNUSED,
+                 art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                 art::ArtMethod* method ATTRIBUTE_UNUSED,
+                 uint32_t dex_pc ATTRIBUTE_UNUSED,
+                 art::ArtField* field ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    art::ArtField* field ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> field_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void FieldWritten(art::Thread* thread ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                    art::ArtMethod* method ATTRIBUTE_UNUSED,
+                    uint32_t dex_pc ATTRIBUTE_UNUSED,
+                    art::ArtField* field ATTRIBUTE_UNUSED,
+                    const art::JValue& field_value ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void ExceptionThrown(art::Thread* thread ATTRIBUTE_UNUSED,
+                       art::Handle<art::mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void ExceptionHandled(art::Thread* self ATTRIBUTE_UNUSED,
+                        art::Handle<art::mirror::Throwable> throwable ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void Branch(art::Thread* thread ATTRIBUTE_UNUSED,
+              art::ArtMethod* method ATTRIBUTE_UNUSED,
+              uint32_t dex_pc ATTRIBUTE_UNUSED,
+              int32_t dex_pc_offset ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
+                                art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
+                                art::ArtMethod* caller ATTRIBUTE_UNUSED,
+                                uint32_t dex_pc ATTRIBUTE_UNUSED,
+                                art::ArtMethod* callee ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+  void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
+                       const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) { }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Tracer);
+};
+
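+// A single listener whose callbacks are all no-ops: registering it exercises the instrumentation
+// paths without recording anything.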
+Tracer gEmptyTracer;
+
+static void StartTracing() REQUIRES(!art::Locks::mutator_lock_,
+                                    !art::Locks::thread_list_lock_,
+                                    !art::Locks::thread_suspend_count_lock_) {
+  art::Thread* self = art::Thread::Current();
+  art::Runtime* runtime = art::Runtime::Current();
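+  // Installing the listener and enabling tracing mutate global instrumentation state, so block
+  // GC and suspend all other threads while the update is installed.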
+  art::gc::ScopedGCCriticalSection gcs(self,
+                                       art::gc::kGcCauseInstrumentation,
+                                       art::gc::kCollectorTypeInstrumentation);
+  art::ScopedSuspendAll ssa("starting fast tracing");
+  runtime->GetInstrumentation()->AddListener(&gEmptyTracer,
+                                             art::instrumentation::Instrumentation::kMethodEntered |
+                                             art::instrumentation::Instrumentation::kMethodExited |
+                                             art::instrumentation::Instrumentation::kMethodUnwind);
+  runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey, kNeedsInterpreter);
+}
+
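+// Defers StartTracing() until the runtime reports the kInit phase, i.e. once it is fully
+// initialized and about to start running managed code.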
+class TraceFastPhaseCB : public art::RuntimePhaseCallback {
+ public:
+  TraceFastPhaseCB() {}
+
+  void NextRuntimePhase(art::RuntimePhaseCallback::RuntimePhase phase)
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (phase == art::RuntimePhaseCallback::RuntimePhase::kInit) {
+      art::ScopedThreadSuspension sts(art::Thread::Current(),
+                                      art::ThreadState::kWaitingForMethodTracingStart);
+      StartTracing();
+    }
+  }
+};
+TraceFastPhaseCB gPhaseCallback;
+
+// The plugin initialization function.
+extern "C" bool ArtPlugin_Initialize() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  art::Runtime* runtime = art::Runtime::Current();
+  art::ScopedThreadSuspension stsc(art::Thread::Current(),
+                                   art::ThreadState::kWaitingForMethodTracingStart);
+  art::ScopedSuspendAll ssa("Add phase callback");
+  runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gPhaseCallback);
+  return true;
+}
+
+extern "C" bool ArtPlugin_Deinitialize() {
+  // Don't need to bother doing anything.
+  return true;
+}
+
+}  // namespace tracefast
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index 570960c..5186c43 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -24,7 +24,11 @@
         "veridex.cc",
     ],
     cflags: ["-Wall", "-Werror"],
-    shared_libs: ["libdexfile", "libbase"],
+    shared_libs: [
+        "libdexfile",
+        "libartbase",
+        "libbase",
+    ],
     header_libs: [
         "art_libartbase_headers",
     ],
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index e2833bf..154c60f 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -112,7 +112,12 @@
       RegisterValue(RegisterSource::kNone, DexFileReference(nullptr, 0), cls);
 }
 
-const RegisterValue& VeriFlowAnalysis::GetRegister(uint32_t dex_register) {
+void VeriFlowAnalysis::UpdateRegister(uint32_t dex_register, int32_t value, const VeriClass* cls) {
+  current_registers_[dex_register] =
+      RegisterValue(RegisterSource::kConstant, value, DexFileReference(nullptr, 0), cls);
+}
+
+const RegisterValue& VeriFlowAnalysis::GetRegister(uint32_t dex_register) const {
   return current_registers_[dex_register];
 }
 
@@ -131,6 +136,49 @@
   return RegisterValue(RegisterSource::kField, DexFileReference(&dex_file, field_index), cls);
 }
 
+int VeriFlowAnalysis::GetBranchFlags(const Instruction& instruction) const {
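+  // For IF_* instructions whose operands are all known constants, evaluate the comparison here so
+  // that only the reachable successor is analyzed; otherwise return the opcode's default flags.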
+  switch (instruction.Opcode()) {
+    #define IF_XX(cond, op) \
+    case Instruction::IF_##cond: { \
+      RegisterValue lhs = GetRegister(instruction.VRegA()); \
+      RegisterValue rhs = GetRegister(instruction.VRegB()); \
+      if (lhs.IsConstant() && rhs.IsConstant()) { \
+        if (lhs.GetConstant() op rhs.GetConstant()) { \
+          return Instruction::kBranch; \
+        } else { \
+          return Instruction::kContinue; \
+        } \
+      } \
+      break; \
+    } \
+    case Instruction::IF_##cond##Z: { \
+      RegisterValue val = GetRegister(instruction.VRegA()); \
+      if (val.IsConstant()) { \
+        if (val.GetConstant() op 0) { \
+          return Instruction::kBranch; \
+        } else { \
+          return Instruction::kContinue; \
+        } \
+      } \
+      break; \
+    }
+
+    IF_XX(EQ, ==);
+    IF_XX(NE, !=);
+    IF_XX(LT, <);
+    IF_XX(LE, <=);
+    IF_XX(GT, >);
+    IF_XX(GE, >=);
+
+    #undef IF_XX
+
+    default:
+      break;
+  }
+
+  return Instruction::FlagsOf(instruction.Opcode());
+}
+
 void VeriFlowAnalysis::AnalyzeCode() {
   std::vector<uint32_t> work_list;
   work_list.push_back(0);
@@ -149,16 +197,17 @@
       ProcessDexInstruction(inst);
       SetVisited(dex_pc);
 
-      int opcode_flags = Instruction::FlagsOf(inst.Opcode());
-      if ((opcode_flags & Instruction::kContinue) != 0) {
-        if ((opcode_flags & Instruction::kBranch) != 0) {
+      int branch_flags = GetBranchFlags(inst);
+
+      if ((branch_flags & Instruction::kContinue) != 0) {
+        if ((branch_flags & Instruction::kBranch) != 0) {
           uint32_t branch_dex_pc = dex_pc + inst.GetTargetOffset();
           if (MergeRegisterValues(branch_dex_pc)) {
             work_list.push_back(branch_dex_pc);
           }
         }
         dex_pc += inst.SizeInCodeUnits();
-      } else if ((opcode_flags & Instruction::kBranch) != 0) {
+      } else if ((branch_flags & Instruction::kBranch) != 0) {
         dex_pc += inst.GetTargetOffset();
         DCHECK(IsBranchTarget(dex_pc));
       } else {
@@ -178,12 +227,30 @@
 
 void VeriFlowAnalysis::ProcessDexInstruction(const Instruction& instruction) {
   switch (instruction.Opcode()) {
-    case Instruction::CONST_4:
-    case Instruction::CONST_16:
-    case Instruction::CONST:
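+    // Track constants together with their value so that branches comparing them (for example
+    // against Build.VERSION.SDK_INT) can be resolved statically.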
+    case Instruction::CONST_4: {
+      int32_t register_index = instruction.VRegA();
+      int32_t value = instruction.VRegB_11n();
+      UpdateRegister(register_index, value, VeriClass::integer_);
+      break;
+    }
+    case Instruction::CONST_16: {
+      int32_t register_index = instruction.VRegA();
+      int32_t value = instruction.VRegB_21s();
+      UpdateRegister(register_index, value, VeriClass::integer_);
+      break;
+    }
+
+    case Instruction::CONST: {
+      int32_t register_index = instruction.VRegA();
+      int32_t value = instruction.VRegB_31i();
+      UpdateRegister(register_index, value, VeriClass::integer_);
+      break;
+    }
+
     case Instruction::CONST_HIGH16: {
       int32_t register_index = instruction.VRegA();
-      UpdateRegister(register_index, VeriClass::integer_);
+      // const/high16 places its 16-bit literal in the upper half of the register.
+      int32_t value = static_cast<int32_t>(static_cast<uint32_t>(instruction.VRegB_21h()) << 16);
+      UpdateRegister(register_index, value, VeriClass::integer_);
       break;
     }
 
@@ -268,6 +335,8 @@
     case Instruction::RETURN: {
       break;
     }
+
+    // The IF_* operations are handled when analyzing the control flow; see GetBranchFlags().
     #define IF_XX(cond) \
     case Instruction::IF_##cond: break; \
     case Instruction::IF_##cond##Z: break
@@ -279,6 +348,8 @@
     IF_XX(GT);
     IF_XX(GE);
 
+    #undef IF_XX
+
     case Instruction::GOTO:
     case Instruction::GOTO_16:
     case Instruction::GOTO_32: {
@@ -495,7 +566,13 @@
     case Instruction::SGET_BYTE:
     case Instruction::SGET_CHAR:
     case Instruction::SGET_SHORT: {
-      UpdateRegister(instruction.VRegA_22c(), GetFieldType(instruction.VRegC_22c()));
+      uint32_t dest_reg = instruction.VRegA_21c();
+      uint16_t field_index = instruction.VRegB_21c();
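+      // Model reads of Build.VERSION.SDK_INT as the constant target SDK version being analyzed.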
+      if (VeriClass::sdkInt_ != nullptr && resolver_->GetField(field_index) == VeriClass::sdkInt_) {
+        UpdateRegister(dest_reg, gTargetSdkVersion, VeriClass::integer_);
+      } else {
+        UpdateRegister(dest_reg, GetFieldType(field_index));
+      }
       break;
     }
 
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 62c9916..fc09360 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -35,6 +35,7 @@
   kMethod,
   kClass,
   kString,
+  kConstant,
   kNone
 };
 
@@ -44,28 +45,33 @@
 class RegisterValue {
  public:
   RegisterValue() : source_(RegisterSource::kNone),
-                    parameter_index_(0),
+                    value_(0),
                     reference_(nullptr, 0),
                     type_(nullptr) {}
   RegisterValue(RegisterSource source, DexFileReference reference, const VeriClass* type)
-      : source_(source), parameter_index_(0), reference_(reference), type_(type) {}
+      : source_(source), value_(0), reference_(reference), type_(type) {}
 
   RegisterValue(RegisterSource source,
-                uint32_t parameter_index,
+                uint32_t value,
                 DexFileReference reference,
                 const VeriClass* type)
-      : source_(source), parameter_index_(parameter_index), reference_(reference), type_(type) {}
+      : source_(source), value_(value), reference_(reference), type_(type) {}
 
   RegisterSource GetSource() const { return source_; }
   DexFileReference GetDexFileReference() const { return reference_; }
   const VeriClass* GetType() const { return type_; }
   uint32_t GetParameterIndex() const {
     CHECK(IsParameter());
-    return parameter_index_;
+    return value_;
+  }
+  int32_t GetConstant() const {
+    CHECK(IsConstant());
+    return static_cast<int32_t>(value_);
   }
   bool IsParameter() const { return source_ == RegisterSource::kParameter; }
   bool IsClass() const { return source_ == RegisterSource::kClass; }
   bool IsString() const { return source_ == RegisterSource::kString; }
+  bool IsConstant() const { return source_ == RegisterSource::kConstant; }
 
   std::string ToString() const {
     switch (source_) {
@@ -91,7 +97,7 @@
 
  private:
   RegisterSource source_;
-  uint32_t parameter_index_;
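+  // Parameter index for kParameter sources, constant value for kConstant sources.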
+  uint32_t value_;
   DexFileReference reference_;
   const VeriClass* type_;
 };
@@ -137,12 +143,15 @@
       uint32_t dex_register, RegisterSource kind, VeriClass* cls, uint32_t source_id);
   void UpdateRegister(uint32_t dex_register, const RegisterValue& value);
   void UpdateRegister(uint32_t dex_register, const VeriClass* cls);
+  void UpdateRegister(uint32_t dex_register, int32_t value, const VeriClass* cls);
   void ProcessDexInstruction(const Instruction& inst);
   void SetVisited(uint32_t dex_pc);
   RegisterValue GetFieldType(uint32_t field_index);
 
+  int GetBranchFlags(const Instruction& instruction) const;
+
  protected:
-  const RegisterValue& GetRegister(uint32_t dex_register);
+  const RegisterValue& GetRegister(uint32_t dex_register) const;
   RegisterValue GetReturnType(uint32_t method_index);
 
   VeridexResolver* resolver_;
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index dc7ea94..bcd4815 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -25,6 +25,7 @@
 #include "precise_hidden_api_finder.h"
 #include "resolver.h"
 
+#include <cstdlib>
 #include <sstream>
 
 namespace art {
@@ -62,6 +63,7 @@
 VeriMethod VeriClass::getDeclaredMethod_ = nullptr;
 VeriMethod VeriClass::getClass_ = nullptr;
 VeriMethod VeriClass::loadClass_ = nullptr;
+VeriField VeriClass::sdkInt_ = nullptr;
+
+// Definition of the target SDK version declared in veridex.h; overridden by --target-sdk-version
+// in ParseArgs.
+int gTargetSdkVersion = 1000;
 
 struct VeridexOptions {
   const char* dex_file = nullptr;
@@ -70,6 +72,7 @@
   const char* light_greylist = nullptr;
   const char* dark_greylist = nullptr;
   bool precise = true;
+  int target_sdk_version = 28; /* P */
 };
 
 static const char* Substr(const char* str, int index) {
@@ -91,6 +94,7 @@
   static const char* kDarkGreylistOption = "--dark-greylist=";
   static const char* kLightGreylistOption = "--light-greylist=";
   static const char* kImprecise = "--imprecise";
+  static const char* kTargetSdkVersion = "--target-sdk-version=";
 
   for (int i = 0; i < argc; ++i) {
     if (StartsWith(argv[i], kDexFileOption)) {
@@ -105,6 +109,8 @@
       options->light_greylist = Substr(argv[i], strlen(kLightGreylistOption));
     } else if (strcmp(argv[i], kImprecise) == 0) {
       options->precise = false;
+    } else if (StartsWith(argv[i], kTargetSdkVersion)) {
+      options->target_sdk_version = atoi(Substr(argv[i], strlen(kTargetSdkVersion)));
     }
   }
 }
@@ -124,6 +130,7 @@
   static int Run(int argc, char** argv) {
     VeridexOptions options;
     ParseArgs(&options, argc, argv);
+    gTargetSdkVersion = options.target_sdk_version;
 
     std::vector<std::string> boot_content;
     std::vector<std::string> app_content;
@@ -200,6 +207,11 @@
     VeriClass::loadClass_ = boot_resolvers[0]->LookupDeclaredMethodIn(
         *VeriClass::class_loader_, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
 
+    VeriClass* version = type_map["Landroid/os/Build$VERSION;"];
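+    // If android.os.Build$VERSION is present, resolve SDK_INT so that the flow analysis can model
+    // reads of it as a known constant.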
+    if (version != nullptr) {
+      VeriClass::sdkInt_ = boot_resolvers[0]->LookupFieldIn(*version, "SDK_INT", "I");
+    }
+
     std::vector<std::unique_ptr<VeridexResolver>> app_resolvers;
     Resolve(app_dex_files, resolver_map, type_map, &app_resolvers);
 
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
index 9c0a158..31ddbf4 100644
--- a/tools/veridex/veridex.h
+++ b/tools/veridex/veridex.h
@@ -24,6 +24,8 @@
 
 namespace art {
 
+// Target SDK version used when modeling reads of Build.VERSION.SDK_INT. Defined in veridex.cc and
+// set there after parsing options.
+extern int gTargetSdkVersion;
+
 /**
  * Abstraction for fields defined in dex files. Currently, that's a pointer into their
  * `encoded_field` description.
@@ -86,6 +88,8 @@
   static VeriMethod getClass_;
   static VeriMethod loadClass_;
 
+  static VeriField sdkInt_;
+
  private:
   Primitive::Type kind_;
   uint8_t dimensions_;