Add method info to oat files

The method info data is stored separately from the code info to
reduce oat size by improving deduplication of stack maps: method
indices differ even between otherwise identical methods, so keeping
them out of the code info lets more stack maps be shared.

To reduce code size, this CL moves the invoke info and inline info
method indices into the new table; the code info refers to them by a
small per-method index.
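
As a rough illustration (a minimal sketch only, not the actual
runtime/method_info.h layout; MethodInfoSketch and InternMethodIndex
are hypothetical names), the new table is essentially a deduplicated
list of dex method indices that stack maps and inline infos reference
by position:

  #include <cstdint>
  #include <vector>

  // Hypothetical per-method side table written next to the vmap table.
  struct MethodInfoSketch {
    std::vector<uint32_t> dex_method_indices;  // Deduplicated per method.

    // Returns the position of |dex_method_index|, appending it if new.
    // Stack maps and inline infos store this small position instead of
    // the full dex method index, so identical code infos can dedupe.
    uint32_t InternMethodIndex(uint32_t dex_method_index) {
      for (uint32_t i = 0; i < dex_method_indices.size(); ++i) {
        if (dex_method_indices[i] == dex_method_index) {
          return i;
        }
      }
      dex_method_indices.push_back(dex_method_index);
      return static_cast<uint32_t>(dex_method_indices.size()) - 1u;
    }

    uint32_t GetMethodIndex(uint32_t idx) const {
      return dex_method_indices[idx];
    }
  };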

Oat size for a large app (arm64): 77746816 -> 74023552 (-4.8%)
Average oat size reduction for golem (arm64): 2%

Repurposed the unused SrcMapElem deduplication support for MethodInfo.
TODO: Delete SrcMapElem in a follow-up CL.

Bug: 36124906

Test: clean-oat-host && test-art-host-run-test

Change-Id: I2241362e728389030b959f42161ce817cf6e2009
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 9a45379..8b30292 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -55,11 +55,17 @@
   // If the code size is 0 it means the method was skipped due to profile guided compilation.
   if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) {
     ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
-    uint32_t code_size = code.size();
+    const uint32_t code_size = code.size();
     ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
-    uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+    const uint32_t vmap_table_offset = vmap_table.empty() ? 0u
         : sizeof(OatQuickMethodHeader) + vmap_table.size();
+    // The method info is directly before the vmap table.
+    ArrayRef<const uint8_t> method_info = compiled_method->GetMethodInfo();
+    const uint32_t method_info_offset = method_info.empty() ? 0u
+        : vmap_table_offset + method_info.size();
+
     OatQuickMethodHeader method_header(vmap_table_offset,
+                                       method_info_offset,
                                        compiled_method->GetFrameSizeInBytes(),
                                        compiled_method->GetCoreSpillMask(),
                                        compiled_method->GetFpSpillMask(),
@@ -68,11 +74,12 @@
     header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
     std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
     const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet());
-    const size_t size = vmap_table.size() + sizeof(method_header) + code_size;
+    const size_t size = method_info.size() + vmap_table.size() + sizeof(method_header) + code_size;
     chunk->reserve(size + max_padding);
     chunk->resize(sizeof(method_header));
     memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
     chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+    chunk->insert(chunk->begin(), method_info.begin(), method_info.end());
     chunk->insert(chunk->end(), code.begin(), code.end());
     CHECK_EQ(chunk->size(), size);
     const void* unaligned_code_ptr = chunk->data() + (size - code_size);
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f06d90c..0d9021f 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -105,15 +105,15 @@
                                const size_t frame_size_in_bytes,
                                const uint32_t core_spill_mask,
                                const uint32_t fp_spill_mask,
-                               const ArrayRef<const SrcMapElem>& src_mapping_table,
+                               const ArrayRef<const uint8_t>& method_info,
                                const ArrayRef<const uint8_t>& vmap_table,
                                const ArrayRef<const uint8_t>& cfi_info,
                                const ArrayRef<const LinkerPatch>& patches)
     : CompiledCode(driver, instruction_set, quick_code),
-      frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
+      frame_size_in_bytes_(frame_size_in_bytes),
+      core_spill_mask_(core_spill_mask),
       fp_spill_mask_(fp_spill_mask),
-      src_mapping_table_(
-          driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)),
+      method_info_(driver->GetCompiledMethodStorage()->DeduplicateMethodInfo(method_info)),
       vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
       cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
       patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
@@ -126,7 +126,7 @@
     const size_t frame_size_in_bytes,
     const uint32_t core_spill_mask,
     const uint32_t fp_spill_mask,
-    const ArrayRef<const SrcMapElem>& src_mapping_table,
+    const ArrayRef<const uint8_t>& method_info,
     const ArrayRef<const uint8_t>& vmap_table,
     const ArrayRef<const uint8_t>& cfi_info,
     const ArrayRef<const LinkerPatch>& patches) {
@@ -139,7 +139,7 @@
                   frame_size_in_bytes,
                   core_spill_mask,
                   fp_spill_mask,
-                  src_mapping_table,
+                  method_info,
                   vmap_table,
                   cfi_info, patches);
   return ret;
@@ -156,7 +156,7 @@
   storage->ReleaseLinkerPatches(patches_);
   storage->ReleaseCFIInfo(cfi_info_);
   storage->ReleaseVMapTable(vmap_table_);
-  storage->ReleaseSrcMappingTable(src_mapping_table_);
+  storage->ReleaseMethodInfo(method_info_);
 }
 
 }  // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 00e2d62..2342257 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -420,7 +420,7 @@
                  const size_t frame_size_in_bytes,
                  const uint32_t core_spill_mask,
                  const uint32_t fp_spill_mask,
-                 const ArrayRef<const SrcMapElem>& src_mapping_table,
+                 const ArrayRef<const uint8_t>& method_info,
                  const ArrayRef<const uint8_t>& vmap_table,
                  const ArrayRef<const uint8_t>& cfi_info,
                  const ArrayRef<const LinkerPatch>& patches);
@@ -434,7 +434,7 @@
       const size_t frame_size_in_bytes,
       const uint32_t core_spill_mask,
       const uint32_t fp_spill_mask,
-      const ArrayRef<const SrcMapElem>& src_mapping_table,
+      const ArrayRef<const uint8_t>& method_info,
       const ArrayRef<const uint8_t>& vmap_table,
       const ArrayRef<const uint8_t>& cfi_info,
       const ArrayRef<const LinkerPatch>& patches);
@@ -453,8 +453,8 @@
     return fp_spill_mask_;
   }
 
-  ArrayRef<const SrcMapElem> GetSrcMappingTable() const {
-    return GetArray(src_mapping_table_);
+  ArrayRef<const uint8_t> GetMethodInfo() const {
+    return GetArray(method_info_);
   }
 
   ArrayRef<const uint8_t> GetVmapTable() const {
@@ -476,9 +476,9 @@
   const uint32_t core_spill_mask_;
   // For quick code, a bit mask describing spilled FPR callee-save registers.
   const uint32_t fp_spill_mask_;
-  // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset.
-  const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_;
-  // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
+  // For quick code, method-specific information that is not very dedupe-friendly (method indices).
+  const LengthPrefixedArray<uint8_t>* const method_info_;
+  // For quick code, the code info, which holds stack maps, inline information, etc.
   const LengthPrefixedArray<uint8_t>* const vmap_table_;
   // For quick code, a FDE entry for the debug_frame section.
   const LengthPrefixedArray<uint8_t>* const cfi_info_;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 76aeaa5..808e28c 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -370,7 +370,7 @@
         0,
         0,
         0,
-        ArrayRef<const SrcMapElem>(),                // src_mapping_table
+        ArrayRef<const uint8_t>(),                   // method_info
         ArrayRef<const uint8_t>(builder.GetData()),  // vmap_table
         ArrayRef<const uint8_t>(),                   // cfi data
         ArrayRef<const LinkerPatch>());
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index a0a8f81..e6a47ba 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -172,8 +172,8 @@
     : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
       dedupe_enabled_(true),
       dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
-      dedupe_src_mapping_table_("dedupe source mapping table",
-                                LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())),
+      dedupe_method_info_("dedupe method info",
+                          LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
       dedupe_vmap_table_("dedupe vmap table",
                          LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
       dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
@@ -207,13 +207,13 @@
   ReleaseArrayIfNotDeduplicated(code);
 }
 
-const LengthPrefixedArray<SrcMapElem>* CompiledMethodStorage::DeduplicateSrcMappingTable(
-    const ArrayRef<const SrcMapElem>& src_map) {
-  return AllocateOrDeduplicateArray(src_map, &dedupe_src_mapping_table_);
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMethodInfo(
+    const ArrayRef<const uint8_t>& method_info) {
+  return AllocateOrDeduplicateArray(method_info, &dedupe_method_info_);
 }
 
-void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map) {
-  ReleaseArrayIfNotDeduplicated(src_map);
+void CompiledMethodStorage::ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info) {
+  ReleaseArrayIfNotDeduplicated(method_info);
 }
 
 const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index 124b5a6..b833702 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -52,9 +52,9 @@
   const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
   void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);
 
-  const LengthPrefixedArray<SrcMapElem>* DeduplicateSrcMappingTable(
-      const ArrayRef<const SrcMapElem>& src_map);
-  void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map);
+  const LengthPrefixedArray<uint8_t>* DeduplicateMethodInfo(
+      const ArrayRef<const uint8_t>& method_info);
+  void ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info);
 
   const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
   void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
@@ -96,7 +96,7 @@
   bool dedupe_enabled_;
 
   ArrayDedupeSet<uint8_t> dedupe_code_;
-  ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_;
+  ArrayDedupeSet<uint8_t> dedupe_method_info_;
   ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
   ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
   ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index b72d0ac..6572d17 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -51,11 +51,11 @@
       ArrayRef<const uint8_t>(raw_code1),
       ArrayRef<const uint8_t>(raw_code2),
   };
-  const SrcMapElem raw_src_map1[] = { { 1u, 2u }, { 3u, 4u }, { 5u, 6u } };
-  const SrcMapElem raw_src_map2[] = { { 8u, 7u }, { 6u, 5u }, { 4u, 3u }, { 2u, 1u } };
-  ArrayRef<const SrcMapElem> src_map[] = {
-      ArrayRef<const SrcMapElem>(raw_src_map1),
-      ArrayRef<const SrcMapElem>(raw_src_map2),
+  const uint8_t raw_method_info_map1[] = { 1u, 2u, 3u, 4u, 5u, 6u };
+  const uint8_t raw_method_info_map2[] = { 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u };
+  ArrayRef<const uint8_t> method_info[] = {
+      ArrayRef<const uint8_t>(raw_method_info_map1),
+      ArrayRef<const uint8_t>(raw_method_info_map2),
   };
   const uint8_t raw_vmap_table1[] = { 2, 4, 6 };
   const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 };
@@ -85,7 +85,7 @@
   std::vector<CompiledMethod*> compiled_methods;
   compiled_methods.reserve(1u << 7);
   for (auto&& c : code) {
-    for (auto&& s : src_map) {
+    for (auto&& s : method_info) {
       for (auto&& v : vmap_table) {
         for (auto&& f : cfi_info) {
           for (auto&& p : patches) {
@@ -113,7 +113,7 @@
       bool same_patches = ((i ^ j) & patches_bit) == 0u;
       ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data())
           << i << " " << j;
-      ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data())
+      ASSERT_EQ(same_src_map, lhs->GetMethodInfo().data() == rhs->GetMethodInfo().data())
           << i << " " << j;
       ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data())
           << i << " " << j;
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index eac46e5..c975944 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -74,8 +74,8 @@
 
     fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size());
     MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
-    stack_maps.FillIn(stack_maps_region);
-    OatQuickMethodHeader method_header(stack_maps_offset, 4 * sizeof(void*), 0u, 0u, code_size);
+    stack_maps.FillInCodeInfo(stack_maps_region);
+    OatQuickMethodHeader method_header(stack_maps_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size);
     memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header));
     std::copy(fake_code_.begin(),
               fake_code_.end(),
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 3bd290d..68ec7bd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -660,8 +660,8 @@
                                                  frame_size,
                                                  main_jni_conv->CoreSpillMask(),
                                                  main_jni_conv->FpSpillMask(),
-                                                 ArrayRef<const SrcMapElem>(),
-                                                 ArrayRef<const uint8_t>(),  // vmap_table.
+                                                 /* method_info */ ArrayRef<const uint8_t>(),
+                                                 /* vmap_table */ ArrayRef<const uint8_t>(),
                                                  ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
                                                  ArrayRef<const LinkerPatch>());
 }
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 233daf4..908cb41 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -87,7 +87,7 @@
         /* frame_size_in_bytes */ 0u,
         /* core_spill_mask */ 0u,
         /* fp_spill_mask */ 0u,
-        /* src_mapping_table */ ArrayRef<const SrcMapElem>(),
+        /* method_info */ ArrayRef<const uint8_t>(),
         /* vmap_table */ ArrayRef<const uint8_t>(),
         /* cfi_info */ ArrayRef<const uint8_t>(),
         patches));
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 97b1374..ead4124 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -485,7 +485,7 @@
   // it is time to update OatHeader::kOatVersion
   EXPECT_EQ(72U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
-  EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
+  EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
   EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
             sizeof(QuickEntryPoints));
 }
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index afcdf5e..5406ae7 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -326,6 +326,7 @@
     size_relative_call_thunks_(0),
     size_misc_thunks_(0),
     size_vmap_table_(0),
+    size_method_info_(0),
     size_oat_dex_file_location_size_(0),
     size_oat_dex_file_location_data_(0),
     size_oat_dex_file_location_checksum_(0),
@@ -809,6 +810,7 @@
       DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
       OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
       uint32_t vmap_table_offset = method_header->GetVmapTableOffset();
+      uint32_t method_info_offset = method_header->GetMethodInfoOffset();
       // The code offset was 0 when the mapping/vmap table offset was set, so it's set
       // to 0-offset and we need to adjust it by code_offset.
       uint32_t code_offset = quick_code_offset - thumb_offset;
@@ -819,13 +821,18 @@
           vmap_table_offset += code_offset;
           DCHECK_LT(vmap_table_offset, code_offset);
         }
+        if (method_info_offset != 0u) {
+          method_info_offset += code_offset;
+          DCHECK_LT(method_info_offset, code_offset);
+        }
       } else {
+        CHECK(compiled_method->GetMethodInfo().empty());
         if (kIsVdexEnabled) {
           // We write the offset in the .vdex file.
           DCHECK_EQ(vmap_table_offset, 0u);
           vmap_table_offset = current_quickening_info_offset_;
-          ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
-          current_quickening_info_offset_ += map.size() * sizeof(map.front());
+          ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
+          current_quickening_info_offset_ += vmap_table.size() * sizeof(vmap_table.front());
         } else {
           // We write the offset of the quickening info relative to the code.
           vmap_table_offset += code_offset;
@@ -836,6 +843,7 @@
       uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
       uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
       *method_header = OatQuickMethodHeader(vmap_table_offset,
+                                            method_info_offset,
                                             frame_size_in_bytes,
                                             core_spill_mask,
                                             fp_spill_mask,
@@ -909,6 +917,9 @@
       if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) {
         return lhs->GetVmapTable().data() < rhs->GetVmapTable().data();
       }
+      if (UNLIKELY(lhs->GetMethodInfo().data() != rhs->GetMethodInfo().data())) {
+        return lhs->GetMethodInfo().data() < rhs->GetMethodInfo().data();
+      }
       if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) {
         return lhs->GetPatches().data() < rhs->GetPatches().data();
       }
@@ -983,6 +994,44 @@
   SafeMap<const uint8_t*, uint32_t> dedupe_map_;
 };
 
+class OatWriter::InitMethodInfoVisitor : public OatDexMethodVisitor {
+ public:
+  InitMethodInfoVisitor(OatWriter* writer, size_t offset) : OatDexMethodVisitor(writer, offset) {}
+
+  bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
+    CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+    if (compiled_method != nullptr) {
+      DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
+      DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset(), 0u);
+      ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo();
+      const uint32_t map_size = map.size() * sizeof(map[0]);
+      if (map_size != 0u) {
+        size_t offset = dedupe_map_.GetOrCreate(
+            map.data(),
+            [this, map_size]() {
+              uint32_t new_offset = offset_;
+              offset_ += map_size;
+              return new_offset;
+            });
+        // Code offset is not initialized yet, so set the method info offset to 0u-offset.
+        DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u);
+        oat_class->method_headers_[method_offsets_index_].SetMethodInfoOffset(0u - offset);
+      }
+      ++method_offsets_index_;
+    }
+
+    return true;
+  }
+
+ private:
+  // Deduplication is already done on a pointer basis by the compiler driver,
+  // so we can simply compare the pointers to find out if things are duplicated.
+  SafeMap<const uint8_t*, uint32_t> dedupe_map_;
+};
+
 class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
  public:
   InitImageMethodVisitor(OatWriter* writer, size_t offset)
@@ -1434,7 +1483,7 @@
     OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
-    if (compiled_method != nullptr) {  // ie. not an abstract method
+    if (compiled_method != nullptr) {  // i.e. not an abstract method
       size_t file_offset = file_offset_;
       OutputStream* out = out_;
 
@@ -1483,6 +1532,63 @@
   }
 };
 
+class OatWriter::WriteMethodInfoVisitor : public OatDexMethodVisitor {
+ public:
+  WriteMethodInfoVisitor(OatWriter* writer,
+                         OutputStream* out,
+                         const size_t file_offset,
+                         size_t relative_offset)
+    : OatDexMethodVisitor(writer, relative_offset),
+      out_(out),
+      file_offset_(file_offset) {}
+
+  bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+    OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
+    const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+    if (compiled_method != nullptr) {  // i.e. not an abstract method
+      size_t file_offset = file_offset_;
+      OutputStream* out = out_;
+      uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset();
+      uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_;
+      ++method_offsets_index_;
+      DCHECK((compiled_method->GetMethodInfo().size() == 0u && map_offset == 0u) ||
+             (compiled_method->GetMethodInfo().size() != 0u && map_offset != 0u))
+          << compiled_method->GetMethodInfo().size() << " " << map_offset << " "
+          << dex_file_->PrettyMethod(it.GetMemberIndex());
+      if (map_offset != 0u) {
+        // Transform map_offset to actual oat data offset.
+        map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset;
+        DCHECK_NE(map_offset, 0u);
+        DCHECK_LE(map_offset, offset_) << dex_file_->PrettyMethod(it.GetMemberIndex());
+
+        ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo();
+        size_t map_size = map.size() * sizeof(map[0]);
+        if (map_offset == offset_) {
+          // Write the deduplicated method info.
+          if (UNLIKELY(!out->WriteFully(map.data(), map_size))) {
+            ReportWriteFailure(it);
+            return false;
+          }
+          offset_ += map_size;
+        }
+      }
+      DCHECK_OFFSET_();
+    }
+
+    return true;
+  }
+
+ private:
+  OutputStream* const out_;
+  size_t const file_offset_;
+
+  void ReportWriteFailure(const ClassDataItemIterator& it) {
+    PLOG(ERROR) << "Failed to write map for "
+        << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation();
+  }
+};
+
 // Visit all methods from all classes in all dex files with the specified visitor.
 bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
   for (const DexFile* dex_file : *dex_files_) {
@@ -1576,11 +1682,18 @@
   if (!compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) {
     return offset;
   }
-  InitMapMethodVisitor visitor(this, offset);
-  bool success = VisitDexMethods(&visitor);
-  DCHECK(success);
-  offset = visitor.GetOffset();
-
+  {
+    InitMapMethodVisitor visitor(this, offset);
+    bool success = VisitDexMethods(&visitor);
+    DCHECK(success);
+    offset = visitor.GetOffset();
+  }
+  {
+    InitMethodInfoVisitor visitor(this, offset);
+    bool success = VisitDexMethods(&visitor);
+    DCHECK(success);
+    offset = visitor.GetOffset();
+  }
   return offset;
 }
 
@@ -1920,6 +2033,7 @@
     DO_STAT(size_relative_call_thunks_);
     DO_STAT(size_misc_thunks_);
     DO_STAT(size_vmap_table_);
+    DO_STAT(size_method_info_);
     DO_STAT(size_oat_dex_file_location_size_);
     DO_STAT(size_oat_dex_file_location_data_);
     DO_STAT(size_oat_dex_file_location_checksum_);
@@ -2035,13 +2149,24 @@
 }
 
 size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
-  size_t vmap_tables_offset = relative_offset;
-  WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset);
-  if (UNLIKELY(!VisitDexMethods(&visitor))) {
-    return 0;
+  {
+    size_t vmap_tables_offset = relative_offset;
+    WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset);
+    if (UNLIKELY(!VisitDexMethods(&visitor))) {
+      return 0;
+    }
+    relative_offset = visitor.GetOffset();
+    size_vmap_table_ = relative_offset - vmap_tables_offset;
   }
-  relative_offset = visitor.GetOffset();
-  size_vmap_table_ = relative_offset - vmap_tables_offset;
+  {
+    size_t method_infos_offset = relative_offset;
+    WriteMethodInfoVisitor visitor(this, out, file_offset, relative_offset);
+    if (UNLIKELY(!VisitDexMethods(&visitor))) {
+      return 0;
+    }
+    relative_offset = visitor.GetOffset();
+    size_method_info_ = relative_offset - method_infos_offset;
+  }
 
   return relative_offset;
 }
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 5113714..e778f75 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -254,9 +254,11 @@
   class InitOatClassesMethodVisitor;
   class InitCodeMethodVisitor;
   class InitMapMethodVisitor;
+  class InitMethodInfoVisitor;
   class InitImageMethodVisitor;
   class WriteCodeMethodVisitor;
   class WriteMapMethodVisitor;
+  class WriteMethodInfoVisitor;
   class WriteQuickeningInfoMethodVisitor;
 
   // Visit all the methods in all the compiled dex files in their definition order
@@ -425,6 +427,7 @@
   uint32_t size_relative_call_thunks_;
   uint32_t size_misc_thunks_;
   uint32_t size_vmap_table_;
+  uint32_t size_method_info_;
   uint32_t size_oat_dex_file_location_size_;
   uint32_t size_oat_dex_file_location_data_;
   uint32_t size_oat_dex_file_location_checksum_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 424b850..b7c8075 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -654,8 +654,12 @@
   }
 }
 
-size_t CodeGenerator::ComputeStackMapsSize() {
-  return stack_map_stream_.PrepareForFillIn();
+void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
+                                                     size_t* method_info_size) {
+  DCHECK(stack_map_size != nullptr);
+  DCHECK(method_info_size != nullptr);
+  *stack_map_size = stack_map_stream_.PrepareForFillIn();
+  *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
 }
 
 static void CheckCovers(uint32_t dex_pc,
@@ -723,10 +727,13 @@
   }
 }
 
-void CodeGenerator::BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item) {
-  stack_map_stream_.FillIn(region);
+void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
+                                   MemoryRegion method_info_region,
+                                   const DexFile::CodeItem& code_item) {
+  stack_map_stream_.FillInCodeInfo(stack_map_region);
+  stack_map_stream_.FillInMethodInfo(method_info_region);
   if (kIsDebugBuild) {
-    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(region), code_item);
+    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
   }
 }
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b912672..ea463ee 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -341,8 +341,10 @@
     slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
   }
 
-  void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
-  size_t ComputeStackMapsSize();
+  void BuildStackMaps(MemoryRegion stack_map_region,
+                      MemoryRegion method_info_region,
+                      const DexFile::CodeItem& code_item);
+  void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size);
   size_t GetNumberOfJitRoots() const {
     return jit_string_roots_.size() + jit_class_roots_.size();
   }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6153b0..8b1b04b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -856,8 +857,15 @@
                                          const DexFile::CodeItem* code_item) const {
   ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
   ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
-  stack_map.resize(codegen->ComputeStackMapsSize());
-  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item);
+  ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps));
+  size_t stack_map_size = 0;
+  size_t method_info_size = 0;
+  codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
+  stack_map.resize(stack_map_size);
+  method_info.resize(method_info_size);
+  codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()),
+                          MemoryRegion(method_info.data(), method_info.size()),
+                          *code_item);
 
   CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
       compiler_driver,
@@ -869,7 +877,7 @@
       codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
       codegen->GetCoreSpillMask(),
       codegen->GetFpuSpillMask(),
-      ArrayRef<const SrcMapElem>(),
+      ArrayRef<const uint8_t>(method_info),
       ArrayRef<const uint8_t>(stack_map),
       ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
       ArrayRef<const LinkerPatch>(linker_patches));
@@ -1200,7 +1208,9 @@
     }
   }
 
-  size_t stack_map_size = codegen->ComputeStackMapsSize();
+  size_t stack_map_size = 0;
+  size_t method_info_size = 0;
+  codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size);
   size_t number_of_roots = codegen->GetNumberOfJitRoots();
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
@@ -1216,20 +1226,30 @@
     return false;
   }
   uint8_t* stack_map_data = nullptr;
+  uint8_t* method_info_data = nullptr;
   uint8_t* roots_data = nullptr;
-  uint32_t data_size = code_cache->ReserveData(
-      self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data);
+  uint32_t data_size = code_cache->ReserveData(self,
+                                               stack_map_size,
+                                               method_info_size,
+                                               number_of_roots,
+                                               method,
+                                               &stack_map_data,
+                                               &method_info_data,
+                                               &roots_data);
   if (stack_map_data == nullptr || roots_data == nullptr) {
     return false;
   }
   MaybeRecordStat(MethodCompilationStat::kCompiled);
-  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), *code_item);
+  codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size),
+                          MemoryRegion(method_info_data, method_info_size),
+                          *code_item);
   codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
 
   const void* code = code_cache->CommitCode(
       self,
       method,
       stack_map_data,
+      method_info_data,
       roots_data,
       codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
       codegen->GetCoreSpillMask(),
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 4d12ad6..b7840d7 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -152,6 +152,9 @@
   encoding.location_catalog.num_entries = location_catalog_entries_.size();
   encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize();
   encoding.inline_info.num_entries = inline_infos_.size();
+  // Must run before ComputeInlineInfoEncoding, which requires dex_method_index_idx
+  // to be filled in.
+  PrepareMethodIndices();
   ComputeInlineInfoEncoding(&encoding.inline_info.encoding,
                             encoding.dex_register_map.num_bytes);
   CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset();
@@ -245,7 +248,7 @@
     for (size_t j = 0; j < entry.inlining_depth; ++j) {
       InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
       if (inline_entry.method == nullptr) {
-        method_index_max = std::max(method_index_max, inline_entry.method_index);
+        method_index_max = std::max(method_index_max, inline_entry.dex_method_index_idx);
         extra_data_max = std::max(extra_data_max, 1u);
       } else {
         method_index_max = std::max(
@@ -288,7 +291,25 @@
   return entry.offset;
 }
 
-void StackMapStream::FillIn(MemoryRegion region) {
+void StackMapStream::FillInMethodInfo(MemoryRegion region) {
+  {
+    MethodInfo info(region.begin(), method_indices_.size());
+    for (size_t i = 0; i < method_indices_.size(); ++i) {
+      info.SetMethodIndex(i, method_indices_[i]);
+    }
+  }
+  if (kIsDebugBuild) {
+    // Check the data matches.
+    MethodInfo info(region.begin());
+    const size_t count = info.NumMethodIndices();
+    DCHECK_EQ(count, method_indices_.size());
+    for (size_t i = 0; i < count; ++i) {
+      DCHECK_EQ(info.GetMethodIndex(i), method_indices_[i]);
+    }
+  }
+}
+
+void StackMapStream::FillInCodeInfo(MemoryRegion region) {
   DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
   DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
 
@@ -345,7 +366,7 @@
       InvokeInfo invoke_info(code_info.GetInvokeInfo(encoding, invoke_info_idx));
       invoke_info.SetNativePcCodeOffset(encoding.invoke_info.encoding, entry.native_pc_code_offset);
       invoke_info.SetInvokeType(encoding.invoke_info.encoding, entry.invoke_type);
-      invoke_info.SetMethodIndex(encoding.invoke_info.encoding, entry.dex_method_index);
+      invoke_info.SetMethodIndexIdx(encoding.invoke_info.encoding, entry.dex_method_index_idx);
       ++invoke_info_idx;
     }
 
@@ -364,7 +385,7 @@
       for (size_t depth = 0; depth < entry.inlining_depth; ++depth) {
         InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index];
         if (inline_entry.method != nullptr) {
-          inline_info.SetMethodIndexAtDepth(
+          inline_info.SetMethodIndexIdxAtDepth(
               encoding.inline_info.encoding,
               depth,
               High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
@@ -373,9 +394,9 @@
               depth,
               Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method)));
         } else {
-          inline_info.SetMethodIndexAtDepth(encoding.inline_info.encoding,
-                                            depth,
-                                            inline_entry.method_index);
+          inline_info.SetMethodIndexIdxAtDepth(encoding.inline_info.encoding,
+                                               depth,
+                                               inline_entry.dex_method_index_idx);
           inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1);
         }
         inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc);
@@ -533,6 +554,29 @@
   return dedupe.size();
 }
 
+void StackMapStream::PrepareMethodIndices() {
+  CHECK(method_indices_.empty());
+  method_indices_.resize(stack_maps_.size() + inline_infos_.size());
+  ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+  for (StackMapEntry& stack_map : stack_maps_) {
+    const size_t index = dedupe.size();
+    const uint32_t method_index = stack_map.dex_method_index;
+    if (method_index != DexFile::kDexNoIndex) {
+      stack_map.dex_method_index_idx = dedupe.emplace(method_index, index).first->second;
+      method_indices_[index] = method_index;
+    }
+  }
+  for (InlineInfoEntry& inline_info : inline_infos_) {
+    const size_t index = dedupe.size();
+    const uint32_t method_index = inline_info.method_index;
+    CHECK_NE(method_index, DexFile::kDexNoIndex);
+    inline_info.dex_method_index_idx = dedupe.emplace(method_index, index).first->second;
+    method_indices_[index] = method_index;
+  }
+  method_indices_.resize(dedupe.size());
+}
+
 size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) {
   // Preallocate memory since we do not want it to move (the dedup map will point into it).
   const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -590,7 +634,8 @@
       DCHECK_EQ(invoke_info.GetNativePcOffset(encoding.invoke_info.encoding, instruction_set_),
                 entry.native_pc_code_offset.Uint32Value(instruction_set_));
       DCHECK_EQ(invoke_info.GetInvokeType(encoding.invoke_info.encoding), entry.invoke_type);
-      DCHECK_EQ(invoke_info.GetMethodIndex(encoding.invoke_info.encoding), entry.dex_method_index);
+      DCHECK_EQ(invoke_info.GetMethodIndexIdx(encoding.invoke_info.encoding),
+                entry.dex_method_index_idx);
       invoke_info_index++;
     }
     CheckDexRegisterMap(code_info,
@@ -615,8 +660,10 @@
           DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d),
                     inline_entry.method);
         } else {
-          DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info.encoding, d),
-                    inline_entry.method_index);
+          const size_t method_index_idx =
+              inline_info.GetMethodIndexIdxAtDepth(encoding.inline_info.encoding, d);
+          DCHECK_EQ(method_index_idx, inline_entry.dex_method_index_idx);
+          DCHECK_EQ(method_indices_[method_index_idx], inline_entry.method_index);
         }
 
         CheckDexRegisterMap(code_info,
@@ -633,4 +680,9 @@
   }
 }
 
+size_t StackMapStream::ComputeMethodInfoSize() const {
+  DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before " << __FUNCTION__;
+  return MethodInfo::ComputeSize(method_indices_.size());
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 4225a87..e6471e1 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -22,6 +22,7 @@
 #include "base/hash_map.h"
 #include "base/value_object.h"
 #include "memory_region.h"
+#include "method_info.h"
 #include "nodes.h"
 #include "stack_map.h"
 
@@ -70,6 +71,7 @@
         inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
         stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
         register_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
+        method_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
         dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
         stack_mask_max_(-1),
         dex_pc_max_(0),
@@ -120,6 +122,7 @@
     size_t dex_register_map_index;
     InvokeType invoke_type;
     uint32_t dex_method_index;
+    uint32_t dex_method_index_idx;  // Index into dex method index table.
   };
 
   struct InlineInfoEntry {
@@ -128,6 +131,7 @@
     uint32_t method_index;
     DexRegisterMapEntry dex_register_entry;
     size_t dex_register_map_index;
+    uint32_t dex_method_index_idx;  // Index into the dex method index table.
   };
 
   void BeginStackMapEntry(uint32_t dex_pc,
@@ -164,7 +168,10 @@
   // Prepares the stream to fill in a memory region. Must be called before FillIn.
   // Returns the size (in bytes) needed to store this stream.
   size_t PrepareForFillIn();
-  void FillIn(MemoryRegion region);
+  void FillInCodeInfo(MemoryRegion region);
+  void FillInMethodInfo(MemoryRegion region);
+
+  size_t ComputeMethodInfoSize() const;
 
  private:
   size_t ComputeDexRegisterLocationCatalogSize() const;
@@ -180,6 +187,9 @@
   // Returns the number of unique register masks.
   size_t PrepareRegisterMasks();
 
+  // Prepare and deduplicate method indices.
+  void PrepareMethodIndices();
+
   // Deduplicate entry if possible and return the corresponding index into dex_register_entries_
   // array. If entry is not a duplicate, a new entry is added to dex_register_entries_.
   size_t AddDexRegisterMapEntry(const DexRegisterMapEntry& entry);
@@ -232,6 +242,7 @@
   ArenaVector<InlineInfoEntry> inline_infos_;
   ArenaVector<uint8_t> stack_masks_;
   ArenaVector<uint32_t> register_masks_;
+  ArenaVector<uint32_t> method_indices_;
   ArenaVector<DexRegisterMapEntry> dex_register_entries_;
   int stack_mask_max_;
   uint32_t dex_pc_max_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 330f7f2..a842c6e 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -60,7 +60,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -173,7 +173,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -433,7 +433,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -519,7 +519,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -611,7 +611,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -672,7 +672,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo ci(region);
   CodeInfoEncoding encoding = ci.ExtractEncoding();
@@ -721,7 +721,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -823,7 +823,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo ci(region);
   CodeInfoEncoding encoding = ci.ExtractEncoding();
@@ -950,7 +950,7 @@
   size_t size = stream.PrepareForFillIn();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
   MemoryRegion region(memory, size);
-  stream.FillIn(region);
+  stream.FillInCodeInfo(region);
 
   CodeInfo code_info(region);
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -979,11 +979,16 @@
   stream.AddInvoke(kDirect, 65535);
   stream.EndStackMapEntry();
 
-  const size_t size = stream.PrepareForFillIn();
-  MemoryRegion region(arena.Alloc(size, kArenaAllocMisc), size);
-  stream.FillIn(region);
+  const size_t code_info_size = stream.PrepareForFillIn();
+  MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
+  stream.FillInCodeInfo(code_info_region);
 
-  CodeInfo code_info(region);
+  const size_t method_info_size = stream.ComputeMethodInfoSize();
+  MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size);
+  stream.FillInMethodInfo(method_info_region);
+
+  CodeInfo code_info(code_info_region);
+  MethodInfo method_info(method_info_region.begin());
   CodeInfoEncoding encoding = code_info.ExtractEncoding();
   ASSERT_EQ(3u, code_info.GetNumberOfStackMaps(encoding));
 
@@ -996,13 +1001,13 @@
   EXPECT_TRUE(invoke2.IsValid());
   EXPECT_TRUE(invoke3.IsValid());
   EXPECT_EQ(invoke1.GetInvokeType(encoding.invoke_info.encoding), kSuper);
-  EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding), 1u);
+  EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding, method_info), 1u);
   EXPECT_EQ(invoke1.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 4u);
   EXPECT_EQ(invoke2.GetInvokeType(encoding.invoke_info.encoding), kStatic);
-  EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding), 3u);
+  EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding, method_info), 3u);
   EXPECT_EQ(invoke2.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 8u);
   EXPECT_EQ(invoke3.GetInvokeType(encoding.invoke_info.encoding), kDirect);
-  EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding), 65535u);
+  EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding, method_info), 65535u);
   EXPECT_EQ(invoke3.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 16u);
 }
 
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e767023..878d0f2 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -596,7 +596,7 @@
       kByteKindStackMapInlineInfoIndex,
       kByteKindStackMapRegisterMaskIndex,
       kByteKindStackMapStackMaskIndex,
-      kByteKindInlineInfoMethodIndex,
+      kByteKindInlineInfoMethodIndexIdx,
       kByteKindInlineInfoDexPc,
       kByteKindInlineInfoExtraData,
       kByteKindInlineInfoDexRegisterMap,
@@ -605,7 +605,7 @@
       // Special ranges for std::accumulate convenience.
       kByteKindStackMapFirst = kByteKindStackMapNativePc,
       kByteKindStackMapLast = kByteKindStackMapStackMaskIndex,
-      kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndex,
+      kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndexIdx,
       kByteKindInlineInfoLast = kByteKindInlineInfoIsLast,
     };
     int64_t bits[kByteKindCount] = {};
@@ -685,8 +685,8 @@
         {
           ScopedIndentation indent1(&os);
           Dump(os,
-               "InlineInfoMethodIndex         ",
-               bits[kByteKindInlineInfoMethodIndex],
+               "InlineInfoMethodIndexIdx      ",
+               bits[kByteKindInlineInfoMethodIndexIdx],
                inline_info_bits,
                "inline info");
           Dump(os,
@@ -1363,7 +1363,8 @@
         CodeInfo code_info(raw_code_info);
         DCHECK(code_item != nullptr);
         ScopedIndentation indent1(vios);
-        DumpCodeInfo(vios, code_info, oat_method, *code_item);
+        MethodInfo method_info = oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo();
+        DumpCodeInfo(vios, code_info, oat_method, *code_item, method_info);
       }
     } else if (IsMethodGeneratedByDexToDexCompiler(oat_method, code_item)) {
       // We don't encode the size in the table, so just emit that we have quickened
@@ -1379,12 +1380,14 @@
   void DumpCodeInfo(VariableIndentationOutputStream* vios,
                     const CodeInfo& code_info,
                     const OatFile::OatMethod& oat_method,
-                    const DexFile::CodeItem& code_item) {
+                    const DexFile::CodeItem& code_item,
+                    const MethodInfo& method_info) {
     code_info.Dump(vios,
                    oat_method.GetCodeOffset(),
                    code_item.registers_size_,
                    options_.dump_code_info_stack_maps_,
-                   instruction_set_);
+                   instruction_set_,
+                   method_info);
   }
 
   void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method,
@@ -1592,6 +1595,7 @@
     } else if (!bad_input && IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) {
       // The optimizing compiler outputs its CodeInfo data in the vmap table.
       StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
+      MethodInfo method_info(oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo());
       {
         CodeInfoEncoding encoding(helper.GetEncoding());
         StackMapEncoding stack_map_encoding(encoding.stack_map.encoding);
@@ -1652,8 +1656,9 @@
           const size_t num_inline_infos = encoding.inline_info.num_entries;
           if (num_inline_infos > 0u) {
             stats_.AddBits(
-                Stats::kByteKindInlineInfoMethodIndex,
-                encoding.inline_info.encoding.GetMethodIndexEncoding().BitSize() * num_inline_infos);
+                Stats::kByteKindInlineInfoMethodIndexIdx,
+                encoding.inline_info.encoding.GetMethodIndexIdxEncoding().BitSize() *
+                    num_inline_infos);
             stats_.AddBits(
                 Stats::kByteKindInlineInfoDexPc,
                 encoding.inline_info.encoding.GetDexPcEncoding().BitSize() * num_inline_infos);
@@ -1679,6 +1684,7 @@
           stack_map.Dump(vios,
                          helper.GetCodeInfo(),
                          helper.GetEncoding(),
+                         method_info,
                          oat_method.GetCodeOffset(),
                          code_item->registers_size_,
                          instruction_set_);
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 3bc49b8..ba8cec3 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -43,6 +43,7 @@
 namespace art {
 
 inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
+                                    const MethodInfo& method_info,
                                     const InlineInfo& inline_info,
                                     const InlineInfoEncoding& encoding,
                                     uint8_t inlining_depth)
@@ -56,7 +57,7 @@
     return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
   }
 
-  uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth);
+  uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth);
   if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
     // "charAt" special case. It is the only non-leaf method we inline across dex files.
     ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
@@ -68,6 +69,7 @@
   ArtMethod* caller = outer_method;
   if (inlining_depth != 0) {
     caller = GetResolvedMethod(outer_method,
+                               method_info,
                                inline_info,
                                encoding,
                                inlining_depth - 1);
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 6301362..2b35b46 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -201,12 +201,14 @@
       DCHECK(current_code->IsOptimized());
       uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
       CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+      MethodInfo method_info = current_code->GetOptimizedMethodInfo();
       CodeInfoEncoding encoding = code_info.ExtractEncoding();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
       DCHECK(stack_map.IsValid());
       if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
         InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
         caller = GetResolvedMethod(outer_method,
+                                   method_info,
                                    inline_info,
                                    encoding.inline_info.encoding,
                                    inline_info.GetDepth(encoding.inline_info.encoding) - 1);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 3fd20a6..25073a8 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -372,10 +372,11 @@
     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
     CodeInfo code_info = current_code->GetOptimizedCodeInfo();
     CodeInfoEncoding encoding = code_info.ExtractEncoding();
+    MethodInfo method_info = current_code->GetOptimizedMethodInfo();
     InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
     if (invoke.IsValid()) {
       *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
-      *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding);
+      *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
       return true;
     }
     return false;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index e7b23dc..fc41f94 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -211,6 +211,7 @@
 uint8_t* JitCodeCache::CommitCode(Thread* self,
                                   ArtMethod* method,
                                   uint8_t* stack_map,
+                                  uint8_t* method_info,
                                   uint8_t* roots_data,
                                   size_t frame_size_in_bytes,
                                   size_t core_spill_mask,
@@ -225,6 +226,7 @@
   uint8_t* result = CommitCodeInternal(self,
                                        method,
                                        stack_map,
+                                       method_info,
                                        roots_data,
                                        frame_size_in_bytes,
                                        core_spill_mask,
@@ -242,6 +244,7 @@
     result = CommitCodeInternal(self,
                                 method,
                                 stack_map,
+                                method_info,
                                 roots_data,
                                 frame_size_in_bytes,
                                 core_spill_mask,
@@ -510,6 +513,7 @@
 uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
                                           ArtMethod* method,
                                           uint8_t* stack_map,
+                                          uint8_t* method_info,
                                           uint8_t* roots_data,
                                           size_t frame_size_in_bytes,
                                           size_t core_spill_mask,
@@ -547,6 +551,7 @@
       method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
       new (method_header) OatQuickMethodHeader(
           code_ptr - stack_map,
+          code_ptr - method_info,
           frame_size_in_bytes,
           core_spill_mask,
           fp_spill_mask,
@@ -739,12 +744,14 @@
 
 size_t JitCodeCache::ReserveData(Thread* self,
                                  size_t stack_map_size,
+                                 size_t method_info_size,
                                  size_t number_of_roots,
                                  ArtMethod* method,
                                  uint8_t** stack_map_data,
+                                 uint8_t** method_info_data,
                                  uint8_t** roots_data) {
   size_t table_size = ComputeRootTableSize(number_of_roots);
-  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
+  size_t size = RoundUp(stack_map_size + method_info_size + table_size, sizeof(void*));
   uint8_t* result = nullptr;
 
   {
@@ -774,11 +781,13 @@
   if (result != nullptr) {
     *roots_data = result;
     *stack_map_data = result + table_size;
+    *method_info_data = *stack_map_data + stack_map_size;
     FillRootTableLength(*roots_data, number_of_roots);
     return size;
   } else {
     *roots_data = nullptr;
     *stack_map_data = nullptr;
+    *method_info_data = nullptr;
     return 0;
   }
 }
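
For reference, the data region returned by ReserveData is now laid out as [roots table | stack maps | method info], rounded up to pointer size. Below is a minimal standalone sketch of that offset arithmetic; DataLayout and ComputeDataLayout are hypothetical names, and the sketch omits the locking and GC-retry logic of the real JitCodeCache.

    #include <cstddef>

    // Hypothetical helper mirroring the layout computed in JitCodeCache::ReserveData.
    struct DataLayout {
      size_t total_size;          // RoundUp(stack_map_size + method_info_size + table_size, sizeof(void*)).
      size_t roots_offset;        // Root table starts at the beginning of the allocation.
      size_t stack_map_offset;    // Stack maps follow the root table.
      size_t method_info_offset;  // Method info follows the stack maps.
    };

    DataLayout ComputeDataLayout(size_t table_size, size_t stack_map_size, size_t method_info_size) {
      const size_t alignment = sizeof(void*);
      DataLayout layout;
      layout.total_size =
          (stack_map_size + method_info_size + table_size + alignment - 1) / alignment * alignment;
      layout.roots_offset = 0u;
      layout.stack_map_offset = table_size;
      layout.method_info_offset = table_size + stack_map_size;
      return layout;
    }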
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index c970979..db214e7 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -105,6 +105,7 @@
   uint8_t* CommitCode(Thread* self,
                       ArtMethod* method,
                       uint8_t* stack_map,
+                      uint8_t* method_info,
                       uint8_t* roots_data,
                       size_t frame_size_in_bytes,
                       size_t core_spill_mask,
@@ -129,10 +130,12 @@
   // for storing `number_of_roots` roots. Returns null if there is no more room.
   // Return the number of bytes allocated.
   size_t ReserveData(Thread* self,
-                     size_t size,
+                     size_t stack_map_size,
+                     size_t method_info_size,
                      size_t number_of_roots,
                      ArtMethod* method,
                      uint8_t** stack_map_data,
+                     uint8_t** method_info_data,
                      uint8_t** roots_data)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!lock_);
@@ -249,6 +252,7 @@
   uint8_t* CommitCodeInternal(Thread* self,
                               ArtMethod* method,
                               uint8_t* stack_map,
+                              uint8_t* method_info,
                               uint8_t* roots_data,
                               size_t frame_size_in_bytes,
                               size_t core_spill_mask,
diff --git a/runtime/method_info.h b/runtime/method_info.h
new file mode 100644
index 0000000..5a72125
--- /dev/null
+++ b/runtime/method_info.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_METHOD_INFO_H_
+#define ART_RUNTIME_METHOD_INFO_H_
+
+#include "base/logging.h"
+#include "leb128.h"
+#include "memory_region.h"
+
+namespace art {
+
+// Method info holds per-method data that does not dedupe well. Currently it only holds method
+// indices. Putting this data in MethodInfo instead of the CodeInfo saves ~5% oat size.
+class MethodInfo {
+  using MethodIndexType = uint16_t;
+
+ public:
+  // Reading mode
+  explicit MethodInfo(const uint8_t* ptr) {
+    if (ptr != nullptr) {
+      num_method_indices_ = DecodeUnsignedLeb128(&ptr);
+      region_ = MemoryRegion(const_cast<uint8_t*>(ptr),
+                             num_method_indices_ * sizeof(MethodIndexType));
+    }
+  }
+
+  // Writing mode
+  MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
+    DCHECK(ptr != nullptr);
+    ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
+    region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType));
+  }
+
+  static size_t ComputeSize(size_t num_method_indices) {
+    uint8_t temp[8];
+    uint8_t* ptr = temp;
+    ptr = EncodeUnsignedLeb128(ptr, num_method_indices);
+    return (ptr - temp) + num_method_indices * sizeof(MethodIndexType);
+  }
+
+  ALWAYS_INLINE MethodIndexType GetMethodIndex(size_t index) const {
+    // Use bit functions to avoid pesky alignment requirements.
+    return region_.LoadBits(index * BitSizeOf<MethodIndexType>(), BitSizeOf<MethodIndexType>());
+  }
+
+  void SetMethodIndex(size_t index, MethodIndexType method_index) {
+    region_.StoreBits(index * BitSizeOf<MethodIndexType>(),
+                      method_index,
+                      BitSizeOf<MethodIndexType>());
+  }
+
+  size_t NumMethodIndices() const {
+    return num_method_indices_;
+  }
+
+ private:
+  size_t num_method_indices_ = 0u;
+  MemoryRegion region_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_METHOD_INFO_H_
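
To make the reading/writing split above concrete, here is a minimal round-trip sketch. It assumes an art-internal compilation context where method_info.h and its dependencies are available; MethodInfoRoundTrip is a hypothetical helper, not part of this change.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    #include "method_info.h"

    // Hypothetical helper: write three method indices and read them back.
    void MethodInfoRoundTrip() {
      const size_t kNumIndices = 3u;
      std::vector<uint8_t> buffer(art::MethodInfo::ComputeSize(kNumIndices));

      // Writing mode: the constructor emits the LEB128 count, the indices follow.
      art::MethodInfo writer(buffer.data(), kNumIndices);
      writer.SetMethodIndex(0, 42);
      writer.SetMethodIndex(1, 7);
      writer.SetMethodIndex(2, 1000);

      // Reading mode: decode the count, then map index -> method index.
      art::MethodInfo reader(buffer.data());
      // reader.NumMethodIndices() == 3 and reader.GetMethodIndex(0) == 42 here.
    }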
diff --git a/runtime/oat.h b/runtime/oat.h
index df43107..7943b0f 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
-  static constexpr uint8_t kOatVersion[] = { '1', '1', '5', '\0' };  // hash-based DexCache fields
+  static constexpr uint8_t kOatVersion[] = { '1', '1', '6', '\0' };  // Add method infos
 
   static constexpr const char* kImageLocationKey = "image-location";
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index b4e4285..8eef586 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -22,13 +22,14 @@
 
 namespace art {
 
-OatQuickMethodHeader::OatQuickMethodHeader(
-    uint32_t vmap_table_offset,
-    uint32_t frame_size_in_bytes,
-    uint32_t core_spill_mask,
-    uint32_t fp_spill_mask,
-    uint32_t code_size)
+OatQuickMethodHeader::OatQuickMethodHeader(uint32_t vmap_table_offset,
+                                           uint32_t method_info_offset,
+                                           uint32_t frame_size_in_bytes,
+                                           uint32_t core_spill_mask,
+                                           uint32_t fp_spill_mask,
+                                           uint32_t code_size)
     : vmap_table_offset_(vmap_table_offset),
+      method_info_offset_(method_info_offset),
       frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
       code_size_(code_size) {}
 
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 3cdde5a..07a4a9f 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -20,6 +20,7 @@
 #include "arch/instruction_set.h"
 #include "base/macros.h"
 #include "quick/quick_method_frame_info.h"
+#include "method_info.h"
 #include "stack_map.h"
 #include "utils.h"
 
@@ -30,11 +31,13 @@
 // OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
 class PACKED(4) OatQuickMethodHeader {
  public:
-  explicit OatQuickMethodHeader(uint32_t vmap_table_offset = 0U,
-                                uint32_t frame_size_in_bytes = 0U,
-                                uint32_t core_spill_mask = 0U,
-                                uint32_t fp_spill_mask = 0U,
-                                uint32_t code_size = 0U);
+  OatQuickMethodHeader() = default;
+  explicit OatQuickMethodHeader(uint32_t vmap_table_offset,
+                                uint32_t method_info_offset,
+                                uint32_t frame_size_in_bytes,
+                                uint32_t core_spill_mask,
+                                uint32_t fp_spill_mask,
+                                uint32_t code_size);
 
   ~OatQuickMethodHeader();
 
@@ -63,8 +66,7 @@
 
   const void* GetOptimizedCodeInfoPtr() const {
     DCHECK(IsOptimized());
-    const void* data = reinterpret_cast<const void*>(code_ - vmap_table_offset_);
-    return data;
+    return reinterpret_cast<const void*>(code_ - vmap_table_offset_);
   }
 
   uint8_t* GetOptimizedCodeInfoPtr() {
@@ -76,6 +78,20 @@
     return CodeInfo(GetOptimizedCodeInfoPtr());
   }
 
+  const void* GetOptimizedMethodInfoPtr() const {
+    DCHECK(IsOptimized());
+    return reinterpret_cast<const void*>(code_ - method_info_offset_);
+  }
+
+  uint8_t* GetOptimizedMethodInfoPtr() {
+    DCHECK(IsOptimized());
+    return code_ - method_info_offset_;
+  }
+
+  MethodInfo GetOptimizedMethodInfo() const {
+    return MethodInfo(reinterpret_cast<const uint8_t*>(GetOptimizedMethodInfoPtr()));
+  }
+
   const uint8_t* GetCode() const {
     return code_;
   }
@@ -100,6 +116,18 @@
     return &vmap_table_offset_;
   }
 
+  uint32_t GetMethodInfoOffset() const {
+    return method_info_offset_;
+  }
+
+  void SetMethodInfoOffset(uint32_t offset) {
+    method_info_offset_ = offset;
+  }
+
+  const uint32_t* GetMethodInfoOffsetAddr() const {
+    return &method_info_offset_;
+  }
+
   const uint8_t* GetVmapTable() const {
     CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler";
     return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_;
@@ -160,12 +188,18 @@
   static constexpr uint32_t kCodeSizeMask = ~kShouldDeoptimizeMask;
 
   // The offset in bytes from the start of the vmap table to the end of the header.
-  uint32_t vmap_table_offset_;
+  uint32_t vmap_table_offset_ = 0u;
+  // The offset in bytes from the start of the method info to the end of the header.
+  // The method info offset is not stored in the CodeInfo because doing so would hurt the
+  // CodeInfo's dedupe properties. The method info memory region contains method indices, which
+  // are hard to dedupe.
+  uint32_t method_info_offset_ = 0u;
   // The stack frame information.
   QuickMethodFrameInfo frame_info_;
   // The code size in bytes. The highest bit is used to signify if the compiled
   // code with the method header has should_deoptimize flag.
-  uint32_t code_size_;
+  uint32_t code_size_ = 0u;
   // The actual code.
   uint8_t code_[0];
 };
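
Since both offsets in the header count backwards from the first code byte, recovering the tables is plain pointer arithmetic. A minimal sketch, with a hypothetical free function LocateTables standing in for the GetOptimizedCodeInfoPtr/GetOptimizedMethodInfoPtr accessors above:

    #include <cstdint>

    struct TablePointers {
      const uint8_t* code_info;    // Deduplicated CodeInfo (vmap table).
      const uint8_t* method_info;  // Per-method MethodInfo (method indices).
    };

    // Hypothetical equivalent of the accessors on OatQuickMethodHeader.
    TablePointers LocateTables(const uint8_t* code,
                               uint32_t vmap_table_offset,
                               uint32_t method_info_offset) {
      TablePointers tables;
      tables.code_info = code - vmap_table_offset;     // code_ - vmap_table_offset_.
      tables.method_info = code - method_info_offset;  // code_ - method_info_offset_.
      return tables;
    }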
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 51a24e4..0628643 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -142,8 +142,10 @@
       InlineInfo inline_info = GetCurrentInlineInfo();
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
       CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
+      MethodInfo method_info = method_header->GetOptimizedMethodInfo();
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
       return GetResolvedMethod(*GetCurrentQuickFrame(),
+                               method_info,
                                inline_info,
                                encoding.inline_info.encoding,
                                depth_in_stack_map);
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index d657311..250ff2a 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -118,7 +118,8 @@
                     uint32_t code_offset,
                     uint16_t number_of_dex_registers,
                     bool dump_stack_maps,
-                    InstructionSet instruction_set) const {
+                    InstructionSet instruction_set,
+                    const MethodInfo& method_info) const {
   CodeInfoEncoding encoding = ExtractEncoding();
   size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
   vios->Stream()
@@ -139,6 +140,7 @@
       stack_map.Dump(vios,
                      *this,
                      encoding,
+                     method_info,
                      code_offset,
                      number_of_dex_registers,
                      instruction_set,
@@ -189,6 +191,7 @@
 void StackMap::Dump(VariableIndentationOutputStream* vios,
                     const CodeInfo& code_info,
                     const CodeInfoEncoding& encoding,
+                    const MethodInfo& method_info,
                     uint32_t code_offset,
                     uint16_t number_of_dex_registers,
                     InstructionSet instruction_set,
@@ -222,12 +225,13 @@
     // We do not know the length of the dex register maps of inlined frames
     // at this level, so we just pass null to `InlineInfo::Dump` to tell
     // it not to look at these maps.
-    inline_info.Dump(vios, code_info, nullptr);
+    inline_info.Dump(vios, code_info, method_info, nullptr);
   }
 }
 
 void InlineInfo::Dump(VariableIndentationOutputStream* vios,
                       const CodeInfo& code_info,
+                      const MethodInfo& method_info,
                       uint16_t number_of_dex_registers[]) const {
   InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
   vios->Stream() << "InlineInfo with depth "
@@ -245,7 +249,7 @@
     } else {
       vios->Stream()
           << std::dec
-          << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, i);
+          << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i);
     }
     vios->Stream() << ")\n";
     if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index d936ce9..ffa17c9 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -23,6 +23,7 @@
 #include "bit_memory_region.h"
 #include "dex_file.h"
 #include "memory_region.h"
+#include "method_info.h"
 #include "leb128.h"
 
 namespace art {
@@ -367,7 +368,8 @@
     return region_.size();
   }
 
-  void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info);
+  void Dump(VariableIndentationOutputStream* vios,
+            const CodeInfo& code_info);
 
   // Special (invalid) Dex register location catalog entry index meaning
   // that there is no location for a given Dex register (i.e., it is
@@ -862,6 +864,7 @@
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& code_info,
             const CodeInfoEncoding& encoding,
+            const MethodInfo& method_info,
             uint32_t code_offset,
             uint16_t number_of_dex_registers,
             InstructionSet instruction_set,
@@ -885,12 +888,12 @@
 
 class InlineInfoEncoding {
  public:
-  void SetFromSizes(size_t method_index_max,
+  void SetFromSizes(size_t method_index_idx_max,
                     size_t dex_pc_max,
                     size_t extra_data_max,
                     size_t dex_register_map_size) {
     total_bit_size_ = kMethodIndexBitOffset;
-    total_bit_size_ += MinimumBitsToStore(method_index_max);
+    total_bit_size_ += MinimumBitsToStore(method_index_idx_max);
 
     dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
     // Note: We're not encoding the dex pc if there is none. That's the case
@@ -908,7 +911,7 @@
     total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
   }
 
-  ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const {
+  ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const {
     return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
   }
   ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
@@ -975,16 +978,23 @@
     }
   }
 
-  ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
-                                               uint32_t depth) const {
+  ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
+                                                  uint32_t depth) const {
     DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
-    return encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth));
+    return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth));
   }
 
-  ALWAYS_INLINE void SetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
-                                           uint32_t depth,
-                                           uint32_t index) {
-    encoding.GetMethodIndexEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+  ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
+                                              uint32_t depth,
+                                              uint32_t index) {
+    encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+  }
+
+  ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
+                                               const MethodInfo& method_info,
+                                               uint32_t depth) const {
+    return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth));
   }
 
   ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding,
@@ -1012,7 +1022,8 @@
   ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
                                                uint32_t depth) const {
     uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
-    uint32_t high_bits = encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth));
+    uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load(
+        GetRegionAtDepth(encoding, depth));
     if (high_bits == 0) {
       return reinterpret_cast<ArtMethod*>(low_bits);
     } else {
@@ -1040,6 +1051,7 @@
 
   void Dump(VariableIndentationOutputStream* vios,
             const CodeInfo& info,
+            const MethodInfo& method_info,
             uint16_t* number_of_dex_registers) const;
 
  private:
@@ -1219,12 +1231,18 @@
     encoding.GetInvokeTypeEncoding().Store(region_, invoke_type);
   }
 
-  ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding) const {
+  ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const {
     return encoding.GetMethodIndexEncoding().Load(region_);
   }
 
-  ALWAYS_INLINE void SetMethodIndex(const InvokeInfoEncoding& encoding, uint32_t method_index) {
-    encoding.GetMethodIndexEncoding().Store(region_, method_index);
+  ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding,
+                                       uint32_t method_index_idx) {
+    encoding.GetMethodIndexEncoding().Store(region_, method_index_idx);
+  }
+
+  ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding,
+                                        MethodInfo method_info) const {
+    return method_info.GetMethodIndex(GetMethodIndexIdx(encoding));
   }
 
   bool IsValid() const { return region_.pointer() != nullptr; }
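
The hunks above replace the directly stored method index with an extra indirection: the stack map encodes a compact "method index idx" and MethodInfo maps it back to the dex method index. A minimal sketch of that lookup, using a hypothetical free function in an art-internal context (GetMethodIndexAtDepth already does exactly this):

    #include <cstdint>

    #include "method_info.h"
    #include "stack_map.h"

    // Hypothetical expansion of InlineInfo::GetMethodIndexAtDepth.
    uint32_t ResolveInlinedMethodIndex(const art::InlineInfo& inline_info,
                                       const art::InlineInfoEncoding& encoding,
                                       const art::MethodInfo& method_info,
                                       uint32_t depth) {
      const uint32_t idx = inline_info.GetMethodIndexIdxAtDepth(encoding, depth);
      return method_info.GetMethodIndex(idx);
    }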
@@ -1542,7 +1560,8 @@
             uint32_t code_offset,
             uint16_t number_of_dex_registers,
             bool dump_stack_maps,
-            InstructionSet instruction_set) const;
+            InstructionSet instruction_set,
+            const MethodInfo& method_info) const;
 
   // Check that the code info has valid stack map and abort if it does not.
   void AssertValidStackMap(const CodeInfoEncoding& encoding) const {