Merge "Revert^2 "Add CodeInfo to JNI methods.""
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index f2a942f..bda7108 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -208,8 +208,7 @@
       std::vector<DexRegisterMap> dex_reg_maps;
       if (accessor.HasCodeItem() && mi->code_info != nullptr) {
         code_info.reset(new CodeInfo(mi->code_info));
-        for (size_t s = 0; s < code_info->GetNumberOfStackMaps(); ++s) {
-          const StackMap stack_map = code_info->GetStackMapAt(s);
+        for (StackMap stack_map : code_info->GetStackMaps()) {
           dex_reg_maps.push_back(code_info->GetDexRegisterMapOf(stack_map));
         }
       }
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index a7adab5..3d78943 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -101,9 +101,7 @@
         // Use stack maps to create mapping table from pc to dex.
         const CodeInfo code_info(mi->code_info);
         pc2dex_map.reserve(code_info.GetNumberOfStackMaps());
-        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
-          StackMap stack_map = code_info.GetStackMapAt(s);
-          DCHECK(stack_map.IsValid());
+        for (StackMap stack_map : code_info.GetStackMaps()) {
           const uint32_t pc = stack_map.GetNativePcOffset(isa);
           const int32_t dex = stack_map.GetDexPc();
           pc2dex_map.push_back({pc, dex});
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index a212445..efb23e7 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -17,19 +17,34 @@
 #include "loop_analysis.h"
 
 #include "base/bit_vector-inl.h"
+#include "induction_var_range.h"
 
 namespace art {
 
 void LoopAnalysis::CalculateLoopBasicProperties(HLoopInformation* loop_info,
-                                                LoopAnalysisInfo* analysis_results) {
+                                                LoopAnalysisInfo* analysis_results,
+                                                int64_t trip_count) {
+  analysis_results->trip_count_ = trip_count;
+
   for (HBlocksInLoopIterator block_it(*loop_info);
        !block_it.Done();
        block_it.Advance()) {
     HBasicBlock* block = block_it.Current();
 
+    // Check whether one of the successors is a loop exit.
     for (HBasicBlock* successor : block->GetSuccessors()) {
       if (!loop_info->Contains(*successor)) {
         analysis_results->exits_num_++;
+
+        // We track the number of invariant loop exits which correspond to an HIf instruction and
+        // can be eliminated by loop peeling; other control flow instructions are ignored and will
+        // not cause loop peeling to happen as they either cannot be inside a loop, or by
+        // definition cannot be loop exits (unconditional instructions), or are not beneficial for
+        // the optimization.
+        HIf* hif = block->GetLastInstruction()->AsIf();
+        if (hif != nullptr && !loop_info->Contains(*hif->InputAt(0)->GetBlock())) {
+          analysis_results->invariant_exits_num_++;
+        }
       }
     }
 
@@ -48,20 +63,13 @@
   }
 }
 
-bool LoopAnalysis::HasLoopAtLeastOneInvariantExit(HLoopInformation* loop_info) {
-  HGraph* graph = loop_info->GetHeader()->GetGraph();
-  for (uint32_t block_id : loop_info->GetBlocks().Indexes()) {
-    HBasicBlock* block = graph->GetBlocks()[block_id];
-    DCHECK(block != nullptr);
-    if (block->EndsWithIf()) {
-      HIf* hif = block->GetLastInstruction()->AsIf();
-      HInstruction* input = hif->InputAt(0);
-      if (IsLoopExit(loop_info, hif) && !loop_info->Contains(*input->GetBlock())) {
-        return true;
-      }
-    }
+int64_t LoopAnalysis::GetLoopTripCount(HLoopInformation* loop_info,
+                                       const InductionVarRange* induction_range) {
+  int64_t trip_count;
+  if (!induction_range->HasKnownTripCount(loop_info, &trip_count)) {
+    trip_count = LoopAnalysisInfo::kUnknownTripCount;
   }
-  return false;
+  return trip_count;
 }
 
 // Default implementation of loop helper; used for all targets unless a custom implementation
@@ -77,18 +85,22 @@
   // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
   static constexpr uint32_t kScalarHeuristicMaxBodySizeBlocks = 6;
 
-  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
-    return loop_analysis_info->HasLongTypeInstructions() ||
-           IsLoopTooBig(loop_analysis_info,
+  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+    return analysis_info->HasLongTypeInstructions() ||
+           IsLoopTooBig(analysis_info,
                         kScalarHeuristicMaxBodySizeInstr,
                         kScalarHeuristicMaxBodySizeBlocks);
   }
 
-  uint32_t GetScalarUnrollingFactor(HLoopInformation* loop_info ATTRIBUTE_UNUSED,
-                                    uint64_t trip_count) const OVERRIDE {
+  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE {
+    int64_t trip_count = analysis_info->GetTripCount();
+    // Unroll only loops with known trip count.
+    if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
+      return LoopAnalysisInfo::kNoUnrollingFactor;
+    }
     uint32_t desired_unrolling_factor = kScalarMaxUnrollFactor;
     if (trip_count < desired_unrolling_factor || trip_count % desired_unrolling_factor != 0) {
-      return kNoUnrollingFactor;
+      return LoopAnalysisInfo::kNoUnrollingFactor;
     }
 
     return desired_unrolling_factor;
@@ -136,12 +148,12 @@
     // TODO: Unroll loops with unknown trip count.
     DCHECK_NE(vector_length, 0u);
     if (trip_count < (2 * vector_length + max_peel)) {
-      return kNoUnrollingFactor;
+      return LoopAnalysisInfo::kNoUnrollingFactor;
     }
     // Don't unroll for large loop body size.
     uint32_t instruction_count = block->GetInstructions().CountSize();
     if (instruction_count >= kArm64SimdHeuristicMaxBodySizeInstr) {
-      return kNoUnrollingFactor;
+      return LoopAnalysisInfo::kNoUnrollingFactor;
     }
     // Find a beneficial unroll factor with the following restrictions:
     //  - At least one iteration of the transformed loop should be executed.
diff --git a/compiler/optimizing/loop_analysis.h b/compiler/optimizing/loop_analysis.h
index 7f321b7..bcb7b70 100644
--- a/compiler/optimizing/loop_analysis.h
+++ b/compiler/optimizing/loop_analysis.h
@@ -21,26 +21,33 @@
 
 namespace art {
 
+class InductionVarRange;
 class LoopAnalysis;
 
-// No loop unrolling factor (just one copy of the loop-body).
-static constexpr uint32_t kNoUnrollingFactor = 1;
-
 // Class to hold cached information on properties of the loop.
 class LoopAnalysisInfo : public ValueObject {
  public:
+  // No loop unrolling factor (just one copy of the loop-body).
+  static constexpr uint32_t kNoUnrollingFactor = 1;
+  // Used for unknown and non-constant trip counts (see InductionVarRange::HasKnownTripCount).
+  static constexpr int64_t kUnknownTripCount = -1;
+
   explicit LoopAnalysisInfo(HLoopInformation* loop_info)
-      : bb_num_(0),
+      : trip_count_(kUnknownTripCount),
+        bb_num_(0),
         instr_num_(0),
         exits_num_(0),
+        invariant_exits_num_(0),
         has_instructions_preventing_scalar_peeling_(false),
         has_instructions_preventing_scalar_unrolling_(false),
         has_long_type_instructions_(false),
         loop_info_(loop_info) {}
 
+  int64_t GetTripCount() const { return trip_count_; }
   size_t GetNumberOfBasicBlocks() const { return bb_num_; }
   size_t GetNumberOfInstructions() const { return instr_num_; }
   size_t GetNumberOfExits() const { return exits_num_; }
+  size_t GetNumberOfInvariantExits() const { return invariant_exits_num_; }
 
   bool HasInstructionsPreventingScalarPeeling() const {
     return has_instructions_preventing_scalar_peeling_;
@@ -50,19 +57,27 @@
     return has_instructions_preventing_scalar_unrolling_;
   }
 
+  bool HasInstructionsPreventingScalarOpts() const {
+    return HasInstructionsPreventingScalarPeeling() || HasInstructionsPreventingScalarUnrolling();
+  }
+
   bool HasLongTypeInstructions() const {
     return has_long_type_instructions_;
   }
 
-  const HLoopInformation* GetLoopInfo() const { return loop_info_; }
+  HLoopInformation* GetLoopInfo() const { return loop_info_; }
 
  private:
+  // Trip count of the loop if known, kUnknownTripCount otherwise.
+  int64_t trip_count_;
   // Number of basic blocks in the loop body.
   size_t bb_num_;
   // Number of instructions in the loop body.
   size_t instr_num_;
   // Number of loop's exits.
   size_t exits_num_;
+  // Number of "if" loop exits (with HIf instruction) whose condition is loop-invariant.
+  size_t invariant_exits_num_;
   // Whether the loop has instructions which make scalar loop peeling non-beneficial.
   bool has_instructions_preventing_scalar_peeling_;
   // Whether the loop has instructions which make scalar loop unrolling non-beneficial.
@@ -72,7 +87,7 @@
   bool has_long_type_instructions_;
 
   // Corresponding HLoopInformation.
-  const HLoopInformation* loop_info_;
+  HLoopInformation* loop_info_;
 
   friend class LoopAnalysis;
 };
@@ -84,20 +99,12 @@
   // Calculates loops basic properties like body size, exits number, etc. and fills
   // 'analysis_results' with this information.
   static void CalculateLoopBasicProperties(HLoopInformation* loop_info,
-                                           LoopAnalysisInfo* analysis_results);
+                                           LoopAnalysisInfo* analysis_results,
+                                           int64_t trip_count);
 
-  // Returns whether the loop has at least one loop invariant exit.
-  static bool HasLoopAtLeastOneInvariantExit(HLoopInformation* loop_info);
-
-  // Returns whether HIf's true or false successor is outside the specified loop.
-  //
-  // Prerequisite: HIf must be in the specified loop.
-  static bool IsLoopExit(HLoopInformation* loop_info, const HIf* hif) {
-    DCHECK(loop_info->Contains(*hif->GetBlock()));
-    HBasicBlock* true_succ = hif->IfTrueSuccessor();
-    HBasicBlock* false_succ = hif->IfFalseSuccessor();
-    return (!loop_info->Contains(*true_succ) || !loop_info->Contains(*false_succ));
-  }
+  // Returns the trip count of the loop if it is known and kUnknownTripCount otherwise.
+  static int64_t GetLoopTripCount(HLoopInformation* loop_info,
+                                  const InductionVarRange* induction_range);
 
  private:
   // Returns whether an instruction makes scalar loop peeling/unrolling non-beneficial.
@@ -143,9 +150,9 @@
   // Returns optimal scalar unrolling factor for the loop.
   //
   // Returns kNoUnrollingFactor by default, should be overridden by particular target loop helper.
-  virtual uint32_t GetScalarUnrollingFactor(HLoopInformation* loop_info ATTRIBUTE_UNUSED,
-                                            uint64_t trip_count ATTRIBUTE_UNUSED) const {
-    return kNoUnrollingFactor;
+  virtual uint32_t GetScalarUnrollingFactor(
+      const LoopAnalysisInfo* analysis_info ATTRIBUTE_UNUSED) const {
+    return LoopAnalysisInfo::kNoUnrollingFactor;
   }
 
   // Returns whether scalar loop peeling is enabled,
@@ -160,7 +167,7 @@
                                           int64_t trip_count ATTRIBUTE_UNUSED,
                                           uint32_t max_peel ATTRIBUTE_UNUSED,
                                           uint32_t vector_length ATTRIBUTE_UNUSED) const {
-    return kNoUnrollingFactor;
+    return LoopAnalysisInfo::kNoUnrollingFactor;
   }
 };
 
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 72aa253..440cd33 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -744,102 +744,104 @@
 }
 
 bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
-  return TryOptimizeInnerLoopFinite(node) ||
-         TryPeelingForLoopInvariantExitsElimination(node) ||
-         TryUnrollingForBranchPenaltyReduction(node);
+  return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node);
 }
 
 
 
 //
-// Loop unrolling: generic part methods.
+// Scalar loop peeling and unrolling: generic part methods.
 //
 
-bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopNode* node) {
-  // Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
-  // as InstructionSet is needed.
-  if (compiler_options_ == nullptr) {
+bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
+                                                              bool generate_code) {
+  if (analysis_info->GetNumberOfExits() > 1) {
     return false;
   }
 
-  HLoopInformation* loop_info = node->loop_info;
-  int64_t trip_count = 0;
-  // Only unroll loops with a known tripcount.
-  if (!induction_range_.HasKnownTripCount(loop_info, &trip_count)) {
+  uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(analysis_info);
+  if (unrolling_factor == LoopAnalysisInfo::kNoUnrollingFactor) {
     return false;
   }
 
-  uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(loop_info, trip_count);
-  if (unrolling_factor == kNoUnrollingFactor) {
-    return false;
+  if (generate_code) {
+    // TODO: support other unrolling factors.
+    DCHECK_EQ(unrolling_factor, 2u);
+
+    // Perform unrolling.
+    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
+    PeelUnrollSimpleHelper helper(loop_info);
+    helper.DoUnrolling();
+
+    // Remove the redundant loop check after unrolling.
+    HIf* copy_hif =
+        helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
+    int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
+    copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
   }
-
-  LoopAnalysisInfo loop_analysis_info(loop_info);
-  LoopAnalysis::CalculateLoopBasicProperties(loop_info, &loop_analysis_info);
-
-  // Check "IsLoopClonable" last as it can be time-consuming.
-  if (loop_analysis_info.HasInstructionsPreventingScalarUnrolling() ||
-      arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&loop_analysis_info) ||
-      (loop_analysis_info.GetNumberOfExits() > 1) ||
-      !PeelUnrollHelper::IsLoopClonable(loop_info)) {
-    return false;
-  }
-
-  // TODO: support other unrolling factors.
-  DCHECK_EQ(unrolling_factor, 2u);
-
-  // Perform unrolling.
-  PeelUnrollSimpleHelper helper(loop_info);
-  helper.DoUnrolling();
-
-  // Remove the redundant loop check after unrolling.
-  HIf* copy_hif =
-      helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
-  int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
-  copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
-
   return true;
 }
 
-bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopNode* node) {
-  // Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
-  // as InstructionSet is needed.
-  if (compiler_options_ == nullptr) {
-    return false;
-  }
-
-  HLoopInformation* loop_info = node->loop_info;
-  // Check 'IsLoopClonable' the last as it might be time-consuming.
+bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
+                                                                   bool generate_code) {
+  HLoopInformation* loop_info = analysis_info->GetLoopInfo();
   if (!arch_loop_helper_->IsLoopPeelingEnabled()) {
     return false;
   }
 
-  LoopAnalysisInfo loop_analysis_info(loop_info);
-  LoopAnalysis::CalculateLoopBasicProperties(loop_info, &loop_analysis_info);
-
-  // Check "IsLoopClonable" last as it can be time-consuming.
-  if (loop_analysis_info.HasInstructionsPreventingScalarPeeling() ||
-      arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&loop_analysis_info) ||
-      !LoopAnalysis::HasLoopAtLeastOneInvariantExit(loop_info) ||
-      !PeelUnrollHelper::IsLoopClonable(loop_info)) {
+  if (analysis_info->GetNumberOfInvariantExits() == 0) {
     return false;
   }
 
-  // Perform peeling.
-  PeelUnrollSimpleHelper helper(loop_info);
-  helper.DoPeeling();
+  if (generate_code) {
+    // Perform peeling.
+    PeelUnrollSimpleHelper helper(loop_info);
+    helper.DoPeeling();
 
-  const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
-  for (auto entry : *hir_map) {
-    HInstruction* copy = entry.second;
-    if (copy->IsIf()) {
-      TryToEvaluateIfCondition(copy->AsIf(), graph_);
+    // Statically evaluate loop check after peeling for loop invariant condition.
+    const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
+    for (auto entry : *hir_map) {
+      HInstruction* copy = entry.second;
+      if (copy->IsIf()) {
+        TryToEvaluateIfCondition(copy->AsIf(), graph_);
+      }
     }
   }
 
   return true;
 }
 
+bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
+  // Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
+  // as InstructionSet is needed.
+  if (compiler_options_ == nullptr) {
+    return false;
+  }
+
+  HLoopInformation* loop_info = node->loop_info;
+  int64_t trip_count = LoopAnalysis::GetLoopTripCount(loop_info, &induction_range_);
+  LoopAnalysisInfo analysis_info(loop_info);
+  LoopAnalysis::CalculateLoopBasicProperties(loop_info, &analysis_info, trip_count);
+
+  if (analysis_info.HasInstructionsPreventingScalarOpts() ||
+      arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&analysis_info)) {
+    return false;
+  }
+
+  if (!TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
+      !TryUnrollingForBranchPenaltyReduction(&analysis_info, /*generate_code*/ false)) {
+    return false;
+  }
+
+  // Run 'IsLoopClonable' the last as it might be time-consuming.
+  if (!PeelUnrollHelper::IsLoopClonable(loop_info)) {
+    return false;
+  }
+
+  return TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
+         TryUnrollingForBranchPenaltyReduction(&analysis_info);
+}
+
 //
 // Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
 // "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
@@ -1076,7 +1078,7 @@
                     vector_index_,
                     ptc,
                     graph_->GetConstant(induc_type, 1),
-                    kNoUnrollingFactor);
+                    LoopAnalysisInfo::kNoUnrollingFactor);
   }
 
   // Generate vector loop, possibly further unrolled:
@@ -1103,7 +1105,7 @@
                     vector_index_,
                     stc,
                     graph_->GetConstant(induc_type, 1),
-                    kNoUnrollingFactor);
+                    LoopAnalysisInfo::kNoUnrollingFactor);
   }
 
   // Link reductions to their final uses.
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 9743b25..bc47924 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -144,12 +144,19 @@
   bool OptimizeInnerLoop(LoopNode* node);
 
   // Tries to apply loop unrolling for branch penalty reduction and better instruction scheduling
-  // opportunities. Returns whether transformation happened.
-  bool TryUnrollingForBranchPenaltyReduction(LoopNode* loop_node);
+  // opportunities. Returns whether transformation happened. 'generate_code' determines whether the
+  // optimization should be actually applied.
+  bool TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
+                                             bool generate_code = true);
 
   // Tries to apply loop peeling for loop invariant exits elimination. Returns whether
-  // transformation happened.
-  bool TryPeelingForLoopInvariantExitsElimination(LoopNode* loop_node);
+  // transformation happened. 'generate_code' determines whether the optimization should be
+  // actually applied.
+  bool TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
+                                                  bool generate_code = true);
+
+  // Tries to apply scalar loop peeling and unrolling.
+  bool TryPeelingAndUnrolling(LoopNode* node);
 
   //
   // Vectorization analysis and synthesis.
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 5d36195..3e1a36d 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -151,7 +151,7 @@
       StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
       CHECK_EQ(stack_map.HasDexRegisterMap(), (num_dex_registers != 0));
       CHECK_EQ(stack_map.HasInlineInfo(), (inlining_depth != 0));
-      CHECK_EQ(code_info.GetInlineDepthOf(stack_map), inlining_depth);
+      CHECK_EQ(code_info.GetInlineInfosOf(stack_map).size(), inlining_depth);
     });
   }
 }
@@ -209,7 +209,7 @@
     size_t depth = current_inline_infos_.size() - 1;
     dchecks_.emplace_back([=](const CodeInfo& code_info) {
       StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
-      InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, depth);
+      InlineInfo inline_info = code_info.GetInlineInfosOf(stack_map)[depth];
       CHECK_EQ(inline_info.GetDexPc(), dex_pc);
       bool encode_art_method = EncodeArtMethodInInlineInfo(method);
       CHECK_EQ(inline_info.EncodesArtMethod(), encode_art_method);
@@ -275,7 +275,6 @@
 
   if (kVerifyStackMaps) {
     size_t stack_map_index = stack_maps_.size();
-    uint32_t depth = current_inline_infos_.size();
     // We need to make copy of the current registers for later (when the check is run).
     auto expected_dex_registers = std::make_shared<dchecked_vector<DexRegisterLocation>>(
         current_dex_registers_.begin(), current_dex_registers_.end());
@@ -285,8 +284,9 @@
       for (DexRegisterLocation reg : code_info.GetDexRegisterMapOf(stack_map)) {
         CHECK_EQ((*expected_dex_registers)[expected_reg++], reg);
       }
-      for (uint32_t d = 0; d < depth; d++) {
-        for (DexRegisterLocation reg : code_info.GetDexRegisterMapAtDepth(d, stack_map)) {
+      for (InlineInfo inline_info : code_info.GetInlineInfosOf(stack_map)) {
+        DexRegisterMap map = code_info.GetInlineDexRegisterMapOf(stack_map, inline_info);
+        for (DexRegisterLocation reg : map) {
           CHECK_EQ((*expected_dex_registers)[expected_reg++], reg);
         }
       }
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 6241e0c..9ed90a4 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -193,13 +193,12 @@
     ASSERT_EQ(-2, location1.GetValue());
 
     ASSERT_TRUE(stack_map.HasInlineInfo());
-    InlineInfo inline_info0 = code_info.GetInlineInfoAtDepth(stack_map, 0);
-    InlineInfo inline_info1 = code_info.GetInlineInfoAtDepth(stack_map, 1);
-    ASSERT_EQ(2u, code_info.GetInlineDepthOf(stack_map));
-    ASSERT_EQ(3u, inline_info0.GetDexPc());
-    ASSERT_EQ(2u, inline_info1.GetDexPc());
-    ASSERT_TRUE(inline_info0.EncodesArtMethod());
-    ASSERT_TRUE(inline_info1.EncodesArtMethod());
+    auto inline_infos = code_info.GetInlineInfosOf(stack_map);
+    ASSERT_EQ(2u, inline_infos.size());
+    ASSERT_EQ(3u, inline_infos[0].GetDexPc());
+    ASSERT_EQ(2u, inline_infos[1].GetDexPc());
+    ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+    ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
   }
 
   // Second stack map.
@@ -614,19 +613,18 @@
     ASSERT_EQ(0, dex_registers0[0].GetStackOffsetInBytes());
     ASSERT_EQ(4, dex_registers0[1].GetConstant());
 
-    InlineInfo if0_0 = ci.GetInlineInfoAtDepth(sm0, 0);
-    InlineInfo if0_1 = ci.GetInlineInfoAtDepth(sm0, 1);
-    ASSERT_EQ(2u, ci.GetInlineDepthOf(sm0));
-    ASSERT_EQ(2u, if0_0.GetDexPc());
-    ASSERT_TRUE(if0_0.EncodesArtMethod());
-    ASSERT_EQ(3u, if0_1.GetDexPc());
-    ASSERT_TRUE(if0_1.EncodesArtMethod());
+    auto inline_infos = ci.GetInlineInfosOf(sm0);
+    ASSERT_EQ(2u, inline_infos.size());
+    ASSERT_EQ(2u, inline_infos[0].GetDexPc());
+    ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+    ASSERT_EQ(3u, inline_infos[1].GetDexPc());
+    ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, sm0);
+    DexRegisterMap dex_registers1 = ci.GetInlineDexRegisterMapOf(sm0, inline_infos[0]);
     ASSERT_EQ(1u, dex_registers1.size());
     ASSERT_EQ(8, dex_registers1[0].GetStackOffsetInBytes());
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, sm0);
+    DexRegisterMap dex_registers2 = ci.GetInlineDexRegisterMapOf(sm0, inline_infos[1]);
     ASSERT_EQ(3u, dex_registers2.size());
     ASSERT_EQ(16, dex_registers2[0].GetStackOffsetInBytes());
     ASSERT_EQ(20, dex_registers2[1].GetConstant());
@@ -642,22 +640,20 @@
     ASSERT_EQ(56, dex_registers0[0].GetStackOffsetInBytes());
     ASSERT_EQ(0, dex_registers0[1].GetConstant());
 
-    InlineInfo if1_0 = ci.GetInlineInfoAtDepth(sm1, 0);
-    InlineInfo if1_1 = ci.GetInlineInfoAtDepth(sm1, 1);
-    InlineInfo if1_2 = ci.GetInlineInfoAtDepth(sm1, 2);
-    ASSERT_EQ(3u, ci.GetInlineDepthOf(sm1));
-    ASSERT_EQ(2u, if1_0.GetDexPc());
-    ASSERT_TRUE(if1_0.EncodesArtMethod());
-    ASSERT_EQ(3u, if1_1.GetDexPc());
-    ASSERT_TRUE(if1_1.EncodesArtMethod());
-    ASSERT_EQ(5u, if1_2.GetDexPc());
-    ASSERT_TRUE(if1_2.EncodesArtMethod());
+    auto inline_infos = ci.GetInlineInfosOf(sm1);
+    ASSERT_EQ(3u, inline_infos.size());
+    ASSERT_EQ(2u, inline_infos[0].GetDexPc());
+    ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+    ASSERT_EQ(3u, inline_infos[1].GetDexPc());
+    ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
+    ASSERT_EQ(5u, inline_infos[2].GetDexPc());
+    ASSERT_TRUE(inline_infos[2].EncodesArtMethod());
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, sm1);
+    DexRegisterMap dex_registers1 = ci.GetInlineDexRegisterMapOf(sm1, inline_infos[0]);
     ASSERT_EQ(1u, dex_registers1.size());
     ASSERT_EQ(12, dex_registers1[0].GetStackOffsetInBytes());
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, sm1);
+    DexRegisterMap dex_registers2 = ci.GetInlineDexRegisterMapOf(sm1, inline_infos[1]);
     ASSERT_EQ(3u, dex_registers2.size());
     ASSERT_EQ(80, dex_registers2[0].GetStackOffsetInBytes());
     ASSERT_EQ(10, dex_registers2[1].GetConstant());
@@ -684,22 +680,20 @@
     ASSERT_EQ(56, dex_registers0[0].GetStackOffsetInBytes());
     ASSERT_EQ(0, dex_registers0[1].GetConstant());
 
-    InlineInfo if2_0 = ci.GetInlineInfoAtDepth(sm3, 0);
-    InlineInfo if2_1 = ci.GetInlineInfoAtDepth(sm3, 1);
-    InlineInfo if2_2 = ci.GetInlineInfoAtDepth(sm3, 2);
-    ASSERT_EQ(3u, ci.GetInlineDepthOf(sm3));
-    ASSERT_EQ(2u, if2_0.GetDexPc());
-    ASSERT_TRUE(if2_0.EncodesArtMethod());
-    ASSERT_EQ(5u, if2_1.GetDexPc());
-    ASSERT_TRUE(if2_1.EncodesArtMethod());
-    ASSERT_EQ(10u, if2_2.GetDexPc());
-    ASSERT_TRUE(if2_2.EncodesArtMethod());
+    auto inline_infos = ci.GetInlineInfosOf(sm3);
+    ASSERT_EQ(3u, inline_infos.size());
+    ASSERT_EQ(2u, inline_infos[0].GetDexPc());
+    ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+    ASSERT_EQ(5u, inline_infos[1].GetDexPc());
+    ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
+    ASSERT_EQ(10u, inline_infos[2].GetDexPc());
+    ASSERT_TRUE(inline_infos[2].EncodesArtMethod());
 
-    DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, sm3);
+    DexRegisterMap dex_registers1 = ci.GetInlineDexRegisterMapOf(sm3, inline_infos[1]);
     ASSERT_EQ(1u, dex_registers1.size());
     ASSERT_EQ(2, dex_registers1[0].GetMachineRegister());
 
-    DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, sm3);
+    DexRegisterMap dex_registers2 = ci.GetInlineDexRegisterMapOf(sm3, inline_infos[2]);
     ASSERT_EQ(2u, dex_registers2.size());
     ASSERT_FALSE(dex_registers2[0].IsLive());
     ASSERT_EQ(3, dex_registers2[1].GetMachineRegister());
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8dca889..cbc6424 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -290,7 +290,7 @@
   UsageError("      Default: default");
   UsageError("");
   UsageError("  --compile-pic: Force indirect use of code, methods, and classes");
-  UsageError("      Default: disabled");
+  UsageError("      Default: disabled for apps (ignored for boot image which is always PIC)");
   UsageError("");
   UsageError("  --compiler-backend=(Quick|Optimizing): select compiler backend");
   UsageError("      set.");
@@ -725,6 +725,9 @@
 
   void ProcessOptions(ParserOptions* parser_options) {
     compiler_options_->boot_image_ = !image_filenames_.empty();
+    if (compiler_options_->boot_image_) {
+      compiler_options_->compile_pic_ = true;
+    }
     compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
 
     if (IsBootImage() && image_filenames_.size() == 1) {
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index b0fc4d1..ee47721 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -26,6 +26,7 @@
 
 #include "base/bit_memory_region.h"
 #include "base/casts.h"
+#include "base/iteration_range.h"
 #include "base/memory_region.h"
 #include "base/scoped_arena_containers.h"
 #include "base/stl_util.h"
@@ -207,9 +208,18 @@
     bool operator>=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ >= i.row_; }
     bool operator<(const_iterator i) const { DCHECK(table_ == i.table_); return row_ < i.row_; }
     bool operator>(const_iterator i) const { DCHECK(table_ == i.table_); return row_ > i.row_; }
-    Accessor operator*() { return Accessor(table_, row_); }
-    Accessor operator->() { return Accessor(table_, row_); }
-    Accessor operator[](size_t index) { return Accessor(table_, row_ + index); }
+    Accessor operator*() {
+      DCHECK_LT(row_, table_->NumRows());
+      return Accessor(table_, row_);
+    }
+    Accessor operator->() {
+      DCHECK_LT(row_, table_->NumRows());
+      return Accessor(table_, row_);
+    }
+    Accessor operator[](size_t index) {
+      DCHECK_LT(row_ + index, table_->NumRows());
+      return Accessor(table_, row_ + index);
+    }
    private:
     const BitTable* table_ = nullptr;
     uint32_t row_ = 0;
@@ -236,6 +246,34 @@
   return a + n;
 }
 
+template<typename Accessor>
+class BitTableRange : public IterationRange<typename BitTable<Accessor>::const_iterator> {
+ public:
+  typedef typename BitTable<Accessor>::const_iterator const_iterator;
+
+  using IterationRange<const_iterator>::IterationRange;
+  BitTableRange() : IterationRange<const_iterator>(const_iterator(), const_iterator()) { }
+
+  bool empty() const { return this->begin() == this->end(); }
+  size_t size() const { return this->end() - this->begin(); }
+
+  Accessor operator[](size_t index) const {
+    const_iterator it = this->begin() + index;
+    DCHECK(it < this->end());
+    return *it;
+  }
+
+  Accessor back() const {
+    DCHECK(!empty());
+    return *(this->end() - 1);
+  }
+
+  void pop_back() {
+    DCHECK(!empty());
+    --this->last_;
+  }
+};
+
 // Helper class for encoding BitTable. It can optionally de-duplicate the inputs.
 template<uint32_t kNumColumns>
 class BitTableBuilderBase {
@@ -246,6 +284,9 @@
   class Entry {
    public:
     Entry() {
+      // The definition of kNoValue here is for host and target debug builds which complain about
+      // missing a symbol definition for BitTableBase<N>::kNoValue when optimization is off.
+      static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
       std::fill_n(data_, kNumColumns, kNoValue);
     }
 
diff --git a/libartbase/base/iteration_range.h b/libartbase/base/iteration_range.h
index 76049a7..cd87d85 100644
--- a/libartbase/base/iteration_range.h
+++ b/libartbase/base/iteration_range.h
@@ -39,9 +39,9 @@
   iterator cbegin() const { return first_; }
   iterator cend() const { return last_; }
 
- private:
-  const iterator first_;
-  const iterator last_;
+ protected:
+  iterator first_;
+  iterator last_;
 };
 
 template <typename Iter>
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 67abdca..4e88ef6 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -1200,6 +1200,9 @@
   bool IsAtMethod() const {
     return pos_ >= EndOfInstanceFieldsPos();
   }
+  bool IsAtVirtualMethod() const {
+    return pos_ >= EndOfDirectMethodsPos();
+  }
   bool HasNextStaticField() const {
     return pos_ < EndOfStaticFieldsPos();
   }
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 4a3d3b0..40ef10f 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -46,38 +46,38 @@
 
 inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
                                     const MethodInfo& method_info,
-                                    const CodeInfo& code_info,
-                                    const StackMap& stack_map,
-                                    uint8_t inlining_depth)
+                                    const BitTableRange<InlineInfo>& inline_infos)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(!outer_method->IsObsolete());
-  InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, inlining_depth);
 
   // This method is being used by artQuickResolutionTrampoline, before it sets up
   // the passed parameters in a GC friendly way. Therefore we must never be
   // suspended while executing it.
   ScopedAssertNoThreadSuspension sants(__FUNCTION__);
 
-  if (inline_info.EncodesArtMethod()) {
-    return inline_info.GetArtMethod();
-  }
+  {
+    InlineInfo inline_info = inline_infos.back();
 
-  uint32_t method_index = inline_info.GetMethodIndex(method_info);
-  if (inline_info.GetDexPc() == static_cast<uint32_t>(-1)) {
-    // "charAt" special case. It is the only non-leaf method we inline across dex files.
-    ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
-    DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
-    return inlined_method;
+    if (inline_info.EncodesArtMethod()) {
+      return inline_info.GetArtMethod();
+    }
+
+    uint32_t method_index = inline_info.GetMethodIndex(method_info);
+    if (inline_info.GetDexPc() == static_cast<uint32_t>(-1)) {
+      // "charAt" special case. It is the only non-leaf method we inline across dex files.
+      ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+      DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
+      return inlined_method;
+    }
   }
 
   // Find which method did the call in the inlining hierarchy.
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   ArtMethod* method = outer_method;
-  for (uint32_t depth = 0, end = inlining_depth + 1u; depth != end; ++depth) {
-    inline_info = code_info.GetInlineInfoAtDepth(stack_map, depth);
+  for (InlineInfo inline_info : inline_infos) {
     DCHECK(!inline_info.EncodesArtMethod());
     DCHECK_NE(inline_info.GetDexPc(), static_cast<uint32_t>(-1));
-    method_index = inline_info.GetMethodIndex(method_info);
+    uint32_t method_index = inline_info.GetMethodIndex(method_info);
     ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
                                                                    method->GetDexCache(),
                                                                    method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index d902455..0c61965 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -205,9 +205,9 @@
       MethodInfo method_info = current_code->GetOptimizedMethodInfo();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(stack_map.IsValid());
-      uint32_t depth = code_info.GetInlineDepthOf(stack_map);
-      if (depth != 0) {
-        caller = GetResolvedMethod(outer_method, method_info, code_info, stack_map, depth - 1);
+      BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+      if (!inline_infos.empty()) {
+        caller = GetResolvedMethod(outer_method, method_info, inline_infos);
       }
     }
     if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 3ccfa55..c894406 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -345,10 +345,9 @@
       CodeInfo code_info(current_code);
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
       DCHECK(stack_map.IsValid());
-      uint32_t depth = code_info.GetInlineDepthOf(stack_map);
-      if (depth != 0) {
-        InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, depth - 1);
-        return inline_info.GetDexPc();
+      BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+      if (!inline_infos.empty()) {
+        return inline_infos.back().GetDexPc();
       } else {
         return stack_map.GetDexPc();
       }
@@ -1236,37 +1235,35 @@
   LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(outer_method, dex_pc);
 
   ArtMethod* caller = outer_method;
-  uint32_t depth = code_info.GetInlineDepthOf(stack_map);
-  if (depth != 0) {
-    for (size_t d = 0; d < depth; ++d) {
-      InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, d);
-      const char* tag = "";
-      dex_pc = inline_info.GetDexPc();
-      if (inline_info.EncodesArtMethod()) {
-        tag = "encoded ";
-        caller = inline_info.GetArtMethod();
+  BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+  for (InlineInfo inline_info : inline_infos) {
+    const char* tag = "";
+    dex_pc = inline_info.GetDexPc();
+    if (inline_info.EncodesArtMethod()) {
+      tag = "encoded ";
+      caller = inline_info.GetArtMethod();
+    } else {
+      uint32_t method_index = inline_info.GetMethodIndex(method_info);
+      if (dex_pc == static_cast<uint32_t>(-1)) {
+        tag = "special ";
+        CHECK(inline_info.Equals(inline_infos.back()));
+        caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+        CHECK_EQ(caller->GetDexMethodIndex(), method_index);
       } else {
-        uint32_t method_index = inline_info.GetMethodIndex(method_info);
-        if (dex_pc == static_cast<uint32_t>(-1)) {
-          tag = "special ";
-          CHECK_EQ(d + 1u, depth);
-          caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
-          CHECK_EQ(caller->GetDexMethodIndex(), method_index);
-        } else {
-          ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
-          ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
-          caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
-          CHECK(caller != nullptr);
-        }
+        ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
+        ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
+        caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
+        CHECK(caller != nullptr);
       }
-      LOG(FATAL_WITHOUT_ABORT) << "Inlined method #" << d << ": " << tag << caller->PrettyMethod()
-          << " dex pc: " << dex_pc
-          << " dex file: " << caller->GetDexFile()->GetLocation()
-          << " class table: "
-          << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
-      DumpB74410240ClassData(caller->GetDeclaringClass());
-      LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(caller, dex_pc);
     }
+    LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row()
+        << ": " << tag << caller->PrettyMethod()
+        << " dex pc: " << dex_pc
+        << " dex file: " << caller->GetDexFile()->GetLocation()
+        << " class table: "
+        << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
+    DumpB74410240ClassData(caller->GetDeclaringClass());
+    LOG(FATAL_WITHOUT_ABORT) << "  instruction: " << DumpInstruction(caller, dex_pc);
   }
 }
 
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 2dbde6f..e8d9658 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -404,7 +404,7 @@
     uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
     BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
     DexRegisterMap vreg_map = IsInInlinedFrame()
-        ? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1, stack_map)
+        ? code_info.GetInlineDexRegisterMapOf(stack_map, GetCurrentInlinedFrame())
         : code_info.GetDexRegisterMapOf(stack_map);
     if (vreg_map.empty()) {
       return;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index e99cb1b..f58fc3b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -68,7 +68,6 @@
       cur_oat_quick_method_header_(nullptr),
       num_frames_(num_frames),
       cur_depth_(0),
-      current_inlining_depth_(0),
       context_(context),
       check_suspended_(check_suspended) {
   if (check_suspended_) {
@@ -76,32 +75,15 @@
   }
 }
 
-static StackMap GetCurrentStackMap(CodeInfo& code_info,
-                                   const OatQuickMethodHeader* method_header,
-                                   uintptr_t cur_quick_frame_pc)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
-  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
-  DCHECK(stack_map.IsValid());
-  return stack_map;
-}
-
 ArtMethod* StackVisitor::GetMethod() const {
   if (cur_shadow_frame_ != nullptr) {
     return cur_shadow_frame_->GetMethod();
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
-      size_t depth_in_stack_map = current_inlining_depth_ - 1;
       const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfo code_info(method_header);
-      StackMap stack_map = GetCurrentStackMap(code_info, method_header, cur_quick_frame_pc_);
       MethodInfo method_info = method_header->GetOptimizedMethodInfo();
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
-      return GetResolvedMethod(*GetCurrentQuickFrame(),
-                               method_info,
-                               code_info,
-                               stack_map,
-                               depth_in_stack_map);
+      return GetResolvedMethod(*GetCurrentQuickFrame(), method_info, current_inline_frames_);
     } else {
       return *cur_quick_frame_;
     }
@@ -114,11 +96,7 @@
     return cur_shadow_frame_->GetDexPC();
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
-      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfo code_info(method_header);
-      size_t depth_in_stack_map = current_inlining_depth_ - 1;
-      StackMap stack_map = GetCurrentStackMap(code_info, method_header, cur_quick_frame_pc_);
-      return code_info.GetInlineInfoAtDepth(stack_map, depth_in_stack_map).GetDexPc();
+      return current_inline_frames_.back().GetDexPc();
     } else if (cur_oat_quick_method_header_ == nullptr) {
       return dex::kDexNoIndex;
     } else {
@@ -233,10 +211,9 @@
   uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
   DCHECK(stack_map.IsValid());
-  size_t depth_in_stack_map = current_inlining_depth_ - 1;
 
   DexRegisterMap dex_register_map = IsInInlinedFrame()
-      ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map, stack_map)
+      ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
       : code_info.GetDexRegisterMapOf(stack_map);
   if (dex_register_map.empty()) {
     return false;
@@ -823,10 +800,10 @@
               cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
           StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
           if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
-            DCHECK_EQ(current_inlining_depth_, 0u);
-            for (current_inlining_depth_ = code_info.GetInlineDepthOf(stack_map);
-                 current_inlining_depth_ != 0;
-                 --current_inlining_depth_) {
+            DCHECK_EQ(current_inline_frames_.size(), 0u);
+            for (current_inline_frames_ = code_info.GetInlineInfosOf(stack_map);
+                 !current_inline_frames_.empty();
+                 current_inline_frames_.pop_back()) {
               bool should_continue = VisitFrame();
               if (UNLIKELY(!should_continue)) {
                 return;
diff --git a/runtime/stack.h b/runtime/stack.h
index a16930b..02578d2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -23,6 +23,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
 
 namespace art {
 
@@ -219,11 +220,11 @@
   void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool IsInInlinedFrame() const {
-    return current_inlining_depth_ != 0;
+    return !current_inline_frames_.empty();
   }
 
-  size_t GetCurrentInliningDepth() const {
-    return current_inlining_depth_;
+  InlineInfo GetCurrentInlinedFrame() const {
+    return current_inline_frames_.back();
   }
 
   uintptr_t GetCurrentQuickFramePc() const {
@@ -309,9 +310,9 @@
   size_t num_frames_;
   // Depth of the frame we're currently at.
   size_t cur_depth_;
-  // Current inlining depth of the method we are currently at.
-  // 0 if there is no inlined frame.
-  size_t current_inlining_depth_;
+  // Current inlined frames of the method we are currently at.
+  // We keep popping frames from the end as we visit the frames.
+  BitTableRange<InlineInfo> current_inline_frames_;
 
  protected:
   Context* const context_;
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index a3c6e05..7e46eb7 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -232,8 +232,7 @@
 
   // Display stack maps along with (live) Dex register maps.
   if (verbose) {
-    for (size_t i = 0; i < GetNumberOfStackMaps(); ++i) {
-      StackMap stack_map = GetStackMapAt(i);
+    for (StackMap stack_map : stack_maps_) {
       stack_map.Dump(vios, *this, method_info, code_offset, instruction_set);
     }
   }
@@ -259,9 +258,7 @@
   }
   vios->Stream() << ")\n";
   code_info.GetDexRegisterMapOf(*this).Dump(vios);
-  uint32_t depth = code_info.GetInlineDepthOf(*this);
-  for (size_t d = 0; d < depth; d++) {
-    InlineInfo inline_info = code_info.GetInlineInfoAtDepth(*this, d);
+  for (InlineInfo inline_info : code_info.GetInlineInfosOf(*this)) {
     inline_info.Dump(vios, code_info, *this, method_info);
   }
 }
@@ -285,7 +282,7 @@
         << ", method_index=" << GetMethodIndex(method_info);
   }
   vios->Stream() << ")\n";
-  code_info.GetDexRegisterMapAtDepth(depth, stack_map).Dump(vios);
+  code_info.GetInlineDexRegisterMapOf(stack_map, *this).Dump(vios);
 }
 
 }  // namespace art
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index ad52f37..2f2053a 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -298,8 +298,8 @@
     return BitsToBytesRoundUp(size_in_bits_);
   }
 
-  bool HasInlineInfo() const {
-    return inline_infos_.NumRows() > 0;
+  ALWAYS_INLINE const BitTable<StackMap>& GetStackMaps() const {
+    return stack_maps_;
   }
 
   ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const {
@@ -330,6 +330,10 @@
       : dex_register_catalog_.GetRow(index).GetLocation();
   }
 
+  bool HasInlineInfo() const {
+    return inline_infos_.NumRows() > 0;
+  }
+
   uint32_t GetNumberOfStackMaps() const {
     return stack_maps_.NumRows();
   }
@@ -347,14 +351,18 @@
     return DexRegisterMap(0, DexRegisterLocation::None());
   }
 
-  ALWAYS_INLINE DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth, StackMap stack_map) const {
+  ALWAYS_INLINE DexRegisterMap GetInlineDexRegisterMapOf(StackMap stack_map,
+                                                         InlineInfo inline_info) const {
     if (stack_map.HasDexRegisterMap()) {
+      DCHECK(stack_map.HasInlineInfoIndex());
+      uint32_t depth = inline_info.Row() - stack_map.GetInlineInfoIndex();
       // The register counts are commutative and include all outer levels.
       // This allows us to determine the range [first, last) in just two lookups.
       // If we are at depth 0 (the first inlinee), the count from the main method is used.
-      uint32_t first = (depth == 0) ? number_of_dex_registers_
-          : GetInlineInfoAtDepth(stack_map, depth - 1).GetNumberOfDexRegisters();
-      uint32_t last = GetInlineInfoAtDepth(stack_map, depth).GetNumberOfDexRegisters();
+      uint32_t first = (depth == 0)
+          ? number_of_dex_registers_
+          : inline_infos_.GetRow(inline_info.Row() - 1).GetNumberOfDexRegisters();
+      uint32_t last = inline_info.GetNumberOfDexRegisters();
       DexRegisterMap map(last - first, DexRegisterLocation::Invalid());
       DecodeDexRegisterMap(stack_map.Row(), first, &map);
       return map;
@@ -362,28 +370,20 @@
     return DexRegisterMap(0, DexRegisterLocation::None());
   }
 
-  InlineInfo GetInlineInfo(size_t index) const {
-    return inline_infos_.GetRow(index);
-  }
-
-  uint32_t GetInlineDepthOf(StackMap stack_map) const {
-    uint32_t depth = 0;
+  BitTableRange<InlineInfo> GetInlineInfosOf(StackMap stack_map) const {
     uint32_t index = stack_map.GetInlineInfoIndex();
     if (index != StackMap::kNoValue) {
-      while (GetInlineInfo(index + depth++).GetIsLast() == InlineInfo::kMore) { }
+      auto begin = inline_infos_.begin() + index;
+      auto end = begin;
+      while ((*end++).GetIsLast() == InlineInfo::kMore) { }
+      return BitTableRange<InlineInfo>(begin, end);
+    } else {
+      return BitTableRange<InlineInfo>();
     }
-    return depth;
-  }
-
-  InlineInfo GetInlineInfoAtDepth(StackMap stack_map, uint32_t depth) const {
-    DCHECK(stack_map.HasInlineInfo());
-    DCHECK_LT(depth, GetInlineDepthOf(stack_map));
-    return GetInlineInfo(stack_map.GetInlineInfoIndex() + depth);
   }
 
   StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
-    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i);
+    for (StackMap stack_map : stack_maps_) {
       if (stack_map.GetDexPc() == dex_pc && stack_map.GetKind() != StackMap::Kind::Debug) {
         return stack_map;
       }
@@ -403,8 +403,7 @@
   }
 
   StackMap GetOsrStackMapForDexPc(uint32_t dex_pc) const {
-    for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
-      StackMap stack_map = GetStackMapAt(i);
+    for (StackMap stack_map : stack_maps_) {
       if (stack_map.GetDexPc() == dex_pc && stack_map.GetKind() == StackMap::Kind::OSR) {
         return stack_map;
       }
@@ -415,8 +414,7 @@
   StackMap GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa = kRuntimeISA) const;
 
   InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset) {
-    for (size_t index = 0; index < invoke_infos_.NumRows(); index++) {
-      InvokeInfo item = GetInvokeInfo(index);
+    for (InvokeInfo item : invoke_infos_) {
       if (item.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
         return item;
       }
diff --git a/test/530-checker-lse/smali/Main.smali b/test/530-checker-lse/smali/Main.smali
index 2678017..4c18266 100644
--- a/test/530-checker-lse/smali/Main.smali
+++ b/test/530-checker-lse/smali/Main.smali
@@ -124,6 +124,38 @@
     goto :goto_5
 .end method
 
+## CHECK-START: int Main2.test10(TestClass) load_store_elimination (before)
+## CHECK: StaticFieldGet
+## CHECK: InstanceFieldGet
+## CHECK: StaticFieldSet
+## CHECK: InstanceFieldGet
+
+## CHECK-START: int Main2.test10(TestClass) load_store_elimination (after)
+## CHECK: StaticFieldGet
+## CHECK: InstanceFieldGet
+## CHECK: StaticFieldSet
+## CHECK-NOT: NullCheck
+## CHECK-NOT: InstanceFieldGet
+
+# Original java source:
+#
+#  // Static fields shouldn't alias with instance fields.
+#  static int test10(TestClass obj) {
+#    TestClass.si += obj.i;
+#    return obj.i;
+#  }
+
+.method public static test10(LTestClass;)I
+    .registers 3
+    .param p0, "obj"    # LTestClass;
+    sget                v0, LTestClass;->si:I
+    iget                v1, p0, LTestClass;->i:I
+    add-int/2addr       v0, v1
+    sput                v0, LTestClass;->si:I
+    iget                p0, p0, LTestClass;->i:I
+    return              p0
+.end method
+
 ## CHECK-START: int Main2.test23(boolean) load_store_elimination (before)
 ## CHECK: NewInstance
 ## CHECK: InstanceFieldSet
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 541ae8b..22bff0a 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -251,24 +251,6 @@
     return obj2.i;
   }
 
-  /// CHECK-START: int Main.test10(TestClass) load_store_elimination (before)
-  /// CHECK: StaticFieldGet
-  /// CHECK: InstanceFieldGet
-  /// CHECK: StaticFieldSet
-
-  /// CHECK-START: int Main.test10(TestClass) load_store_elimination (after)
-  /// CHECK: StaticFieldGet
-  /// CHECK: InstanceFieldGet
-  /// CHECK: StaticFieldSet
-  /// CHECK-NOT: NullCheck
-  /// CHECK-NOT: InstanceFieldGet
-
-  // Static fields shouldn't alias with instance fields.
-  static int test10(TestClass obj) {
-    TestClass.si += obj.i;
-    return obj.i;
-  }
-
   /// CHECK-START: int Main.test11(TestClass) load_store_elimination (before)
   /// CHECK: InstanceFieldSet
   /// CHECK: InstanceFieldGet
@@ -1176,6 +1158,7 @@
     Class main2 = Class.forName("Main2");
     Method test4 = main2.getMethod("test4", TestClass.class, boolean.class);
     Method test5 = main2.getMethod("test5", TestClass.class, boolean.class);
+    Method test10 = main2.getMethod("test10", TestClass.class);
     Method test23 = main2.getMethod("test23", boolean.class);
     Method test24 = main2.getMethod("test24");
 
@@ -1198,7 +1181,7 @@
     obj2 = new TestClass();
     obj1.next = obj2;
     assertIntEquals(test9(new TestClass()), 1);
-    assertIntEquals(test10(new TestClass(3, 4)), 3);
+    assertIntEquals((int)test10.invoke(null, new TestClass(3, 4)), 3);
     assertIntEquals(TestClass.si, 3);
     assertIntEquals(test11(new TestClass()), 10);
     assertIntEquals(test12(new TestClass(), new TestClass()), 10);
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 2e1ec5a..c252a9b 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -16,7 +16,8 @@
 
 #include <fstream>
 #include <iostream>
-#include <unordered_set>
+#include <map>
+#include <set>
 
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
@@ -72,24 +73,71 @@
   UsageError("    --blacklist=<filename>:");
   UsageError("        text files with signatures of methods/fields to be annotated");
   UsageError("");
+  UsageError("  Command \"list\": dump lists of public and private API");
+  UsageError("    --boot-dex=<filename>: dex file which belongs to boot class path");
+  UsageError("    --stub-dex=<filename>: dex/apk file which belongs to SDK API stubs");
+  UsageError("");
+  UsageError("    --out-public=<filename>: output file for a list of all public APIs");
+  UsageError("    --out-private=<filename>: output file for a list of all private APIs");
+  UsageError("");
 
   exit(EXIT_FAILURE);
 }
 
+template<typename E>
+static bool Contains(const std::vector<E>& vec, const E& elem) {
+  return std::find(vec.begin(), vec.end(), elem) != vec.end();
+}
+
 class DexClass {
  public:
   DexClass(const DexFile& dex_file, uint32_t idx)
       : dex_file_(dex_file), class_def_(dex_file.GetClassDef(idx)) {}
 
   const DexFile& GetDexFile() const { return dex_file_; }
-
-  const dex::TypeIndex GetClassIndex() const { return class_def_.class_idx_; }
-
   const uint8_t* GetData() const { return dex_file_.GetClassData(class_def_); }
 
-  const char* GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
+  const dex::TypeIndex GetClassIndex() const { return class_def_.class_idx_; }
+  const dex::TypeIndex GetSuperclassIndex() const { return class_def_.superclass_idx_; }
+
+  bool HasSuperclass() const { return dex_file_.IsTypeIndexValid(GetSuperclassIndex()); }
+
+  std::string GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
+
+  std::string GetSuperclassDescriptor() const {
+    if (HasSuperclass()) {
+      return dex_file_.StringByTypeIdx(GetSuperclassIndex());
+    } else {
+      return "";
+    }
+  }
+
+  std::set<std::string> GetInterfaceDescriptors() const {
+    std::set<std::string> list;
+    const DexFile::TypeList* ifaces = dex_file_.GetInterfacesList(class_def_);
+    for (uint32_t i = 0; ifaces != nullptr && i < ifaces->Size(); ++i) {
+      list.insert(dex_file_.StringByTypeIdx(ifaces->GetTypeItem(i).type_idx_));
+    }
+    return list;
+  }
+
+  inline bool IsVisible() const { return HasAccessFlags(kAccPublic); }
+
+  inline bool Equals(const DexClass& other) const {
+    bool equals = GetDescriptor() == other.GetDescriptor();
+    if (equals) {
+      // TODO(dbrazdil): Check that methods/fields match as well once b/111116543 is fixed.
+      CHECK_EQ(GetAccessFlags(), other.GetAccessFlags());
+      CHECK_EQ(GetSuperclassDescriptor(), other.GetSuperclassDescriptor());
+      CHECK(GetInterfaceDescriptors() == other.GetInterfaceDescriptors());
+    }
+    return equals;
+  }
 
  private:
+  uint32_t GetAccessFlags() const { return class_def_.access_flags_; }
+  bool HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
+
   const DexFile& dex_file_;
   const DexFile::ClassDef& class_def_;
 };
@@ -98,10 +146,12 @@
  public:
   DexMember(const DexClass& klass, const ClassDataItemIterator& it)
       : klass_(klass), it_(it) {
-    DCHECK_EQ(it_.IsAtMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
+    DCHECK_EQ(IsMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
               klass_.GetClassIndex());
   }
 
+  inline const DexClass& GetDeclaringClass() const { return klass_; }
+
   // Sets hidden bits in access flags and writes them back into the DEX in memory.
   // Note that this will not update the cached data of ClassDataItemIterator
   // until it iterates over this item again and therefore will fail a CHECK if
@@ -115,7 +165,7 @@
     // `ptr` initially points to the next ClassData item. We iterate backwards
     // until we hit the terminating byte of the previous Leb128 value.
     const uint8_t* ptr = it_.DataPointer();
-    if (it_.IsAtMethod()) {
+    if (IsMethod()) {
       ptr = ReverseSearchUnsignedLeb128(ptr);
       DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), it_.GetMethodCodeItemOffset());
     }
@@ -126,30 +176,57 @@
     UpdateUnsignedLeb128(const_cast<uint8_t*>(ptr), new_flags);
   }
 
+  inline bool IsMethod() const { return it_.IsAtMethod(); }
+  inline bool IsVirtualMethod() const { return it_.IsAtVirtualMethod(); }
+
+  // Returns true if the member is public/protected and is in a public class.
+  inline bool IsVisible() const {
+    return GetDeclaringClass().IsVisible() &&
+           (HasAccessFlags(kAccPublic) || HasAccessFlags(kAccProtected));
+  }
+
   // Constructs a string with a unique signature of this class member.
   std::string GetApiEntry() const {
     std::stringstream ss;
-    ss << klass_.GetDescriptor() << "->";
-    if (it_.IsAtMethod()) {
-      const DexFile::MethodId& mid = GetMethodId();
-      ss << klass_.GetDexFile().GetMethodName(mid)
-         << klass_.GetDexFile().GetMethodSignature(mid).ToString();
-    } else {
-      const DexFile::FieldId& fid = GetFieldId();
-      ss << klass_.GetDexFile().GetFieldName(fid) << ":"
-         << klass_.GetDexFile().GetFieldTypeDescriptor(fid);
-    }
+    ss << klass_.GetDescriptor() << "->" << GetName() << (IsMethod() ? "" : ":") << GetSignature();
     return ss.str();
   }
 
+  inline bool operator==(const DexMember& other) {
+    // These need to match if they should resolve to one another.
+    bool equals = IsMethod() == other.IsMethod() &&
+                  GetName() == other.GetName() &&
+                  GetSignature() == other.GetSignature();
+
+    // Sanity checks if they do match.
+    if (equals) {
+      CHECK_EQ(IsVirtualMethod(), other.IsVirtualMethod());
+    }
+
+    return equals;
+  }
+
  private:
+  inline uint32_t GetAccessFlags() const { return it_.GetMemberAccessFlags(); }
+  inline uint32_t HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
+
+  inline std::string GetName() const {
+    return IsMethod() ? klass_.GetDexFile().GetMethodName(GetMethodId())
+                      : klass_.GetDexFile().GetFieldName(GetFieldId());
+  }
+
+  inline std::string GetSignature() const {
+    return IsMethod() ? klass_.GetDexFile().GetMethodSignature(GetMethodId()).ToString()
+                      : klass_.GetDexFile().GetFieldTypeDescriptor(GetFieldId());
+  }
+
   inline const DexFile::MethodId& GetMethodId() const {
-    DCHECK(it_.IsAtMethod());
+    DCHECK(IsMethod());
     return klass_.GetDexFile().GetMethodId(it_.GetMemberIndex());
   }
 
   inline const DexFile::FieldId& GetFieldId() const {
-    DCHECK(!it_.IsAtMethod());
+    DCHECK(!IsMethod());
     return klass_.GetDexFile().GetFieldId(it_.GetMemberIndex());
   }
 
@@ -159,24 +236,31 @@
 
 class ClassPath FINAL {
  public:
-  explicit ClassPath(const std::vector<std::string>& dex_paths) {
-    OpenDexFiles(dex_paths);
+  ClassPath(const std::vector<std::string>& dex_paths, bool open_writable) {
+    OpenDexFiles(dex_paths, open_writable);
+  }
+
+  template<typename Fn>
+  void ForEachDexClass(Fn fn) {
+    for (auto& dex_file : dex_files_) {
+      for (uint32_t class_idx = 0; class_idx < dex_file->NumClassDefs(); ++class_idx) {
+        DexClass klass(*dex_file, class_idx);
+        fn(klass);
+      }
+    }
   }
 
   template<typename Fn>
   void ForEachDexMember(Fn fn) {
-    for (auto& dex_file : dex_files_) {
-      for (uint32_t class_idx = 0; class_idx < dex_file->NumClassDefs(); ++class_idx) {
-        DexClass klass(*dex_file, class_idx);
-        const uint8_t* klass_data = klass.GetData();
-        if (klass_data != nullptr) {
-          for (ClassDataItemIterator it(*dex_file, klass_data); it.HasNext(); it.Next()) {
-            DexMember member(klass, it);
-            fn(member);
-          }
+    ForEachDexClass([&fn](DexClass& klass) {
+      const uint8_t* klass_data = klass.GetData();
+      if (klass_data != nullptr) {
+        for (ClassDataItemIterator it(klass.GetDexFile(), klass_data); it.HasNext(); it.Next()) {
+          DexMember member(klass, it);
+          fn(member);
         }
       }
-    }
+    });
   }
 
   void UpdateDexChecksums() {
@@ -189,37 +273,231 @@
   }
 
  private:
-  void OpenDexFiles(const std::vector<std::string>& dex_paths) {
+  void OpenDexFiles(const std::vector<std::string>& dex_paths, bool open_writable) {
     ArtDexFileLoader dex_loader;
     std::string error_msg;
-    for (const std::string& filename : dex_paths) {
-      File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
-      CHECK_NE(fd.Fd(), -1) << "Unable to open file '" << filename << "': " << strerror(errno);
 
-      // Memory-map the dex file with MAP_SHARED flag so that changes in memory
-      // propagate to the underlying file. We run dex file verification as if
-      // the dex file was not in boot claass path to check basic assumptions,
-      // such as that at most one of public/private/protected flag is set.
-      // We do those checks here and skip them when loading the processed file
-      // into boot class path.
-      std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
-                                                                 /* location */ filename,
-                                                                 /* verify */ true,
-                                                                 /* verify_checksum */ true,
-                                                                 /* mmap_shared */ true,
-                                                                 &error_msg));
-      CHECK(dex_file.get() != nullptr) << "Open failed for '" << filename << "' " << error_msg;
-      CHECK(dex_file->IsStandardDexFile()) << "Expected a standard dex file '" << filename << "'";
-      CHECK(dex_file->EnableWrite())
-          << "Failed to enable write permission for '" << filename << "'";
-      dex_files_.push_back(std::move(dex_file));
+    if (open_writable) {
+      for (const std::string& filename : dex_paths) {
+        File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
+        CHECK_NE(fd.Fd(), -1) << "Unable to open file '" << filename << "': " << strerror(errno);
+
+        // Memory-map the dex file with MAP_SHARED flag so that changes in memory
+        // propagate to the underlying file. We run dex file verification as if
+        // the dex file was not in boot class path to check basic assumptions,
+        // such as that at most one of public/private/protected flag is set.
+        // We do those checks here and skip them when loading the processed file
+        // into boot class path.
+        std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
+                                                                   /* location */ filename,
+                                                                   /* verify */ true,
+                                                                   /* verify_checksum */ true,
+                                                                   /* mmap_shared */ true,
+                                                                   &error_msg));
+        CHECK(dex_file.get() != nullptr) << "Open failed for '" << filename << "' " << error_msg;
+        CHECK(dex_file->IsStandardDexFile()) << "Expected a standard dex file '" << filename << "'";
+        CHECK(dex_file->EnableWrite())
+            << "Failed to enable write permission for '" << filename << "'";
+        dex_files_.push_back(std::move(dex_file));
+      }
+    } else {
+      for (const std::string& filename : dex_paths) {
+        bool success = dex_loader.Open(filename.c_str(),
+                                       /* location */ filename,
+                                       /* verify */ true,
+                                       /* verify_checksum */ true,
+                                       &error_msg,
+                                       &dex_files_);
+        CHECK(success) << "Open failed for '" << filename << "' " << error_msg;
+      }
     }
   }
 
-  // Opened DEX files. Note that these are opened as `const` but may be written into.
+  // Opened dex files. Note that these are opened as `const` but may be written into.
   std::vector<std::unique_ptr<const DexFile>> dex_files_;
 };
 
+class HierarchyClass FINAL {
+ public:
+  HierarchyClass() {}
+
+  void AddDexClass(const DexClass& klass) {
+    CHECK(dex_classes_.empty() || klass.Equals(dex_classes_.front()));
+    dex_classes_.push_back(klass);
+  }
+
+  void AddExtends(HierarchyClass& parent) {
+    CHECK(!Contains(extends_, &parent));
+    CHECK(!Contains(parent.extended_by_, this));
+    extends_.push_back(&parent);
+    parent.extended_by_.push_back(this);
+  }
+
+  const DexClass& GetOneDexClass() const {
+    CHECK(!dex_classes_.empty());
+    return dex_classes_.front();
+  }
+
+  // See comment on Hierarchy::ForEachResolvableMember.
+  template<typename Fn>
+  bool ForEachResolvableMember(const DexMember& other, Fn fn) {
+    return ForEachResolvableMember_Impl(other, fn) != ResolutionResult::kNotFound;
+  }
+
+ private:
+  // Result of resolution which takes into account whether the member was found
+  // for the first time or not. This is just a performance optimization to prevent
+  // re-visiting previously visited members.
+  // Note that order matters. When accumulating results, we always pick the maximum.
+  enum class ResolutionResult {
+    kNotFound,
+    kFoundOld,
+    kFoundNew,
+  };
+
+  inline ResolutionResult Accumulate(ResolutionResult a, ResolutionResult b) {
+    return static_cast<ResolutionResult>(
+        std::max(static_cast<unsigned>(a), static_cast<unsigned>(b)));
+  }
+
+  template<typename Fn>
+  ResolutionResult ForEachResolvableMember_Impl(const DexMember& other, Fn fn) {
+    // First try to find a member matching `other` in this class.
+    ResolutionResult foundInClass = ForEachMatchingMember(other, fn);
+
+    switch (foundInClass) {
+      case ResolutionResult::kFoundOld:
+        // A matching member was found and previously explored. All subclasses
+        // must have been explored too.
+        break;
+
+      case ResolutionResult::kFoundNew:
+        // A matching member was found and this was the first time it was visited.
+        // If it is a virtual method, visit all methods overriding/implementing it too.
+        if (other.IsVirtualMethod()) {
+          for (HierarchyClass* subclass : extended_by_) {
+            subclass->ForEachOverridingMember(other, fn);
+          }
+        }
+        break;
+
+      case ResolutionResult::kNotFound:
+        // A matching member was not found in this class. Explore the superclasses
+        // and implemented interfaces.
+        for (HierarchyClass* superclass : extends_) {
+          foundInClass = Accumulate(
+              foundInClass, superclass->ForEachResolvableMember_Impl(other, fn));
+        }
+        break;
+    }
+
+    return foundInClass;
+  }
+
+  template<typename Fn>
+  ResolutionResult ForEachMatchingMember(const DexMember& other, Fn fn) {
+    ResolutionResult found = ResolutionResult::kNotFound;
+    for (const DexClass& dex_class : dex_classes_) {
+      const uint8_t* data = dex_class.GetData();
+      if (data != nullptr) {
+        for (ClassDataItemIterator it(dex_class.GetDexFile(), data); it.HasNext(); it.Next()) {
+          DexMember member(dex_class, it);
+          if (member == other) {
+            found = Accumulate(found, fn(member) ? ResolutionResult::kFoundNew
+                                                 : ResolutionResult::kFoundOld);
+          }
+        }
+      }
+    }
+    return found;
+  }
+
+  template<typename Fn>
+  void ForEachOverridingMember(const DexMember& other, Fn fn) {
+    CHECK(other.IsVirtualMethod());
+    ResolutionResult found = ForEachMatchingMember(other, fn);
+    if (found == ResolutionResult::kFoundOld) {
+      // No need to explore further.
+      return;
+    } else {
+      for (HierarchyClass* subclass : extended_by_) {
+        subclass->ForEachOverridingMember(other, fn);
+      }
+    }
+  }
+
+  // DexClass entries of this class found across all the provided dex files.
+  std::vector<DexClass> dex_classes_;
+
+  // Classes which this class inherits, or interfaces which it implements.
+  std::vector<HierarchyClass*> extends_;
+
+  // Classes which inherit from this class.
+  std::vector<HierarchyClass*> extended_by_;
+};
+
+class Hierarchy FINAL {
+ public:
+  explicit Hierarchy(ClassPath& class_path) : class_path_(class_path) {
+    BuildClassHierarchy();
+  }
+
+  // Perform an operation for each member of the hierarchy which could potentially
+  // be the result of method/field resolution of `other`.
+  // The function `fn` should accept a DexMember reference and return true if
+  // the member was changed. This drives a performance optimization which only
+  // visits overriding members the first time the overridden member is visited.
+  // Returns true if at least one resolvable member was found.
+  template<typename Fn>
+  bool ForEachResolvableMember(const DexMember& other, Fn fn) {
+    HierarchyClass* klass = FindClass(other.GetDeclaringClass().GetDescriptor());
+    return (klass != nullptr) && klass->ForEachResolvableMember(other, fn);
+  }
+
+ private:
+  HierarchyClass* FindClass(const std::string& descriptor) {
+    auto it = classes_.find(descriptor);
+    if (it == classes_.end()) {
+      return nullptr;
+    } else {
+      return &it->second;
+    }
+  }
+
+  void BuildClassHierarchy() {
+    // Create one HierarchyClass entry in `classes_` per class descriptor
+    // and add all DexClass objects with the same descriptor to that entry.
+    class_path_.ForEachDexClass([this](DexClass& klass) {
+      classes_[klass.GetDescriptor()].AddDexClass(klass);
+    });
+
+    // Connect each HierarchyClass to its successors and predecessors.
+    for (auto& entry : classes_) {
+      HierarchyClass& klass = entry.second;
+      const DexClass& dex_klass = klass.GetOneDexClass();
+
+      if (!dex_klass.HasSuperclass()) {
+        CHECK(dex_klass.GetInterfaceDescriptors().empty())
+            << "java/lang/Object should not implement any interfaces";
+        continue;
+      }
+
+      HierarchyClass* superclass = FindClass(dex_klass.GetSuperclassDescriptor());
+      CHECK(superclass != nullptr);
+      klass.AddExtends(*superclass);
+
+      for (const std::string& iface_desc : dex_klass.GetInterfaceDescriptors()) {
+        HierarchyClass* iface = FindClass(iface_desc);
+        CHECK(iface != nullptr);
+        klass.AddExtends(*iface);
+      }
+    }
+  }
+
+  ClassPath& class_path_;
+  std::map<std::string, HierarchyClass> classes_;
+};
+
 class HiddenApi FINAL {
  public:
   HiddenApi() {}
@@ -229,14 +507,16 @@
     case Command::kEncode:
       EncodeAccessFlags();
       break;
+    case Command::kList:
+      ListApi();
+      break;
     }
   }
 
  private:
   enum class Command {
-    // Currently just one command. A "list" command will be added for generating
-    // a full list of boot class members.
     kEncode,
+    kList,
   };
 
   Command ParseArgs(int argc, char** argv) {
@@ -262,6 +542,22 @@
           }
         }
         return Command::kEncode;
+      } else if (command == "list") {
+        for (int i = 1; i < argc; ++i) {
+          const StringPiece option(argv[i]);
+          if (option.starts_with("--boot-dex=")) {
+            boot_dex_paths_.push_back(option.substr(strlen("--boot-dex=")).ToString());
+          } else if (option.starts_with("--stub-dex=")) {
+            stub_dex_paths_.push_back(option.substr(strlen("--stub-dex=")).ToString());
+          } else if (option.starts_with("--out-public=")) {
+            out_public_path_ = option.substr(strlen("--out-public=")).ToString();
+          } else if (option.starts_with("--out-private=")) {
+            out_private_path_ = option.substr(strlen("--out-private=")).ToString();
+          } else {
+            Usage("Unknown argument '%s'", option.data());
+          }
+        }
+        return Command::kList;
       } else {
         Usage("Unknown command '%s'", command.data());
       }
@@ -282,7 +578,7 @@
     OpenApiFile(blacklist_path_, api_list, HiddenApiAccessFlags::kBlacklist);
 
     // Open all dex files.
-    ClassPath boot_class_path(boot_dex_paths_);
+    ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ true);
 
     // Set access flags of all members.
     boot_class_path.ForEachDexMember([&api_list](DexMember& boot_member) {
@@ -315,13 +611,90 @@
     api_file.close();
   }
 
+  void ListApi() {
+    if (boot_dex_paths_.empty()) {
+      Usage("No boot DEX files specified");
+    } else if (stub_dex_paths_.empty()) {
+      Usage("No stub DEX files specified");
+    } else if (out_public_path_.empty()) {
+      Usage("No public API output path specified");
+    } else if (out_private_path_.empty()) {
+      Usage("No private API output path specified");
+    }
+
+    // Complete list of boot class path members. The associated boolean states
+    // whether it is public (true) or private (false).
+    std::map<std::string, bool> boot_members;
+
+    // Deduplicate errors before printing them.
+    std::set<std::string> unresolved;
+
+    // Open all dex files.
+    ClassPath stub_class_path(stub_dex_paths_, /* open_writable */ false);
+    ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ false);
+    Hierarchy boot_hierarchy(boot_class_path);
+
+    // Mark all boot dex members private.
+    boot_class_path.ForEachDexMember([&boot_members](DexMember& boot_member) {
+      boot_members[boot_member.GetApiEntry()] = false;
+    });
+
+    // Resolve each SDK dex member against the framework and mark it white.
+    stub_class_path.ForEachDexMember(
+        [&boot_hierarchy, &boot_members, &unresolved](DexMember& stub_member) {
+          if (!stub_member.IsVisible()) {
+            // Typically fake constructors and inner-class `this` fields.
+            return;
+          }
+          bool resolved = boot_hierarchy.ForEachResolvableMember(
+              stub_member,
+              [&boot_members](DexMember& boot_member) {
+                std::string entry = boot_member.GetApiEntry();
+                auto it = boot_members.find(entry);
+                CHECK(it != boot_members.end());
+                if (it->second) {
+                  return false;  // has been marked before
+                } else {
+                  boot_members.insert(it, std::make_pair(entry, true));
+                  return true;  // marked for the first time
+                }
+              });
+          if (!resolved) {
+            unresolved.insert(stub_member.GetApiEntry());
+          }
+        });
+
+    // Print errors.
+    for (const std::string& str : unresolved) {
+      LOG(WARNING) << "unresolved: " << str;
+    }
+
+    // Write into public/private API files.
+    std::ofstream file_public(out_public_path_.c_str());
+    std::ofstream file_private(out_private_path_.c_str());
+    for (const std::pair<std::string, bool> entry : boot_members) {
+      if (entry.second) {
+        file_public << entry.first << std::endl;
+      } else {
+        file_private << entry.first << std::endl;
+      }
+    }
+    file_public.close();
+    file_private.close();
+  }
+
   // Paths to DEX files which should be processed.
   std::vector<std::string> boot_dex_paths_;
+  std::vector<std::string> stub_dex_paths_;
 
   // Paths to text files which contain the lists of API members.
   std::string light_greylist_path_;
   std::string dark_greylist_path_;
   std::string blacklist_path_;
+
+  // Paths to text files to which we will output list of all API members.
+  std::string out_public_path_;
+  std::string out_private_path_;
 };
 
 }  // namespace art