Stack walk: Cache CodeInfo and StackMap for current PC.

This speeds up maps startup by 0.15%.
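
The caching is plain memoization: the decoded CodeInfo is keyed on the current
OatQuickMethodHeader and the looked-up StackMap on the current quick frame pc,
so repeated queries against the same frame reuse the decoded data instead of
re-parsing it. A minimal standalone sketch of the pattern, with illustrative
names only (Header, DecodedInfo and Visitor are not the ART types):

    #include <utility>

    struct Header {};               // stands in for OatQuickMethodHeader
    struct DecodedInfo { int x; };  // stands in for the decoded CodeInfo

    class Visitor {
     public:
      // Re-decode only when the frame's method header changes.
      DecodedInfo* GetInfo(const Header* header) const {
        if (cache_.first != header) {
          cache_ = std::make_pair(header, Decode(header));
        }
        return &cache_.second;
      }

     private:
      static DecodedInfo Decode(const Header*) { return DecodedInfo{0}; }
      // Mutable because the cache is refreshed from const getters.
      mutable std::pair<const Header*, DecodedInfo> cache_{nullptr, DecodedInfo{0}};
    };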

Test: test.py -b --host --64 --optimizing
Change-Id: Ic37eeba727148b877f21fdfacfa9f55558db88a7
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 5ec162d..0c1b04e 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -108,6 +108,7 @@
   static constexpr uint32_t kNumColumns = NumColumns;
   static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
 
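+  // Allow default construction so that accessors (such as StackMap) can be
+  // kept as lazily updated cached values.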
+  BitTableAccessor() = default;
   BitTableAccessor(const BitTableBase<kNumColumns>* table, uint32_t row)
       : table_(table), row_(row) {
     DCHECK(table_ != nullptr);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ec89d3f..172fe3e 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -69,6 +69,8 @@
       cur_oat_quick_method_header_(nullptr),
       num_frames_(num_frames),
       cur_depth_(0),
+      cur_inline_info_(nullptr, CodeInfo()),
+      cur_stack_map_(0, StackMap()),
       context_(context),
       check_suspended_(check_suspended) {
   if (check_suspended_) {
@@ -76,15 +78,34 @@
   }
 }
 
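+// Decode the CodeInfo for the current method header and cache it; re-decode
+// only when the header changes between frames.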
+CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
+  DCHECK(!(*cur_quick_frame_)->IsNative());
+  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
+  if (cur_inline_info_.first != header) {
+    cur_inline_info_ = std::make_pair(header, CodeInfo(header, CodeInfo::InlineInfoOnly));
+  }
+  return &cur_inline_info_.second;
+}
+
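+// Look up the StackMap for the current quick frame pc and cache it; re-query
+// only when the pc changes (the decoded CodeInfo comes from the cache above).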
+StackMap* StackVisitor::GetCurrentStackMap() const {
+  DCHECK(!(*cur_quick_frame_)->IsNative());
+  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
+  if (cur_stack_map_.first != cur_quick_frame_pc_) {
+    uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
+    cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
+                                    GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
+  }
+  return &cur_stack_map_.second;
+}
+
 ArtMethod* StackVisitor::GetMethod() const {
   if (cur_shadow_frame_ != nullptr) {
     return cur_shadow_frame_->GetMethod();
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
-      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfo code_info(method_header);
+      CodeInfo* code_info = GetCurrentInlineInfo();
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
-      return GetResolvedMethod(*GetCurrentQuickFrame(), code_info, current_inline_frames_);
+      return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
     } else {
       return *cur_quick_frame_;
     }
@@ -100,6 +121,10 @@
       return current_inline_frames_.back().GetDexPc();
     } else if (cur_oat_quick_method_header_ == nullptr) {
       return dex::kDexNoIndex;
+    } else if (!(*GetCurrentQuickFrame())->IsNative()) {
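+      // Compiled (non-native) managed code: the dex pc can be read directly
+      // from the cached stack map.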
+      StackMap* stack_map = GetCurrentStackMap();
+      DCHECK(stack_map->IsValid());
+      return stack_map->GetDexPc();
     } else {
       return cur_oat_quick_method_header_->ToDexPc(
           GetMethod(), cur_quick_frame_pc_, abort_on_failure);
@@ -819,14 +844,11 @@
             && !method->IsNative()  // JNI methods cannot have any inlined frames.
             && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
           DCHECK_NE(cur_quick_frame_pc_, 0u);
-          current_code_info_ = CodeInfo(cur_oat_quick_method_header_,
-                                        CodeInfo::DecodeFlags::InlineInfoOnly);
-          uint32_t native_pc_offset =
-              cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
-          StackMap stack_map = current_code_info_.GetStackMapForNativePcOffset(native_pc_offset);
-          if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
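+          // Reuse the cached CodeInfo and StackMap instead of re-decoding
+          // them for every walked frame.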
+          CodeInfo* code_info = GetCurrentInlineInfo();
+          StackMap* stack_map = GetCurrentStackMap();
+          if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
             DCHECK_EQ(current_inline_frames_.size(), 0u);
-            for (current_inline_frames_ = current_code_info_.GetInlineInfosOf(stack_map);
+            for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
                  !current_inline_frames_.empty();
                  current_inline_frames_.pop_back()) {
               bool should_continue = VisitFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index ff80d13..aa741df 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -339,6 +339,9 @@
 
   void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);
 
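+  // Lazily decode and cache the CodeInfo and StackMap for the current frame.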
+  ALWAYS_INLINE CodeInfo* GetCurrentInlineInfo() const;
+  ALWAYS_INLINE StackMap* GetCurrentStackMap() const;
+
   Thread* const thread_;
   const StackWalkKind walk_kind_;
   ShadowFrame* cur_shadow_frame_;
@@ -351,9 +354,14 @@
   size_t cur_depth_;
   // Current inlined frames of the method we are currently at.
   // We keep popping frames from the end as we visit the frames.
-  CodeInfo current_code_info_;
   BitTableRange<InlineInfo> current_inline_frames_;
 
+  // Cache the most recently decoded inline info data.
+  // Note that 'current_inline_frames_' above refers to this data, so it needs
+  // to be kept alive anyway.
+  // Marked mutable since the cache fields are updated from const getters.
+  mutable std::pair<const OatQuickMethodHeader*, CodeInfo> cur_inline_info_;
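+  // Cache the StackMap for the current pc, keyed by the raw quick frame pc.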
+  mutable std::pair<uintptr_t, StackMap> cur_stack_map_;
+
  protected:
   Context* const context_;
   const bool check_suspended_;