Move mirror::ArtMethod to native

Optimizing + quick tests are passing, devices boot.

TODO: Test and fix bugs in mips64.

Saves 16 bytes per ArtMethod in most cases, a 7.5MB reduction in system PSS.
Some of the savings are from removal of virtual methods and direct
methods object arrays.

Bug: 19264997
Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 09b56a1..4110887 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,13 +17,15 @@
 #include "stack.h"
 
 #include "arch/context.h"
+#include "art_method-inl.h"
 #include "base/hex_dump.h"
 #include "entrypoints/entrypoint_utils-inl.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc_map.h"
-#include "mirror/art_method-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
+#include "linear_alloc.h"
 #include "mirror/class-inl.h"
-#include "mirror/object.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "quick/quick_method_frame_info.h"
@@ -35,8 +37,10 @@
 
 namespace art {
 
+static constexpr bool kDebugStackWalk = false;
+
 mirror::Object* ShadowFrame::GetThisObject() const {
-  mirror::ArtMethod* m = GetMethod();
+  ArtMethod* m = GetMethod();
   if (m->IsStatic()) {
     return nullptr;
   } else if (m->IsNative()) {
@@ -50,7 +54,7 @@
 }
 
 mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
-  mirror::ArtMethod* m = GetMethod();
+  ArtMethod* m = GetMethod();
   if (m->IsStatic()) {
     return nullptr;
   } else {
@@ -105,8 +109,8 @@
   DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
 }
 
-InlineInfo StackVisitor::GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::ArtMethod* outer_method = GetCurrentQuickFrame()->AsMirrorPtr();
+InlineInfo StackVisitor::GetCurrentInlineInfo() const {
+  ArtMethod* outer_method = *GetCurrentQuickFrame();
   uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
   CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
@@ -114,7 +118,7 @@
   return code_info.GetInlineInfoOf(stack_map);
 }
 
-mirror::ArtMethod* StackVisitor::GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ArtMethod* StackVisitor::GetMethod() const {
   if (cur_shadow_frame_ != nullptr) {
     return cur_shadow_frame_->GetMethod();
   } else if (cur_quick_frame_ != nullptr) {
@@ -124,13 +128,12 @@
       uint32_t method_index = inline_info.GetMethodIndexAtDepth(depth_in_stack_map);
       InvokeType invoke_type =
           static_cast<InvokeType>(inline_info.GetInvokeTypeAtDepth(depth_in_stack_map));
-      return GetResolvedMethod(GetCurrentQuickFrame()->AsMirrorPtr(), method_index, invoke_type);
+      return GetResolvedMethod(*GetCurrentQuickFrame(), method_index, invoke_type);
     } else {
-      return cur_quick_frame_->AsMirrorPtr();
+      return *cur_quick_frame_;
     }
-  } else {
-    return nullptr;
   }
+  return nullptr;
 }
 
 uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
@@ -148,11 +151,12 @@
   }
 }
 
-extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 mirror::Object* StackVisitor::GetThisObject() const {
-  mirror::ArtMethod* m = GetMethod();
+  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
+  ArtMethod* m = GetMethod();
   if (m->IsStatic()) {
     return nullptr;
   } else if (m->IsNative()) {
@@ -191,7 +195,7 @@
   return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
 }
 
-bool StackVisitor::IsReferenceVReg(mirror::ArtMethod* m, uint16_t vreg) {
+bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
   // Process register map (which native and runtime methods don't have)
   if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
     return false;
@@ -218,8 +222,7 @@
   return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
 }
 
-bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
-                           uint32_t* val) const {
+bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
     DCHECK(m == GetMethod());
@@ -235,7 +238,7 @@
   }
 }
 
-bool StackVisitor::GetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                         uint32_t* val) const {
   const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
   DCHECK(code_pointer != nullptr);
@@ -258,7 +261,7 @@
   }
 }
 
-bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
+bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                             uint32_t* val) const {
   DCHECK_EQ(m, GetMethod());
   const DexFile::CodeItem* code_item = m->GetCodeItem();
@@ -267,7 +270,7 @@
   uint16_t number_of_dex_registers = code_item->registers_size_;
   DCHECK_LT(vreg, code_item->registers_size_);
 
-  mirror::ArtMethod* outer_method = GetCurrentQuickFrame()->AsMirrorPtr();
+  ArtMethod* outer_method = *GetCurrentQuickFrame();
   const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
   DCHECK(code_pointer != nullptr);
   CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
@@ -332,7 +335,7 @@
   return true;
 }
 
-bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                VRegKind kind_hi, uint64_t* val) const {
   if (kind_lo == kLongLoVReg) {
     DCHECK_EQ(kind_hi, kLongHiVReg);
@@ -357,7 +360,7 @@
   }
 }
 
-bool StackVisitor::GetVRegPairFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                             VRegKind kind_hi, uint64_t* val) const {
   const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
   DCHECK(code_pointer != nullptr);
@@ -384,7 +387,7 @@
   }
 }
 
-bool StackVisitor::GetVRegPairFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
+bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                 VRegKind kind_lo, VRegKind kind_hi,
                                                 uint64_t* val) const {
   uint32_t low_32bits;
@@ -416,7 +419,7 @@
   return true;
 }
 
-bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                            VRegKind kind) {
   if (cur_quick_frame_ != nullptr) {
       DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
@@ -432,7 +435,7 @@
     }
 }
 
-bool StackVisitor::SetVRegFromQuickCode(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
+bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                                         VRegKind kind) {
   DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
   DCHECK(m == GetMethod());
@@ -490,7 +493,7 @@
   return true;
 }
 
-bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                                VRegKind kind_lo, VRegKind kind_hi) {
   if (kind_lo == kLongLoVReg) {
     DCHECK_EQ(kind_hi, kLongHiVReg);
@@ -515,7 +518,7 @@
 }
 
 bool StackVisitor::SetVRegPairFromQuickCode(
-    mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
+    ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
   const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
   DCHECK(code_pointer != nullptr);
   const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
@@ -631,7 +634,7 @@
   return visitor.frames;
 }
 
-bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc) {
+bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
   struct HasMoreFramesVisitor : public StackVisitor {
     HasMoreFramesVisitor(Thread* thread,
                          StackWalkKind walk_kind,
@@ -647,7 +650,7 @@
 
     bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
       if (found_frame_) {
-        mirror::ArtMethod* method = GetMethod();
+        ArtMethod* method = GetMethod();
         if (method != nullptr && !method->IsRuntimeMethod()) {
           has_more_frames_ = true;
           next_method_ = method;
@@ -663,7 +666,7 @@
     size_t frame_height_;
     bool found_frame_;
     bool has_more_frames_;
-    mirror::ArtMethod* next_method_;
+    ArtMethod* next_method_;
     uint32_t next_dex_pc_;
   };
   HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
@@ -689,7 +692,7 @@
 
 std::string StackVisitor::DescribeLocation() const {
   std::string result("Visiting method '");
-  mirror::ArtMethod* m = GetMethod();
+  ArtMethod* m = GetMethod();
   if (m == nullptr) {
     return "upcall";
   }
@@ -709,8 +712,34 @@
 
 void StackVisitor::SanityCheckFrame() const {
   if (kIsDebugBuild) {
-    mirror::ArtMethod* method = GetMethod();
-    CHECK_EQ(method->GetClass(), mirror::ArtMethod::GetJavaLangReflectArtMethod());
+    ArtMethod* method = GetMethod();
+    auto* declaring_class = method->GetDeclaringClass();
+    // Runtime methods have null declaring class.
+    if (!method->IsRuntimeMethod()) {
+      CHECK(declaring_class != nullptr);
+      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
+          << declaring_class;
+    } else {
+      CHECK(declaring_class == nullptr);
+    }
+    auto* runtime = Runtime::Current();
+    auto* la = runtime->GetLinearAlloc();
+    if (!la->Contains(method)) {
+      // Check image space.
+      bool in_image = false;
+      for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
+        if (space->IsImageSpace()) {
+          auto* image_space = space->AsImageSpace();
+          const auto& header = image_space->GetImageHeader();
+          const auto* methods = &header.GetMethodsSection();
+          if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
+            in_image = true;
+            break;
+          }
+        }
+      }
+      CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
+    }
     if (cur_quick_frame_ != nullptr) {
       method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
       // Frame sanity.
@@ -746,7 +775,7 @@
     if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
       // Can't be both a shadow and a quick fragment.
       DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
-      mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
+      ArtMethod* method = *cur_quick_frame_;
       while (method != nullptr) {
         SanityCheckFrame();
 
@@ -793,8 +822,7 @@
             if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
               // Skip runtime save all callee frames which are used to deliver exceptions.
             } else if (instrumentation_frame.interpreter_entry_) {
-              mirror::ArtMethod* callee =
-                  Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+              ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
               CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
                                             << PrettyMethod(GetMethod());
             } else if (instrumentation_frame.method_ != GetMethod()) {
@@ -813,9 +841,20 @@
         }
         cur_quick_frame_pc_ = return_pc;
         uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
-        cur_quick_frame_ = reinterpret_cast<StackReference<mirror::ArtMethod>*>(next_frame);
+        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
+
+        if (kDebugStackWalk) {
+          LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
+              << " optimized=" << method->IsOptimized(sizeof(void*))
+              << " native=" << method->IsNative()
+              << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
+              << "," << method->GetEntryPointFromJni()
+              << "," << method->GetEntryPointFromInterpreter()
+              << " next=" << *cur_quick_frame_;
+        }
+
         cur_depth_++;
-        method = cur_quick_frame_->AsMirrorPtr();
+        method = *cur_quick_frame_;
       }
     } else if (cur_shadow_frame_ != nullptr) {
       do {
@@ -848,4 +887,42 @@
       visitor->DescribeLocation() << " vreg=" << vreg_;
 }
 
+int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
+                                             uint32_t core_spills, uint32_t fp_spills,
+                                             size_t frame_size, int reg, InstructionSet isa) {
+  size_t pointer_size = InstructionSetPointerSize(isa);
+  if (kIsDebugBuild) {
+    auto* runtime = Runtime::Current();
+    if (runtime != nullptr) {
+      CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
+    }
+  }
+  DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
+  DCHECK_NE(reg, -1);
+  int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
+      + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
+      + sizeof(uint32_t);  // Filler.
+  int num_regs = code_item->registers_size_ - code_item->ins_size_;
+  int temp_threshold = code_item->registers_size_;
+  const int max_num_special_temps = 1;
+  if (reg == temp_threshold) {
+    // The current method pointer corresponds to special location on stack.
+    return 0;
+  } else if (reg >= temp_threshold + max_num_special_temps) {
+    /*
+     * Special temporaries may have custom locations and the logic above deals with that.
+     * However, non-special temporaries are placed relative to the outs.
+     */
+    int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
+    int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
+    return temps_start + relative_offset;
+  }  else if (reg < num_regs) {
+    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
+    return locals_start + (reg * sizeof(uint32_t));
+  } else {
+    // Handle ins.
+    return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
+  }
+}
+
 }  // namespace art