Add an abstraction over compiled code.

This is only step 1: it moves the code-related functions of ArtMethod
to another class. For now, that class is just a wrapper around an
ArtMethod, but it will be changed into a wrapper around compiled code.
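
Call sites now ask the code, rather than the method, for code-related
information. A minimal before/after sketch, adapted from the
Context::FillCalleeSaves changes below:

  // Before: frame layout queried through the method.
  ArtMethod* method = fr.GetMethod();
  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();

  // After: frame layout queried through the new code abstraction.
  ArtCode code = fr.GetCurrentCode();
  const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();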

Change-Id: I6f35fc06d37220558dff61691e51ae20066b0dd6
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index b9d81a7..f5f7748 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,6 +26,7 @@
 #include <vector>
 
 #include "arch/instruction_set_features.h"
+#include "art_code.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/unix_file/fd_file.h"
@@ -54,6 +55,7 @@
 #include "output_stream.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change.h"
+#include "stack_map.h"
 #include "ScopedLocalRef.h"
 #include "thread_list.h"
 #include "verifier/dex_gc_map.h"
@@ -1961,24 +1963,27 @@
     DCHECK(method != nullptr);
     const auto image_pointer_size =
         InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
+    const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
+    const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
+    ArtCode art_code(method);
     if (method->IsNative()) {
-      DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
-      DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+      DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
+      DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
       bool first_occurrence;
-      const void* quick_oat_code = state->GetQuickOatCodeBegin(method);
       uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
-      state->ComputeOatSize(quick_oat_code, &first_occurrence);
+      state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
       if (first_occurrence) {
         state->stats_.native_to_managed_code_bytes += quick_oat_code_size;
       }
-      if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
-        indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code);
+      if (quick_oat_code_begin !=
+            method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
+        indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
       }
     } else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
       method->IsResolutionMethod() || method->IsImtConflictMethod() ||
       method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
-      DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
-      DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+      DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
+      DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
     } else {
       const DexFile::CodeItem* code_item = method->GetCodeItem();
       size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
@@ -1986,29 +1991,27 @@
 
       bool first_occurrence;
       size_t gc_map_bytes = state->ComputeOatSize(
-          method->GetNativeGcMap(image_pointer_size), &first_occurrence);
+          art_code.GetNativeGcMap(image_pointer_size), &first_occurrence);
       if (first_occurrence) {
         state->stats_.gc_map_bytes += gc_map_bytes;
       }
 
       size_t pc_mapping_table_bytes = state->ComputeOatSize(
-          method->GetMappingTable(image_pointer_size), &first_occurrence);
+          art_code.GetMappingTable(image_pointer_size), &first_occurrence);
       if (first_occurrence) {
         state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
       }
 
       size_t vmap_table_bytes = 0u;
-      if (!method->IsOptimized(image_pointer_size)) {
+      if (!art_code.IsOptimized(image_pointer_size)) {
         // Method compiled with the optimizing compiler have no vmap table.
         vmap_table_bytes = state->ComputeOatSize(
-            method->GetVmapTable(image_pointer_size), &first_occurrence);
+            art_code.GetVmapTable(image_pointer_size), &first_occurrence);
         if (first_occurrence) {
           state->stats_.vmap_table_bytes += vmap_table_bytes;
         }
       }
 
-      const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
-      const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
       uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
       state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
       if (first_occurrence) {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 2eb5db1..8fe3fa2 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -19,6 +19,7 @@
 include art/build/Android.common_build.mk
 
 LIBART_COMMON_SRC_FILES := \
+  art_code.cc \
   art_field.cc \
   art_method.cc \
   atomic.cc.arm \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 4a45f49..e676a09 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -39,7 +39,7 @@
     runtime->SetInstructionSet(isa);
     ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
     runtime->SetCalleeSaveMethod(save_method, type);
-    QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+    QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
     EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
         << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
         << frame_info.FpSpillMask() << std::dec;
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 8f6b1ff..d5c7846 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -38,8 +38,8 @@
 }
 
 void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
-  ArtMethod* method = fr.GetMethod();
-  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  ArtCode art_code = fr.GetCurrentCode();
+  const QuickMethodFrameInfo frame_info = art_code.GetQuickFrameInfo();
   int spill_pos = 0;
 
   // Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 4477631..cdc03fe 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -40,8 +40,8 @@
 }
 
 void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
-  ArtMethod* method = fr.GetMethod();
-  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  ArtCode code = fr.GetCurrentCode();
+  const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
   int spill_pos = 0;
 
   // Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 08ab356..dba62d9 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -38,8 +38,8 @@
 }
 
 void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
-  ArtMethod* method = fr.GetMethod();
-  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  ArtCode code = fr.GetCurrentCode();
+  const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
   int spill_pos = 0;
 
   // Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index 2c17f1c..597a74c 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -38,8 +38,8 @@
 }
 
 void Mips64Context::FillCalleeSaves(const StackVisitor& fr) {
-  ArtMethod* method = fr.GetMethod();
-  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  ArtCode code = fr.GetCurrentCode();
+  const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
   int spill_pos = 0;
 
   // Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 987ad60..0d88dd0 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,9 +16,10 @@
 
 #include "context_x86.h"
 
-#include "art_method-inl.h"
+#include "art_code.h"
 #include "base/bit_utils.h"
 #include "quick/quick_method_frame_info.h"
+#include "stack.h"
 
 namespace art {
 namespace x86 {
@@ -37,8 +38,8 @@
 }
 
 void X86Context::FillCalleeSaves(const StackVisitor& fr) {
-  ArtMethod* method = fr.GetMethod();
-  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  ArtCode code = fr.GetCurrentCode();
+  const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
   int spill_pos = 0;
 
   // Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 3dc7d71..12c94bc 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,9 +16,10 @@
 
 #include "context_x86_64.h"
 
-#include "art_method-inl.h"
+#include "art_code.h"
 #include "base/bit_utils.h"
 #include "quick/quick_method_frame_info.h"
+#include "stack.h"
 
 namespace art {
 namespace x86_64 {
@@ -37,8 +38,8 @@
 }
 
 void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
-  ArtMethod* method = fr.GetMethod();
-  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  ArtCode code = fr.GetCurrentCode();
+  const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
   int spill_pos = 0;
 
   // Core registers come first, from the highest down to the lowest.
diff --git a/runtime/art_code.cc b/runtime/art_code.cc
new file mode 100644
index 0000000..b999ec8
--- /dev/null
+++ b/runtime/art_code.cc
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_code.h"
+
+#include "art_method.h"
+#include "art_method-inl.h"
+#include "class_linker.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "handle_scope.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mapping_table.h"
+#include "oat.h"
+#include "runtime.h"
+#include "utils.h"
+
+namespace art {
+
+// Converts a dex PC to a native PC.
+uintptr_t ArtCode::ToNativeQuickPc(const uint32_t dex_pc,
+                                   bool is_for_catch_handler,
+                                   bool abort_on_failure)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+  const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+  if (IsOptimized(sizeof(void*))) {
+    // Optimized code does not have a mapping table. Search for the dex-to-pc
+    // mapping in stack maps.
+    CodeInfo code_info = GetOptimizedCodeInfo();
+    StackMapEncoding encoding = code_info.ExtractEncoding();
+
+    // All stack maps are stored in the same CodeItem section, safepoint stack
+    // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+    // the order of iteration.
+    StackMap stack_map =
+        LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+                                     : code_info.GetStackMapForDexPc(dex_pc, encoding);
+    if (stack_map.IsValid()) {
+      return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+    }
+  } else {
+    MappingTable table((entry_point != nullptr) ? GetMappingTable(sizeof(void*)) : nullptr);
+    if (table.TotalSize() == 0) {
+      DCHECK_EQ(dex_pc, 0U);
+      return 0;   // Special no mapping/pc == 0 case
+    }
+    // Assume the caller wants a dex-to-pc mapping so check here first.
+    typedef MappingTable::DexToPcIterator It;
+    for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+      if (cur.DexPc() == dex_pc) {
+        return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+      }
+    }
+    // Now check pc-to-dex mappings.
+    typedef MappingTable::PcToDexIterator It2;
+    for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+      if (cur.DexPc() == dex_pc) {
+        return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+      }
+    }
+  }
+
+  if (abort_on_failure) {
+    LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+               << " in " << PrettyMethod(method_);
+  }
+  return UINTPTR_MAX;
+}
+
+bool ArtCode::IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Temporary solution for detecting if a method has been optimized: the compiler
+  // does not create a GC map. Instead, the vmap table contains the stack map
+  // (as in stack_map.h).
+  return !method_->IsNative()
+      && method_->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
+      && GetQuickOatEntryPoint(pointer_size) != nullptr
+      && GetNativeGcMap(pointer_size) == nullptr;
+}
+
+CodeInfo ArtCode::GetOptimizedCodeInfo() {
+  DCHECK(IsOptimized(sizeof(void*)));
+  const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(sizeof(void*)));
+  DCHECK(code_pointer != nullptr);
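+  // The OatQuickMethodHeader is laid out immediately before the compiled code, so indexing
+  // the reinterpreted code pointer with [-1] reads this method's header.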
+  uint32_t offset =
+      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
+  const void* data =
+      reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
+  return CodeInfo(data);
+}
+
+uintptr_t ArtCode::NativeQuickPcOffset(const uintptr_t pc) {
+  const void* quick_entry_point = GetQuickOatEntryPoint(sizeof(void*));
+  CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
+  CHECK_EQ(quick_entry_point,
+           Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*)));
+  return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+}
+
+uint32_t ArtCode::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
+  const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+  uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
+  if (IsOptimized(sizeof(void*))) {
+    CodeInfo code_info = GetOptimizedCodeInfo();
+    StackMapEncoding encoding = code_info.ExtractEncoding();
+    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+    if (stack_map.IsValid()) {
+      return stack_map.GetDexPc(encoding);
+    }
+  } else {
+    MappingTable table(entry_point != nullptr ? GetMappingTable(sizeof(void*)) : nullptr);
+    if (table.TotalSize() == 0) {
+      // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
+      // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
+      DCHECK(method_->IsNative() || method_->IsCalleeSaveMethod() || method_->IsProxyMethod())
+          << PrettyMethod(method_);
+      return DexFile::kDexNoIndex;   // Special no mapping case
+    }
+    // Assume the caller wants a pc-to-dex mapping so check here first.
+    typedef MappingTable::PcToDexIterator It;
+    for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+      if (cur.NativePcOffset() == sought_offset) {
+        return cur.DexPc();
+      }
+    }
+    // Now check dex-to-pc mappings.
+    typedef MappingTable::DexToPcIterator It2;
+    for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+      if (cur.NativePcOffset() == sought_offset) {
+        return cur.DexPc();
+      }
+    }
+  }
+  if (abort_on_failure) {
+    LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
+               << " (PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+               << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
+               << ") in " << PrettyMethod(method_);
+  }
+  return DexFile::kDexNoIndex;
+}
+
+const uint8_t* ArtCode::GetNativeGcMap(size_t pointer_size) {
+  const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+  if (code_pointer == nullptr) {
+    return nullptr;
+  }
+  uint32_t offset =
+      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
+  if (UNLIKELY(offset == 0u)) {
+    return nullptr;
+  }
+  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+const uint8_t* ArtCode::GetVmapTable(size_t pointer_size) {
+  CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
+  const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+  if (code_pointer == nullptr) {
+    return nullptr;
+  }
+  uint32_t offset =
+      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
+  if (UNLIKELY(offset == 0u)) {
+    return nullptr;
+  }
+  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+const uint8_t* ArtCode::GetMappingTable(size_t pointer_size) {
+  const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+  if (code_pointer == nullptr) {
+    return nullptr;
+  }
+  uint32_t offset =
+      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
+  if (UNLIKELY(offset == 0u)) {
+    return nullptr;
+  }
+  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+// Counts the number of references in the parameter list of the corresponding method.
+// Note: This does _not_ include "this" for non-static methods.
+static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  uint32_t shorty_len;
+  const char* shorty = method->GetShorty(&shorty_len);
+  uint32_t refs = 0;
+  for (uint32_t i = 1; i < shorty_len; ++i) {
+    if (shorty[i] == 'L') {
+      refs++;
+    }
+  }
+  return refs;
+}
+
+QuickMethodFrameInfo ArtCode::GetQuickFrameInfo() {
+  Runtime* runtime = Runtime::Current();
+
+  if (UNLIKELY(method_->IsAbstract())) {
+    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+  }
+
+  // This goes before IsProxyMethod since runtime methods have a null declaring class.
+  if (UNLIKELY(method_->IsRuntimeMethod())) {
+    return runtime->GetRuntimeMethodFrameInfo(method_);
+  }
+
+  // For proxy methods we add special handling for the direct method case (there is only one
+  // direct method - the constructor). The direct method is cloned from the original
+  // java.lang.reflect.Proxy class together with its code, and as a result it is executed as a
+  // usual quick compiled method without any stubs. So the frame info should be returned as for
+  // a quick method, not a stub. However, if instrumentation stubs are installed,
+  // instrumentation->GetQuickCodeFor() returns artQuickProxyInvokeHandler instead of an oat
+  // code pointer, so we have to add a special case here.
+  if (UNLIKELY(method_->IsProxyMethod())) {
+    if (method_->IsDirect()) {
+      CHECK(method_->IsConstructor());
+      const void* code_pointer =
+          EntryPointToCodePointer(method_->GetEntryPointFromQuickCompiledCode());
+      return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+    } else {
+      return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+    }
+  }
+
+  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*));
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  // On failure, instead of null we get the quick-generic-jni-trampoline for native methods
+  // (indicating generic JNI) or the quick-to-interpreter-bridge (but not the trampoline)
+  // for non-native methods. And we really shouldn't see a failure for non-native methods here.
+  DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
+
+  if (class_linker->IsQuickGenericJniStub(entry_point)) {
+    // Generic JNI frame.
+    DCHECK(method_->IsNative());
+    uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method_) + 1;
+    size_t scope_size = HandleScope::SizeOf(handle_refs);
+    QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+    // Callee saves + handle scope + method ref + alignment
+    // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
+    size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
+                                sizeof(ArtMethod*) + scope_size, kStackAlignment);
+    return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+  }
+
+  const void* code_pointer = EntryPointToCodePointer(entry_point);
+  return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+}
+
+void ArtCode::AssertPcIsWithinQuickCode(uintptr_t pc) {
+  if (method_->IsNative() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
+    return;
+  }
+  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+    return;
+  }
+  const void* code = method_->GetEntryPointFromQuickCompiledCode();
+  if (code == GetQuickInstrumentationEntryPoint()) {
+    return;
+  }
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  if (class_linker->IsQuickToInterpreterBridge(code) ||
+      class_linker->IsQuickResolutionStub(code)) {
+    return;
+  }
+  // If we are the JIT then we may have just compiled the method after the
+  // IsQuickToInterpreterBridge check.
+  jit::Jit* const jit = Runtime::Current()->GetJit();
+  if (jit != nullptr &&
+      jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+    return;
+  }
+
+  uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
+      EntryPointToCodePointer(code))[-1].code_size_;
+  CHECK(PcIsWithinQuickCode(pc))
+      << PrettyMethod(method_)
+      << " pc=" << std::hex << pc
+      << " code=" << code
+      << " size=" << code_size;
+}
+
+bool ArtCode::PcIsWithinQuickCode(uintptr_t pc) {
+  /*
+   * During a stack walk, a return PC may point past-the-end of the code
+   * in the case that the last instruction is a call that isn't expected to
+   * return.  Thus, we check <= code + GetCodeSize().
+   *
+   * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+   */
+  uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
+      method_->GetEntryPointFromQuickCompiledCode()));
+  if (code == 0) {
+    return pc == 0;
+  }
+  uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+  return code <= pc && pc <= (code + code_size);
+}
+
+const void* ArtCode::GetQuickOatEntryPoint(size_t pointer_size) {
+  if (method_->IsAbstract() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
+    return nullptr;
+  }
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(method_, pointer_size);
+  // On failure, instead of null we get the quick-generic-jni-trampoline for native methods
+  // (indicating generic JNI) or the quick-to-interpreter-bridge (but not the trampoline)
+  // for non-native methods.
+  if (class_linker->IsQuickToInterpreterBridge(code) ||
+      class_linker->IsQuickGenericJniStub(code)) {
+    return nullptr;
+  }
+  return code;
+}
+
+}  // namespace art
diff --git a/runtime/art_code.h b/runtime/art_code.h
new file mode 100644
index 0000000..1d2d898
--- /dev/null
+++ b/runtime/art_code.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ART_CODE_H_
+#define ART_RUNTIME_ART_CODE_H_
+
+#include "base/mutex.h"
+#include "offsets.h"
+#include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
+
+namespace art {
+
+class ArtMethod;
+
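+// Abstraction over the compiled code of a method. For now this only wraps an ArtMethod,
+// but it will be changed into a wrapper around compiled code.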
+class ArtCode FINAL {
+ public:
+  explicit ArtCode(ArtMethod** method) : method_(*method) {}
+  explicit ArtCode(ArtMethod* method) : method_(method) {}
+  ArtCode() : method_(nullptr) {}
+
+  // Converts a dex PC to a native PC.
+  uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
+                            bool is_for_catch_handler,
+                            bool abort_on_failure = true)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Converts a native PC to a dex PC.
+  uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
+  const uint8_t* GetNativeGcMap(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  const uint8_t* GetVmapTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  const uint8_t* GetMappingTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+    return FrameOffset(GetFrameSizeInBytes() - sizeof(void*));
+  }
+
+  template <bool kCheckFrameSize = true>
+  uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
+    uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
+    if (kCheckFrameSize) {
+      DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+    }
+    return result;
+  }
+
+  const void* GetQuickOatEntryPoint(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool PcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+    constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
+    DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
+    return FrameOffset(handle_scope_offset);
+  }
+
+  ArtMethod* GetMethod() const { return method_; }
+
+ private:
+  ArtMethod* method_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ART_CODE_H_
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 632a50f..c415073 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -212,18 +212,6 @@
   return type;
 }
 
-inline uint32_t ArtMethod::GetCodeSize() {
-  DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
-  return GetCodeSize(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
-}
-
-inline uint32_t ArtMethod::GetCodeSize(const void* code) {
-  if (code == nullptr) {
-    return 0u;
-  }
-  return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
-}
-
 inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
   switch (type) {
     case kStatic:
@@ -248,85 +236,6 @@
   }
 }
 
-inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
-  DCHECK(!Runtime::Current()->IsStarted());
-  return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
-}
-
-inline void ArtMethod::SetQuickOatCodeOffset(uint32_t code_offset) {
-  DCHECK(!Runtime::Current()->IsStarted());
-  SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(code_offset));
-}
-
-inline const uint8_t* ArtMethod::GetMappingTable(size_t pointer_size) {
-  const void* code_pointer = GetQuickOatCodePointer(pointer_size);
-  if (code_pointer == nullptr) {
-    return nullptr;
-  }
-  return GetMappingTable(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer, size_t pointer_size) {
-  DCHECK(code_pointer != nullptr);
-  DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
-  uint32_t offset =
-      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
-  if (UNLIKELY(offset == 0u)) {
-    return nullptr;
-  }
-  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-inline const uint8_t* ArtMethod::GetVmapTable(size_t pointer_size) {
-  const void* code_pointer = GetQuickOatCodePointer(pointer_size);
-  if (code_pointer == nullptr) {
-    return nullptr;
-  }
-  return GetVmapTable(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer, size_t pointer_size) {
-  CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
-  DCHECK(code_pointer != nullptr);
-  DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
-  uint32_t offset =
-      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
-  if (UNLIKELY(offset == 0u)) {
-    return nullptr;
-  }
-  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
-  DCHECK(IsOptimized(sizeof(void*)));
-  const void* code_pointer = GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
-  uint32_t offset =
-      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
-  const void* data =
-      reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
-  return CodeInfo(data);
-}
-
-inline const uint8_t* ArtMethod::GetNativeGcMap(size_t pointer_size) {
-  const void* code_pointer = GetQuickOatCodePointer(pointer_size);
-  if (code_pointer == nullptr) {
-    return nullptr;
-  }
-  return GetNativeGcMap(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetNativeGcMap(const void* code_pointer, size_t pointer_size) {
-  DCHECK(code_pointer != nullptr);
-  DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
-  uint32_t offset =
-      reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
-  if (UNLIKELY(offset == 0u)) {
-    return nullptr;
-  }
-  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
 inline bool ArtMethod::IsRuntimeMethod() {
   return dex_method_index_ == DexFile::kDexNoIndex;
 }
@@ -367,20 +276,6 @@
   return result;
 }
 
-inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
-  const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(
-      this, sizeof(void*));
-  return pc - reinterpret_cast<uintptr_t>(code);
-}
-
-inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
-  DCHECK(code_pointer != nullptr);
-  if (kIsDebugBuild && !IsProxyMethod()) {
-    CHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
-  }
-  return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
-}
-
 inline const DexFile* ArtMethod::GetDexFile() {
   return GetDexCache()->GetDexFile();
 }
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 92648b9..f9d9077 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -180,98 +180,6 @@
   return DexFile::kDexNoIndex;
 }
 
-uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
-  const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
-  uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
-  if (IsOptimized(sizeof(void*))) {
-    CodeInfo code_info = GetOptimizedCodeInfo();
-    StackMapEncoding encoding = code_info.ExtractEncoding();
-    StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
-    if (stack_map.IsValid()) {
-      return stack_map.GetDexPc(encoding);
-    }
-  } else {
-    MappingTable table(entry_point != nullptr ?
-        GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
-    if (table.TotalSize() == 0) {
-      // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
-      // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
-      DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
-      return DexFile::kDexNoIndex;   // Special no mapping case
-    }
-    // Assume the caller wants a pc-to-dex mapping so check here first.
-    typedef MappingTable::PcToDexIterator It;
-    for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
-      if (cur.NativePcOffset() == sought_offset) {
-        return cur.DexPc();
-      }
-    }
-    // Now check dex-to-pc mappings.
-    typedef MappingTable::DexToPcIterator It2;
-    for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
-      if (cur.NativePcOffset() == sought_offset) {
-        return cur.DexPc();
-      }
-    }
-  }
-  if (abort_on_failure) {
-      LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
-             << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
-             << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
-             << ") in " << PrettyMethod(this);
-  }
-  return DexFile::kDexNoIndex;
-}
-
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc,
-                                     bool is_for_catch_handler,
-                                     bool abort_on_failure) {
-  const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
-  if (IsOptimized(sizeof(void*))) {
-    // Optimized code does not have a mapping table. Search for the dex-to-pc
-    // mapping in stack maps.
-    CodeInfo code_info = GetOptimizedCodeInfo();
-    StackMapEncoding encoding = code_info.ExtractEncoding();
-
-    // All stack maps are stored in the same CodeItem section, safepoint stack
-    // maps first, then catch stack maps. We use `is_for_catch_handler` to select
-    // the order of iteration.
-    StackMap stack_map =
-        LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
-                                     : code_info.GetStackMapForDexPc(dex_pc, encoding);
-    if (stack_map.IsValid()) {
-      return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
-    }
-  } else {
-    MappingTable table(entry_point != nullptr ?
-        GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
-    if (table.TotalSize() == 0) {
-      DCHECK_EQ(dex_pc, 0U);
-      return 0;   // Special no mapping/pc == 0 case
-    }
-    // Assume the caller wants a dex-to-pc mapping so check here first.
-    typedef MappingTable::DexToPcIterator It;
-    for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
-      if (cur.DexPc() == dex_pc) {
-        return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
-      }
-    }
-    // Now check pc-to-dex mappings.
-    typedef MappingTable::PcToDexIterator It2;
-    for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
-      if (cur.DexPc() == dex_pc) {
-        return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
-      }
-    }
-  }
-
-  if (abort_on_failure) {
-    LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
-               << " in " << PrettyMethod(this);
-  }
-  return UINTPTR_MAX;
-}
-
 uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
                                    uint32_t dex_pc, bool* has_no_move_exception) {
   const DexFile::CodeItem* code_item = GetCodeItem();
@@ -322,76 +230,6 @@
   return found_dex_pc;
 }
 
-void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
-  if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
-    return;
-  }
-  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
-    return;
-  }
-  const void* code = GetEntryPointFromQuickCompiledCode();
-  if (code == GetQuickInstrumentationEntryPoint()) {
-    return;
-  }
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  if (class_linker->IsQuickToInterpreterBridge(code) ||
-      class_linker->IsQuickResolutionStub(code)) {
-    return;
-  }
-  // If we are the JIT then we may have just compiled the method after the
-  // IsQuickToInterpreterBridge check.
-  jit::Jit* const jit = Runtime::Current()->GetJit();
-  if (jit != nullptr &&
-      jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
-    return;
-  }
-  /*
-   * During a stack walk, a return PC may point past-the-end of the code
-   * in the case that the last instruction is a call that isn't expected to
-   * return.  Thus, we check <= code + GetCodeSize().
-   *
-   * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
-   */
-  CHECK(PcIsWithinQuickCode(reinterpret_cast<uintptr_t>(code), pc))
-      << PrettyMethod(this)
-      << " pc=" << std::hex << pc
-      << " code=" << code
-      << " size=" << GetCodeSize(
-          EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
-}
-
-bool ArtMethod::IsEntrypointInterpreter() {
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
-  return oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode();
-}
-
-const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
-  if (IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
-    return nullptr;
-  }
-  Runtime* runtime = Runtime::Current();
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
-  // On failure, instead of null we get the quick-generic-jni-trampoline for native method
-  // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
-  // for non-native methods.
-  if (class_linker->IsQuickToInterpreterBridge(code) ||
-      class_linker->IsQuickGenericJniStub(code)) {
-    return nullptr;
-  }
-  return code;
-}
-
-#ifndef NDEBUG
-uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
-  CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
-  CHECK_EQ(quick_entry_point,
-           Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
-  return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-#endif
-
 void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
                        const char* shorty) {
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -435,8 +273,9 @@
 
       // Ensure that we won't be accidentally calling quick compiled code when -Xint.
       if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
-        DCHECK(!runtime->UseJit());
-        CHECK(IsEntrypointInterpreter())
+        CHECK(!runtime->UseJit());
+        const void* oat_quick_code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(this);
+        CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode())
             << "Don't call compiled code when -Xint " << PrettyMethod(this);
       }
 
@@ -480,74 +319,6 @@
   self->PopManagedStackFragment(fragment);
 }
 
-// Counts the number of references in the parameter list of the corresponding method.
-// Note: Thus does _not_ include "this" for non-static methods.
-static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  uint32_t shorty_len;
-  const char* shorty = method->GetShorty(&shorty_len);
-  uint32_t refs = 0;
-  for (uint32_t i = 1; i < shorty_len ; ++i) {
-    if (shorty[i] == 'L') {
-      refs++;
-    }
-  }
-  return refs;
-}
-
-QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
-  Runtime* runtime = Runtime::Current();
-
-  if (UNLIKELY(IsAbstract())) {
-    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-  }
-
-  // This goes before IsProxyMethod since runtime methods have a null declaring class.
-  if (UNLIKELY(IsRuntimeMethod())) {
-    return runtime->GetRuntimeMethodFrameInfo(this);
-  }
-
-  // For Proxy method we add special handling for the direct method case  (there is only one
-  // direct method - constructor). Direct method is cloned from original
-  // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
-  // quick compiled method without any stubs. So the frame info should be returned as it is a
-  // quick method not a stub. However, if instrumentation stubs are installed, the
-  // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
-  // oat code pointer, thus we have to add a special case here.
-  if (UNLIKELY(IsProxyMethod())) {
-    if (IsDirect()) {
-      CHECK(IsConstructor());
-      return GetQuickFrameInfo(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
-    } else {
-      return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-    }
-  }
-
-  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  // On failure, instead of null we get the quick-generic-jni-trampoline for native method
-  // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
-  // for non-native methods. And we really shouldn't see a failure for non-native methods here.
-  DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
-
-  if (class_linker->IsQuickGenericJniStub(entry_point)) {
-    // Generic JNI frame.
-    DCHECK(IsNative());
-    uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(this) + 1;
-    size_t scope_size = HandleScope::SizeOf(handle_refs);
-    QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
-    // Callee saves + handle scope + method ref + alignment
-    // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
-    size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
-                                sizeof(ArtMethod*) + scope_size, kStackAlignment);
-    return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
-  }
-
-  const void* code_pointer = EntryPointToCodePointer(entry_point);
-  return GetQuickFrameInfo(code_pointer);
-}
-
 void ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
   CHECK(IsNative()) << PrettyMethod(this);
   CHECK(!IsFastNative()) << PrettyMethod(this);
@@ -590,16 +361,6 @@
   return true;
 }
 
-const uint8_t* ArtMethod::GetQuickenedInfo() {
-  bool found = false;
-  OatFile::OatMethod oat_method =
-      Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
-  if (!found || (oat_method.GetQuickCode() != nullptr)) {
-    return nullptr;
-  }
-  return oat_method.GetVmapTable();
-}
-
 ProfilingInfo* ArtMethod::CreateProfilingInfo() {
   DCHECK(!Runtime::Current()->IsAotCompiler());
   ProfilingInfo* info = ProfilingInfo::Create(this);
@@ -613,4 +374,14 @@
   }
 }
 
+const uint8_t* ArtMethod::GetQuickenedInfo() {
+  bool found = false;
+  OatFile::OatMethod oat_method =
+      Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
+  if (!found || (oat_method.GetQuickCode() != nullptr)) {
+    return nullptr;
+  }
+  return oat_method.GetVmapTable();
+}
+
 }  // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 0315c3a..3c58644 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_ART_METHOD_H_
 #define ART_RUNTIME_ART_METHOD_H_
 
+#include "base/bit_utils.h"
 #include "base/casts.h"
 #include "dex_file.h"
 #include "gc_root.h"
@@ -24,10 +25,8 @@
 #include "method_reference.h"
 #include "modifiers.h"
 #include "mirror/object.h"
-#include "quick/quick_method_frame_info.h"
 #include "read_barrier_option.h"
 #include "stack.h"
-#include "stack_map.h"
 #include "utils.h"
 
 namespace art {
@@ -164,16 +163,6 @@
     SetAccessFlags(GetAccessFlags() | kAccPreverified);
   }
 
-  bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
-    // Temporary solution for detecting if a method has been optimized: the compiler
-    // does not create a GC map. Instead, the vmap table contains the stack map
-    // (as in stack_map.h).
-    return !IsNative()
-        && GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
-        && GetQuickOatCodePointer(pointer_size) != nullptr
-        && GetNativeGcMap(pointer_size) == nullptr;
-  }
-
   bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
 
   uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -280,94 +269,6 @@
                      entry_point_from_quick_compiled_code, pointer_size);
   }
 
-  uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Check whether the given PC is within the quick compiled code associated with this method's
-  // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
-  // debug purposes.
-  bool PcIsWithinQuickCode(uintptr_t pc) {
-    return PcIsWithinQuickCode(
-        reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
-  }
-
-  void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Returns true if the entrypoint points to the interpreter, as
-  // opposed to the compiled code, that is, this method will be
-  // interpretered on invocation.
-  bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_);
-
-  uint32_t GetQuickOatCodeOffset();
-  void SetQuickOatCodeOffset(uint32_t code_offset);
-
-  ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
-    uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
-    // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
-    //       least 2 byte aligned.
-    code &= ~0x1;
-    return reinterpret_cast<const void*>(code);
-  }
-
-  // Actual entry point pointer to compiled oat code or null.
-  const void* GetQuickOatEntryPoint(size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  // Actual pointer to compiled oat code or null.
-  const void* GetQuickOatCodePointer(size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
-  }
-
-  // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
-  const uint8_t* GetMappingTable(size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
-  const uint8_t* GetVmapTable(size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
-  CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
-  const uint8_t* GetNativeGcMap(size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  template <bool kCheckFrameSize = true>
-  uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
-    uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
-    if (kCheckFrameSize) {
-      DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
-    }
-    return result;
-  }
-
-  QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-  QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
-    return GetReturnPcOffset(GetFrameSizeInBytes());
-  }
-
-  FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
-    return FrameOffset(frame_size_in_bytes - sizeof(void*));
-  }
-
-  FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
-    constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
-    DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
-    return FrameOffset(handle_scope_offset);
-  }
-
   void RegisterNative(const void* native_method, bool is_fast)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -429,27 +330,6 @@
 
   bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-#ifdef NDEBUG
-  uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-  }
-#else
-  uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-#endif
-
-  // Converts a native PC to a dex PC.
-  uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Converts a dex PC to a native PC.
-  uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
-                            bool is_for_catch_handler,
-                            bool abort_on_failure = true)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
     return MethodReference(GetDexFile(), GetDexMethodIndex());
   }
@@ -542,6 +422,8 @@
     return ++hotness_count_;
   }
 
+  const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The class we are a part of.
@@ -622,24 +504,6 @@
     }
   }
 
-  // Code points to the start of the quick code.
-  static uint32_t GetCodeSize(const void* code);
-
-  static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
-    if (code == 0) {
-      return pc == 0;
-    }
-    /*
-     * During a stack walk, a return PC may point past-the-end of the code
-     * in the case that the last instruction is a call that isn't expected to
-     * return.  Thus, we check <= code + GetCodeSize().
-     *
-     * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
-     */
-    return code <= pc && pc <= code + GetCodeSize(
-        EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
-  }
-
   DISALLOW_COPY_AND_ASSIGN(ArtMethod);  // Need to use CopyFrom to deal with 32 vs 64 bits.
 };
 
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 7965cd7..e897351 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
 #define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
 
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "gc_map.h"
 #include "scoped_thread_state_change.h"
@@ -53,7 +54,7 @@
 
   void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
       SHARED_REQUIRES(Locks::mutator_lock_) {
-    if (GetMethod()->IsOptimized(sizeof(void*))) {
+    if (GetCurrentCode().IsOptimized(sizeof(void*))) {
       CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
     } else {
       CheckQuickMethod(registers, number_of_references, native_pc_offset);
@@ -64,7 +65,7 @@
   void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
       SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
-    CodeInfo code_info = m->GetOptimizedCodeInfo();
+    CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
     StackMapEncoding encoding = code_info.ExtractEncoding();
     StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
     uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
@@ -108,7 +109,7 @@
   void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
       SHARED_REQUIRES(Locks::mutator_lock_) {
     ArtMethod* m = GetMethod();
-    NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*)));
+    NativePcOffsetToReferenceMap map(GetCurrentCode().GetNativeGcMap(sizeof(void*)));
     const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
     CHECK(ref_bitmap);
     for (int i = 0; i < number_of_references; ++i) {
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f66628d..21e4e44 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -34,6 +34,7 @@
 #include "mirror/throwable.h"
 #include "nth_caller_visitor.h"
 #include "runtime.h"
+#include "stack_map.h"
 #include "thread.h"
 
 namespace art {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index f193999..17e6aac 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -16,6 +16,7 @@
 
 #include "entrypoints/entrypoint_utils.h"
 
+#include "art_code.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/mutex.h"
@@ -358,16 +359,17 @@
   const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
   auto** caller_sp = reinterpret_cast<ArtMethod**>(
       reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
+  ArtCode current_code = GetCallingCodeFrom(caller_sp);
   ArtMethod* outer_method = *caller_sp;
   ArtMethod* caller = outer_method;
 
-  if ((outer_method != nullptr) && outer_method->IsOptimized(sizeof(void*))) {
+  if ((outer_method != nullptr) && current_code.IsOptimized(sizeof(void*))) {
     const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
     uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
         (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
     if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
-      uintptr_t native_pc_offset = outer_method->NativeQuickPcOffset(caller_pc);
-      CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+      uintptr_t native_pc_offset = current_code.NativeQuickPcOffset(caller_pc);
+      CodeInfo code_info = current_code.GetOptimizedCodeInfo();
       StackMapEncoding encoding = code_info.ExtractEncoding();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
       DCHECK(stack_map.IsValid());
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 4217cab..171ace2 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -20,6 +20,7 @@
 #include <jni.h>
 #include <stdint.h>
 
+#include "art_code.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "dex_instruction.h"
@@ -184,6 +185,11 @@
                                      Runtime::CalleeSaveType type,
                                      bool do_caller_check = false);
 
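+// Returns the code of the method whose frame is at `sp` (the frame's ArtMethod* slot).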
+inline ArtCode GetCallingCodeFrom(ArtMethod** sp) {
+  return ArtCode(sp);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index c5492f1..377675e 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "callee_save_frame.h"
 #include "common_throws.h"
@@ -294,7 +295,8 @@
   static mirror::Object* GetProxyThisObject(ArtMethod** sp)
       SHARED_REQUIRES(Locks::mutator_lock_) {
     CHECK((*sp)->IsProxyMethod());
-    CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
+    CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
+             GetCallingCodeFrom(sp).GetFrameSizeInBytes());
     CHECK_GT(kNumQuickGprArgs, 0u);
     constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
     size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -320,12 +322,11 @@
     const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
     ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
         reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
-    ArtMethod* outer_method = *caller_sp;
     uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
-    uintptr_t outer_pc_offset = outer_method->NativeQuickPcOffset(outer_pc);
+    uintptr_t outer_pc_offset = GetCallingCodeFrom(caller_sp).NativeQuickPcOffset(outer_pc);
 
-    if (outer_method->IsOptimized(sizeof(void*))) {
-      CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+    if (GetCallingCodeFrom(caller_sp).IsOptimized(sizeof(void*))) {
+      CodeInfo code_info = GetCallingCodeFrom(caller_sp).GetOptimizedCodeInfo();
       StackMapEncoding encoding = code_info.ExtractEncoding();
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
       DCHECK(stack_map.IsValid());
@@ -336,7 +337,7 @@
         return stack_map.GetDexPc(encoding);
       }
     } else {
-      return outer_method->ToDexPc(outer_pc);
+      return GetCallingCodeFrom(caller_sp).ToDexPc(outer_pc);
     }
   }
 
@@ -841,8 +842,9 @@
       self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
   // Register the top of the managed stack, making stack crawlable.
   DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
-  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
-            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
+  DCHECK_EQ(GetCallingCodeFrom(sp).GetFrameSizeInBytes(),
+            ArtCode(Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs))
+                .GetFrameSizeInBytes())
       << PrettyMethod(proxy_method);
   self->VerifyStack();
   // Start new JNI local reference state.
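In the trampoline, the native-pc-to-dex-pc query now goes through the calling code: optimized code answers it from a stack map, everything else through ToDexPc on the legacy mapping table. Conceptually both encodings reduce to a table keyed by native pc offset; a sketch of that lookup with invented table contents:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <iterator>
    #include <vector>

    struct PcEntry { uint32_t native_off; uint32_t dex_pc; };

    // Find the entry covering a given native pc offset (entries sorted by
    // offset), the way a mapping-table or stack-map search conceptually works.
    uint32_t ToDexPc(const std::vector<PcEntry>& table, uint32_t native_off) {
      auto it = std::upper_bound(
          table.begin(), table.end(), native_off,
          [](uint32_t off, const PcEntry& e) { return off < e.native_off; });
      if (it == table.begin()) return UINT32_MAX;  // no covering entry
      return std::prev(it)->dex_pc;
    }

    int main() {
      std::vector<PcEntry> table = {{0x00, 0}, {0x10, 3}, {0x24, 7}};
      std::cout << ToDexPc(table, 0x18) << '\n';  // 3: last entry at/before 0x18
    }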
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 0b36694..5299394 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -49,7 +49,7 @@
   static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
       NO_THREAD_SAFETY_ANALYSIS {
     ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
-    QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+    QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
     EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
         << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
         << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
@@ -58,8 +58,8 @@
   static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
       NO_THREAD_SAFETY_ANALYSIS {
     ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
-    QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
-    EXPECT_EQ(save_method->GetReturnPcOffset().SizeValue(), pc_offset)
+    QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+    EXPECT_EQ(ArtCode(save_method).GetReturnPcOffset().SizeValue(), pc_offset)
         << "Expected and real pc offset differs for " << type
         << " core spills=" << std::hex << frame_info.CoreSpillMask()
         << " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 9f84bd2..da1d80e 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -169,7 +169,7 @@
   r->SetInstructionSet(kRuntimeISA);
   ArtMethod* save_method = r->CreateCalleeSaveMethod();
   r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
-  QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+  QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
 
   ASSERT_EQ(kStackAlignment, 16U);
   // ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
@@ -187,14 +187,14 @@
   }
 
   fake_stack.push_back(
-      method_g_->ToNativeQuickPc(dex_pc, /* is_catch_handler */ false));  // return pc
+      ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false));  // return pc
 
   // Create/push fake 16byte stack frame for method g
   fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
   fake_stack.push_back(0);
   fake_stack.push_back(0);
   fake_stack.push_back(
-      method_g_->ToNativeQuickPc(dex_pc, /* is_catch_handler */ false));  // return pc
+      ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false));  // return pc
 
   // Create/push fake 16byte stack frame for method f
   fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
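The exception test builds a synthetic quick stack by hand, and the return pcs it pushes now come from ArtCode(method).ToNativeQuickPc rather than the method. A toy version of that fake-frame construction, with the return pc value invented:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Method { int id; };

    int main() {
      Method g{1}, f{2};
      const uintptr_t fake_return_pc = 0x1000;  // invented

      // Mirror the test's layout: a return pc, then a small frame whose
      // first slot is the method pointer, padded to the frame size.
      std::vector<uintptr_t> fake_stack;
      fake_stack.push_back(fake_return_pc);                   // return into g
      fake_stack.push_back(reinterpret_cast<uintptr_t>(&g));  // g's frame base
      fake_stack.push_back(0);
      fake_stack.push_back(0);
      fake_stack.push_back(fake_return_pc);                   // return into f
      fake_stack.push_back(reinterpret_cast<uintptr_t>(&f));  // f's frame base

      assert(reinterpret_cast<Method*>(fake_stack[1])->id == 1);
    }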
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index c3a9627..30a0983 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -20,6 +20,7 @@
 #include <sys/mman.h>
 #include <sys/ucontext.h>
 
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "base/stl_util.h"
 #include "mirror/class.h"
@@ -359,16 +360,17 @@
     return false;
   }
 
+  ArtCode art_code(method_obj);
+
   // We can be certain that this is a method now.  Check if we have a GC map
   // at the return PC address.
   if (true || kIsDebugBuild) {
     VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
-    const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj,
-                                                                                 sizeof(void*));
-    uint32_t sought_offset = return_pc - reinterpret_cast<uintptr_t>(code);
+    uint32_t sought_offset = return_pc -
+        reinterpret_cast<uintptr_t>(art_code.GetQuickOatEntryPoint(sizeof(void*)));
     VLOG(signals) << "pc offset: " << std::hex << sought_offset;
   }
-  uint32_t dexpc = method_obj->ToDexPc(return_pc, false);
+  uint32_t dexpc = art_code.ToDexPc(return_pc, false);
   VLOG(signals) << "dexpc: " << dexpc;
   return !check_dex_pc || dexpc != DexFile::kDexNoIndex;
 }
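The fault handler's debug block now expresses the faulting return pc as an offset into the quick code obtained from ArtCode, instead of asking instrumentation for the code pointer. The computation itself is plain pointer subtraction; a runnable illustration with invented addresses:

    #include <cstdint>
    #include <iostream>

    int main() {
      // What the VLOG block computes: the return pc as an offset into the
      // method's quick code (both addresses invented).
      const uintptr_t code_begin = 0x70000000;
      const uintptr_t return_pc  = 0x70000042;
      uint32_t sought_offset = static_cast<uint32_t>(return_pc - code_begin);
      std::cout << "pc offset: " << std::hex << sought_offset << '\n';  // 42
    }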
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index deada4c..5ff016a 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -19,6 +19,7 @@
 #include <sstream>
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "atomic.h"
 #include "class_linker.h"
@@ -251,7 +252,7 @@
         instrumentation_stack_->insert(it, instrumentation_frame);
         SetReturnPc(instrumentation_exit_pc_);
       }
-      dex_pcs_.push_back(m->ToDexPc(last_return_pc_));
+      dex_pcs_.push_back(GetCurrentCode().ToDexPc(last_return_pc_));
       last_return_pc_ = return_pc;
       ++instrumentation_stack_depth_;
       return true;  // Continue.
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 5df6525..f7913e1 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -22,7 +22,7 @@
 namespace art {
 
 inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+  const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
   if (code == nullptr) {
     return nullptr;
   }
@@ -47,7 +47,7 @@
 }
 
 inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  const void* code = EntryPointToCodePointer(GetQuickCode());
   if (code == nullptr) {
     return 0u;
   }
@@ -55,7 +55,7 @@
 }
 
 inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  const void* code = EntryPointToCodePointer(GetQuickCode());
   if (code == nullptr) {
     return 0u;
   }
@@ -63,7 +63,7 @@
 }
 
 inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  const void* code = EntryPointToCodePointer(GetQuickCode());
   if (code == nullptr) {
     return 0u;
   }
@@ -71,7 +71,7 @@
 }
 
 inline const uint8_t* OatFile::OatMethod::GetGcMap() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+  const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
   if (code == nullptr) {
     return nullptr;
   }
@@ -122,7 +122,7 @@
 }
 
 inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+  const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
   if (code == nullptr) {
     return nullptr;
   }
@@ -134,7 +134,7 @@
 }
 
 inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+  const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
   if (code == nullptr) {
     return nullptr;
   }
@@ -146,7 +146,7 @@
 }
 
 inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
-  const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+  const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
   if (code == nullptr) {
     return 0u;
   }
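Every OatMethod accessor above follows the same recipe: mask the entry point down to a code pointer (now via the free EntryPointToCodePointer), null-check it, then read the method header that sits immediately before the first instruction. A self-contained sketch of that header-before-code layout, with MethodHeader standing in for ART's OatQuickMethodHeader:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Stand-in for OatQuickMethodHeader: metadata placed directly before
    // the compiled code it describes.
    struct MethodHeader {
      uint32_t frame_size;
      uint32_t code_size;
    };

    const MethodHeader* GetHeader(const void* code) {
      if (code == nullptr) return nullptr;  // the null-check each accessor repeats
      return reinterpret_cast<const MethodHeader*>(
          reinterpret_cast<const uint8_t*>(code) - sizeof(MethodHeader));
    }

    int main() {
      // Lay out [header][code...] in one buffer, as an oat file does.
      alignas(4) uint8_t region[sizeof(MethodHeader) + 8] = {};
      MethodHeader hdr{64, 8};
      std::memcpy(region, &hdr, sizeof(hdr));
      const void* code = region + sizeof(MethodHeader);
      std::cout << GetHeader(code)->frame_size << '\n';  // 64
    }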
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 27f8677..364b734 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -29,6 +29,7 @@
 #include "mirror/class.h"
 #include "oat.h"
 #include "os.h"
+#include "utils.h"
 
 namespace art {
 
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 63f43cf..7ba19ab 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -17,6 +17,7 @@
 #include "quick_exception_handler.h"
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "dex_instruction.h"
 #include "entrypoints/entrypoint_utils.h"
@@ -26,6 +27,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/throwable.h"
+#include "stack_map.h"
 #include "verifier/method_verifier.h"
 
 namespace art {
@@ -99,7 +101,7 @@
         exception_handler_->SetHandlerMethod(method);
         exception_handler_->SetHandlerDexPc(found_dex_pc);
         exception_handler_->SetHandlerQuickFramePc(
-            method->ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
+            GetCurrentCode().ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
         exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
         return false;  // End stack walk.
       } else if (UNLIKELY(GetThread()->HasDebuggerShadowFrames())) {
@@ -159,7 +161,7 @@
   // If the handler is in optimized code, we need to set the catch environment.
   if (*handler_quick_frame_ != nullptr &&
       handler_method_ != nullptr &&
-      handler_method_->IsOptimized(sizeof(void*))) {
+      ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*))) {
     SetCatchEnvironmentForOptimizedHandler(&visitor);
   }
 }
@@ -200,14 +202,14 @@
 void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
   DCHECK(!is_deoptimization_);
   DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
-  DCHECK(handler_method_ != nullptr && handler_method_->IsOptimized(sizeof(void*)));
+  DCHECK(handler_method_ != nullptr && ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*)));
 
   if (kDebugExceptionDelivery) {
     self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
   }
 
   const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
-  CodeInfo code_info = handler_method_->GetOptimizedCodeInfo();
+  CodeInfo code_info = ArtCode(handler_quick_frame_).GetOptimizedCodeInfo();
   StackMapEncoding encoding = code_info.ExtractEncoding();
 
   // Find stack map of the throwing instruction.
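Note the handler now builds the code wrapper from the frame slot, ArtCode(handler_quick_frame_), not from handler_method_. That distinction is the point of the abstraction: once code is no longer owned by the method, the frame is the reliable way to name the code that is actually executing. A sketch of a wrapper constructible both ways, again with invented stand-in types:

    #include <cassert>

    struct Method { bool optimized; };

    // The wrapper can be built from a method or from the stack slot holding
    // a frame's Method*; the latter keeps working once a frame's code can no
    // longer be derived from the method alone.
    class Code {
     public:
      explicit Code(Method* m) : method_(m) {}
      explicit Code(Method** frame) : method_(*frame) {}
      bool IsOptimized() const { return method_->optimized; }
     private:
      Method* method_;
    };

    int main() {
      Method m{true};
      Method* frame_slot = &m;  // first word of a quick frame
      assert(Code(&frame_slot).IsOptimized());
    }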
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index f0b3c4e..44a13c9 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@
  public:
   explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
   void Dump(std::ostream& os) const {
-    DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_);
+    DumpNativeStack(os, GetTid(), "\t", nullptr, nullptr, raw_context_);
   }
  private:
   // Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d93a57d..05bb0b6 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,6 +17,7 @@
 #include "stack.h"
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "base/hex_dump.h"
 #include "entrypoints/entrypoint_utils-inl.h"
@@ -110,9 +111,9 @@
 }
 
 InlineInfo StackVisitor::GetCurrentInlineInfo() const {
-  ArtMethod* outer_method = GetOuterMethod();
-  uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
-  CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+  ArtCode outer_code = GetCurrentCode();
+  uint32_t native_pc_offset = outer_code.NativeQuickPcOffset(cur_quick_frame_pc_);
+  CodeInfo code_info = outer_code.GetOptimizedCodeInfo();
   StackMapEncoding encoding = code_info.ExtractEncoding();
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
   DCHECK(stack_map.IsValid());
@@ -142,7 +143,7 @@
       size_t depth_in_stack_map = current_inlining_depth_ - 1;
       return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
     } else {
-      return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+      return GetCurrentCode().ToDexPc(cur_quick_frame_pc_, abort_on_failure);
     }
   } else {
     return 0;
@@ -160,7 +161,8 @@
   } else if (m->IsNative()) {
     if (cur_quick_frame_ != nullptr) {
       HandleScope* hs = reinterpret_cast<HandleScope*>(
-          reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffset().SizeValue());
+          reinterpret_cast<char*>(cur_quick_frame_) +
+            GetCurrentCode().GetHandleScopeOffset().SizeValue());
       return hs->GetReference(0);
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
@@ -190,7 +192,7 @@
 
 size_t StackVisitor::GetNativePcOffset() const {
   DCHECK(!IsShadowFrame());
-  return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
+  return GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
 }
 
 bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
@@ -199,10 +201,10 @@
   if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
     return false;
   }
-  if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+  if (GetCurrentCode().IsOptimized(sizeof(void*))) {
     return true;  // TODO: Implement.
   }
-  const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+  const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
   CHECK(native_gc_map != nullptr) << PrettyMethod(m);
   const DexFile::CodeItem* code_item = m->GetCodeItem();
   // Can't be null or how would we compile its instructions?
@@ -211,9 +213,7 @@
   size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
   const uint8_t* reg_bitmap = nullptr;
   if (num_regs > 0) {
-    Runtime* runtime = Runtime::Current();
-    const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
-    uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+    uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
     reg_bitmap = map.FindBitMap(native_pc_offset);
     DCHECK(reg_bitmap != nullptr);
   }
@@ -252,7 +252,7 @@
     if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
       return true;
     }
-    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+    if (GetCurrentCode().IsOptimized(sizeof(void*))) {
       return GetVRegFromOptimizedCode(m, vreg, kind, val);
     } else {
       return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -266,10 +266,9 @@
 
 bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                         uint32_t* val) const {
-  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
-  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
-  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+  DCHECK_EQ(m, GetMethod());
+  const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+  QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
   uint32_t vmap_offset;
   // TODO: IsInContext stops before spotting floating point registers.
   if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -289,19 +288,16 @@
 
 bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                             uint32_t* val) const {
-  ArtMethod* outer_method = GetOuterMethod();
-  const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
   DCHECK_EQ(m, GetMethod());
   const DexFile::CodeItem* code_item = m->GetCodeItem();
   DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                     // its instructions?
   uint16_t number_of_dex_registers = code_item->registers_size_;
   DCHECK_LT(vreg, code_item->registers_size_);
-  CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+  CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
   StackMapEncoding encoding = code_info.ExtractEncoding();
 
-  uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+  uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
   StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
   DCHECK(stack_map.IsValid());
   size_t depth_in_stack_map = current_inlining_depth_ - 1;
@@ -406,7 +402,7 @@
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
     DCHECK(m == GetMethod());
-    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+    if (GetCurrentCode().IsOptimized(sizeof(void*))) {
       return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
     } else {
       return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -420,10 +416,9 @@
 
 bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                             VRegKind kind_hi, uint64_t* val) const {
-  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
-  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
-  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+  DCHECK_EQ(m, GetMethod());
+  const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+  QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
   uint32_t vmap_offset_lo, vmap_offset_hi;
   // TODO: IsInContext stops before spotting floating point registers.
   if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -482,7 +477,7 @@
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
     DCHECK(m == GetMethod());
-    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+    if (GetCurrentCode().IsOptimized(sizeof(void*))) {
       return false;
     } else {
       return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -497,10 +492,8 @@
                                         VRegKind kind) {
   DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
   DCHECK(m == GetMethod());
-  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
-  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
-  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+  const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+  QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
   uint32_t vmap_offset;
   // TODO: IsInContext stops before spotting floating point registers.
   if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -591,7 +584,7 @@
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
     DCHECK(m == GetMethod());
-    if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+    if (GetCurrentCode().IsOptimized(sizeof(void*))) {
       return false;
     } else {
       return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -605,10 +598,9 @@
 
 bool StackVisitor::SetVRegPairFromQuickCode(
     ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
-  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
-  DCHECK(code_pointer != nullptr);
-  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
-  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+  DCHECK_EQ(m, GetMethod());
+  const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+  QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
   uint32_t vmap_offset_lo, vmap_offset_hi;
   // TODO: IsInContext stops before spotting floating point registers.
   if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -725,14 +717,14 @@
 uintptr_t StackVisitor::GetReturnPc() const {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   DCHECK(sp != nullptr);
-  uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
+  uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
   return *reinterpret_cast<uintptr_t*>(pc_addr);
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
   uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
   CHECK(sp != nullptr);
-  uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
+  uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
   *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
 }
 
@@ -867,9 +859,9 @@
       }
     }
     if (cur_quick_frame_ != nullptr) {
-      method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
+      GetCurrentCode().AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
       // Frame sanity.
-      size_t frame_size = method->GetFrameSizeInBytes();
+      size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
       CHECK_NE(frame_size, 0u);
       // A rough guess at an upper size we expect to see for a frame.
       // 256 registers
@@ -880,7 +872,7 @@
       // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
       const size_t kMaxExpectedFrameSize = 2 * KB;
       CHECK_LE(frame_size, kMaxExpectedFrameSize);
-      size_t return_pc_offset = method->GetReturnPcOffset().SizeValue();
+      size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
       CHECK_LT(return_pc_offset, frame_size);
     }
   }
@@ -906,10 +898,10 @@
         SanityCheckFrame();
 
         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
-            && method->IsOptimized(sizeof(void*))) {
-          CodeInfo code_info = method->GetOptimizedCodeInfo();
+            && GetCurrentCode().IsOptimized(sizeof(void*))) {
+          CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
           StackMapEncoding encoding = code_info.ExtractEncoding();
-          uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
+          uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
           StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
           if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
             InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
@@ -934,9 +926,9 @@
         if (context_ != nullptr) {
           context_->FillCalleeSaves(*this);
         }
-        size_t frame_size = method->GetFrameSizeInBytes();
+        size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
         // Compute PC for next stack frame from return PC.
-        size_t return_pc_offset = method->GetReturnPcOffset(frame_size).SizeValue();
+        size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
         uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
         uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
         if (UNLIKELY(exit_stubs_installed)) {
@@ -966,13 +958,15 @@
             return_pc = instrumentation_frame.return_pc_;
           }
         }
+        ArtCode code = GetCurrentCode();
+
         cur_quick_frame_pc_ = return_pc;
         uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
         cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
 
         if (kDebugStackWalk) {
           LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
-              << " optimized=" << method->IsOptimized(sizeof(void*))
+              << " optimized=" << code.IsOptimized(sizeof(void*))
               << " native=" << method->IsNative()
               << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
               << "," << method->GetEntryPointFromJni()
diff --git a/runtime/stack.h b/runtime/stack.h
index 32a4765..3e0566d 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string>
 
+#include "art_code.h"
 #include "arch/instruction_set.h"
 #include "base/macros.h"
 #include "base/mutex.h"
@@ -717,6 +718,10 @@
     return cur_shadow_frame_;
   }
 
+  bool IsCurrentFrameInInterpreter() const {
+    return cur_shadow_frame_ != nullptr;
+  }
+
   HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
     ArtMethod** sp = GetCurrentQuickFrame();
     // Skip ArtMethod*; handle scope comes next;
@@ -730,6 +735,8 @@
 
   static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
 
+  ArtCode GetCurrentCode() const { return ArtCode(cur_quick_frame_); }
+
  private:
   // Private constructor known in the case that num_frames_ has already been computed.
   StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
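stack.h is where the new surface lands: GetCurrentCode() wraps the current quick frame, and IsCurrentFrameInInterpreter() is just a shadow-frame check that the updated tests later in this patch use to skip compiled-code-only assertions. A minimal mirror of that state, with the Visitor type invented:

    #include <cassert>

    struct ShadowFrame {};
    struct Method {};

    // Mirror of the visitor's two-mode state: a frame is either a quick
    // frame (compiled code) or a shadow frame (interpreter), never both.
    struct Visitor {
      Method** cur_quick_frame = nullptr;
      ShadowFrame* cur_shadow_frame = nullptr;
      bool IsCurrentFrameInInterpreter() const {
        return cur_shadow_frame != nullptr;
      }
    };

    int main() {
      Visitor v;
      ShadowFrame sf;
      v.cur_shadow_frame = &sf;
      assert(v.IsCurrentFrameInInterpreter());
    }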
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f1407a7..8e0c288 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,6 +32,7 @@
 #include <sstream>
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/bit_utils.h"
@@ -66,6 +67,7 @@
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
 #include "stack.h"
+#include "stack_map.h"
 #include "thread_list.h"
 #include "thread-inl.h"
 #include "utils.h"
@@ -1493,7 +1495,9 @@
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
+      ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
+      ArtCode art_code(method);
+      DumpNativeStack(os, GetTid(), "  native: ", method, &art_code);
     }
     DumpJavaStack(os);
   } else {
@@ -2651,7 +2655,7 @@
     } else {
       // Java method.
       // Portable path use DexGcMap and store in Method.native_gc_map_.
-      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
+      const uint8_t* gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
       CHECK(gc_map != nullptr) << PrettyMethod(m);
       verifier::DexPcToReferenceMap dex_gc_map(gc_map);
       uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -2698,13 +2702,11 @@
 
     // Process register map (which native and runtime methods don't have)
     if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
-      if (m->IsOptimized(sizeof(void*))) {
+      if (GetCurrentCode().IsOptimized(sizeof(void*))) {
         auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
             reinterpret_cast<uintptr_t>(cur_quick_frame));
-        Runtime* runtime = Runtime::Current();
-        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
-        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
-        CodeInfo code_info = m->GetOptimizedCodeInfo();
+        uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+        CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
         StackMapEncoding encoding = code_info.ExtractEncoding();
         StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
         DCHECK(map.IsValid());
@@ -2734,7 +2736,7 @@
           }
         }
       } else {
-        const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+        const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
         CHECK(native_gc_map != nullptr) << PrettyMethod(m);
         const DexFile::CodeItem* code_item = m->GetCodeItem();
         // Can't be null or how would we compile its instructions?
@@ -2742,14 +2744,12 @@
         NativePcOffsetToReferenceMap map(native_gc_map);
         size_t num_regs = map.RegWidth() * 8;
         if (num_regs > 0) {
-          Runtime* runtime = Runtime::Current();
-          const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
-          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+          uintptr_t native_pc_offset =
+              GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
           const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
           DCHECK(reg_bitmap != nullptr);
-          const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
-          const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
-          QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+          const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+          QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
           // For all dex registers in the bitmap
           DCHECK(cur_quick_frame != nullptr);
           for (size_t reg = 0; reg < num_regs; ++reg) {
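The root-visiting loop above no longer detours through instrumentation to find the code's entry point; ArtCode supplies the pc offset and vmap table directly. The loop it feeds is a bitmap scan: bit N set in the GC map's register bitmap means vreg N holds a reference at this pc. A runnable version of that scan with invented bitmap contents:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main() {
      const uint8_t reg_bitmap[] = {0b00001010};  // vregs 1 and 3 hold refs
      const size_t num_regs = 8;
      for (size_t reg = 0; reg < num_regs; ++reg) {
        if ((reg_bitmap[reg / 8] >> (reg % 8)) & 1) {
          std::cout << "vreg " << reg << " holds a reference\n";
        }
      }
    }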
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 27dacea..40cd6d3 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,6 +25,7 @@
 #include <unistd.h>
 #include <memory>
 
+#include "art_code.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/stl_util.h"
@@ -1092,7 +1093,7 @@
 #endif
 
 void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
-    ArtMethod* current_method, void* ucontext_ptr) {
+    ArtMethod* current_method, ArtCode* current_code, void* ucontext_ptr) {
 #if __linux__
   // b/18119146
   if (RUNNING_ON_MEMORY_TOOL != 0) {
@@ -1148,8 +1149,8 @@
         try_addr2line = true;
       } else if (
           current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
-          current_method->PcIsWithinQuickCode(it->pc)) {
-        const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
+          current_code->PcIsWithinQuickCode(it->pc)) {
+        const void* start_of_code = current_code->GetQuickOatEntryPoint(sizeof(void*));
         os << JniLongName(current_method) << "+"
            << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
       } else {
@@ -1163,7 +1164,7 @@
     }
   }
 #else
-  UNUSED(os, tid, prefix, current_method, ucontext_ptr);
+  UNUSED(os, tid, prefix, current_method, current_code, ucontext_ptr);
 #endif
 }
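DumpNativeStack grows an ArtCode* parameter in the middle of an all-defaulted list, which is why runtime_linux.cc had to gain an explicit nullptr: the old positional call would try to bind its ucontext argument to the new parameter and no longer compile. A toy showing the same ripple, with all names invented:

    #include <iostream>

    // New defaulted parameter 'code' inserted before 'ucontext'.
    void Dump(const char* prefix = "", int* method = nullptr,
              int* code = nullptr, void* ucontext = nullptr) {
      std::cout << prefix << (code ? "has code" : "no code")
                << (ucontext ? ", has context" : ", no context") << '\n';
    }

    int main() {
      int ctx_storage = 0;
      void* ctx = &ctx_storage;
      // The pre-change call Dump("\t", nullptr, ctx) no longer compiles
      // (void* does not convert to int*), so call sites add a nullptr.
      Dump("\t", nullptr, nullptr, ctx);
    }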
 
diff --git a/runtime/utils.h b/runtime/utils.h
index 19cc462..b67f273 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -33,6 +33,7 @@
 
 namespace art {
 
+class ArtCode;
 class ArtField;
 class ArtMethod;
 class DexFile;
@@ -221,7 +222,7 @@
 
 // Dumps the native stack for thread 'tid' to 'os'.
 void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
-    ArtMethod* current_method = nullptr, void* ucontext = nullptr)
+    ArtMethod* current_method = nullptr, ArtCode* current_code = nullptr, void* ucontext = nullptr)
     NO_THREAD_SAFETY_ANALYSIS;
 
 // Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
@@ -306,6 +307,14 @@
 void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
 void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
 
+static inline const void* EntryPointToCodePointer(const void* entry_point) {
+  uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
+  // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
+  //       least 2 byte aligned.
+  code &= ~0x1;
+  return reinterpret_cast<const void*>(code);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_UTILS_H_
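EntryPointToCodePointer becomes a free function here so it can be used without going through ArtMethod (oat_file-inl.h drops the ArtMethod:: qualifier accordingly). The mask exists because Thumb2 entry points carry the interworking bit: bit 0 set in a branch target selects Thumb state, while the instructions themselves start at the even address; clearing the bit is harmless elsewhere since code is at least 2-byte aligned. A runnable restatement of the helper:

    #include <cassert>
    #include <cstdint>

    const void* EntryPointToCodePointer(const void* entry_point) {
      uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
      code &= ~static_cast<uintptr_t>(0x1);  // strip the Thumb bit
      return reinterpret_cast<const void*>(code);
    }

    int main() {
      auto* thumb_entry = reinterpret_cast<const void*>(0x4001);  // invented
      assert(EntryPointToCodePointer(thumb_entry) ==
             reinterpret_cast<const void*>(0x4000));
    }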
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 285df18..f8d321c 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -22,11 +22,11 @@
 #define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
   int t[] = {__VA_ARGS__}; \
   int t_size = sizeof(t) / sizeof(*t); \
-  uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, \
+  uintptr_t native_quick_pc = GetCurrentCode().ToNativeQuickPc(dex_pc, \
                                                  /* is_catch_handler */ false, \
                                                  abort_if_not_found); \
   if (native_quick_pc != UINTPTR_MAX) { \
-    CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
+    CheckReferences(t, t_size, GetCurrentCode().NativeQuickPcOffset(native_quick_pc)); \
   } \
 } while (false);
 
@@ -49,7 +49,7 @@
       CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1);  // v8: this, v1: x
       CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1);  // v8: this, v3: y, v1: x
       CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
-      if (!m->IsOptimized(sizeof(void*))) {
+      if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
         CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
       }
       CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1);  // v8: this, v3: y, v1: x
@@ -65,7 +65,7 @@
       // Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
       CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
       CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1);  // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
-      if (!m->IsOptimized(sizeof(void*))) {
+      if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
         // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
         CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
         // v5 is removed from the root set because there is a "merge" operation.
@@ -74,7 +74,7 @@
       }
       CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1);  // v8: this, v2: y, v1: x (dead v0: ex)
 
-      if (!m->IsOptimized(sizeof(void*))) {
+      if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
         CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
       }
       CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 9facfdb..0ee2ff9 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -15,6 +15,7 @@
  */
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "jni.h"
 #include "scoped_thread_state_change.h"
@@ -45,10 +46,14 @@
       CHECK_EQ(value, 42u);
 
       bool success = GetVReg(m, 1, kIntVReg, &value);
-      if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+      if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+        CHECK(!success);
+      }
 
       success = GetVReg(m, 2, kIntVReg, &value);
-      if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+      if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+        CHECK(!success);
+      }
 
       CHECK(GetVReg(m, 3, kReferenceVReg, &value));
       CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
@@ -78,10 +83,14 @@
       CHECK_EQ(value, 42u);
 
       bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
-      if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+      if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+        CHECK(!success);
+      }
 
       success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
-      if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+      if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+        CHECK(!success);
+      }
 
       uint32_t value32 = 0;
       CHECK(GetVReg(m, 6, kReferenceVReg, &value32));
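The test updates here and in the two files below share one guard: a property of compiled code is only consulted when the frame is actually running compiled code, hence !IsCurrentFrameInInterpreter() in front of IsOptimized(). The decision the tests encode, reduced to a runnable predicate with an invented Frame type:

    #include <cassert>

    struct Frame { bool interpreted; bool optimized; };

    // GetVReg on an arbitrary integer vreg is expected to fail only in
    // optimized compiled frames; interpreter frames can always answer.
    bool ExpectGetVRegToFail(const Frame& f) {
      return !f.interpreted && f.optimized;
    }

    int main() {
      assert(ExpectGetVRegToFail({false, true}));  // optimized quick frame
      assert(!ExpectGetVRegToFail({true, true}));  // interpreter frame
    }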
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index c21168b..6fcebdb 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -15,6 +15,7 @@
  */
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "jni.h"
 #include "scoped_thread_state_change.h"
@@ -63,7 +64,9 @@
       CHECK_EQ(value, 1u);
 
       bool success = GetVReg(m, 2, kIntVReg, &value);
-      if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+      if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+        CHECK(!success);
+      }
 
       CHECK(GetVReg(m, 3, kReferenceVReg, &value));
       CHECK_EQ(value, 1u);
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 7e9a583..2a56a7f 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -15,6 +15,7 @@
  */
 
 #include "arch/context.h"
+#include "art_code.h"
 #include "art_method-inl.h"
 #include "jni.h"
 #include "scoped_thread_state_change.h"
@@ -43,7 +44,7 @@
       found_method_ = true;
       uint32_t value = 0;
       if (GetCurrentQuickFrame() != nullptr &&
-          m->IsOptimized(sizeof(void*)) &&
+          GetCurrentCode().IsOptimized(sizeof(void*)) &&
           !Runtime::Current()->IsDebuggable()) {
         CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
       } else {