Add JIT

The JIT is currently disabled by default; it is only enabled when
-Xjit is passed in.

The proposed JIT is a method JIT: interpreter instrumentation is used
to request compilation of hot methods asynchronously at runtime.
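
A minimal sketch of the idea, with hypothetical names (the real hooks
live in the interpreter instrumentation and the new jit/ code added
below):

  // Sketch only: per-method hotness counting in a method-entry hook;
  // hot methods are handed to a queue drained by JIT worker threads.
  #include <cstdint>

  struct Method { uint32_t hotness_count = 0; };

  class JitCompileQueue {
   public:
    void Enqueue(Method* method);   // Wakes a worker thread.
    Method* Dequeue();              // Blocks until work is available.
  };

  void OnMethodEntered(Method* method, JitCompileQueue* queue,
                       uint32_t jit_threshold) {
    // The interpreter keeps running the method; compilation happens
    // asynchronously once the counter crosses -Xjitthreshold.
    if (++method->hotness_count == jit_threshold) {
      queue->Enqueue(method);
    }
  }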

JIT options:
-Xjit / -Xnojit
-Xjitcodecachesize:N
-Xjitthreshold:integervalue
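
For example, an illustrative invocation (the threshold value here is
arbitrary):

  dalvikvm -Xjit -Xjitthreshold:1000 -cp classes.dex Main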

The JIT uses a single shared compiler driver, which worker threads
access to compile individual methods.
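
A rough sketch of the worker side, reusing the Method and
JitCompileQueue types from the sketch above (CompilerDriver and
CompiledMethod are the real classes touched in this diff; the helper
functions are placeholders):

  // Sketch only: every worker drains the same queue and funnels its
  // work through the one shared driver; the generated code is then
  // installed in the JIT code cache.
  class CompilerDriver;
  class CompiledMethod;
  CompiledMethod* CompileWithDriver(CompilerDriver* driver, Method* m);
  void InstallInCodeCache(CompiledMethod* compiled);

  void JitWorkerLoop(CompilerDriver* driver, JitCompileQueue* queue) {
    while (Method* method = queue->Dequeue()) {
      if (CompiledMethod* code = CompileWithDriver(driver, method)) {
        InstallInCodeCache(code);
      }
    }
  }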

Added a JIT code cache and data cache, currently sized at 2 MB
capacity by default. Most apps, however, will only fill a small
fraction of this cache.
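
Illustratively, the allocation path of such a fixed-capacity cache
could look like the sketch below (hypothetical shape, not the actual
code cache API): when the region is exhausted the request simply
fails and the method keeps being interpreted.

  // Sketch only: bump allocation out of a fixed region. A real cache
  // would mmap executable memory rather than embed a byte array.
  #include <cstddef>
  #include <cstdint>

  class FixedCodeCache {
   public:
    uint8_t* Reserve(size_t size) {
      if (used_ + size > sizeof(memory_)) {
        return nullptr;  // Full: the method stays interpreted.
      }
      uint8_t* result = memory_ + used_;
      used_ += size;
      return result;
    }

   private:
    size_t used_ = 0;
    uint8_t memory_[2 * 1024 * 1024];  // 2 MB default capacity.
  };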

Added compiler support for compiling interpreter-quickened bytecodes.
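
Quickened instructions no longer carry a dex field or method index, so
the MIR caching code keys them by their mir offset, tagged with a high
bit so they never collide with real indexes. In essence, condensed
from the DoCacheFieldLoweringInfo changes below (invokes use an
analogous kMethodIdxFlagQuickened):

  #include <cstdint>

  static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;

  // Quickened field accesses are keyed by their mir offset; regular
  // ones by their real dex field index.
  uint32_t MakeFieldKey(bool is_quickened, uint32_t mir_offset,
                        uint32_t dex_field_index) {
    return is_quickened ? (mir_offset | kFieldIndexFlagQuickened)
                        : dex_field_index;
  }

  bool IsQuickenedKey(uint32_t key) {
    return (key & kFieldIndexFlagQuickened) != 0u;
  }

  uint32_t MaskedFieldIndex(uint32_t key) {
    return key & ~kFieldIndexFlagQuickened;
  }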

Added test target ART_TEST_JIT=TRUE and --jit for run-test.

TODO:
Clean up the code cache.
Delete compiled methods after they are added to the code cache.
Add more optimizations related to runtime checks, e.g. direct pointers
for invokes.
Add method recompilation.
Move instrumentation to DexFile to improve performance and reduce
memory usage.

Bug: 17950037

Change-Id: Ifa5b2684a2d5059ec5a5210733900aafa3c51bca
diff --git a/compiler/Android.mk b/compiler/Android.mk
index beb34dc..86a27c1 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -79,6 +79,7 @@
 	driver/compiler_driver.cc \
 	driver/compiler_options.cc \
 	driver/dex_compilation_unit.cc \
+	jit/jit_compiler.cc \
 	jni/quick/arm/calling_convention_arm.cc \
 	jni/quick/arm64/calling_convention_arm64.cc \
 	jni/quick/mips/calling_convention_mips.cc \
@@ -161,8 +162,7 @@
   driver/compiler_options.h \
   image_writer.h \
   optimizing/locations.h \
-  utils/arm/constants_arm.h \
-  utils/dex_instruction_utils.h
+  utils/arm/constants_arm.h
 
 # $(1): target or host
 # $(2): ndebug or debug
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1cd78f8..e8354b2 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -52,19 +52,19 @@
     const SwapVector<uint8_t>* code = compiled_method->GetQuickCode();
     uint32_t code_size = code->size();
     CHECK_NE(0u, code_size);
-    const SwapVector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
-    uint32_t vmap_table_offset = vmap_table.empty() ? 0u
-        : sizeof(OatQuickMethodHeader) + vmap_table.size();
+    const SwapVector<uint8_t>* vmap_table = compiled_method->GetVmapTable();
+    uint32_t vmap_table_offset = vmap_table->empty() ? 0u
+        : sizeof(OatQuickMethodHeader) + vmap_table->size();
     const SwapVector<uint8_t>* mapping_table = compiled_method->GetMappingTable();
     bool mapping_table_used = mapping_table != nullptr && !mapping_table->empty();
     size_t mapping_table_size = mapping_table_used ? mapping_table->size() : 0U;
     uint32_t mapping_table_offset = !mapping_table_used ? 0u
-        : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size;
+        : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size;
     const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
     bool gc_map_used = gc_map != nullptr && !gc_map->empty();
     size_t gc_map_size = gc_map_used ? gc_map->size() : 0U;
     uint32_t gc_map_offset = !gc_map_used ? 0u
-        : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size;
+        : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size + gc_map_size;
     OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
                                        compiled_method->GetFrameSizeInBytes(),
                                        compiled_method->GetCoreSpillMask(),
@@ -72,14 +72,14 @@
 
     header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
     std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
-    size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table_size +
+    size_t size = sizeof(method_header) + code_size + vmap_table->size() + mapping_table_size +
         gc_map_size;
     size_t code_offset = compiled_method->AlignCode(size - code_size);
     size_t padding = code_offset - (size - code_size);
     chunk->reserve(padding + size);
     chunk->resize(sizeof(method_header));
     memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
-    chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+    chunk->insert(chunk->begin(), vmap_table->begin(), vmap_table->end());
     if (mapping_table_used) {
       chunk->insert(chunk->begin(), mapping_table->begin(), mapping_table->end());
     }
@@ -212,7 +212,7 @@
   CHECK(method != nullptr);
   TimingLogger timings("CommonTest::CompileMethod", false, false);
   TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
-  compiler_driver_->CompileOne(method, &timings);
+  compiler_driver_->CompileOne(Thread::Current(), method, &timings);
   TimingLogger::ScopedTiming t2("MakeExecutable", &timings);
   MakeExecutable(method);
 }
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 22be28c..1849e7e 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -20,16 +20,29 @@
 namespace art {
 
 CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
-                           const ArrayRef<const uint8_t>& quick_code)
+                           const ArrayRef<const uint8_t>& quick_code, bool owns_code_array)
     : compiler_driver_(compiler_driver), instruction_set_(instruction_set),
-      quick_code_(nullptr) {
+      owns_code_array_(owns_code_array), quick_code_(nullptr) {
   SetCode(&quick_code);
 }
 
 void CompiledCode::SetCode(const ArrayRef<const uint8_t>* quick_code) {
   if (quick_code != nullptr) {
     CHECK(!quick_code->empty());
-    quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
+    if (owns_code_array_) {
+      // If we are supposed to own the code, don't deduplicate it.
+      CHECK(quick_code_ == nullptr);
+      quick_code_ = new SwapVector<uint8_t>(quick_code->begin(), quick_code->end(),
+                                            compiler_driver_->GetSwapSpaceAllocator());
+    } else {
+      quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
+    }
+  }
+}
+
+CompiledCode::~CompiledCode() {
+  if (owns_code_array_) {
+    delete quick_code_;
   }
 }
 
@@ -46,11 +59,11 @@
   return (rhs.quick_code_ == nullptr);
 }
 
-uint32_t CompiledCode::AlignCode(uint32_t offset) const {
+size_t CompiledCode::AlignCode(size_t offset) const {
   return AlignCode(offset, instruction_set_);
 }
 
-uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set) {
+size_t CompiledCode::AlignCode(size_t offset, InstructionSet instruction_set) {
   return RoundUp(offset, GetInstructionSetAlignment(instruction_set));
 }
 
@@ -120,17 +133,39 @@
                                const ArrayRef<const uint8_t>& native_gc_map,
                                const ArrayRef<const uint8_t>& cfi_info,
                                const ArrayRef<LinkerPatch>& patches)
-    : CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
-      core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
-      src_mapping_table_(src_mapping_table == nullptr ?
-          driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
-          driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(src_mapping_table->Arrange()))),
-      mapping_table_(mapping_table.data() == nullptr ?
-          nullptr : driver->DeduplicateMappingTable(mapping_table)),
-      vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
-      gc_map_(native_gc_map.data() == nullptr ? nullptr : driver->DeduplicateGCMap(native_gc_map)),
-      cfi_info_(cfi_info.data() == nullptr ? nullptr : driver->DeduplicateCFIInfo(cfi_info)),
+    : CompiledCode(driver, instruction_set, quick_code, !driver->DedupeEnabled()),
+      owns_arrays_(!driver->DedupeEnabled()),
+      frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
+      fp_spill_mask_(fp_spill_mask),
       patches_(patches.begin(), patches.end(), driver->GetSwapSpaceAllocator()) {
+  if (owns_arrays_) {
+    if (src_mapping_table == nullptr) {
+      src_mapping_table_ = new SwapSrcMap(driver->GetSwapSpaceAllocator());
+    } else {
+      src_mapping_table->Arrange();
+      src_mapping_table_ = new SwapSrcMap(src_mapping_table->begin(), src_mapping_table->end(),
+                                          driver->GetSwapSpaceAllocator());
+    }
+    mapping_table_ = mapping_table.empty() ?
+        nullptr : new SwapVector<uint8_t>(mapping_table.begin(), mapping_table.end(),
+                                          driver->GetSwapSpaceAllocator());
+    vmap_table_ = new SwapVector<uint8_t>(vmap_table.begin(), vmap_table.end(),
+                                          driver->GetSwapSpaceAllocator());
+    gc_map_ = native_gc_map.empty() ? nullptr :
+        new SwapVector<uint8_t>(native_gc_map.begin(), native_gc_map.end(),
+                                driver->GetSwapSpaceAllocator());
+    cfi_info_ = cfi_info.empty() ? nullptr :
+        new SwapVector<uint8_t>(cfi_info.begin(), cfi_info.end(), driver->GetSwapSpaceAllocator());
+  } else {
+    src_mapping_table_ = src_mapping_table == nullptr ?
+        driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
+        driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(src_mapping_table->Arrange()));
+    mapping_table_ = mapping_table.empty() ?
+        nullptr : driver->DeduplicateMappingTable(mapping_table);
+    vmap_table_ = driver->DeduplicateVMapTable(vmap_table);
+    gc_map_ = native_gc_map.empty() ? nullptr : driver->DeduplicateGCMap(native_gc_map);
+    cfi_info_ = cfi_info.empty() ? nullptr : driver->DeduplicateCFIInfo(cfi_info);
+  }
 }
 
 CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
@@ -194,4 +229,14 @@
   alloc.deallocate(m, 1);
 }
 
+CompiledMethod::~CompiledMethod() {
+  if (owns_arrays_) {
+    delete src_mapping_table_;
+    delete mapping_table_;
+    delete vmap_table_;
+    delete gc_map_;
+    delete cfi_info_;
+  }
+}
+
 }  // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 6013507..d6a07f6 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -27,10 +27,6 @@
 #include "utils/array_ref.h"
 #include "utils/swap_space.h"
 
-namespace llvm {
-  class Function;
-}  // namespace llvm
-
 namespace art {
 
 class CompilerDriver;
@@ -39,7 +35,9 @@
  public:
   // For Quick to supply an code blob
   CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
-               const ArrayRef<const uint8_t>& quick_code);
+               const ArrayRef<const uint8_t>& quick_code, bool owns_code_array);
+
+  virtual ~CompiledCode();
 
   InstructionSet GetInstructionSet() const {
     return instruction_set_;
@@ -56,8 +54,8 @@
   // To align an offset from a page-aligned value to make it suitable
   // for code storage. For example on ARM, to ensure that PC relative
   // valu computations work out as expected.
-  uint32_t AlignCode(uint32_t offset) const;
-  static uint32_t AlignCode(uint32_t offset, InstructionSet instruction_set);
+  size_t AlignCode(size_t offset) const;
+  static size_t AlignCode(size_t offset, InstructionSet instruction_set);
 
   // returns the difference between the code address and a usable PC.
   // mainly to cope with kThumb2 where the lower bit must be set.
@@ -78,6 +76,9 @@
 
   const InstructionSet instruction_set_;
 
+  // If we own the code array (means that we free in destructor).
+  const bool owns_code_array_;
+
   // Used to store the PIC code for Quick.
   SwapVector<uint8_t>* quick_code_;
 
@@ -122,6 +123,7 @@
   using std::vector<SrcMapElem, Allocator>::size;
 
   explicit SrcMap() {}
+  explicit SrcMap(const Allocator& alloc) : std::vector<SrcMapElem, Allocator>(alloc) {}
 
   template <class InputIt>
   SrcMap(InputIt first, InputIt last, const Allocator& alloc)
@@ -291,7 +293,7 @@
                  const ArrayRef<const uint8_t>& cfi_info,
                  const ArrayRef<LinkerPatch>& patches = ArrayRef<LinkerPatch>());
 
-  ~CompiledMethod() {}
+  virtual ~CompiledMethod();
 
   static CompiledMethod* SwapAllocCompiledMethod(
       CompilerDriver* driver,
@@ -347,9 +349,9 @@
     return mapping_table_;
   }
 
-  const SwapVector<uint8_t>& GetVmapTable() const {
+  const SwapVector<uint8_t>* GetVmapTable() const {
     DCHECK(vmap_table_ != nullptr);
-    return *vmap_table_;
+    return vmap_table_;
   }
 
   SwapVector<uint8_t> const* GetGcMap() const {
@@ -365,6 +367,8 @@
   }
 
  private:
+  // Whether or not the arrays are owned by the compiled method or dedupe sets.
+  const bool owns_arrays_;
   // For quick code, the size of the activation used by the code.
   const size_t frame_size_in_bytes_;
   // For quick code, a bit mask describing spilled GPR callee-save registers.
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index 54e34ea..b91c3ca 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -142,7 +142,7 @@
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 954e9f1..4d2b8b3 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -143,7 +143,7 @@
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 97ea05a..379c952 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,8 @@
 
 #include "base/arena_object.h"
 #include "base/logging.h"
+#include "dex_instruction_utils.h"
 #include "global_value_numbering.h"
-#include "utils/dex_instruction_utils.h"
 
 namespace art {
 
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index d1c3a6b..566527a 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -96,7 +96,7 @@
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_field_idx_ = def->declaring_field_idx;
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 31dbc60..a89b250 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -416,8 +416,8 @@
   // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
   kAnInvoke | kAnHeavyWeight,
 
-  // 73 UNUSED_73
-  kAnNone,
+  // 73 RETURN_VOID_BARRIER
+  kAnBranch,
 
   // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
   kAnInvoke | kAnHeavyWeight,
@@ -752,88 +752,88 @@
   // E2 USHR_INT_LIT8 vAA, vBB, #+CC
   kAnMath | kAnInt,
 
-  // E3 IGET_VOLATILE
+  // E3 IGET_QUICK
   kAnNone,
 
-  // E4 IPUT_VOLATILE
+  // E4 IGET_WIDE_QUICK
   kAnNone,
 
-  // E5 SGET_VOLATILE
+  // E5 IGET_OBJECT_QUICK
   kAnNone,
 
-  // E6 SPUT_VOLATILE
+  // E6 IPUT_QUICK
   kAnNone,
 
-  // E7 IGET_OBJECT_VOLATILE
+  // E7 IPUT_WIDE_QUICK
   kAnNone,
 
-  // E8 IGET_WIDE_VOLATILE
+  // E8 IPUT_OBJECT_QUICK
   kAnNone,
 
-  // E9 IPUT_WIDE_VOLATILE
-  kAnNone,
-
-  // EA SGET_WIDE_VOLATILE
-  kAnNone,
-
-  // EB SPUT_WIDE_VOLATILE
-  kAnNone,
-
-  // EC BREAKPOINT
-  kAnNone,
-
-  // ED THROW_VERIFICATION_ERROR
-  kAnHeavyWeight | kAnBranch,
-
-  // EE EXECUTE_INLINE
-  kAnNone,
-
-  // EF EXECUTE_INLINE_RANGE
-  kAnNone,
-
-  // F0 INVOKE_OBJECT_INIT_RANGE
+  // E9 INVOKE_VIRTUAL_QUICK
   kAnInvoke | kAnHeavyWeight,
 
-  // F1 RETURN_VOID_BARRIER
-  kAnBranch,
-
-  // F2 IGET_QUICK
-  kAnNone,
-
-  // F3 IGET_WIDE_QUICK
-  kAnNone,
-
-  // F4 IGET_OBJECT_QUICK
-  kAnNone,
-
-  // F5 IPUT_QUICK
-  kAnNone,
-
-  // F6 IPUT_WIDE_QUICK
-  kAnNone,
-
-  // F7 IPUT_OBJECT_QUICK
-  kAnNone,
-
-  // F8 INVOKE_VIRTUAL_QUICK
+  // EA INVOKE_VIRTUAL_RANGE_QUICK
   kAnInvoke | kAnHeavyWeight,
 
-  // F9 INVOKE_VIRTUAL_QUICK_RANGE
-  kAnInvoke | kAnHeavyWeight,
-
-  // FA INVOKE_SUPER_QUICK
-  kAnInvoke | kAnHeavyWeight,
-
-  // FB INVOKE_SUPER_QUICK_RANGE
-  kAnInvoke | kAnHeavyWeight,
-
-  // FC IPUT_OBJECT_VOLATILE
+  // EB IPUT_BOOLEAN_QUICK
   kAnNone,
 
-  // FD SGET_OBJECT_VOLATILE
+  // EC IPUT_BYTE_QUICK
   kAnNone,
 
-  // FE SPUT_OBJECT_VOLATILE
+  // ED IPUT_CHAR_QUICK
+  kAnNone,
+
+  // EE IPUT_SHORT_QUICK
+  kAnNone,
+
+  // EF IGET_BOOLEAN_QUICK
+  kAnNone,
+
+  // F0 IGET_BYTE_QUICK
+  kAnNone,
+
+  // F1 IGET_CHAR_QUICK
+  kAnNone,
+
+  // F2 IGET_SHORT_QUICK
+  kAnNone,
+
+  // F3 UNUSED_F3
+  kAnNone,
+
+  // F4 UNUSED_F4
+  kAnNone,
+
+  // F5 UNUSED_F5
+  kAnNone,
+
+  // F6 UNUSED_F6
+  kAnNone,
+
+  // F7 UNUSED_F7
+  kAnNone,
+
+  // F8 UNUSED_F8
+  kAnNone,
+
+  // F9 UNUSED_F9
+  kAnNone,
+
+  // FA UNUSED_FA
+  kAnNone,
+
+  // FB UNUSED_FB
+  kAnNone,
+
+  // FC UNUSED_FC
+  kAnNone,
+
+  // FD UNUSED_FD
+  kAnNone,
+
+  // FE UNUSED_FE
   kAnNone,
 
   // FF UNUSED_FF
@@ -1203,12 +1203,13 @@
 }
 
 void MIRGraph::DoCacheFieldLoweringInfo() {
+  static constexpr uint32_t kFieldIndexFlagQuickened = 0x80000000;
   // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
   const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 2u;
   ScopedArenaAllocator allocator(&cu_->arena_stack);
-  uint16_t* field_idxs = allocator.AllocArray<uint16_t>(max_refs, kArenaAllocMisc);
-  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(max_refs, kArenaAllocMisc);
-
+  auto* field_idxs = allocator.AllocArray<uint32_t>(max_refs, kArenaAllocMisc);
+  DexMemAccessType* field_types = allocator.AllocArray<DexMemAccessType>(
+      max_refs, kArenaAllocMisc);
   // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
   size_t ifield_pos = 0u;
   size_t sfield_pos = max_refs;
@@ -1221,23 +1222,36 @@
       // Get field index and try to find it among existing indexes. If found, it's usually among
       // the last few added, so we'll start the search from ifield_pos/sfield_pos. Though this
       // is a linear search, it actually performs much better than map based approach.
-      if (IsInstructionIGetOrIPut(mir->dalvikInsn.opcode)) {
-        uint16_t field_idx = mir->dalvikInsn.vC;
+      const bool is_iget_or_iput = IsInstructionIGetOrIPut(mir->dalvikInsn.opcode);
+      const bool is_iget_or_iput_quick = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode);
+      if (is_iget_or_iput || is_iget_or_iput_quick) {
+        uint32_t field_idx;
+        DexMemAccessType access_type;
+        if (is_iget_or_iput) {
+          field_idx = mir->dalvikInsn.vC;
+          access_type = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+        } else {
+          DCHECK(is_iget_or_iput_quick);
+          // Set kFieldIndexFlagQuickened so that we don't deduplicate against non quickened field
+          // indexes.
+          field_idx = mir->offset | kFieldIndexFlagQuickened;
+          access_type = IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode);
+        }
         size_t i = ifield_pos;
         while (i != 0u && field_idxs[i - 1] != field_idx) {
           --i;
         }
         if (i != 0u) {
           mir->meta.ifield_lowering_info = i - 1;
-          DCHECK_EQ(field_types[i - 1], IGetOrIPutMemAccessType(mir->dalvikInsn.opcode));
+          DCHECK_EQ(field_types[i - 1], access_type);
         } else {
           mir->meta.ifield_lowering_info = ifield_pos;
           field_idxs[ifield_pos] = field_idx;
-          field_types[ifield_pos] = IGetOrIPutMemAccessType(mir->dalvikInsn.opcode);
+          field_types[ifield_pos] = access_type;
           ++ifield_pos;
         }
       } else if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
-        uint16_t field_idx = mir->dalvikInsn.vB;
+        auto field_idx = mir->dalvikInsn.vB;
         size_t i = sfield_pos;
         while (i != max_refs && field_idxs[i] != field_idx) {
           ++i;
@@ -1261,7 +1275,12 @@
     DCHECK_EQ(ifield_lowering_infos_.size(), 0u);
     ifield_lowering_infos_.reserve(ifield_pos);
     for (size_t pos = 0u; pos != ifield_pos; ++pos) {
-      ifield_lowering_infos_.push_back(MirIFieldLoweringInfo(field_idxs[pos], field_types[pos]));
+      const uint32_t field_idx = field_idxs[pos];
+      const bool is_quickened = (field_idx & kFieldIndexFlagQuickened) != 0;
+      const uint32_t masked_field_idx = field_idx & ~kFieldIndexFlagQuickened;
+      CHECK_LT(masked_field_idx, 1u << 16);
+      ifield_lowering_infos_.push_back(
+          MirIFieldLoweringInfo(masked_field_idx, field_types[pos], is_quickened));
     }
     MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                    ifield_lowering_infos_.data(), ifield_pos);
@@ -1282,18 +1301,19 @@
 
 void MIRGraph::DoCacheMethodLoweringInfo() {
   static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };
+  static constexpr uint32_t kMethodIdxFlagQuickened = 0x80000000;
 
   // Embed the map value in the entry to avoid extra padding in 64-bit builds.
   struct MapEntry {
     // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
     const MethodReference* devirt_target;
-    uint16_t target_method_idx;
+    uint32_t target_method_idx;
+    uint32_t vtable_idx;
     uint16_t invoke_type;
     // Map value.
     uint32_t lowering_info_index;
   };
 
-  // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
   struct MapEntryComparator {
     bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
       if (lhs.target_method_idx != rhs.target_method_idx) {
@@ -1302,6 +1322,9 @@
       if (lhs.invoke_type != rhs.invoke_type) {
         return lhs.invoke_type < rhs.invoke_type;
       }
+      if (lhs.vtable_idx != rhs.vtable_idx) {
+        return lhs.vtable_idx < rhs.vtable_idx;
+      }
       if (lhs.devirt_target != rhs.devirt_target) {
         if (lhs.devirt_target == nullptr) {
           return true;
@@ -1319,7 +1342,7 @@
   ScopedArenaAllocator allocator(&cu_->arena_stack);
 
   // All INVOKE instructions take 3 code units and there must also be a RETURN.
-  uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
+  const uint32_t max_refs = (GetNumDalvikInsns() - 1u) / 3u;
 
   // Map invoke key (see MapEntry) to lowering info index and vice versa.
   // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
@@ -1330,28 +1353,43 @@
       allocator.AllocArray<const MapEntry*>(max_refs, kArenaAllocMisc);
 
   // Find INVOKE insns and their devirtualization targets.
+  const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     if (bb->block_type != kDalvikByteCode) {
       continue;
     }
     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      if (IsInstructionInvoke(mir->dalvikInsn.opcode)) {
-        // Decode target method index and invoke type.
-        uint16_t target_method_idx = mir->dalvikInsn.vB;
-        DexInvokeType invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
-
+      const bool is_quick_invoke = IsInstructionQuickInvoke(mir->dalvikInsn.opcode);
+      const bool is_invoke = IsInstructionInvoke(mir->dalvikInsn.opcode);
+      if (is_quick_invoke || is_invoke) {
+        uint32_t vtable_index = 0;
+        uint32_t target_method_idx = 0;
+        uint32_t invoke_type_idx = 0;  // Default to virtual (in case of quickened).
+        DCHECK_EQ(invoke_types[invoke_type_idx], kVirtual);
+        if (is_quick_invoke) {
+          // We need to store the vtable index since we can't necessarily recreate it at resolve
+          // phase if the dequickening resolved to an interface method.
+          vtable_index = mir->dalvikInsn.vB;
+          // Fake up the method index by storing the mir offset so that we can read the dequicken
+          // info in resolve.
+          target_method_idx = mir->offset | kMethodIdxFlagQuickened;
+        } else {
+          DCHECK(is_invoke);
+          // Decode target method index and invoke type.
+          invoke_type_idx = InvokeInstructionType(mir->dalvikInsn.opcode);
+          target_method_idx = mir->dalvikInsn.vB;
+        }
         // Find devirtualization target.
         // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
         // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
         // and increment it as needed instead of making O(log n) lookups.
-        const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
         const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);
-
         // Try to insert a new entry. If the insertion fails, we will have found an old one.
         MapEntry entry = {
             devirt_target,
             target_method_idx,
+            vtable_index,
             invoke_types[invoke_type_idx],
             static_cast<uint32_t>(invoke_map.size())
         };
@@ -1362,22 +1400,24 @@
       }
     }
   }
-
   if (invoke_map.empty()) {
     return;
   }
-
   // Prepare unique method infos, set method info indexes for their MIRs.
-  DCHECK_EQ(method_lowering_infos_.size(), 0u);
   const size_t count = invoke_map.size();
   method_lowering_infos_.reserve(count);
   for (size_t pos = 0u; pos != count; ++pos) {
     const MapEntry* entry = sequential_entries[pos];
-    MirMethodLoweringInfo method_info(entry->target_method_idx,
-                                      static_cast<InvokeType>(entry->invoke_type));
+    const bool is_quick = (entry->target_method_idx & kMethodIdxFlagQuickened) != 0;
+    const uint32_t masked_method_idx = entry->target_method_idx & ~kMethodIdxFlagQuickened;
+    MirMethodLoweringInfo method_info(masked_method_idx,
+                                      static_cast<InvokeType>(entry->invoke_type), is_quick);
     if (entry->devirt_target != nullptr) {
       method_info.SetDevirtualizationTarget(*entry->devirt_target);
     }
+    if (is_quick) {
+      method_info.SetVTableIndex(entry->vtable_idx);
+    }
     method_lowering_infos_.push_back(method_info);
   }
   MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index f9f7e22..dfaff6c 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -374,7 +374,7 @@
   // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
   DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
 
-  // 73 UNUSED_73
+  // 73 RETURN_VOID_BARRIER
   DF_NOP,
 
   // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
@@ -710,89 +710,89 @@
   // E2 USHR_INT_LIT8 vAA, vBB, #+CC
   DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
 
-  // E3 IGET_VOLATILE
+  // E3 IGET_QUICK
   DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // E4 IPUT_VOLATILE
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E5 SGET_VOLATILE
-  DF_DA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // E6 SPUT_VOLATILE
-  DF_UA | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // E7 IGET_OBJECT_VOLATILE
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
-
-  // E8 IGET_WIDE_VOLATILE
+  // E4 IGET_WIDE_QUICK
   DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // E9 IPUT_WIDE_VOLATILE
+  // E5 IGET_OBJECT_QUICK
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // E6 IPUT_QUICK
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // E7 IPUT_WIDE_QUICK
   DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // EA SGET_WIDE_VOLATILE
-  DF_DA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // EB SPUT_WIDE_VOLATILE
-  DF_UA | DF_A_WIDE | DF_SFIELD | DF_CLINIT | DF_UMS,
-
-  // EC BREAKPOINT
-  DF_NOP,
-
-  // ED THROW_VERIFICATION_ERROR
-  DF_NOP | DF_UMS,
-
-  // EE EXECUTE_INLINE
-  DF_FORMAT_35C,
-
-  // EF EXECUTE_INLINE_RANGE
-  DF_FORMAT_3RC,
-
-  // F0 INVOKE_OBJECT_INIT_RANGE
-  DF_NOP,
-
-  // F1 RETURN_VOID_BARRIER
-  DF_NOP,
-
-  // F2 IGET_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
-  // F3 IGET_WIDE_QUICK
-  DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
-  // F4 IGET_OBJECT_QUICK
-  DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
-  // F5 IPUT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
-  // F6 IPUT_WIDE_QUICK
-  DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
-  // F7 IPUT_OBJECT_QUICK
-  DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
-
-  // F8 INVOKE_VIRTUAL_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // F9 INVOKE_VIRTUAL_QUICK_RANGE
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // FA INVOKE_SUPER_QUICK
-  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // FB INVOKE_SUPER_QUICK_RANGE
-  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
-  // FC IPUT_OBJECT_VOLATILE
+  // E8 IPUT_OBJECT_QUICK
   DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
 
-  // FD SGET_OBJECT_VOLATILE
-  DF_DA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // E9 INVOKE_VIRTUAL_QUICK
+  DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
 
-  // FE SPUT_OBJECT_VOLATILE
-  DF_UA | DF_REF_A | DF_SFIELD | DF_CLINIT | DF_UMS,
+  // EA INVOKE_VIRTUAL_RANGE_QUICK
+  DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
+
+  // EB IPUT_BOOLEAN_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // EC IPUT_BYTE_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // ED IPUT_CHAR_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // EE IPUT_SHORT_QUICK vA, vB, index
+  DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // EF IGET_BOOLEAN_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // F0 IGET_BYTE_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // F1 IGET_CHAR_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // F2 IGET_SHORT_QUICK vA, vB, index
+  DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
+
+  // F3 UNUSED_F3
+  DF_NOP,
+
+  // F4 UNUSED_F4
+  DF_NOP,
+
+  // F5 UNUSED_F5
+  DF_NOP,
+
+  // F6 UNUSED_F6
+  DF_NOP,
+
+  // F7 UNUSED_F7
+  DF_NOP,
+
+  // F8 UNUSED_F8
+  DF_NOP,
+
+  // F9 UNUSED_F9
+  DF_NOP,
+
+  // FA UNUSED_FA
+  DF_NOP,
+
+  // FB UNUSED_FB
+  DF_NOP,
+
+  // FC UNUSED_FC
+  DF_NOP,
+
+  // FD UNUSED_FD
+  DF_NOP,
+
+  // FE UNUSED_FE
+  DF_NOP,
 
   // FF UNUSED_FF
   DF_NOP,
diff --git a/compiler/dex/mir_field_info.cc b/compiler/dex/mir_field_info.cc
index 53afcad..d2079a2 100644
--- a/compiler/dex/mir_field_info.cc
+++ b/compiler/dex/mir_field_info.cc
@@ -35,8 +35,9 @@
     DCHECK(field_infos != nullptr);
     DCHECK_NE(count, 0u);
     for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-      MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType());
-      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+      MirIFieldLoweringInfo unresolved(it->field_idx_, it->MemAccessType(), it->IsQuickened());
+      unresolved.field_offset_ = it->field_offset_;
+      unresolved.CheckEquals(*it);
     }
   }
 
@@ -49,13 +50,30 @@
       hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
   Handle<mirror::Class> referrer_class(hs.NewHandle(
       compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
+  const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
   // Even if the referrer class is unresolved (i.e. we're compiling a method without class
   // definition) we still want to resolve fields and record all available info.
-
   for (auto it = field_infos, end = field_infos + count; it != end; ++it) {
-    uint32_t field_idx = it->field_idx_;
-    mirror::ArtField* resolved_field =
-        compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit, field_idx, false);
+    uint32_t field_idx;
+    mirror::ArtField* resolved_field;
+    if (!it->IsQuickened()) {
+      field_idx = it->field_idx_;
+      resolved_field = compiler_driver->ResolveField(soa, dex_cache, class_loader, mUnit,
+                                                     field_idx, false);
+    } else {
+      const auto mir_offset = it->field_idx_;
+      // For quickened instructions, it->field_offset_ actually contains the mir offset.
+      // We need to use the de-quickening info to get dex file / field idx
+      auto* field_idx_ptr = verified_method->GetDequickenIndex(mir_offset);
+      CHECK(field_idx_ptr != nullptr);
+      field_idx = field_idx_ptr->index;
+      StackHandleScope<1> hs2(soa.Self());
+      auto h_dex_cache = hs2.NewHandle(compiler_driver->FindDexCache(field_idx_ptr->dex_file));
+      resolved_field = compiler_driver->ResolveFieldWithDexFile(
+          soa, h_dex_cache, class_loader, field_idx_ptr->dex_file, field_idx, false);
+      // Since we don't have a valid field index we can't go slow path later.
+      CHECK(resolved_field != nullptr);
+    }
     if (UNLIKELY(resolved_field == nullptr)) {
       continue;
     }
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 98b2da8..ca56958 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -19,8 +19,8 @@
 
 #include "base/macros.h"
 #include "dex_file.h"
+#include "dex_instruction_utils.h"
 #include "offsets.h"
-#include "utils/dex_instruction_utils.h"
 
 namespace art {
 
@@ -39,6 +39,9 @@
   uint16_t FieldIndex() const {
     return field_idx_;
   }
+  void SetFieldIndex(uint16_t field_idx) {
+    field_idx_ = field_idx;
+  }
 
   bool IsStatic() const {
     return (flags_ & kFlagIsStatic) != 0u;
@@ -51,6 +54,9 @@
   const DexFile* DeclaringDexFile() const {
     return declaring_dex_file_;
   }
+  void SetDeclaringDexFile(const DexFile* dex_file) {
+    declaring_dex_file_ = dex_file;
+  }
 
   uint16_t DeclaringClassIndex() const {
     return declaring_class_idx_;
@@ -64,20 +70,35 @@
     return (flags_ & kFlagIsVolatile) != 0u;
   }
 
+  // IGET_QUICK, IGET_BYTE_QUICK, ...
+  bool IsQuickened() const {
+    return (flags_ & kFlagIsQuickened) != 0u;
+  }
+
   DexMemAccessType MemAccessType() const {
     return static_cast<DexMemAccessType>((flags_ >> kBitMemAccessTypeBegin) & kMemAccessTypeMask);
   }
 
+  void CheckEquals(const MirFieldInfo& other) const {
+    CHECK_EQ(field_idx_, other.field_idx_);
+    CHECK_EQ(flags_, other.flags_);
+    CHECK_EQ(declaring_field_idx_, other.declaring_field_idx_);
+    CHECK_EQ(declaring_class_idx_, other.declaring_class_idx_);
+    CHECK_EQ(declaring_dex_file_, other.declaring_dex_file_);
+  }
+
  protected:
   enum {
     kBitIsStatic = 0,
     kBitIsVolatile,
+    kBitIsQuickened,
     kBitMemAccessTypeBegin,
     kBitMemAccessTypeEnd = kBitMemAccessTypeBegin + 3,  // 3 bits for raw type.
     kFieldInfoBitEnd = kBitMemAccessTypeEnd
   };
   static constexpr uint16_t kFlagIsVolatile = 1u << kBitIsVolatile;
   static constexpr uint16_t kFlagIsStatic = 1u << kBitIsStatic;
+  static constexpr uint16_t kFlagIsQuickened = 1u << kBitIsQuickened;
   static constexpr uint16_t kMemAccessTypeMask = 7u;
   static_assert((1u << (kBitMemAccessTypeEnd - kBitMemAccessTypeBegin)) - 1u == kMemAccessTypeMask,
                 "Invalid raw type mask");
@@ -117,8 +138,10 @@
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
   // Construct an unresolved instance field lowering info.
-  explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type)
-      : MirFieldInfo(field_idx, kFlagIsVolatile, type),  // Without kFlagIsStatic.
+  explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened)
+      : MirFieldInfo(field_idx,
+                     kFlagIsVolatile | (is_quickened ? kFlagIsQuickened : 0u),
+                     type),  // Without kFlagIsStatic.
         field_offset_(0u) {
   }
 
@@ -134,6 +157,11 @@
     return field_offset_;
   }
 
+  void CheckEquals(const MirIFieldLoweringInfo& other) const {
+    MirFieldInfo::CheckEquals(other);
+    CHECK_EQ(field_offset_.Uint32Value(), other.field_offset_.Uint32Value());
+  }
+
  private:
   enum {
     kBitFastGet = kFieldInfoBitEnd,
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 76b5e44..f354a49 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -1673,12 +1673,6 @@
   }
 }
 
-const char* MIRGraph::GetShortyFromTargetIdx(int target_idx) {
-  // TODO: for inlining support, use current code unit.
-  const DexFile::MethodId& method_id = cu_->dex_file->GetMethodId(target_idx);
-  return cu_->dex_file->GetShorty(method_id.proto_idx_);
-}
-
 const char* MIRGraph::GetShortyFromMethodReference(const MethodReference& target_method) {
   const DexFile::MethodId& method_id =
       target_method.dex_file->GetMethodId(target_method.dex_method_index);
@@ -1724,8 +1718,7 @@
  * high-word loc for wide arguments.  Also pull up any following
  * MOVE_RESULT and incorporate it into the invoke.
  */
-CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
-                                  bool is_range) {
+CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range) {
   CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
                                                         kArenaAllocMisc));
   MIR* move_result_mir = FindMoveResult(bb, mir);
@@ -1744,6 +1737,13 @@
   info->opt_flags = mir->optimization_flags;
   info->type = type;
   info->is_range = is_range;
+  if (IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) {
+    const auto& method_info = GetMethodLoweringInfo(mir);
+    info->method_ref = method_info.GetTargetMethod();
+  } else {
+    info->method_ref = MethodReference(GetCurrentDexCompilationUnit()->GetDexFile(),
+                                       mir->dalvikInsn.vB);
+  }
   info->index = mir->dalvikInsn.vB;
   info->offset = mir->offset;
   info->mir = mir;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index e5abd3b..3dae5b4 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -504,6 +504,7 @@
   int opt_flags;
   InvokeType type;
   uint32_t dex_idx;
+  MethodReference method_ref;
   uint32_t index;         // Method idx for invokes, type idx for FilledNewArray.
   uintptr_t direct_code;
   uintptr_t direct_method;
@@ -687,7 +688,7 @@
 
   void DoCacheMethodLoweringInfo();
 
-  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) {
+  const MirMethodLoweringInfo& GetMethodLoweringInfo(MIR* mir) const {
     DCHECK_LT(mir->meta.method_lowering_info, method_lowering_infos_.size());
     return method_lowering_infos_[mir->meta.method_lowering_info];
   }
@@ -1132,7 +1133,6 @@
   std::string GetSSAName(int ssa_reg);
   std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
   void GetBlockName(BasicBlock* bb, char* name);
-  const char* GetShortyFromTargetIdx(int);
   const char* GetShortyFromMethodReference(const MethodReference& target_method);
   void DumpMIRGraph();
   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
diff --git a/compiler/dex/mir_method_info.cc b/compiler/dex/mir_method_info.cc
index b234950..3d3d979 100644
--- a/compiler/dex/mir_method_info.cc
+++ b/compiler/dex/mir_method_info.cc
@@ -33,51 +33,103 @@
     DCHECK(method_infos != nullptr);
     DCHECK_NE(count, 0u);
     for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
-      MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType());
+      MirMethodLoweringInfo unresolved(it->MethodIndex(), it->GetInvokeType(), it->IsQuickened());
+      unresolved.declaring_dex_file_ = it->declaring_dex_file_;
+      unresolved.vtable_idx_ = it->vtable_idx_;
       if (it->target_dex_file_ != nullptr) {
         unresolved.target_dex_file_ = it->target_dex_file_;
         unresolved.target_method_idx_ = it->target_method_idx_;
       }
-      DCHECK_EQ(memcmp(&unresolved, &*it, sizeof(*it)), 0);
+      if (kIsDebugBuild) {
+        unresolved.CheckEquals(*it);
+      }
     }
   }
 
   // We're going to resolve methods and check access in a tight loop. It's better to hold
   // the lock and needed references once than re-acquiring them again and again.
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<3> hs(soa.Self());
+  StackHandleScope<4> hs(soa.Self());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(compiler_driver->GetDexCache(mUnit)));
   Handle<mirror::ClassLoader> class_loader(
       hs.NewHandle(compiler_driver->GetClassLoader(soa, mUnit)));
   Handle<mirror::Class> referrer_class(hs.NewHandle(
       compiler_driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit)));
+  auto current_dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
   // Even if the referrer class is unresolved (i.e. we're compiling a method without class
   // definition) we still want to resolve methods and record all available info.
+  const DexFile* const dex_file = mUnit->GetDexFile();
+  const bool use_jit = Runtime::Current()->UseJit();
+  const VerifiedMethod* const verified_method = mUnit->GetVerifiedMethod();
 
   for (auto it = method_infos, end = method_infos + count; it != end; ++it) {
+    // For quickened invokes, the dex method idx is actually the mir offset.
+    if (it->IsQuickened()) {
+      const auto* dequicken_ref = verified_method->GetDequickenIndex(it->method_idx_);
+      CHECK(dequicken_ref != nullptr);
+      it->target_dex_file_ = dequicken_ref->dex_file;
+      it->target_method_idx_ = dequicken_ref->index;
+    }
     // Remember devirtualized invoke target and set the called method to the default.
     MethodReference devirt_ref(it->target_dex_file_, it->target_method_idx_);
     MethodReference* devirt_target = (it->target_dex_file_ != nullptr) ? &devirt_ref : nullptr;
-    it->target_dex_file_ = mUnit->GetDexFile();
-    it->target_method_idx_ = it->MethodIndex();
-
     InvokeType invoke_type = it->GetInvokeType();
-    mirror::ArtMethod* resolved_method =
-        compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit, it->MethodIndex(),
-                                       invoke_type);
+    mirror::ArtMethod* resolved_method = nullptr;
+    if (!it->IsQuickened()) {
+      it->target_dex_file_ = dex_file;
+      it->target_method_idx_ = it->MethodIndex();
+      current_dex_cache.Assign(dex_cache.Get());
+      resolved_method = compiler_driver->ResolveMethod(soa, dex_cache, class_loader, mUnit,
+                                                       it->MethodIndex(), invoke_type);
+    } else {
+      // The method index is actually the dex PC in this case.
+      // Calculate the proper dex file and target method idx.
+      CHECK(use_jit);
+      CHECK_EQ(invoke_type, kVirtual);
+      // Don't devirt if we are in a different dex file since we can't have direct invokes in
+      // another dex file unless we always put a direct / patch pointer.
+      devirt_target = nullptr;
+      current_dex_cache.Assign(
+          Runtime::Current()->GetClassLinker()->FindDexCache(*it->target_dex_file_));
+      CHECK(current_dex_cache.Get() != nullptr);
+      DexCompilationUnit cu(
+          mUnit->GetCompilationUnit(), mUnit->GetClassLoader(), mUnit->GetClassLinker(),
+          *it->target_dex_file_, nullptr /* code_item not used */, 0u /* class_def_idx not used */,
+          it->target_method_idx_, 0u /* access_flags not used */,
+          nullptr /* verified_method not used */);
+      resolved_method = compiler_driver->ResolveMethod(soa, current_dex_cache, class_loader, &cu,
+                                                       it->target_method_idx_, invoke_type, false);
+      if (resolved_method != nullptr) {
+        // Since this was a dequickened virtual, it is guaranteed to be resolved. However, it may be
+        // resolved to an interface method. If this is the case then change the invoke type to
+        // interface with the assumption that sharp_type will be kVirtual.
+        if (resolved_method->GetInvokeType() == kInterface) {
+          it->flags_ = (it->flags_ & ~(kInvokeTypeMask << kBitInvokeTypeBegin)) |
+              (static_cast<uint16_t>(kInterface) << kBitInvokeTypeBegin);
+        }
+      }
+    }
     if (UNLIKELY(resolved_method == nullptr)) {
       continue;
     }
     compiler_driver->GetResolvedMethodDexFileLocation(resolved_method,
         &it->declaring_dex_file_, &it->declaring_class_idx_, &it->declaring_method_idx_);
-    it->vtable_idx_ = compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+    if (!it->IsQuickened()) {
+      // For quickened invoke virtuals we may have desharpened to an interface method which
+      // wont give us the right method index, in this case blindly dispatch or else we can't
+      // compile the method. Converting the invoke to interface dispatch doesn't work since we
+      // have no way to get the dex method index for quickened invoke virtuals in the interface
+      // trampolines.
+      it->vtable_idx_ =
+          compiler_driver->GetResolvedMethodVTableIndex(resolved_method, invoke_type);
+    }
 
-    MethodReference target_method(mUnit->GetDexFile(), it->MethodIndex());
+    MethodReference target_method(it->target_dex_file_, it->target_method_idx_);
     int fast_path_flags = compiler_driver->IsFastInvoke(
-        soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method, &invoke_type,
-        &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
-    bool is_referrers_class = (referrer_class.Get() == resolved_method->GetDeclaringClass());
-    bool is_class_initialized =
+        soa, dex_cache, class_loader, mUnit, referrer_class.Get(), resolved_method,
+        &invoke_type, &target_method, devirt_target, &it->direct_code_, &it->direct_method_);
+    const bool is_referrers_class = referrer_class.Get() == resolved_method->GetDeclaringClass();
+    const bool is_class_initialized =
         compiler_driver->IsMethodsClassInitialized(referrer_class.Get(), resolved_method);
     uint16_t other_flags = it->flags_ &
         ~(kFlagFastPath | kFlagClassIsInitialized | (kInvokeTypeMask << kBitSharpTypeBegin));
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 08fb103..e131c96 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -46,6 +46,9 @@
   const DexFile* DeclaringDexFile() const {
     return declaring_dex_file_;
   }
+  void SetDeclaringDexFile(const DexFile* dex_file) {
+    declaring_dex_file_ = dex_file;
+  }
 
   uint16_t DeclaringClassIndex() const {
     return declaring_class_idx_;
@@ -98,11 +101,12 @@
                       MirMethodLoweringInfo* method_infos, size_t count)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
-  MirMethodLoweringInfo(uint16_t method_idx, InvokeType type)
+  MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened)
       : MirMethodInfo(method_idx,
                       ((type == kStatic) ? kFlagIsStatic : 0u) |
                       (static_cast<uint16_t>(type) << kBitInvokeTypeBegin) |
-                      (static_cast<uint16_t>(type) << kBitSharpTypeBegin)),
+                      (static_cast<uint16_t>(type) << kBitSharpTypeBegin) |
+                      (is_quickened ? kFlagQuickened : 0u)),
         direct_code_(0u),
         direct_method_(0u),
         target_dex_file_(nullptr),
@@ -131,6 +135,11 @@
     return (flags_ & kFlagClassIsInitialized) != 0u;
   }
 
+  // Returns true iff the method invoke is INVOKE_VIRTUAL_QUICK or INVOKE_VIRTUAL_RANGE_QUICK.
+  bool IsQuickened() const {
+    return (flags_ & kFlagQuickened) != 0u;
+  }
+
   InvokeType GetInvokeType() const {
     return static_cast<InvokeType>((flags_ >> kBitInvokeTypeBegin) & kInvokeTypeMask);
   }
@@ -146,6 +155,9 @@
   uint16_t VTableIndex() const {
     return vtable_idx_;
   }
+  void SetVTableIndex(uint16_t index) {
+    vtable_idx_ = index;
+  }
 
   uintptr_t DirectCode() const {
     return direct_code_;
@@ -159,6 +171,20 @@
     return stats_flags_;
   }
 
+  void CheckEquals(const MirMethodLoweringInfo& info) const {
+    CHECK_EQ(method_idx_, info.method_idx_);
+    CHECK_EQ(flags_, info.flags_);
+    CHECK_EQ(declaring_method_idx_, info.declaring_method_idx_);
+    CHECK_EQ(declaring_class_idx_, info.declaring_class_idx_);
+    CHECK_EQ(declaring_dex_file_, info.declaring_dex_file_);
+    CHECK_EQ(direct_code_, info.direct_code_);
+    CHECK_EQ(direct_method_, info.direct_method_);
+    CHECK_EQ(target_dex_file_, info.target_dex_file_);
+    CHECK_EQ(target_method_idx_, info.target_method_idx_);
+    CHECK_EQ(vtable_idx_, info.vtable_idx_);
+    CHECK_EQ(stats_flags_, info.stats_flags_);
+  }
+
  private:
   enum {
     kBitFastPath = kMethodInfoBitEnd,
@@ -168,12 +194,14 @@
     kBitSharpTypeEnd = kBitSharpTypeBegin + 3,  // 3 bits for sharp type.
     kBitIsReferrersClass = kBitSharpTypeEnd,
     kBitClassIsInitialized,
+    kBitQuickened,
     kMethodLoweringInfoBitEnd
   };
   static_assert(kMethodLoweringInfoBitEnd <= 16, "Too many flags");
   static constexpr uint16_t kFlagFastPath = 1u << kBitFastPath;
   static constexpr uint16_t kFlagIsReferrersClass = 1u << kBitIsReferrersClass;
   static constexpr uint16_t kFlagClassIsInitialized = 1u << kBitClassIsInitialized;
+  static constexpr uint16_t kFlagQuickened = 1u << kBitQuickened;
   static constexpr uint16_t kInvokeTypeMask = 7u;
   static_assert((1u << (kBitInvokeTypeEnd - kBitInvokeTypeBegin)) - 1u == kInvokeTypeMask,
                 "assert invoke type bits failed");
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index fd67d4e..93749e4 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1437,7 +1437,7 @@
       nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
       0u /* access_flags not used */, nullptr /* verified_method not used */);
   DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
-  MirIFieldLoweringInfo inlined_field_info(field_idx, type);
+  MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
   MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
   DCHECK(inlined_field_info.IsResolved());
 
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index be05b80..9ce5ebb 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -254,7 +254,7 @@
     cu_.mir_graph->method_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const MethodDef* def = &defs[i];
-      MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type);
+      MirMethodLoweringInfo method_info(def->method_idx, def->invoke_type, false);
       if (def->declaring_dex_file != 0u) {
         method_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         method_info.declaring_class_idx_ = def->declaring_class_idx;
@@ -407,7 +407,7 @@
     cu_.mir_graph->ifield_lowering_infos_.reserve(count);
     for (size_t i = 0u; i != count; ++i) {
       const IFieldDef* def = &defs[i];
-      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type, false);
       if (def->declaring_dex_file != 0u) {
         field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
         field_info.declaring_class_idx_ = def->declaring_class_idx;
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 7245853..f636e3b 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -427,7 +427,7 @@
   InlineMethod intrinsic;
   {
     ReaderMutexLock mu(Thread::Current(), lock_);
-    auto it = inline_methods_.find(info->index);
+    auto it = inline_methods_.find(info->method_ref.dex_method_index);
     if (it == inline_methods_.end() || (it->second.flags & kInlineIntrinsic) == 0) {
       return false;
     }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3c9b7a3..6f68d1a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -865,7 +865,12 @@
 void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
                       RegLocation rl_dest, RegLocation rl_obj) {
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
-  DCHECK_EQ(IGetMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+  if (kIsDebugBuild) {
+    auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+        IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+        IGetMemAccessType(mir->dalvikInsn.opcode);
+    DCHECK_EQ(mem_access_type, field_info.MemAccessType()) << mir->dalvikInsn.opcode;
+  }
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
   if (!ForceSlowFieldPath(cu_) && field_info.FastGet()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
@@ -890,6 +895,9 @@
       StoreValue(rl_dest, rl_result);
     }
   } else {
+    if (field_info.DeclaringDexFile() != nullptr) {
+      DCHECK_EQ(field_info.DeclaringDexFile(), cu_->dex_file);
+    }
     DCHECK(SizeMatchesTypeForEntrypoint(size, type));
     QuickEntrypointEnum target;
     switch (type) {
@@ -939,7 +947,12 @@
 void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
                       RegLocation rl_src, RegLocation rl_obj) {
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
-  DCHECK_EQ(IPutMemAccessType(mir->dalvikInsn.opcode), field_info.MemAccessType());
+  if (kIsDebugBuild) {
+    auto mem_access_type = IsInstructionIGetQuickOrIPutQuick(mir->dalvikInsn.opcode) ?
+        IGetQuickOrIPutQuickMemAccessType(mir->dalvikInsn.opcode) :
+        IPutMemAccessType(mir->dalvikInsn.opcode);
+    DCHECK_EQ(mem_access_type, field_info.MemAccessType());
+  }
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
   if (!ForceSlowFieldPath(cu_) && field_info.FastPut()) {
     RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 8e3df7c..040b07c 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -863,11 +863,12 @@
   RegLocation res;
   if (info->result.location == kLocInvalid) {
     // If result is unused, return a sink target based on type of invoke target.
-    res = GetReturn(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+    res = GetReturn(
+        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   } else {
     res = info->result;
     DCHECK_EQ(LocToRegClass(res),
-              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   }
   return res;
 }
@@ -876,11 +877,12 @@
   RegLocation res;
   if (info->result.location == kLocInvalid) {
     // If result is unused, return a sink target based on type of invoke target.
-    res = GetReturnWide(ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+    res = GetReturnWide(ShortyToRegClass(
+        mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   } else {
     res = info->result;
     DCHECK_EQ(LocToRegClass(res),
-              ShortyToRegClass(mir_graph_->GetShortyFromTargetIdx(info->index)[0]));
+              ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
   }
   return res;
 }
@@ -1418,7 +1420,8 @@
 
 void Mir2Lir::GenInvoke(CallInfo* info) {
   DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
-  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+  const DexFile* dex_file = info->method_ref.dex_file;
+  if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file)
       ->GenIntrinsic(this, info)) {
     return;
   }
@@ -1428,7 +1431,7 @@
 void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
   int call_state = 0;
   LIR* null_ck;
-  LIR** p_null_ck = NULL;
+  LIR** p_null_ck = nullptr;
   NextCallInsn next_call_insn;
   FlushAllRegs();  /* Everything to home location */
   // Explicit register usage
@@ -1440,6 +1443,7 @@
   info->type = method_info.GetSharpType();
   bool fast_path = method_info.FastPath();
   bool skip_this;
+
   if (info->type == kInterface) {
     next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
     skip_this = fast_path;
@@ -1469,7 +1473,8 @@
   // Finish up any of the call sequence not interleaved in arg loading
   while (call_state >= 0) {
     call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
-                                method_info.DirectCode(), method_info.DirectMethod(), original_type);
+                                method_info.DirectCode(), method_info.DirectMethod(),
+                                original_type);
   }
   LIR* call_insn = GenCallInsn(method_info);
   MarkSafepointPC(call_insn);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 34e5e25..966a92d 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -540,6 +540,7 @@
       GenMoveException(rl_dest);
       break;
 
+    case Instruction::RETURN_VOID_BARRIER:
     case Instruction::RETURN_VOID:
       if (((cu_->access_flags & kAccConstructor) != 0) &&
           cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
@@ -790,10 +791,12 @@
       GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
       break;
 
+    case Instruction::IGET_OBJECT_QUICK:
     case Instruction::IGET_OBJECT:
       GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_WIDE_QUICK:
     case Instruction::IGET_WIDE:
       // kPrimLong and kPrimDouble share the same entrypoints.
       if (rl_dest.fp) {
@@ -803,6 +806,7 @@
       }
       break;
 
+    case Instruction::IGET_QUICK:
     case Instruction::IGET:
       if (rl_dest.fp) {
         GenIGet(mir, opt_flags, kSingle, Primitive::kPrimFloat, rl_dest, rl_src[0]);
@@ -811,43 +815,54 @@
       }
       break;
 
+    case Instruction::IGET_CHAR_QUICK:
     case Instruction::IGET_CHAR:
       GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_SHORT_QUICK:
     case Instruction::IGET_SHORT:
       GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_BOOLEAN_QUICK:
     case Instruction::IGET_BOOLEAN:
       GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IGET_BYTE_QUICK:
     case Instruction::IGET_BYTE:
       GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
       break;
 
+    case Instruction::IPUT_WIDE_QUICK:
     case Instruction::IPUT_WIDE:
       GenIPut(mir, opt_flags, rl_src[0].fp ? kDouble : k64, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_OBJECT_QUICK:
     case Instruction::IPUT_OBJECT:
       GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_QUICK:
     case Instruction::IPUT:
       GenIPut(mir, opt_flags, rl_src[0].fp ? kSingle : k32, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_BYTE_QUICK:
+    case Instruction::IPUT_BOOLEAN_QUICK:
     case Instruction::IPUT_BYTE:
     case Instruction::IPUT_BOOLEAN:
       GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_CHAR_QUICK:
     case Instruction::IPUT_CHAR:
       GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
       break;
 
+    case Instruction::IPUT_SHORT_QUICK:
     case Instruction::IPUT_SHORT:
       GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
       break;
@@ -921,9 +936,12 @@
       GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
       break;
 
+    case Instruction::INVOKE_VIRTUAL_QUICK:
     case Instruction::INVOKE_VIRTUAL:
       GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
       break;
+
+    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
     case Instruction::INVOKE_VIRTUAL_RANGE:
       GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
       break;
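
The _QUICK case labels added above share the handlers of their unquickened counterparts, so the backend only has to recover the original field or method index (via the verifier's dequicken map) before the usual lowering runs. A minimal sketch of that fall-through routing, using hypothetical opcodes and handlers rather than the real Instruction::Code values:

    #include <cstdint>
    #include <functional>

    // Hypothetical opcodes standing in for Instruction::Code values.
    enum class Op : std::uint8_t { kIGet, kIGetQuick, kIPut, kIPutQuick };

    // Quickened and unquickened opcodes share one lowering handler, so the rest
    // of the backend never needs to tell them apart.
    void RouteFieldOp(Op op, const std::function<void()>& gen_iget,
                      const std::function<void()>& gen_iput) {
      switch (op) {
        case Op::kIGetQuick:  // Fall through: same lowering as IGET.
        case Op::kIGet:
          gen_iget();
          break;
        case Op::kIPutQuick:  // Fall through: same lowering as IPUT.
        case Op::kIPut:
          gen_iput();
          break;
      }
    }

The invoke-virtual-quick cases above follow the same pattern.
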
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 19c2a5a..fcf4716 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -542,6 +542,11 @@
 void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
   // Disable optimizations according to instruction set.
   cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
+  if (Runtime::Current()->UseJit()) {
+    // Disable these optimizations for JIT until quickened byte code support is fully implemented.
+    // TODO: Find a cleaner way to do this.
+    cu.disable_opt |= 1u << kLocalValueNumbering;
+  }
 }
 
 void QuickCompiler::Init() {
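
cu.disable_opt is a bitmask with one bit per optimization pass, so the JIT path above only needs to set the bit for local value numbering. A sketch of the pattern with hypothetical pass ids, not the real pass enum:

    #include <cstdint>

    // Hypothetical pass ids; each pass owns one bit of the disable mask.
    enum HypotheticalPass : std::uint32_t {
      kHypotheticalLocalValueNumbering = 3,
      kHypotheticalDeadCodeElimination = 4,
    };

    // A pass runs only while its bit is clear, so disabling is a single OR.
    std::uint32_t DisablePass(std::uint32_t disable_opt, std::uint32_t pass) {
      return disable_opt | (1u << pass);
    }
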
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 4ff173d..51a3d84 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -66,8 +66,10 @@
     // TODO: Investigate why are we doing the work again for this method and try to avoid it.
     LOG(WARNING) << "Method processed more than once: "
         << PrettyMethod(ref.dex_method_index, *ref.dex_file);
-    DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
-    DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
+    if (!Runtime::Current()->UseJit()) {
+      DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size());
+      DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
+    }
     DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size());
     delete it->second;
     verified_methods_.erase(it);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 21e965d..42d66be 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -24,6 +24,7 @@
 #include "base/stl_util.h"
 #include "dex_file.h"
 #include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
@@ -52,6 +53,11 @@
     if (method_verifier->HasVirtualOrInterfaceInvokes()) {
       verified_method->GenerateDevirtMap(method_verifier);
     }
+
+    // Only need dequicken info for JIT so far.
+    if (Runtime::Current()->UseJit()) {
+      verified_method->GenerateDequickenMap(method_verifier);
+    }
   }
 
   if (method_verifier->HasCheckCasts()) {
@@ -65,6 +71,12 @@
   return (it != devirt_map_.end()) ? &it->second : nullptr;
 }
 
+const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const {
+  DCHECK(Runtime::Current()->UseJit());
+  auto it = dequicken_map_.find(dex_pc);
+  return (it != dequicken_map_.end()) ? &it->second : nullptr;
+}
+
 bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
   return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc);
 }
@@ -182,7 +194,7 @@
   *log2_max_gc_pc = i;
 }
 
-void VerifiedMethod::GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier) {
+void VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) {
   if (method_verifier->HasFailures()) {
     return;
   }
@@ -196,13 +208,24 @@
     if (is_virtual_quick || is_range_quick) {
       uint32_t dex_pc = inst->GetDexPc(insns);
       verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
-      mirror::ArtMethod* method = method_verifier->GetQuickInvokedMethod(inst, line,
-                                                                         is_range_quick);
+      mirror::ArtMethod* method =
+          method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick);
       CHECK(method != nullptr);
       // The verifier must know what the type of the object was or else we would have gotten a
       // failure. Put the dex method index in the dequicken map since we need this to get number of
       // arguments in the compiler.
-      dequicken_map_.Put(dex_pc, method->ToMethodReference());
+      dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(),
+                                                  method->GetDexMethodIndex()));
+    } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) {
+      uint32_t dex_pc = inst->GetDexPc(insns);
+      verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
+      mirror::ArtField* field = method_verifier->GetQuickFieldAccess(inst, line);
+      CHECK(field != nullptr);
+      // The verifier must know what the type of the field was or else we would have gotten a
+      // failure. Put the dex field index in the dequicken map since we need this for lowering
+      // in the compiler.
+      // TODO: Putting a field index in a method reference is gross.
+      dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex()));
     }
   }
 }
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index fe9dfd1..748bdcb 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -20,6 +20,7 @@
 #include <vector>
 
 #include "base/mutex.h"
+#include "dex_file.h"
 #include "method_reference.h"
 #include "safe_map.h"
 
@@ -39,6 +40,9 @@
   // Devirtualization map type maps dex offset to concrete method reference.
   typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
 
+  // Dequicken map type maps dex offset to a field / method index.
+  typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
+
   static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   ~VerifiedMethod() = default;
@@ -58,6 +62,10 @@
   // Returns the devirtualization target method, or nullptr if none.
   const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
 
+  // Returns the dequickened field / method index for a quickened invoke / field access, or null
+  // if there is no entry for that dex pc.
+  const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
+
   // Returns true if the cast can statically be verified to be redundant
   // by using the check-cast elision peephole optimization in the verifier.
   bool IsSafeCast(uint32_t pc) const;
@@ -86,7 +94,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Generate dequickening map into dequicken_map_.
-  void GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier)
+  void GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Generate safe case set into safe_cast_set_.
@@ -95,9 +103,9 @@
 
   std::vector<uint8_t> dex_gc_map_;
   DevirtualizationMap devirt_map_;
-  // Dequicken map is required for having the compiler compiled quickened invokes. The quicken map
-  // enables us to get the dex method index so that we can get the required argument count.
-  DevirtualizationMap dequicken_map_;
+  // Dequicken map is required for compiling quickened byte codes. It maps from dex PC to the
+  // dex method index or dex field index, depending on the instruction.
+  DequickenMap dequicken_map_;
   SafeCastSet safe_cast_set_;
 };
 
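
GetDequickenIndex() is a plain lookup keyed by dex pc; the stored DexFileReference pairs a dex file with either a method index (invoke-virtual-quick) or a field index (iget/iput-*-quick). A minimal sketch of the lookup shape, using stand-in types instead of SafeMap and DexFileReference:

    #include <cstdint>
    #include <map>

    struct FakeDexFile {};

    // Stand-in for DexFileReference: a dex file plus a method or field index.
    struct FakeDexFileReference {
      const FakeDexFile* dex_file;
      std::uint32_t index;
    };

    using FakeDequickenMap = std::map<std::uint32_t, FakeDexFileReference>;

    // Mirrors the shape of GetDequickenIndex(): null when no quickened
    // instruction was recorded at this dex pc.
    const FakeDexFileReference* FindDequickenIndex(const FakeDequickenMap& map,
                                                   std::uint32_t dex_pc) {
      auto it = map.find(dex_pc);
      return it != map.end() ? &it->second : nullptr;
    }
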
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index b620969..2b78e38 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -19,6 +19,7 @@
 #include "compiler_ir.h"
 #include "dex/dataflow_iterator-inl.h"
 #include "dex_flags.h"
+#include "driver/dex_compilation_unit.h"
 
 namespace art {
 
@@ -259,8 +260,8 @@
     if ((flags & Instruction::kInvoke) &&
         (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
       DCHECK_EQ(next, 0);
-      int target_idx = mir->dalvikInsn.vB;
-      const char* shorty = GetShortyFromTargetIdx(target_idx);
+      const auto& lowering_info = GetMethodLoweringInfo(mir);
+      const char* shorty = GetShortyFromMethodReference(lowering_info.GetTargetMethod());
       // Handle result type if floating point
       if ((shorty[0] == 'F') || (shorty[0] == 'D')) {
         MIR* move_result_mir = FindMoveResult(bb, mir);
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 9948c82..4a35e9f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -56,14 +56,13 @@
   return referrer_class;
 }
 
-inline mirror::ArtField* CompilerDriver::ResolveField(
+inline mirror::ArtField* CompilerDriver::ResolveFieldWithDexFile(
     const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
-    Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+    Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
     uint32_t field_idx, bool is_static) {
-  DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
-  DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
-  mirror::ArtField* resolved_field = mUnit->GetClassLinker()->ResolveField(
-      *mUnit->GetDexFile(), field_idx, dex_cache, class_loader, is_static);
+  DCHECK_EQ(dex_cache->GetDexFile(), dex_file);
+  mirror::ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField(
+      *dex_file, field_idx, dex_cache, class_loader, is_static);
   DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
   if (UNLIKELY(resolved_field == nullptr)) {
     // Clean up any exception left by type resolution.
@@ -78,6 +77,19 @@
   return resolved_field;
 }
 
+inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) {
+  return Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file);
+}
+
+inline mirror::ArtField* CompilerDriver::ResolveField(
+    const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+    Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+    uint32_t field_idx, bool is_static) {
+  DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
+  return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
+                                 is_static);
+}
+
 inline void CompilerDriver::GetResolvedFieldDexFileLocation(
     mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
     uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) {
@@ -172,7 +184,7 @@
 inline mirror::ArtMethod* CompilerDriver::ResolveMethod(
     ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
     Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
-    uint32_t method_idx, InvokeType invoke_type) {
+    uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) {
   DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
   DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()));
   mirror::ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
@@ -184,7 +196,8 @@
     soa.Self()->ClearException();
     return nullptr;
   }
-  if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
+  if (check_incompatible_class_change &&
+      UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
     // Silently return nullptr on incompatible class change.
     return nullptr;
   }
@@ -227,14 +240,14 @@
                                                         target_method->dex_method_index))) {
     return 0;
   }
-
   // Sharpen a virtual call into a direct call when the target is known not to have been
   // overridden (ie is final).
-  bool can_sharpen_virtual_based_on_type =
+  const bool same_dex_file = target_method->dex_file == mUnit->GetDexFile();
+  bool can_sharpen_virtual_based_on_type = same_dex_file &&
       (*invoke_type == kVirtual) && (resolved_method->IsFinal() || methods_class->IsFinal());
   // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
   // the super class.
-  bool can_sharpen_super_based_on_type = (*invoke_type == kSuper) &&
+  bool can_sharpen_super_based_on_type = same_dex_file && (*invoke_type == kSuper) &&
       (referrer_class != methods_class) && referrer_class->IsSubClass(methods_class) &&
       resolved_method->GetMethodIndex() < methods_class->GetVTableLength() &&
       (methods_class->GetVTableEntry(resolved_method->GetMethodIndex()) == resolved_method) &&
@@ -243,10 +256,10 @@
   if (can_sharpen_virtual_based_on_type || can_sharpen_super_based_on_type) {
     // Sharpen a virtual call into a direct call. The method_idx is into referrer's
     // dex cache, check that this resolved method is where we expect it.
-    CHECK(target_method->dex_file == mUnit->GetDexFile());
-    DCHECK(dex_cache.Get() == mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
-    CHECK(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index) ==
-        resolved_method) << PrettyMethod(resolved_method);
+    CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
+    DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile()));
+    CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(target_method->dex_method_index),
+             resolved_method) << PrettyMethod(resolved_method);
     int stats_flags = kFlagMethodResolved;
     GetCodeAndMethodForDirectCall(/*out*/invoke_type,
                                   kDirect,  // Sharp type
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index b8a8936..15b3d08 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -360,6 +360,7 @@
       classes_to_compile_(compiled_classes),
       thread_count_(thread_count),
       stats_(new AOTCompilationStats),
+      dedupe_enabled_(true),
       dump_stats_(dump_stats),
       dump_passes_(dump_passes),
       dump_cfg_file_name_(dump_cfg_file_name),
@@ -380,12 +381,7 @@
 
   compiler_->Init();
 
-  CHECK(!Runtime::Current()->IsStarted());
-  if (image_) {
-    CHECK(image_classes_.get() != nullptr);
-  } else {
-    CHECK(image_classes_.get() == nullptr);
-  }
+  CHECK_EQ(image_, image_classes_.get() != nullptr);
 
   // Read the profile file if one is provided.
   if (!profile_file.empty()) {
@@ -399,26 +395,32 @@
 }
 
 SwapVector<uint8_t>* CompilerDriver::DeduplicateCode(const ArrayRef<const uint8_t>& code) {
+  DCHECK(dedupe_enabled_);
   return dedupe_code_.Add(Thread::Current(), code);
 }
 
 SwapSrcMap* CompilerDriver::DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map) {
+  DCHECK(dedupe_enabled_);
   return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
 }
 
 SwapVector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const ArrayRef<const uint8_t>& code) {
+  DCHECK(dedupe_enabled_);
   return dedupe_mapping_table_.Add(Thread::Current(), code);
 }
 
 SwapVector<uint8_t>* CompilerDriver::DeduplicateVMapTable(const ArrayRef<const uint8_t>& code) {
+  DCHECK(dedupe_enabled_);
   return dedupe_vmap_table_.Add(Thread::Current(), code);
 }
 
 SwapVector<uint8_t>* CompilerDriver::DeduplicateGCMap(const ArrayRef<const uint8_t>& code) {
+  DCHECK(dedupe_enabled_);
   return dedupe_gc_map_.Add(Thread::Current(), code);
 }
 
 SwapVector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info) {
+  DCHECK(dedupe_enabled_);
   return dedupe_cfi_info_.Add(Thread::Current(), cfi_info);
 }
 
@@ -491,8 +493,12 @@
 static DexToDexCompilationLevel GetDexToDexCompilationlevel(
     Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
     const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  auto* const runtime = Runtime::Current();
+  if (runtime->UseJit()) {
+    return kDontDexToDexCompile;
+  }
   const char* descriptor = dex_file.GetClassDescriptor(class_def);
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* class_linker = runtime->GetClassLinker();
   mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader);
   if (klass == nullptr) {
     CHECK(self->IsExceptionPending());
@@ -518,9 +524,8 @@
   }
 }
 
-void CompilerDriver::CompileOne(mirror::ArtMethod* method, TimingLogger* timings) {
+void CompilerDriver::CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
-  Thread* self = Thread::Current();
   jobject jclass_loader;
   const DexFile* dex_file;
   uint16_t class_def_idx;
@@ -529,9 +534,8 @@
   InvokeType invoke_type = method->GetInvokeType();
   {
     ScopedObjectAccessUnchecked soa(self);
-    ScopedLocalRef<jobject>
-      local_class_loader(soa.Env(),
-                    soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
+    ScopedLocalRef<jobject> local_class_loader(
+        soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader()));
     jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
     // Find the dex_file
     dex_file = method->GetDexFile();
@@ -549,7 +553,7 @@
   // Can we run DEX-to-DEX compiler on this class ?
   DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
   {
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader(
@@ -557,14 +561,35 @@
     dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file,
                                                                class_def);
   }
-  CompileMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx, jclass_loader,
-                *dex_file, dex_to_dex_compilation_level, true);
+  CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+                jclass_loader, *dex_file, dex_to_dex_compilation_level, true);
 
   self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
-
   self->TransitionFromSuspendedToRunnable();
 }
 
+CompiledMethod* CompilerDriver::CompileMethod(Thread* self, mirror::ArtMethod* method) {
+  const uint32_t method_idx = method->GetDexMethodIndex();
+  const uint32_t access_flags = method->GetAccessFlags();
+  const InvokeType invoke_type = method->GetInvokeType();
+  StackHandleScope<1> hs(self);
+  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+      method->GetDeclaringClass()->GetClassLoader()));
+  jobject jclass_loader = class_loader.ToJObject();
+  const DexFile* dex_file = method->GetDexFile();
+  const uint16_t class_def_idx = method->GetClassDefIndex();
+  const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+  DexToDexCompilationLevel dex_to_dex_compilation_level =
+      GetDexToDexCompilationlevel(self, class_loader, *dex_file, class_def);
+  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+  self->TransitionFromRunnableToSuspended(kNative);
+  CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+                jclass_loader, *dex_file, dex_to_dex_compilation_level, true);
+  auto* compiled_method = GetCompiledMethod(MethodReference(dex_file, method_idx));
+  self->TransitionFromSuspendedToRunnable();
+  return compiled_method;
+}
+
 void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
                              ThreadPool* thread_pool, TimingLogger* timings) {
   for (size_t i = 0; i != dex_files.size(); ++i) {
@@ -1035,7 +1060,8 @@
                                         bool* is_type_initialized, bool* use_direct_type_ptr,
                                         uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
   ScopedObjectAccess soa(Thread::Current());
-  mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
+  Runtime* runtime = Runtime::Current();
+  mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(dex_file);
   mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
   if (resolved_class == nullptr) {
     return false;
@@ -1045,7 +1071,8 @@
     return false;
   }
   *out_is_finalizable = resolved_class->IsFinalizable();
-  const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+  gc::Heap* heap = runtime->GetHeap();
+  const bool compiling_boot = heap->IsCompilingBoot();
   const bool support_boot_image_fixup = GetSupportBootImageFixup();
   if (compiling_boot) {
     // boot -> boot class pointers.
@@ -1061,10 +1088,15 @@
     } else {
       return false;
     }
+  } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) {
+    *is_type_initialized = resolved_class->IsInitialized();
+    // The class cannot move, so it is safe to embed a direct pointer to it.
+    *use_direct_type_ptr = true;
+    *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
+    return true;
   } else {
     // True if the class is in the image at app compiling time.
-    const bool class_in_image =
-        Runtime::Current()->GetHeap()->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
+    const bool class_in_image = heap->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
     if (class_in_image && support_boot_image_fixup) {
       // boot -> app class pointers.
       *is_type_initialized = resolved_class->IsInitialized();
@@ -1257,8 +1289,10 @@
   // invoked, so this can be passed to the out-of-line runtime support code.
   *direct_code = 0;
   *direct_method = 0;
+  Runtime* const runtime = Runtime::Current();
+  gc::Heap* const heap = runtime->GetHeap();
   bool use_dex_cache = GetCompilerOptions().GetCompilePic();  // Off by default
-  const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
+  const bool compiling_boot = heap->IsCompilingBoot();
   // TODO This is somewhat hacky. We should refactor all of this invoke codepath.
   const bool force_relocations = (compiling_boot ||
                                   GetCompilerOptions().GetIncludePatchInformation());
@@ -1267,14 +1301,15 @@
   }
   // TODO: support patching on all architectures.
   use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
-  bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
+  mirror::Class* declaring_class = method->GetDeclaringClass();
+  bool method_code_in_boot = declaring_class->GetClassLoader() == nullptr;
   if (!use_dex_cache) {
     if (!method_code_in_boot) {
       use_dex_cache = true;
     } else {
       bool has_clinit_trampoline =
-          method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
-      if (has_clinit_trampoline && (method->GetDeclaringClass() != referrer_class)) {
+          method->IsStatic() && !declaring_class->IsInitialized();
+      if (has_clinit_trampoline && declaring_class != referrer_class) {
         // Ensure we run the clinit trampoline unless we are invoking a static method in the same
         // class.
         use_dex_cache = true;
@@ -1302,7 +1337,9 @@
   // The method is defined not within this dex file. We need a dex cache slot within the current
   // dex file or direct pointers.
   bool must_use_direct_pointers = false;
-  if (target_method->dex_file == method->GetDeclaringClass()->GetDexCache()->GetDexFile()) {
+  mirror::DexCache* dex_cache = declaring_class->GetDexCache();
+  if (target_method->dex_file == dex_cache->GetDexFile() &&
+    !(runtime->UseJit() && dex_cache->GetResolvedMethod(method->GetDexMethodIndex()) == nullptr)) {
     target_method->dex_method_index = method->GetDexMethodIndex();
   } else {
     if (no_guarantee_of_dex_cache_entry) {
@@ -1315,7 +1352,7 @@
       } else {
         if (force_relocations && !use_dex_cache) {
           target_method->dex_method_index = method->GetDexMethodIndex();
-          target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+          target_method->dex_file = dex_cache->GetDexFile();
         }
         must_use_direct_pointers = true;
       }
@@ -1330,8 +1367,7 @@
       *type = sharp_type;
     }
   } else {
-    bool method_in_image =
-        Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
+    bool method_in_image = heap->FindSpaceFromObject(method, false)->IsImageSpace();
     if (method_in_image || compiling_boot) {
       // We know we must be able to get to the method in the image, so use that pointer.
       CHECK(!method->IsAbstract());
@@ -2000,10 +2036,11 @@
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
   ClassLinker* class_linker = manager->GetClassLinker();
   jobject jclass_loader = manager->GetClassLoader();
+  Thread* self = Thread::Current();
   {
     // Use a scoped object access to perform to the quick SkipClass check.
     const char* descriptor = dex_file.GetClassDescriptor(class_def);
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     StackHandleScope<3> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader(
         hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
@@ -2030,7 +2067,7 @@
   // Can we run DEX-to-DEX compiler on this class ?
   DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile;
   {
-    ScopedObjectAccess soa(Thread::Current());
+    ScopedObjectAccess soa(self);
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::ClassLoader> class_loader(
         hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
@@ -2061,7 +2098,7 @@
       continue;
     }
     previous_direct_method_idx = method_idx;
-    driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+    driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
                           it.GetMethodInvokeType(class_def), class_def_index,
                           method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
                           compilation_enabled);
@@ -2078,7 +2115,7 @@
       continue;
     }
     previous_virtual_method_idx = method_idx;
-    driver->CompileMethod(it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+    driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
                           it.GetMethodInvokeType(class_def), class_def_index,
                           method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
                           compilation_enabled);
@@ -2111,10 +2148,10 @@
   }
 }
 
-void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
-                                   InvokeType invoke_type, uint16_t class_def_idx,
-                                   uint32_t method_idx, jobject class_loader,
-                                   const DexFile& dex_file,
+void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_item,
+                                   uint32_t access_flags, InvokeType invoke_type,
+                                   uint16_t class_def_idx, uint32_t method_idx,
+                                   jobject class_loader, const DexFile& dex_file,
                                    DexToDexCompilationLevel dex_to_dex_compilation_level,
                                    bool compilation_enabled) {
   CompiledMethod* compiled_method = nullptr;
@@ -2162,7 +2199,6 @@
     }
   }
 
-  Thread* self = Thread::Current();
   if (compiled_method != nullptr) {
     // Count non-relative linker patches.
     size_t non_relative_linker_patch_count = 0u;
@@ -2194,6 +2230,21 @@
   }
 }
 
+void CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
+  CompiledMethod* compiled_method = nullptr;
+  {
+    MutexLock mu(Thread::Current(), compiled_methods_lock_);
+    auto it = compiled_methods_.find(method_ref);
+    if (it != compiled_methods_.end()) {
+      compiled_method = it->second;
+      compiled_methods_.erase(it);
+    }
+  }
+  if (compiled_method != nullptr) {
+    CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, compiled_method);
+  }
+}
+
 CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
   MutexLock mu(Thread::Current(), compiled_classes_lock_);
   ClassTable::const_iterator it = compiled_classes_.find(ref);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index b756244..24b6f17 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -45,6 +45,10 @@
 
 namespace art {
 
+namespace mirror {
+class DexCache;
+}  // namespace mirror
+
 namespace verifier {
 class MethodVerifier;
 }  // namespace verifier
@@ -107,8 +111,11 @@
                   TimingLogger* timings)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
 
+  CompiledMethod* CompileMethod(Thread* self, mirror::ArtMethod*)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED;
+
   // Compile a single Method.
-  void CompileOne(mirror::ArtMethod* method, TimingLogger* timings)
+  void CompileOne(Thread* self, mirror::ArtMethod* method, TimingLogger* timings)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   VerificationResults* GetVerificationResults() const {
@@ -172,6 +179,9 @@
   size_t GetNonRelativeLinkerPatchCount() const
       LOCKS_EXCLUDED(compiled_methods_lock_);
 
+  // Remove and delete a compiled method.
+  void RemoveCompiledMethod(const MethodReference& method_ref);
+
   void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file,
                                      uint16_t class_def_index);
   bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index);
@@ -226,6 +236,13 @@
       uint32_t field_idx, bool is_static)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Resolve a field with a given dex file.
+  mirror::ArtField* ResolveFieldWithDexFile(
+      const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
+      uint32_t field_idx, bool is_static)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Get declaration location of a resolved field.
   void GetResolvedFieldDexFileLocation(
       mirror::ArtField* resolved_field, const DexFile** declaring_dex_file,
@@ -235,6 +252,10 @@
   bool IsFieldVolatile(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   MemberOffset GetFieldOffset(mirror::ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Find a dex cache for a dex file.
+  inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
   std::pair<bool, bool> IsFastInstanceField(
       mirror::DexCache* dex_cache, mirror::Class* referrer_class,
@@ -261,7 +282,7 @@
   mirror::ArtMethod* ResolveMethod(
       ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
-      uint32_t method_idx, InvokeType invoke_type)
+      uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get declaration location of a resolved field.
@@ -295,6 +316,13 @@
   void ProcessedStaticField(bool resolved, bool local);
   void ProcessedInvoke(InvokeType invoke_type, int flags);
 
+  void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
+                        const ScopedObjectAccess& soa, bool is_static,
+                        mirror::ArtField** resolved_field,
+                        mirror::Class** referrer_class,
+                        mirror::DexCache** dex_cache)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Can we fast path instance field access? Computes field's offset and volatility.
   bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
                                 MemberOffset* field_offset, bool* is_volatile)
@@ -380,6 +408,13 @@
     return timings_logger_;
   }
 
+  void SetDedupeEnabled(bool dedupe_enabled) {
+    dedupe_enabled_ = dedupe_enabled;
+  }
+  bool DedupeEnabled() const {
+    return dedupe_enabled_;
+  }
+
   // Checks if class specified by type_idx is one of the image_classes_
   bool IsImageClass(const char* descriptor) const;
 
@@ -484,7 +519,7 @@
                       const std::vector<const DexFile*>& dex_files,
                       ThreadPool* thread_pool, TimingLogger* timings)
       LOCKS_EXCLUDED(Locks::mutator_lock_);
-  void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
+  void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags,
                      InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx,
                      jobject class_loader, const DexFile& dex_file,
                      DexToDexCompilationLevel dex_to_dex_compilation_level,
@@ -545,6 +580,7 @@
   class AOTCompilationStats;
   std::unique_ptr<AOTCompilationStats> stats_;
 
+  bool dedupe_enabled_;
   bool dump_stats_;
   const bool dump_passes_;
   const std::string& dump_cfg_file_name_;
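
RemoveCompiledMethod() frees the method's buffers, which is only safe because the JIT constructs its CompilerDriver with deduplication disabled (see the "Disable dedupe so we can remove compiled methods" comment in jit_compiler.cc below); with dedupe enabled, two methods may share a buffer and freeing one would leave the other dangling. A simplified ownership sketch of the dedupe-off case, not the real swap-space allocator:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <vector>

    using Buffer = std::vector<std::uint8_t>;

    // With dedupe disabled every method owns its buffer outright, so erasing an
    // entry can free it immediately. With dedupe enabled, buffers may be shared
    // between methods and a per-method free would leave other entries dangling.
    class MethodTable {
     public:
      void Add(std::uint32_t method_idx, Buffer code) {
        methods_[method_idx] = std::make_unique<Buffer>(std::move(code));
      }
      void Remove(std::uint32_t method_idx) {
        methods_.erase(method_idx);  // Unique ownership: safe to destroy here.
      }

     private:
      std::map<std::uint32_t, std::unique_ptr<Buffer>> methods_;
    };

With deduplication the table would need shared, reference-counted buffers instead, which is why the JIT driver simply turns it off.
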
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
new file mode 100644
index 0000000..b1d972e
--- /dev/null
+++ b/compiler/jit/jit_compiler.cc
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_compiler.h"
+
+#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
+#include "compiler_callbacks.h"
+#include "dex/pass_manager.h"
+#include "dex/quick_compiler_callbacks.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mirror/art_method-inl.h"
+#include "oat_file-inl.h"
+#include "object_lock.h"
+#include "thread_list.h"
+#include "verifier/method_verifier-inl.h"
+
+namespace art {
+namespace jit {
+
+JitCompiler* JitCompiler::Create() {
+  return new JitCompiler();
+}
+
+extern "C" void* jit_load(CompilerCallbacks** callbacks) {
+  VLOG(jit) << "loading jit compiler";
+  auto* const jit_compiler = JitCompiler::Create();
+  CHECK(jit_compiler != nullptr);
+  *callbacks = jit_compiler->GetCompilerCallbacks();
+  VLOG(jit) << "Done loading jit compiler";
+  return jit_compiler;
+}
+
+extern "C" void jit_unload(void* handle) {
+  DCHECK(handle != nullptr);
+  delete reinterpret_cast<JitCompiler*>(handle);
+}
+
+extern "C" bool jit_compile_method(void* handle, mirror::ArtMethod* method, Thread* self)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+  DCHECK(jit_compiler != nullptr);
+  return jit_compiler->CompileMethod(self, method);
+}
+
+JitCompiler::JitCompiler() : total_time_(0) {
+  auto* pass_manager_options = new PassManagerOptions;
+  pass_manager_options->SetDisablePassList("GVN,DCE");
+  compiler_options_.reset(new CompilerOptions(
+      CompilerOptions::kDefaultCompilerFilter,
+      CompilerOptions::kDefaultHugeMethodThreshold,
+      CompilerOptions::kDefaultLargeMethodThreshold,
+      CompilerOptions::kDefaultSmallMethodThreshold,
+      CompilerOptions::kDefaultTinyMethodThreshold,
+      CompilerOptions::kDefaultNumDexMethodsThreshold,
+      false,
+      false,
+      CompilerOptions::kDefaultTopKProfileThreshold,
+      false,
+      false,
+      false,
+      false,
+      true,  // pic
+      nullptr,
+      pass_manager_options,
+      nullptr));
+  const InstructionSet instruction_set = kRuntimeISA;
+  instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+  cumulative_logger_.reset(new CumulativeLogger("jit times"));
+  verification_results_.reset(new VerificationResults(compiler_options_.get()));
+  method_inliner_map_.reset(new DexFileToMethodInlinerMap);
+  callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
+                                              method_inliner_map_.get()));
+  compiler_driver_.reset(new CompilerDriver(
+      compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
+      Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
+      nullptr, new std::set<std::string>, 1, false, true,
+      std::string(), cumulative_logger_.get(), -1, std::string()));
+  // Disable dedupe so we can remove compiled methods.
+  compiler_driver_->SetDedupeEnabled(false);
+  compiler_driver_->SetSupportBootImageFixup(false);
+}
+
+JitCompiler::~JitCompiler() {
+}
+
+bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
+  uint64_t start_time = NanoTime();
+  StackHandleScope<2> hs(self);
+  self->AssertNoPendingException();
+  Runtime* runtime = Runtime::Current();
+  Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
+  if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+    VLOG(jit) << "Already compiled " << PrettyMethod(method);
+    return true;  // Already compiled
+  }
+  Handle<mirror::Class> h_class(hs.NewHandle(h_method->GetDeclaringClass()));
+  if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+    VLOG(jit) << "JIT failed to initialize " << PrettyMethod(h_method.Get());
+    return false;
+  }
+  const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
+  MethodReference method_ref(dex_file, h_method->GetDexMethodIndex());
+  // Only verify if we don't already have verification results.
+  if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
+    std::string error;
+    if (verifier::MethodVerifier::VerifyMethod(h_method.Get(), true, &error) ==
+        verifier::MethodVerifier::kHardFailure) {
+      VLOG(jit) << "Not compile method " << PrettyMethod(h_method.Get())
+          << " due to verification failure " << error;
+      return false;
+    }
+  }
+  CompiledMethod* compiled_method(compiler_driver_->CompileMethod(self, h_method.Get()));
+  if (compiled_method == nullptr) {
+    return false;
+  }
+  total_time_ += NanoTime() - start_time;
+  const bool result = MakeExecutable(compiled_method, h_method.Get());
+  // Remove the compiled method to save memory.
+  compiler_driver_->RemoveCompiledMethod(method_ref);
+  return result;
+}
+
+CompilerCallbacks* JitCompiler::GetCompilerCallbacks() const {
+  return callbacks_.get();
+}
+
+uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_method,
+                                               uint8_t* reserve_begin, uint8_t* reserve_end,
+                                               const uint8_t* mapping_table,
+                                               const uint8_t* vmap_table,
+                                               const uint8_t* gc_map) {
+  reserve_begin += sizeof(OatQuickMethodHeader);
+  reserve_begin = reinterpret_cast<uint8_t*>(
+      compiled_method->AlignCode(reinterpret_cast<uintptr_t>(reserve_begin)));
+  const auto* quick_code = compiled_method->GetQuickCode();
+  CHECK_LE(reserve_begin, reserve_end);
+  CHECK_LE(quick_code->size(), static_cast<size_t>(reserve_end - reserve_begin));
+  auto* code_ptr = reserve_begin;
+  OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+  // Construct the header last.
+  const auto frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+  const auto core_spill_mask = compiled_method->GetCoreSpillMask();
+  const auto fp_spill_mask = compiled_method->GetFpSpillMask();
+  const auto code_size = quick_code->size();
+  CHECK_NE(code_size, 0U);
+  std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
+  // All of the code has been copied; now construct the method header in place,
+  // immediately before the code.
+  method_header = new (method_header) OatQuickMethodHeader(
+      code_ptr - mapping_table, code_ptr - vmap_table, code_ptr - gc_map, frame_size_in_bytes,
+      core_spill_mask, fp_spill_mask, code_size);
+  // Return the code ptr.
+  return code_ptr;
+}
+
+bool JitCompiler::AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
+                                 OatFile::OatMethod* out_method) {
+  Runtime* runtime = Runtime::Current();
+  JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
+  const auto* quick_code = compiled_method->GetQuickCode();
+  if (quick_code == nullptr) {
+    return false;
+  }
+  const auto code_size = quick_code->size();
+  Thread* const self = Thread::Current();
+  const uint8_t* base = code_cache->CodeCachePtr();
+  auto* const mapping_table = compiled_method->GetMappingTable();
+  auto* const vmap_table = compiled_method->GetVmapTable();
+  auto* const gc_map = compiled_method->GetGcMap();
+  // Write the mapping, vmap and GC map tables into the data cache first.
+  uint8_t* const mapping_table_ptr = code_cache->AddDataArray(
+      self, mapping_table->data(), mapping_table->data() + mapping_table->size());
+  if (mapping_table_ptr == nullptr) {
+    return false;  // Out of data cache.
+  }
+  uint8_t* const vmap_table_ptr = code_cache->AddDataArray(
+      self, vmap_table->data(), vmap_table->data() + vmap_table->size());
+  if (vmap_table_ptr == nullptr) {
+    return false;  // Out of data cache.
+  }
+  uint8_t* const gc_map_ptr = code_cache->AddDataArray(
+      self, gc_map->data(), gc_map->data() + gc_map->size());
+  if (gc_map_ptr == nullptr) {
+    return false;  // Out of data cache.
+  }
+  // Don't touch this until you protect / unprotect the code.
+  const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
+  uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
+  if (code_reserve == nullptr) {
+    return false;
+  }
+  auto* code_ptr = WriteMethodHeaderAndCode(
+      compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
+      vmap_table_ptr, gc_map_ptr);
+
+  const size_t thumb_offset = compiled_method->CodeDelta();
+  const uint32_t code_offset = code_ptr - base + thumb_offset;
+  *out_method = OatFile::OatMethod(base, code_offset);
+  DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
+  DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
+  DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
+  DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
+  DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
+  DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
+  VLOG(jit)  << "JIT added " << PrettyMethod(method) << "@" << method << " ccache_size="
+      << PrettySize(code_cache->CodeCacheSize()) << ": " << reinterpret_cast<void*>(code_ptr)
+      << "," << reinterpret_cast<void*>(code_ptr + code_size);
+  return true;
+}
+
+bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method) {
+  CHECK(method != nullptr);
+  CHECK(compiled_method != nullptr);
+  OatFile::OatMethod oat_method(nullptr, 0);
+  if (!AddToCodeCache(method, compiled_method, &oat_method)) {
+    return false;
+  }
+  // TODO: Flush instruction cache.
+  oat_method.LinkMethod(method);
+  CHECK(Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method))
+      << PrettyMethod(method);
+  return true;
+}
+
+}  // namespace jit
+}  // namespace art
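
The extern "C" jit_load / jit_compile_method / jit_unload functions above are the compiler's entry points for the runtime. How the runtime binds them is not part of this file; a hedged sketch assuming dynamic lookup (library name and error handling are illustrative only):

    #include <dlfcn.h>

    #include <cstdio>

    // Hypothetical loader-side sketch (not part of this change): resolve the
    // JIT entry points from the compiler library at runtime.
    int main() {
      void* lib = dlopen("libart-compiler.so", RTLD_NOW);
      if (lib == nullptr) {
        std::printf("dlopen failed: %s\n", dlerror());
        return 1;
      }
      void* load = dlsym(lib, "jit_load");
      void* compile = dlsym(lib, "jit_compile_method");
      void* unload = dlsym(lib, "jit_unload");
      std::printf("jit_load=%p jit_compile_method=%p jit_unload=%p\n", load, compile, unload);
      dlclose(lib);
      return 0;
    }
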
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
new file mode 100644
index 0000000..0876499
--- /dev/null
+++ b/compiler/jit/jit_compiler.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JIT_JIT_COMPILER_H_
+#define ART_COMPILER_JIT_JIT_COMPILER_H_
+
+#include "base/mutex.h"
+#include "compiler_callbacks.h"
+#include "compiled_method.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+#include "oat_file.h"
+
+namespace art {
+
+class InstructionSetFeatures;
+
+namespace mirror {
+class ArtMethod;
+}
+
+namespace jit {
+
+class JitCompiler {
+ public:
+  static JitCompiler* Create();
+  virtual ~JitCompiler();
+  bool CompileMethod(Thread* self, mirror::ArtMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // This is in the compiler since the runtime doesn't have access to the compiled method
+  // structures.
+  bool AddToCodeCache(mirror::ArtMethod* method, const CompiledMethod* compiled_method,
+                      OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  CompilerCallbacks* GetCompilerCallbacks() const;
+  size_t GetTotalCompileTime() const {
+    return total_time_;
+  }
+
+ private:
+  uint64_t total_time_;
+  std::unique_ptr<CompilerOptions> compiler_options_;
+  std::unique_ptr<CumulativeLogger> cumulative_logger_;
+  std::unique_ptr<VerificationResults> verification_results_;
+  std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
+  std::unique_ptr<CompilerCallbacks> callbacks_;
+  std::unique_ptr<CompilerDriver> compiler_driver_;
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+
+  explicit JitCompiler();
+  uint8_t* WriteMethodHeaderAndCode(
+      const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
+      const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
+  bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+};
+
+}  // namespace jit
+
+}  // namespace art
+
+#endif  // ART_COMPILER_JIT_JIT_COMPILER_H_
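
WriteMethodHeaderAndCode() declared above places an OatQuickMethodHeader immediately before the aligned machine code and stores the table locations as backward offsets from the code pointer. A simplified layout sketch, with a fixed-size fake header and a caller-provided buffer instead of the code cache:

    #include <cstdint>
    #include <cstring>

    // Simplified stand-in for OatQuickMethodHeader: offsets are stored as
    // distances back from the code pointer, and the header sits right before
    // the code it describes.
    struct FakeMethodHeader {
      std::uint32_t vmap_table_offset;
      std::uint32_t code_size;
    };

    // Writes code after a header-sized gap and fills the header in place,
    // mirroring the layout produced by WriteMethodHeaderAndCode().
    std::uint8_t* WriteHeaderAndCode(std::uint8_t* reserve, const std::uint8_t* code,
                                     std::uint32_t code_size, const std::uint8_t* vmap_table) {
      std::uint8_t* code_ptr = reserve + sizeof(FakeMethodHeader);
      auto* header = reinterpret_cast<FakeMethodHeader*>(code_ptr) - 1;
      std::memcpy(code_ptr, code, code_size);
      header->vmap_table_offset = static_cast<std::uint32_t>(code_ptr - vmap_table);
      header->code_size = code_size;
      return code_ptr;  // The header can be recovered as code_ptr minus one header.
    }
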
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 9c0157e..8411091 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -549,7 +549,7 @@
 
 struct OatWriter::VmapTableDataAccess {
   static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
-    return &compiled_method->GetVmapTable();
+    return compiled_method->GetVmapTable();
   }
 
   static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index e020d31..980611f 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -341,8 +341,8 @@
       if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
         return lhs->GetMappingTable() < rhs->GetMappingTable();
       }
-      if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
-        return &lhs->GetVmapTable() < &rhs->GetVmapTable();
+      if (UNLIKELY(lhs->GetVmapTable() != rhs->GetVmapTable())) {
+        return lhs->GetVmapTable() < rhs->GetVmapTable();
       }
       if (UNLIKELY(lhs->GetGcMap() != rhs->GetGcMap())) {
         return lhs->GetGcMap() < rhs->GetGcMap();
diff --git a/compiler/utils/dex_instruction_utils.h b/compiler/utils/dex_instruction_utils.h
deleted file mode 100644
index bb2c592..0000000
--- a/compiler/utils/dex_instruction_utils.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
-#define ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
-
-#include "dex_instruction.h"
-
-namespace art {
-
-// Dex invoke type corresponds to the ordering of INVOKE instructions;
-// this order is the same for range and non-range invokes.
-enum DexInvokeType : uint8_t {
-  kDexInvokeVirtual = 0,  // invoke-virtual, invoke-virtual-range
-  kDexInvokeSuper,        // invoke-super, invoke-super-range
-  kDexInvokeDirect,       // invoke-direct, invoke-direct-range
-  kDexInvokeStatic,       // invoke-static, invoke-static-range
-  kDexInvokeInterface,    // invoke-interface, invoke-interface-range
-  kDexInvokeTypeCount
-};
-
-// Dex instruction memory access types correspond to the ordering of GET/PUT instructions;
-// this order is the same for IGET, IPUT, SGET, SPUT, AGET and APUT.
-enum DexMemAccessType : uint8_t {
-  kDexMemAccessWord = 0,  // op         0; int or float, the actual type is not encoded.
-  kDexMemAccessWide,      // op_WIDE    1; long or double, the actual type is not encoded.
-  kDexMemAccessObject,    // op_OBJECT  2; the actual reference type is not encoded.
-  kDexMemAccessBoolean,   // op_BOOLEAN 3
-  kDexMemAccessByte,      // op_BYTE    4
-  kDexMemAccessChar,      // op_CHAR    5
-  kDexMemAccessShort,     // op_SHORT   6
-  kDexMemAccessTypeCount
-};
-
-std::ostream& operator<<(std::ostream& os, const DexMemAccessType& type);
-
-// NOTE: The following functions disregard quickened instructions.
-
-constexpr bool IsInstructionReturn(Instruction::Code opcode) {
-  return Instruction::RETURN_VOID <= opcode && opcode <= Instruction::RETURN_OBJECT;
-}
-
-constexpr bool IsInstructionInvoke(Instruction::Code opcode) {
-  return Instruction::INVOKE_VIRTUAL <= opcode && opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
-      opcode != Instruction::RETURN_VOID_BARRIER;
-}
-
-constexpr bool IsInstructionInvokeStatic(Instruction::Code opcode) {
-  return opcode == Instruction::INVOKE_STATIC || opcode == Instruction::INVOKE_STATIC_RANGE;
-}
-
-constexpr bool IsInstructionGoto(Instruction::Code opcode) {
-  return Instruction::GOTO <= opcode && opcode <= Instruction::GOTO_32;
-}
-
-constexpr bool IsInstructionIfCc(Instruction::Code opcode) {
-  return Instruction::IF_EQ <= opcode && opcode <= Instruction::IF_LE;
-}
-
-constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
-  return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
-}
-
-constexpr bool IsInstructionIGet(Instruction::Code code) {
-  return Instruction::IGET <= code && code <= Instruction::IGET_SHORT;
-}
-
-constexpr bool IsInstructionIPut(Instruction::Code code) {
-  return Instruction::IPUT <= code && code <= Instruction::IPUT_SHORT;
-}
-
-constexpr bool IsInstructionSGet(Instruction::Code code) {
-  return Instruction::SGET <= code && code <= Instruction::SGET_SHORT;
-}
-
-constexpr bool IsInstructionSPut(Instruction::Code code) {
-  return Instruction::SPUT <= code && code <= Instruction::SPUT_SHORT;
-}
-
-constexpr bool IsInstructionAGet(Instruction::Code code) {
-  return Instruction::AGET <= code && code <= Instruction::AGET_SHORT;
-}
-
-constexpr bool IsInstructionAPut(Instruction::Code code) {
-  return Instruction::APUT <= code && code <= Instruction::APUT_SHORT;
-}
-
-constexpr bool IsInstructionIGetOrIPut(Instruction::Code code) {
-  return Instruction::IGET <= code && code <= Instruction::IPUT_SHORT;
-}
-
-constexpr bool IsInstructionSGetOrSPut(Instruction::Code code) {
-  return Instruction::SGET <= code && code <= Instruction::SPUT_SHORT;
-}
-
-constexpr bool IsInstructionAGetOrAPut(Instruction::Code code) {
-  return Instruction::AGET <= code && code <= Instruction::APUT_SHORT;
-}
-
-constexpr bool IsInstructionBinOp2Addr(Instruction::Code code) {
-  return Instruction::ADD_INT_2ADDR <= code && code <= Instruction::REM_DOUBLE_2ADDR;
-}
-
-// TODO: Remove the #if guards below when we fully migrate to C++14.
-
-constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionInvoke(opcode));
-#endif
-  return opcode >= Instruction::INVOKE_VIRTUAL_RANGE;
-}
-
-constexpr DexInvokeType InvokeInstructionType(Instruction::Code opcode) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionInvoke(opcode));
-#endif
-  return static_cast<DexInvokeType>(IsInvokeInstructionRange(opcode)
-                                    ? (opcode - Instruction::INVOKE_VIRTUAL_RANGE)
-                                    : (opcode - Instruction::INVOKE_VIRTUAL));
-}
-
-constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionIGet(code));
-#endif
-  return static_cast<DexMemAccessType>(code - Instruction::IGET);
-}
-
-constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionIPut(code));
-#endif
-  return static_cast<DexMemAccessType>(code - Instruction::IPUT);
-}
-
-constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionSGet(code));
-#endif
-  return static_cast<DexMemAccessType>(code - Instruction::SGET);
-}
-
-constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionSPut(code));
-#endif
-  return static_cast<DexMemAccessType>(code - Instruction::SPUT);
-}
-
-constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionAGet(code));
-#endif
-  return static_cast<DexMemAccessType>(code - Instruction::AGET);
-}
-
-constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionAPut(code));
-#endif
-  return static_cast<DexMemAccessType>(code - Instruction::APUT);
-}
-
-constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionIGetOrIPut(code));
-#endif
-  return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
-}
-
-constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionSGetOrSPut(code));
-#endif
-  return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code);
-}
-
-constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402  // C++14 allows the DCHECK() in constexpr functions.
-  DCHECK(IsInstructionAGetOrAPut(code));
-#endif
-  return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code);
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_DEX_INSTRUCTION_UTILS_H_
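
As background on the helpers deleted above: they rely on the opcode ordering described in the header's comments. Within each GET/PUT family the access-type variants appear in one fixed order, so the access type is simply the opcode's distance from the family's base opcode, and the invoke-type helpers use the same subtraction trick. Below is a self-contained sketch of that pattern; the enum values and names are chosen here for illustration and are not meant to reproduce the real dex opcode table.

    #include <cassert>
    #include <cstdint>

    // Illustrative opcode layout: the IGET and IPUT families list their
    // variants in the same order (word, wide, object, boolean, byte, char, short).
    enum Code : uint8_t {
      IGET = 0x52, IGET_WIDE, IGET_OBJECT, IGET_BOOLEAN, IGET_BYTE, IGET_CHAR, IGET_SHORT,
      IPUT,        IPUT_WIDE, IPUT_OBJECT, IPUT_BOOLEAN, IPUT_BYTE, IPUT_CHAR, IPUT_SHORT,
    };

    enum MemAccessType : uint8_t { kWord = 0, kWide, kObject, kBoolean, kByte, kChar, kShort };

    // Same pattern as IGetMemAccessType()/IPutMemAccessType() above: the access
    // type is the opcode's offset from the first opcode of its family.
    constexpr MemAccessType IGetType(Code c) { return static_cast<MemAccessType>(c - IGET); }
    constexpr MemAccessType IPutType(Code c) { return static_cast<MemAccessType>(c - IPUT); }

    int main() {
      assert(IGetType(IGET_WIDE) == kWide);      // one step past IGET
      assert(IPutType(IPUT_OBJECT) == kObject);  // identical ordering in the IPUT family
      return 0;
    }
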