Merge "x86_64: Fix issues in entrypoints"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index d4e2cbb..c986c57 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -78,6 +78,7 @@
 	compiler/oat_test.cc \
 	compiler/optimizing/codegen_test.cc \
 	compiler/optimizing/dominator_test.cc \
+	compiler/optimizing/find_loops_test.cc \
 	compiler/optimizing/liveness_test.cc \
 	compiler/optimizing/pretty_printer_test.cc \
 	compiler/optimizing/ssa_test.cc \
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 8bba84a..8f39212 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -134,9 +134,6 @@
  public:
   // Create an OatMethod based on pointers (for unit tests).
   OatFile::OatMethod CreateOatMethod(const void* code,
-                                     const size_t frame_size_in_bytes,
-                                     const uint32_t core_spill_mask,
-                                     const uint32_t fp_spill_mask,
                                      const uint8_t* gc_map) {
     CHECK(code != nullptr);
     const byte* base;
@@ -154,9 +151,6 @@
     }
     return OatFile::OatMethod(base,
                               code_offset,
-                              frame_size_in_bytes,
-                              core_spill_mask,
-                              fp_spill_mask,
                               gc_map_offset);
   }
 
@@ -179,11 +173,14 @@
         CHECK_NE(0u, code_size);
         const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
         uint32_t vmap_table_offset = vmap_table.empty() ? 0u
-            : sizeof(OatMethodHeader) + vmap_table.size();
+            : sizeof(OatQuickMethodHeader) + vmap_table.size();
         const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
         uint32_t mapping_table_offset = mapping_table.empty() ? 0u
-            : sizeof(OatMethodHeader) + vmap_table.size() + mapping_table.size();
-        OatMethodHeader method_header(vmap_table_offset, mapping_table_offset, code_size);
+            : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
+        OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
+                                           compiled_method->GetFrameSizeInBytes(),
+                                           compiled_method->GetCoreSpillMask(),
+                                           compiled_method->GetFpSpillMask(), code_size);
 
         header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
         std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
@@ -207,11 +204,7 @@
       const void* method_code = CompiledMethod::CodePointer(code_ptr,
                                                             compiled_method->GetInstructionSet());
       LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
-      OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                      compiled_method->GetFrameSizeInBytes(),
-                                                      compiled_method->GetCoreSpillMask(),
-                                                      compiled_method->GetFpSpillMask(),
-                                                      nullptr);
+      OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
       oat_method.LinkMethod(method);
       method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
     } else {
@@ -220,28 +213,13 @@
       if (!method->IsNative()) {
         const void* method_code = kUsePortableCompiler ? GetPortableToInterpreterBridge()
                                                        : GetQuickToInterpreterBridge();
-        OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                        kStackAlignment,
-                                                        0,
-                                                        0,
-                                                        nullptr);
+        OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
         oat_method.LinkMethod(method);
         method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
       } else {
         const void* method_code = GetQuickGenericJniTrampoline();
-        mirror::ArtMethod* callee_save_method = runtime_->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
 
-        // Compute Sirt size, as Sirt goes into frame
-        MethodHelper mh(method);
-        uint32_t sirt_refs = mh.GetNumberOfReferenceArgsWithoutReceiver() + 1;
-        uint32_t sirt_size = StackIndirectReferenceTable::SizeOf(sirt_refs);
-
-        OatFile::OatMethod oat_method = CreateOatMethod(method_code,
-                                                        callee_save_method->GetFrameSizeInBytes() +
-                                                            sirt_size,
-                                                        callee_save_method->GetCoreSpillMask(),
-                                                        callee_save_method->GetFpSpillMask(),
-                                                        nullptr);
+        OatFile::OatMethod oat_method = CreateOatMethod(method_code, nullptr);
         oat_method.LinkMethod(method);
         method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
       }
@@ -323,11 +301,12 @@
       compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
 #endif
 
+      runtime_->SetInstructionSet(instruction_set);
       for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
         Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
         if (!runtime_->HasCalleeSaveMethod(type)) {
           runtime_->SetCalleeSaveMethod(
-              runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
+              runtime_->CreateCalleeSaveMethod(type), type);
         }
       }
 
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 963c216..7aaffcb 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1759,7 +1759,7 @@
       if (Is64BitInstructionSet(cu_->instruction_set)) {
         call_inst = GenInvokeNoInlineCall<8>(this, info->type);
       } else {
-        call_inst = GenInvokeNoInlineCall<8>(this, info->type);
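+        // Use the 32-bit pointer size when compiling for a 32-bit instruction set.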
+        call_inst = GenInvokeNoInlineCall<4>(this, info->type);
       }
     }
   }
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 4dfa29a..52490e6 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -133,7 +133,7 @@
 }
 
 uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
-  return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 | 1 << R13 |
+  return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 |
       1 << kNumberOfCpuRegisters;
 }
 
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index b5d3923..66972cb 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -17,11 +17,12 @@
 #include "common_compiler_test.h"
 #include "compiler/compiler.h"
 #include "compiler/oat_writer.h"
+#include "entrypoints/quick/quick_entrypoints.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
-#include "oat_file.h"
+#include "oat_file-inl.h"
 #include "vector_output_stream.h"
 
 namespace art {
@@ -176,8 +177,9 @@
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
   EXPECT_EQ(80U, sizeof(OatHeader));
-  EXPECT_EQ(20U, sizeof(OatMethodOffsets));
-  EXPECT_EQ(12U, sizeof(OatMethodHeader));
+  EXPECT_EQ(8U, sizeof(OatMethodOffsets));
+  EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
+  EXPECT_EQ(320U, sizeof(QuickEntryPoints));
 }
 
 TEST_F(OatTest, OatHeaderIsValid) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index bbc9c3e..39311d9 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -331,9 +331,6 @@
     if (compiled_method != nullptr) {
       // Derived from CompiledMethod.
       uint32_t quick_code_offset = 0;
-      uint32_t frame_size_in_bytes = kStackAlignment;
-      uint32_t core_spill_mask = 0;
-      uint32_t fp_spill_mask = 0;
 
       const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
       const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
@@ -351,7 +348,7 @@
         uint32_t code_size = quick_code->size() * sizeof(uint8_t);
         CHECK_NE(code_size, 0U);
         uint32_t thumb_offset = compiled_method->CodeDelta();
-        quick_code_offset = offset_ + sizeof(OatMethodHeader) + thumb_offset;
+        quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
 
         std::vector<uint8_t>* cfi_info = writer_->compiler_driver_->GetCallFrameInformation();
         if (cfi_info != nullptr) {
@@ -374,27 +371,45 @@
           }
         }
 
-        DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
-        OatMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
-        method_header->code_size_ = code_size;
-
         // Deduplicate code arrays.
         auto code_iter = dedupe_map_.find(compiled_method);
         if (code_iter != dedupe_map_.end()) {
           quick_code_offset = code_iter->second;
-          FixupMethodHeader(method_header, quick_code_offset - thumb_offset);
         } else {
           dedupe_map_.Put(compiled_method, quick_code_offset);
-          FixupMethodHeader(method_header, quick_code_offset - thumb_offset);
+        }
+
+        // Update quick method header.
+        DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
+        OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
+        uint32_t mapping_table_offset = method_header->mapping_table_offset_;
+        uint32_t vmap_table_offset = method_header->vmap_table_offset_;
+        // The code offset was 0 when the mapping/vmap table offset was set, so it's set
+        // to 0-offset and we need to adjust it by code_offset.
+        uint32_t code_offset = quick_code_offset - thumb_offset;
+        if (mapping_table_offset != 0u) {
+          mapping_table_offset += code_offset;
+          DCHECK_LT(mapping_table_offset, code_offset);
+        }
+        if (vmap_table_offset != 0u) {
+          vmap_table_offset += code_offset;
+          DCHECK_LT(vmap_table_offset, code_offset);
+        }
+        uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+        uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
+        uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
+        *method_header = OatQuickMethodHeader(mapping_table_offset, vmap_table_offset,
+                                              frame_size_in_bytes, core_spill_mask, fp_spill_mask,
+                                              code_size);
+
+        // Update checksum if this wasn't a duplicate.
+        if (code_iter == dedupe_map_.end()) {
           writer_->oat_header_->UpdateChecksum(method_header, sizeof(*method_header));
           offset_ += sizeof(*method_header);  // Method header is prepended before code.
           writer_->oat_header_->UpdateChecksum(&(*quick_code)[0], code_size);
           offset_ += code_size;
         }
       }
-      frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
-      core_spill_mask = compiled_method->GetCoreSpillMask();
-      fp_spill_mask = compiled_method->GetFpSpillMask();
 
       if (kIsDebugBuild) {
         // We expect GC maps except when the class hasn't been verified or the method is native.
@@ -421,9 +436,6 @@
       DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
       OatMethodOffsets* offsets = &oat_class->method_offsets_[method_offsets_index_];
       offsets->code_offset_ = quick_code_offset;
-      offsets->frame_size_in_bytes_ = frame_size_in_bytes;
-      offsets->core_spill_mask_ = core_spill_mask;
-      offsets->fp_spill_mask_ = fp_spill_mask;
       ++method_offsets_index_;
     }
 
@@ -431,19 +443,6 @@
   }
 
  private:
-  static void FixupMethodHeader(OatMethodHeader* method_header, uint32_t code_offset) {
-    // The code offset was 0 when the mapping/vmap table offset was set, so it's set
-    // to 0-offset and we need to adjust it by code_offset.
-    if (method_header->mapping_table_offset_ != 0u) {
-      method_header->mapping_table_offset_ += code_offset;
-      DCHECK_LT(method_header->mapping_table_offset_, code_offset);
-    }
-    if (method_header->vmap_table_offset_ != 0u) {
-      method_header->vmap_table_offset_ += code_offset;
-      DCHECK_LT(method_header->vmap_table_offset_, code_offset);
-    }
-  }
-
   // Deduplication is already done on a pointer basis by the compiler driver,
   // so we can simply compare the pointers to find out if things are duplicated.
   SafeMap<const CompiledMethod*, uint32_t, CodeOffsetsKeyComparator> dedupe_map_;
@@ -501,55 +500,22 @@
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
-    OatMethodOffsets offsets(0u, kStackAlignment, 0u, 0u, 0u);
+    OatMethodOffsets offsets(0u, 0u);
     if (compiled_method != nullptr) {
       DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
       offsets = oat_class->method_offsets_[method_offsets_index_];
       ++method_offsets_index_;
     }
 
-    // Derive frame size and spill masks for native methods without code:
-    // These are generic JNI methods...
-    uint32_t method_idx = it.GetMemberIndex();
-    bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
-    if (is_native && compiled_method == nullptr) {
-      // Compute Sirt size as putting _every_ reference into it, even null ones.
-      uint32_t s_len;
-      const char* shorty = dex_file_->GetMethodShorty(dex_file_->GetMethodId(method_idx),
-                                                      &s_len);
-      DCHECK(shorty != nullptr);
-      uint32_t refs = 1;    // Native method always has "this" or class.
-      for (uint32_t i = 1; i < s_len; ++i) {
-        if (shorty[i] == 'L') {
-          refs++;
-        }
-      }
-      size_t pointer_size = GetInstructionSetPointerSize(
-          writer_->compiler_driver_->GetInstructionSet());
-      size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(pointer_size, refs);
-
-      // Get the generic spill masks and base frame size.
-      mirror::ArtMethod* callee_save_method =
-          Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
-      offsets.frame_size_in_bytes_ = callee_save_method->GetFrameSizeInBytes() + sirt_size;
-      offsets.core_spill_mask_ = callee_save_method->GetCoreSpillMask();
-      offsets.fp_spill_mask_ = callee_save_method->GetFpSpillMask();
-      DCHECK_EQ(offsets.gc_map_offset_, 0u);
-    }
-
     ClassLinker* linker = Runtime::Current()->GetClassLinker();
     InvokeType invoke_type = it.GetMethodInvokeType(dex_file_->GetClassDef(class_def_index_));
     // Unchecked as we hold mutator_lock_ on entry.
     ScopedObjectAccessUnchecked soa(Thread::Current());
     SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(*dex_file_));
     SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
-    mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, method_idx, dex_cache,
+    mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, it.GetMemberIndex(), dex_cache,
                                                       class_loader, nullptr, invoke_type);
     CHECK(method != NULL);
-    method->SetFrameSizeInBytes(offsets.frame_size_in_bytes_);
-    method->SetCoreSpillMask(offsets.core_spill_mask_);
-    method->SetFpSpillMask(offsets.fp_spill_mask_);
     // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
     method->SetQuickOatCodeOffset(offsets.code_offset_);
     method->SetOatNativeGcMapOffset(offsets.gc_map_offset_);
@@ -601,10 +567,10 @@
         // Deduplicate code arrays.
         const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
         DCHECK(method_offsets.code_offset_ < offset_ || method_offsets.code_offset_ ==
-                   offset_ + sizeof(OatMethodHeader) + compiled_method->CodeDelta())
+                   offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
             << PrettyMethod(it.GetMemberIndex(), *dex_file_);
         if (method_offsets.code_offset_ >= offset_) {
-          const OatMethodHeader& method_header = oat_class->method_headers_[method_offsets_index_];
+          const OatQuickMethodHeader& method_header = oat_class->method_headers_[method_offsets_index_];
           if (!out->WriteFully(&method_header, sizeof(method_header))) {
             ReportWriteFailure("method header", it);
             return false;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 7cdd532..85c9b47 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -231,7 +231,7 @@
     // oat_method_offsets_offsets_from_oat_class_ should contain 0
     // values in this case).
     std::vector<OatMethodOffsets> method_offsets_;
-    std::vector<OatMethodHeader> method_headers_;
+    std::vector<OatQuickMethodHeader> method_headers_;
 
    private:
     DISALLOW_COPY_AND_ASSIGN(OatClass);
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 0417050..3062e37 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -167,7 +167,8 @@
     0,
     1,
     1,
-    3
+    3,
+    1,  // Synthesized block to avoid critical edge.
   };
 
   TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -185,7 +186,9 @@
     0,
     1,
     1,
-    -1  // exit block is not dominated by any block due to the spin loop.
+    -1,  // exit block is not dominated by any block due to the spin loop.
+    1,   // block to avoid critical edge.
+    1    // block to avoid critical edge.
   };
 
   TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -205,7 +208,8 @@
     1,
     1,
     1,
-    -1  // exit block is not dominated by any block due to the spin loop.
+    -1,  // exit block is not dominated by any block due to the spin loop.
+    1    // block to avoid critical edge.
   };
 
   TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -225,7 +229,8 @@
     1,
     1,
     1,
-    -1  // exit block is not dominated by any block due to the spin loop.
+    -1,  // exit block is not dominated by any block due to the spin loop.
+    1    // block to avoid critical edge.
   };
 
   TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -247,7 +252,9 @@
     2,
     2,
     1,
-    5  // Block number 5 dominates exit block
+    5,    // Block number 5 dominates exit block
+    1,    // block to avoid critical edge.
+    2     // block to avoid critical edge.
   };
 
   TestCode(data, dominators, sizeof(dominators) / sizeof(int));
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
new file mode 100644
index 0000000..fab9f7a
--- /dev/null
+++ b/compiler/optimizing/find_loops_test.cc
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "builder.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "ssa_liveness_analysis.h"
+#include "utils/arena_allocator.h"
+#include "pretty_printer.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
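+// Builds an HGraph from the given dex bytecode, computes its dominator tree,
+// and runs natural loop recognition on the result.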
+static HGraph* TestCode(const uint16_t* data, ArenaPool* pool) {
+  ArenaAllocator allocator(pool);
+  HGraphBuilder builder(&allocator);
+  const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+  HGraph* graph = builder.BuildGraph(*item);
+  graph->BuildDominatorTree();
+  graph->FindNaturalLoops();
+  return graph;
+}
+
+TEST(FindLoopsTest, CFG1) {
+  // Constant is not used.
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::RETURN_VOID);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+  for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+    ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+  }
+}
+
+TEST(FindLoopsTest, CFG2) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::RETURN);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+  for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+    ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+  }
+}
+
+TEST(FindLoopsTest, CFG3) {
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 3 << 12 | 0,
+    Instruction::CONST_4 | 4 << 12 | 1 << 8,
+    Instruction::ADD_INT_2ADDR | 1 << 12,
+    Instruction::GOTO | 0x100,
+    Instruction::RETURN);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+  for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+    ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+  }
+}
+
+TEST(FindLoopsTest, CFG4) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 4,
+    Instruction::CONST_4 | 4 << 12 | 0,
+    Instruction::GOTO | 0x200,
+    Instruction::CONST_4 | 5 << 12 | 0,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+  for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+    ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+  }
+}
+
+TEST(FindLoopsTest, CFG5) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::CONST_4 | 4 << 12 | 0,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+  for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
+    ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+  }
+}
+
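+// Checks the loop information of the block with id `block_id`:
+// - `is_loop_header`: whether the block is expected to be a loop header.
+// - `parent_loop_header_id`: the id of the header of the innermost loop
+//   containing the block, or -1 if the block is not part of any loop.
+// - `blocks_in_loop`: when given, the expected set of block ids forming the
+//   loop headed by this block.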
+static void TestBlock(HGraph* graph,
+                      int block_id,
+                      bool is_loop_header,
+                      int parent_loop_header_id,
+                      const int* blocks_in_loop = nullptr,
+                      size_t number_of_blocks = 0) {
+  HBasicBlock* block = graph->GetBlocks().Get(block_id);
+  ASSERT_EQ(block->IsLoopHeader(), is_loop_header);
+  if (parent_loop_header_id == -1) {
+    ASSERT_EQ(block->GetLoopInformation(), nullptr);
+  } else {
+    ASSERT_EQ(block->GetLoopInformation()->GetHeader()->GetBlockId(), parent_loop_header_id);
+  }
+
+  if (blocks_in_loop != nullptr) {
+    HLoopInformation* info = block->GetLoopInformation();
+    const BitVector& blocks = info->GetBlocks();
+    ASSERT_EQ(blocks.NumSetBits(), number_of_blocks);
+    for (size_t i = 0; i < number_of_blocks; ++i) {
+      ASSERT_TRUE(blocks.IsBitSet(blocks_in_loop[i]));
+    }
+  } else {
+    ASSERT_FALSE(block->IsLoopHeader());
+  }
+}
+
+TEST(FindLoopsTest, Loop1) {
+  // Simple loop with one preheader and one back edge.
+  // var a = 0;
+  // while (a == a) {
+  // }
+  // return;
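+  //
+  // GOTO's high byte holds its signed branch offset in code units, so
+  // 0xFE00 encodes an offset of -2, branching back to the IF_EQ.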
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFE00,
+    Instruction::RETURN_VOID);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // pre header
+  const int blocks2[] = {2, 3};
+  TestBlock(graph, 2, true, 2, blocks2, 2);  // loop header
+  TestBlock(graph, 3, false, 2);             // block in loop
+  TestBlock(graph, 4, false, -1);            // return block
+  TestBlock(graph, 5, false, -1);            // exit block
+}
+
+TEST(FindLoopsTest, Loop2) {
+  // Make sure we support a preheader of a loop not being the first predecessor
+  // in the predecessor list of the header.
+  // var a = 0;
+  // while (a == a) {
+  // }
+  // return a;
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::GOTO | 0x400,
+    Instruction::IF_EQ, 4,
+    Instruction::GOTO | 0xFE00,
+    Instruction::GOTO | 0xFD00,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // goto block
+  const int blocks2[] = {2, 3};
+  TestBlock(graph, 2, true, 2, blocks2, 2);  // loop header
+  TestBlock(graph, 3, false, 2);             // block in loop
+  TestBlock(graph, 4, false, -1);            // pre header
+  TestBlock(graph, 5, false, -1);            // return block
+  TestBlock(graph, 6, false, -1);            // exit block
+}
+
+TEST(FindLoopsTest, Loop3) {
+  // Make sure we create a preheader of a loop when a header originally has two
+  // incoming blocks and one back edge.
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0x100,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFE00,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // goto block
+  TestBlock(graph, 2, false, -1);
+  const int blocks2[] = {3, 4};
+  TestBlock(graph, 3, true, 3, blocks2, 2);  // loop header
+  TestBlock(graph, 4, false, 3);             // block in loop
+  TestBlock(graph, 5, false, -1);            // pre header
+  TestBlock(graph, 6, false, -1);            // return block
+  TestBlock(graph, 7, false, -1);            // exit block
+  TestBlock(graph, 8, false, -1);            // synthesized pre header
+}
+
+TEST(FindLoopsTest, Loop4) {
+  // Test loop with originally two back edges.
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 6,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFC00,
+    Instruction::GOTO | 0xFB00,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // pre header
+  const int blocks2[] = {2, 3, 4, 5, 8};
+  TestBlock(graph, 2, true, 2, blocks2, 5);  // loop header
+  TestBlock(graph, 3, false, 2);             // block in loop
+  TestBlock(graph, 4, false, 2);             // original back edge
+  TestBlock(graph, 5, false, 2);             // original back edge
+  TestBlock(graph, 6, false, -1);            // return block
+  TestBlock(graph, 7, false, -1);            // exit block
+  TestBlock(graph, 8, false, 2);             // synthesized back edge
+}
+
+TEST(FindLoopsTest, Loop5) {
+  // Test loop with two exit edges.
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 6,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0x0200,
+    Instruction::GOTO | 0xFB00,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // pre header
+  const int blocks2[] = {2, 3, 5};
+  TestBlock(graph, 2, true, 2, blocks2, 3);  // loop header
+  TestBlock(graph, 3, false, 2);             // block in loop
+  TestBlock(graph, 4, false, -1);            // loop exit
+  TestBlock(graph, 5, false, 2);             // back edge
+  TestBlock(graph, 6, false, -1);            // return block
+  TestBlock(graph, 7, false, -1);            // exit block
+  TestBlock(graph, 8, false, -1);            // synthesized block at the loop exit
+}
+
+TEST(FindLoopsTest, InnerLoop) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 6,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFE00,  // inner loop
+    Instruction::GOTO | 0xFB00,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // pre header of outer loop
+  const int blocks2[] = {2, 3, 4, 5, 8};
+  TestBlock(graph, 2, true, 2, blocks2, 5);  // outer loop header
+  const int blocks3[] = {3, 4};
+  TestBlock(graph, 3, true, 3, blocks3, 2);  // inner loop header
+  TestBlock(graph, 4, false, 3);             // back edge on inner loop
+  TestBlock(graph, 5, false, 2);             // back edge on outer loop
+  TestBlock(graph, 6, false, -1);            // return block
+  TestBlock(graph, 7, false, -1);            // exit block
+  TestBlock(graph, 8, false, 2);             // synthesized block as pre header of inner loop
+
+  ASSERT_TRUE(graph->GetBlocks().Get(3)->GetLoopInformation()->IsIn(
+                    *graph->GetBlocks().Get(2)->GetLoopInformation()));
+  ASSERT_FALSE(graph->GetBlocks().Get(2)->GetLoopInformation()->IsIn(
+                    *graph->GetBlocks().Get(3)->GetLoopInformation()));
+}
+
+TEST(FindLoopsTest, TwoLoops) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFE00,  // first loop
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFE00,  // second loop
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // pre header of first loop
+  const int blocks2[] = {2, 3};
+  TestBlock(graph, 2, true, 2, blocks2, 2);  // first loop header
+  TestBlock(graph, 3, false, 2);             // back edge of first loop
+  const int blocks4[] = {4, 5};
+  TestBlock(graph, 4, true, 4, blocks4, 2);  // second loop header
+  TestBlock(graph, 5, false, 4);             // back edge of second loop
+  TestBlock(graph, 6, false, -1);            // return block
+  TestBlock(graph, 7, false, -1);            // exit block
+
+  ASSERT_FALSE(graph->GetBlocks().Get(4)->GetLoopInformation()->IsIn(
+                    *graph->GetBlocks().Get(2)->GetLoopInformation()));
+  ASSERT_FALSE(graph->GetBlocks().Get(2)->GetLoopInformation()->IsIn(
+                    *graph->GetBlocks().Get(4)->GetLoopInformation()));
+}
+
+TEST(FindLoopsTest, NonNaturalLoop) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0x0100,
+    Instruction::IF_EQ, 3,
+    Instruction::GOTO | 0xFD00,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+  ASSERT_TRUE(graph->GetBlocks().Get(3)->IsLoopHeader());
+  HLoopInformation* info = graph->GetBlocks().Get(3)->GetLoopInformation();
+  ASSERT_FALSE(info->GetHeader()->Dominates(info->GetBackEdges().Get(0)));
+}
+
+TEST(FindLoopsTest, DoWhileLoop) {
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::GOTO | 0x0100,
+    Instruction::IF_EQ, 0xFFFF,
+    Instruction::RETURN | 0 << 8);
+
+  ArenaPool arena;
+  HGraph* graph = TestCode(data, &arena);
+
+  TestBlock(graph, 0, false, -1);            // entry block
+  TestBlock(graph, 1, false, -1);            // pre header of first loop
+  const int blocks2[] = {2, 3, 6};
+  TestBlock(graph, 2, true, 2, blocks2, 3);  // loop header
+  TestBlock(graph, 3, false, 2);             // back edge of first loop
+  TestBlock(graph, 4, false, -1);            // return block
+  TestBlock(graph, 5, false, -1);            // exit block
+  TestBlock(graph, 6, false, 2);             // synthesized block to avoid a critical edge
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index aa4d35e..d665ab9 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -188,7 +188,7 @@
     "  kill: (1100)\n"
     "Block 1\n"  // block with if
     "  live in: (1100)\n"
-    "  live out: (0100)\n"
+    "  live out: (1100)\n"
     "  kill: (0010)\n"
     "Block 2\n"  // else block
     "  live in: (0100)\n"
@@ -201,6 +201,10 @@
     "Block 4\n"  // exit block
     "  live in: (0000)\n"
     "  live out: (0000)\n"
+    "  kill: (0000)\n"
+    "Block 5\n"  // block to avoid critical edge. Predecessor is 1, successor is 3.
+    "  live in: (1000)\n"
+    "  live out: (0000)\n"
     "  kill: (0000)\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
@@ -412,40 +416,45 @@
 
 TEST(LivenessTest, Loop6) {
   // Bitsets are made of:
-  // (constant0, constant4, constant5, phi in block 2, equal in block 2, equal in block 3)
+  // (constant0, constant4, constant5, phi in block 2, equal in block 2, equal in block 3,
+  //  phi in block 8)
   const char* expected =
     "Block 0\n"
-    "  live in: (000000)\n"
-    "  live out: (111000)\n"
-    "  kill: (111000)\n"
+    "  live in: (0000000)\n"
+    "  live out: (1110000)\n"
+    "  kill: (1110000)\n"
     "Block 1\n"
-    "  live in: (111000)\n"
-    "  live out: (011000)\n"
-    "  kill: (000000)\n"
+    "  live in: (1110000)\n"
+    "  live out: (0110000)\n"
+    "  kill: (0000000)\n"
     "Block 2\n"  // loop header
-    "  live in: (011000)\n"
-    "  live out: (011100)\n"
-    "  kill: (000110)\n"
+    "  live in: (0110000)\n"
+    "  live out: (0111000)\n"
+    "  kill: (0001100)\n"
     "Block 3\n"
-    "  live in: (011000)\n"
-    "  live out: (011000)\n"
-    "  kill: (000001)\n"
-    "Block 4\n"  // back edge
-    "  live in: (011000)\n"
-    "  live out: (011000)\n"
-    "  kill: (000000)\n"
-    "Block 5\n"  // back edge
-    "  live in: (011000)\n"
-    "  live out: (011000)\n"
-    "  kill: (000000)\n"
+    "  live in: (0110000)\n"
+    "  live out: (0110000)\n"
+    "  kill: (0000010)\n"
+    "Block 4\n"  // original back edge
+    "  live in: (0110000)\n"
+    "  live out: (0110000)\n"
+    "  kill: (0000000)\n"
+    "Block 5\n"  // original back edge
+    "  live in: (0110000)\n"
+    "  live out: (0110000)\n"
+    "  kill: (0000000)\n"
     "Block 6\n"  // return block
-    "  live in: (000100)\n"
-    "  live out: (000000)\n"
-    "  kill: (000000)\n"
+    "  live in: (0001000)\n"
+    "  live out: (0000000)\n"
+    "  kill: (0000000)\n"
     "Block 7\n"  // exit block
-    "  live in: (000000)\n"
-    "  live out: (000000)\n"
-    "  kill: (000000)\n";
+    "  live in: (0000000)\n"
+    "  live out: (0000000)\n"
+    "  kill: (0000000)\n"
+    "Block 8\n"  // synthesized back edge
+    "  live in: (0110000)\n"
+    "  live out: (0110000)\n"
+    "  kill: (0000001)\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -476,7 +485,7 @@
     "  kill: (0000000)\n"
     "Block 2\n"  // loop header
     "  live in: (0110000)\n"
-    "  live out: (0110000)\n"
+    "  live out: (0111000)\n"
     "  kill: (0001100)\n"
     "Block 3\n"
     "  live in: (0110000)\n"
@@ -497,6 +506,10 @@
     "Block 7\n"  // exit block
     "  live in: (0000000)\n"
     "  live out: (0000000)\n"
+    "  kill: (0000000)\n"
+    "Block 8\n"  // synthesized block to avoid critical edge.
+    "  live in: (0001000)\n"
+    "  live out: (0000000)\n"
     "  kill: (0000000)\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d153bf7..cf2d1ee 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -31,11 +31,11 @@
 }
 
 void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) const {
-  for (size_t i = 0; i < blocks_.Size(); i++) {
+  for (size_t i = 0; i < blocks_.Size(); ++i) {
     if (!visited.IsBitSet(i)) {
       HBasicBlock* block = blocks_.Get(i);
-      for (size_t j = 0; j < block->GetSuccessors()->Size(); j++) {
-        block->GetSuccessors()->Get(j)->RemovePredecessor(block, false);
+      for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
+        block->GetSuccessors().Get(j)->RemovePredecessor(block, false);
       }
       for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
         block->RemovePhi(it.Current()->AsPhi());
@@ -55,15 +55,14 @@
 
   visited->SetBit(id);
   visiting->SetBit(id);
-  for (size_t i = 0; i < block->GetSuccessors()->Size(); i++) {
-    HBasicBlock* successor = block->GetSuccessors()->Get(i);
+  for (size_t i = 0; i < block->GetSuccessors().Size(); i++) {
+    HBasicBlock* successor = block->GetSuccessors().Get(i);
     if (visiting->IsBitSet(successor->GetBlockId())) {
       successor->AddBackEdge(block);
     } else {
       VisitBlockForBackEdges(successor, visited, visiting);
     }
   }
-  post_order_.Add(block);
   visiting->ClearBit(id);
 }
 
@@ -78,13 +77,18 @@
   //     predecessors list of live blocks.
   RemoveDeadBlocks(visited);
 
-  // (3) Compute the immediate dominator of each block. We visit
+  // (3) Simplify the CFG now, so that we don't need to recompute
+  //     dominators and the reverse post order.
+  SimplifyCFG();
+
+  // (4) Compute the immediate dominator of each block. We visit
   //     the successors of a block only when all its forward branches
   //     have been processed.
   GrowableArray<size_t> visits(arena_, blocks_.Size());
   visits.SetSize(blocks_.Size());
-  for (size_t i = 0; i < entry_block_->GetSuccessors()->Size(); i++) {
-    VisitBlockForDominatorTree(entry_block_->GetSuccessors()->Get(i), entry_block_, &visits);
+  reverse_post_order_.Add(entry_block_);
+  for (size_t i = 0; i < entry_block_->GetSuccessors().Size(); i++) {
+    VisitBlockForDominatorTree(entry_block_->GetSuccessors().Get(i), entry_block_, &visits);
   }
 }
 
@@ -119,59 +123,172 @@
   // Once all the forward edges have been visited, we know the immediate
   // dominator of the block. We can then start visiting its successors.
   if (visits->Get(block->GetBlockId()) ==
-      block->GetPredecessors()->Size() - block->NumberOfBackEdges()) {
-    for (size_t i = 0; i < block->GetSuccessors()->Size(); i++) {
-      VisitBlockForDominatorTree(block->GetSuccessors()->Get(i), block, visits);
+      block->GetPredecessors().Size() - block->NumberOfBackEdges()) {
+    reverse_post_order_.Add(block);
+    for (size_t i = 0; i < block->GetSuccessors().Size(); i++) {
+      VisitBlockForDominatorTree(block->GetSuccessors().Get(i), block, visits);
     }
   }
 }
 
 void HGraph::TransformToSSA() {
-  DCHECK(!post_order_.IsEmpty());
-  SimplifyCFG();
+  DCHECK(!reverse_post_order_.IsEmpty());
   SsaBuilder ssa_builder(this);
   ssa_builder.BuildSsa();
 }
 
-void HGraph::SimplifyCFG() {
-  for (size_t i = post_order_.Size(); i > 0; --i) {
-    HBasicBlock* current = post_order_.Get(i - 1);
-    if (current->IsLoopHeader()) {
-      // Make sure the loop has only one pre header. This simplifies SSA building by having
-      // to just look at the pre header to know which locals are initialized at entry of the
-      // loop.
-      HLoopInformation* info = current->GetLoopInformation();
-      size_t number_of_incomings = current->GetPredecessors()->Size() - info->NumberOfBackEdges();
-      if (number_of_incomings != 1) {
-        HBasicBlock* pre_header = new (arena_) HBasicBlock(this);
-        AddBlock(pre_header);
-        pre_header->AddInstruction(new (arena_) HGoto());
-        pre_header->SetDominator(current->GetDominator());
-        current->SetDominator(pre_header);
-        post_order_.InsertAt(i, pre_header);
-
-        ArenaBitVector back_edges(arena_, GetBlocks().Size(), false);
-        for (size_t pred = 0; pred < info->GetBackEdges()->Size(); pred++) {
-          back_edges.SetBit(info->GetBackEdges()->Get(pred)->GetBlockId());
-        }
-        for (size_t pred = 0; pred < current->GetPredecessors()->Size(); pred++) {
-          HBasicBlock* predecessor = current->GetPredecessors()->Get(pred);
-          if (!back_edges.IsBitSet(predecessor->GetBlockId())) {
-            current->RemovePredecessor(predecessor);
-            pred--;
-            predecessor->AddSuccessor(pre_header);
-          }
-        }
-        pre_header->AddSuccessor(current);
-      }
-      info->SetPreHeader(current->GetDominator());
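+// An edge is critical when its source block has several successors and its
+// target block several predecessors. Splitting such an edge gives later
+// phases (SSA construction, register allocation) a block where code can be
+// placed on that path alone.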
+void HGraph::SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor) {
+  // Insert a new node between `block` and `successor` to split the
+  // critical edge.
+  HBasicBlock* new_block = new (arena_) HBasicBlock(this);
+  AddBlock(new_block);
+  new_block->AddInstruction(new (arena_) HGoto());
+  block->RemoveSuccessor(successor);
+  block->AddSuccessor(new_block);
+  new_block->AddSuccessor(successor);
+  if (successor->IsLoopHeader()) {
+    // If we split at a back edge boundary, make the new block the back edge.
+    HLoopInformation* info = successor->GetLoopInformation();
+    if (info->IsBackEdge(block)) {
+      info->RemoveBackEdge(block);
+      info->AddBackEdge(new_block);
     }
   }
 }
 
-void HLoopInformation::SetPreHeader(HBasicBlock* block) {
-  DCHECK_EQ(header_->GetDominator(), block);
-  pre_header_ = block;
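+// Canonicalize the loop headed by `header` so that it has a single back edge
+// and a single pre header.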
+void HGraph::SimplifyLoop(HBasicBlock* header) {
+  HLoopInformation* info = header->GetLoopInformation();
+
+  // If there is more than one back edge, make them all branch to the same block
+  // that will become the only back edge. This simplifies finding natural loops
+  // in the graph.
+  if (info->NumberOfBackEdges() > 1) {
+    HBasicBlock* new_back_edge = new (arena_) HBasicBlock(this);
+    AddBlock(new_back_edge);
+    new_back_edge->AddInstruction(new (arena_) HGoto());
+    for (size_t pred = 0, e = info->GetBackEdges().Size(); pred < e; ++pred) {
+      HBasicBlock* back_edge = info->GetBackEdges().Get(pred);
+      header->RemovePredecessor(back_edge);
+      back_edge->AddSuccessor(new_back_edge);
+    }
+    info->ClearBackEdges();
+    info->AddBackEdge(new_back_edge);
+    new_back_edge->AddSuccessor(header);
+  }
+
+  // Make sure the loop has only one pre header. This simplifies SSA building: we
+  // only need to look at the pre header to know which locals are initialized at
+  // entry of the loop.
+  size_t number_of_incomings = header->GetPredecessors().Size() - info->NumberOfBackEdges();
+  if (number_of_incomings != 1) {
+    HBasicBlock* pre_header = new (arena_) HBasicBlock(this);
+    AddBlock(pre_header);
+    pre_header->AddInstruction(new (arena_) HGoto());
+
+    ArenaBitVector back_edges(arena_, GetBlocks().Size(), false);
+    HBasicBlock* back_edge = info->GetBackEdges().Get(0);
+    for (size_t pred = 0; pred < header->GetPredecessors().Size(); ++pred) {
+      HBasicBlock* predecessor = header->GetPredecessors().Get(pred);
+      if (predecessor != back_edge) {
+        header->RemovePredecessor(predecessor);
+        pred--;
+        predecessor->AddSuccessor(pre_header);
+      }
+    }
+    pre_header->AddSuccessor(header);
+  }
+}
+
+void HGraph::SimplifyCFG() {
+  // Simplify the CFG for future analysis and code generation:
+  // (1): Split critical edges.
+  // (2): Simplify loops by having only one back edge and one pre header.
+  for (size_t i = 0; i < blocks_.Size(); ++i) {
+    HBasicBlock* block = blocks_.Get(i);
+    if (block->GetSuccessors().Size() > 1) {
+      for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) {
+        HBasicBlock* successor = block->GetSuccessors().Get(j);
+        if (successor->GetPredecessors().Size() > 1) {
+          SplitCriticalEdge(block, successor);
+          --j;
+        }
+      }
+    }
+    if (block->IsLoopHeader()) {
+      SimplifyLoop(block);
+    }
+  }
+}
+
+bool HGraph::FindNaturalLoops() const {
+  for (size_t i = 0; i < blocks_.Size(); ++i) {
+    HBasicBlock* block = blocks_.Get(i);
+    if (block->IsLoopHeader()) {
+      HLoopInformation* info = block->GetLoopInformation();
+      if (!info->Populate()) {
+        // Abort if the loop is not natural. We currently bail out in such cases.
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+void HLoopInformation::PopulateRecursive(HBasicBlock* block) {
+  if (blocks_.IsBitSet(block->GetBlockId())) {
+    return;
+  }
+
+  blocks_.SetBit(block->GetBlockId());
+  block->SetInLoop(this);
+  for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) {
+    PopulateRecursive(block->GetPredecessors().Get(i));
+  }
+}
+
+bool HLoopInformation::Populate() {
+  DCHECK_EQ(GetBackEdges().Size(), 1u);
+  HBasicBlock* back_edge = GetBackEdges().Get(0);
+  DCHECK(back_edge->GetDominator() != nullptr);
+  if (!header_->Dominates(back_edge)) {
+    // This loop is not natural. Do not bother going further.
+    return false;
+  }
+
+  // Populate this loop: starting with the back edge, recursively add predecessors
+  // that are not already part of that loop. Set the header as part of the loop
+  // to end the recursion.
+  // This is a recursive implementation of the algorithm described in
+  // "Advanced Compiler Design & Implementation" (Muchnick) p192.
+  blocks_.SetBit(header_->GetBlockId());
+  PopulateRecursive(back_edge);
+  return true;
+}
+
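+// After SimplifyLoop, a loop header has exactly two predecessors: the pre
+// header and the single back edge. The pre header is therefore the header's
+// immediate dominator.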
+HBasicBlock* HLoopInformation::GetPreHeader() const {
+  DCHECK_EQ(header_->GetPredecessors().Size(), 2u);
+  return header_->GetDominator();
+}
+
+bool HLoopInformation::Contains(const HBasicBlock& block) const {
+  return blocks_.IsBitSet(block.GetBlockId());
+}
+
+bool HLoopInformation::IsIn(const HLoopInformation& other) const {
+  return other.blocks_.IsBitSet(header_->GetBlockId());
+}
+
+bool HBasicBlock::Dominates(HBasicBlock* other) const {
+  // Walk up the dominator tree from `other`, to find out if `this`
+  // is an ancestor.
+  HBasicBlock* current = other;
+  while (current != nullptr) {
+    if (current == this) {
+      return true;
+    }
+    current = current->GetDominator();
+  }
+  return false;
 }
 
 static void Add(HInstructionList* instruction_list,
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index bd3d703..081c2bd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -60,7 +60,7 @@
   explicit HGraph(ArenaAllocator* arena)
       : arena_(arena),
         blocks_(arena, kDefaultNumberOfBlocks),
-        post_order_(arena, kDefaultNumberOfBlocks),
+        reverse_post_order_(arena, kDefaultNumberOfBlocks),
         maximum_number_of_out_vregs_(0),
         number_of_vregs_(0),
         number_of_in_vregs_(0),
@@ -81,6 +81,14 @@
   void TransformToSSA();
   void SimplifyCFG();
 
+  // Find all natural loops in this graph. Aborts computation and returns false
+  // if one loop is not natural, that is, the header does not dominate the back
+  // edge.
+  bool FindNaturalLoops() const;
+
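+  // CFG simplification helpers: split the critical edge between `block` and
+  // `successor`, and give the loop headed by `header` a single back edge and
+  // a single pre header.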
+  void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
+  void SimplifyLoop(HBasicBlock* header);
+
   int GetNextInstructionId() {
     return current_instruction_id_++;
   }
@@ -109,8 +117,8 @@
     return number_of_in_vregs_;
   }
 
-  const GrowableArray<HBasicBlock*>& GetPostOrder() const {
-    return post_order_;
+  const GrowableArray<HBasicBlock*>& GetReversePostOrder() const {
+    return reverse_post_order_;
   }
 
  private:
@@ -129,8 +137,8 @@
   // List of blocks in insertion order.
   GrowableArray<HBasicBlock*> blocks_;
 
-  // List of blocks to perform a post order tree traversal.
-  GrowableArray<HBasicBlock*> post_order_;
+  // List of blocks to perform a reverse post order tree traversal.
+  GrowableArray<HBasicBlock*> reverse_post_order_;
 
   HBasicBlock* entry_block_;
   HBasicBlock* exit_block_;
@@ -154,30 +162,63 @@
  public:
   HLoopInformation(HBasicBlock* header, HGraph* graph)
       : header_(header),
-        back_edges_(graph->GetArena(), kDefaultNumberOfBackEdges) { }
+        back_edges_(graph->GetArena(), kDefaultNumberOfBackEdges),
+        blocks_(graph->GetArena(), graph->GetBlocks().Size(), false) {}
+
+  HBasicBlock* GetHeader() const {
+    return header_;
+  }
 
   void AddBackEdge(HBasicBlock* back_edge) {
     back_edges_.Add(back_edge);
   }
 
+  void RemoveBackEdge(HBasicBlock* back_edge) {
+    back_edges_.Delete(back_edge);
+  }
+
+  bool IsBackEdge(HBasicBlock* block) {
+    for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
+      if (back_edges_.Get(i) == block) return true;
+    }
+    return false;
+  }
+
   int NumberOfBackEdges() const {
     return back_edges_.Size();
   }
 
-  void SetPreHeader(HBasicBlock* block);
+  HBasicBlock* GetPreHeader() const;
 
-  HBasicBlock* GetPreHeader() const {
-    return pre_header_;
+  const GrowableArray<HBasicBlock*>& GetBackEdges() const {
+    return back_edges_;
   }
 
-  const GrowableArray<HBasicBlock*>* GetBackEdges() const {
-    return &back_edges_;
+  void ClearBackEdges() {
+    back_edges_.Reset();
   }
 
+  // Find blocks that are part of this loop. Returns whether the loop is a natural loop,
+  // that is, the header dominates the back edge.
+  bool Populate();
+
+  // Returns whether this loop information contains `block`.
+  // Note that this loop information *must* be populated before entering this function.
+  bool Contains(const HBasicBlock& block) const;
+
+  // Returns whether this loop information is an inner loop of `other`.
+  // Note that `other` *must* be populated before entering this function.
+  bool IsIn(const HLoopInformation& other) const;
+
+  const ArenaBitVector& GetBlocks() const { return blocks_; }
+
  private:
-  HBasicBlock* pre_header_;
+  // Internal recursive implementation of `Populate`.
+  void PopulateRecursive(HBasicBlock* block);
+
   HBasicBlock* header_;
   GrowableArray<HBasicBlock*> back_edges_;
+  ArenaBitVector blocks_;
 
   DISALLOW_COPY_AND_ASSIGN(HLoopInformation);
 };
@@ -195,18 +236,19 @@
         dominator_(nullptr),
         block_id_(-1) { }
 
-  const GrowableArray<HBasicBlock*>* GetPredecessors() const {
-    return &predecessors_;
+  const GrowableArray<HBasicBlock*>& GetPredecessors() const {
+    return predecessors_;
   }
 
-  const GrowableArray<HBasicBlock*>* GetSuccessors() const {
-    return &successors_;
+  const GrowableArray<HBasicBlock*>& GetSuccessors() const {
+    return successors_;
   }
 
   void AddBackEdge(HBasicBlock* back_edge) {
     if (loop_information_ == nullptr) {
       loop_information_ = new (graph_->GetArena()) HLoopInformation(this, graph_);
     }
+    DCHECK_EQ(loop_information_->GetHeader(), this);
     loop_information_->AddBackEdge(back_edge);
   }
 
@@ -241,19 +283,57 @@
     }
   }
 
+  void RemoveSuccessor(HBasicBlock* block, bool remove_in_predecessor = true) {
+    successors_.Delete(block);
+    if (remove_in_predecessor) {
+      block->predecessors_.Delete(this);
+    }
+  }
+
+  void ClearAllPredecessors() {
+    predecessors_.Reset();
+  }
+
+  void AddPredecessor(HBasicBlock* block) {
+    predecessors_.Add(block);
+    block->successors_.Add(this);
+  }
+
   void AddInstruction(HInstruction* instruction);
   void RemoveInstruction(HInstruction* instruction);
   void AddPhi(HPhi* phi);
   void RemovePhi(HPhi* phi);
 
   bool IsLoopHeader() const {
-    return loop_information_ != nullptr;
+    return (loop_information_ != nullptr) && (loop_information_->GetHeader() == this);
   }
 
   HLoopInformation* GetLoopInformation() const {
     return loop_information_;
   }
 
+  // Set the loop_information_ on this block. This method overrides the current
+  // loop_information if it is an outer loop of the passed loop information.
+  void SetInLoop(HLoopInformation* info) {
+    if (IsLoopHeader()) {
+      // Nothing to do. This just means `info` is an outer loop.
+    } else if (loop_information_ == nullptr) {
+      loop_information_ = info;
+    } else if (loop_information_->Contains(*info->GetHeader())) {
+      // Block is currently part of an outer loop. Make it part of this inner loop.
+      // Note that a non-loop header having loop information means that loop
+      // information has already been populated.
+      loop_information_ = info;
+    } else {
+      // Block is part of an inner loop. Do not update the loop information.
+      // Note that we cannot do the check `info->Contains(*loop_information_->GetHeader())`
+      // at this point, because this method is being called while populating `info`.
+    }
+  }
+
+  // Returns whether this block dominates the block passed as parameter.
+  bool Dominates(HBasicBlock* block) const;
+
  private:
   HGraph* const graph_;
   GrowableArray<HBasicBlock*> predecessors_;
@@ -638,7 +718,7 @@
   HGoto() { }
 
   HBasicBlock* GetSuccessor() const {
-    return GetBlock()->GetSuccessors()->Get(0);
+    return GetBlock()->GetSuccessors().Get(0);
   }
 
   DECLARE_INSTRUCTION(Goto)
@@ -656,11 +736,11 @@
   }
 
   HBasicBlock* IfTrueSuccessor() const {
-    return GetBlock()->GetSuccessors()->Get(0);
+    return GetBlock()->GetSuccessors().Get(0);
   }
 
   HBasicBlock* IfFalseSuccessor() const {
-    return GetBlock()->GetSuccessors()->Get(1);
+    return GetBlock()->GetSuccessors().Get(1);
   }
 
   DECLARE_INSTRUCTION(If)
@@ -1011,35 +1091,35 @@
   DISALLOW_COPY_AND_ASSIGN(HInsertionOrderIterator);
 };
 
-class HPostOrderIterator : public ValueObject {
+class HReversePostOrderIterator : public ValueObject {
  public:
-  explicit HPostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
+  explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
 
-  bool Done() const { return index_ == graph_.GetPostOrder().Size(); }
-  HBasicBlock* Current() const { return graph_.GetPostOrder().Get(index_); }
+  bool Done() const { return index_ == graph_.GetReversePostOrder().Size(); }
+  HBasicBlock* Current() const { return graph_.GetReversePostOrder().Get(index_); }
   void Advance() { ++index_; }
 
  private:
   const HGraph& graph_;
   size_t index_;
 
-  DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
+  DISALLOW_COPY_AND_ASSIGN(HReversePostOrderIterator);
 };
 
-class HReversePostOrderIterator : public ValueObject {
+class HPostOrderIterator : public ValueObject {
  public:
-  explicit HReversePostOrderIterator(const HGraph& graph)
-      : graph_(graph), index_(graph_.GetPostOrder().Size()) {}
+  explicit HPostOrderIterator(const HGraph& graph)
+      : graph_(graph), index_(graph_.GetReversePostOrder().Size()) {}
 
   bool Done() const { return index_ == 0; }
-  HBasicBlock* Current() const { return graph_.GetPostOrder().Get(index_ - 1); }
+  HBasicBlock* Current() const { return graph_.GetReversePostOrder().Get(index_ - 1); }
   void Advance() { --index_; }
 
  private:
   const HGraph& graph_;
   size_t index_;
 
-  DISALLOW_COPY_AND_ASSIGN(HReversePostOrderIterator);
+  DISALLOW_COPY_AND_ASSIGN(HPostOrderIterator);
 };
 
 }  // namespace art
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8594c69..a5031e0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -104,6 +104,7 @@
   // Run these phases to get some test coverage.
   graph->BuildDominatorTree();
   graph->TransformToSSA();
+  graph->FindNaturalLoops();
   SsaLivenessAnalysis(*graph).Analyze();
 
   return new CompiledMethod(GetCompilerDriver(),
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index c82d0cc..dfeafe7 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -70,23 +70,23 @@
   virtual void VisitBasicBlock(HBasicBlock* block) {
     PrintString("BasicBlock ");
     PrintInt(block->GetBlockId());
-    const GrowableArray<HBasicBlock*>* blocks = block->GetPredecessors();
-    if (!blocks->IsEmpty()) {
+    const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
+    if (!predecessors.IsEmpty()) {
       PrintString(", pred: ");
-      for (size_t i = 0; i < blocks->Size() -1; i++) {
-        PrintInt(blocks->Get(i)->GetBlockId());
+      for (size_t i = 0; i < predecessors.Size() - 1; i++) {
+        PrintInt(predecessors.Get(i)->GetBlockId());
         PrintString(", ");
       }
-      PrintInt(blocks->Peek()->GetBlockId());
+      PrintInt(predecessors.Peek()->GetBlockId());
     }
-    blocks = block->GetSuccessors();
-    if (!blocks->IsEmpty()) {
+    const GrowableArray<HBasicBlock*>& successors = block->GetSuccessors();
+    if (!successors.IsEmpty()) {
       PrintString(", succ: ");
-      for (size_t i = 0; i < blocks->Size() - 1; i++) {
-        PrintInt(blocks->Get(i)->GetBlockId());
+      for (size_t i = 0; i < successors.Size() - 1; i++) {
+        PrintInt(successors.Get(i)->GetBlockId());
         PrintString(", ");
       }
-      PrintInt(blocks->Peek()->GetBlockId());
+      PrintInt(successors.Peek()->GetBlockId());
     }
     PrintNewLine();
     HGraphVisitor::VisitBasicBlock(block);
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 04db7a6..006349c 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -57,7 +57,7 @@
     PrintString("  ");
     PrintInt(gota->GetId());
     PrintString(": Goto ");
-    PrintInt(current_block_->GetSuccessors()->Get(0)->GetBlockId());
+    PrintInt(current_block_->GetSuccessors().Get(0)->GetBlockId());
     PrintNewLine();
   }
 
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index ee1e1e4..1fc041c 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -32,8 +32,8 @@
     HBasicBlock* block = loop_headers_.Get(i);
     for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
       HPhi* phi = it.Current()->AsPhi();
-      for (size_t pred = 0; pred < block->GetPredecessors()->Size(); pred++) {
-        phi->AddInput(ValueOfLocal(block->GetPredecessors()->Get(pred), phi->GetRegNumber()));
+      for (size_t pred = 0; pred < block->GetPredecessors().Size(); pred++) {
+        phi->AddInput(ValueOfLocal(block->GetPredecessors().Get(pred), phi->GetRegNumber()));
       }
     }
   }
@@ -75,14 +75,14 @@
     // Save the loop header so that the last phase of the analysis knows which
     // blocks need to be updated.
     loop_headers_.Add(block);
-  } else if (block->GetPredecessors()->Size() > 0) {
+  } else if (block->GetPredecessors().Size() > 0) {
     // All predecessors have already been visited because we are visiting in reverse post order.
     // We merge the values of all locals, creating phis if those values differ.
     for (size_t local = 0; local < current_locals_->Size(); local++) {
       bool is_different = false;
-      HInstruction* value = ValueOfLocal(block->GetPredecessors()->Get(0), local);
-      for (size_t i = 1; i < block->GetPredecessors()->Size(); i++) {
-        if (ValueOfLocal(block->GetPredecessors()->Get(i), local) != value) {
+      HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(0), local);
+      for (size_t i = 1; i < block->GetPredecessors().Size(); i++) {
+        if (ValueOfLocal(block->GetPredecessors().Get(i), local) != value) {
           is_different = true;
           break;
         }
@@ -90,9 +90,9 @@
       if (is_different) {
         // TODO: Compute union type.
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
-            GetGraph()->GetArena(), local, block->GetPredecessors()->Size(), Primitive::kPrimVoid);
-        for (size_t i = 0; i < block->GetPredecessors()->Size(); i++) {
-          phi->SetRawInputAt(i, ValueOfLocal(block->GetPredecessors()->Get(i), local));
+            GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
+        for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
+          phi->SetRawInputAt(i, ValueOfLocal(block->GetPredecessors().Get(i), local));
         }
         block->AddPhi(phi);
         value = phi;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 838597d..0ab77ca 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -110,7 +110,7 @@
       for (size_t i = 0, e = current->InputCount(); i < e; ++i) {
         HInstruction* input = current->InputAt(i);
 
-        HBasicBlock* predecessor = block->GetPredecessors()->Get(i);
+        HBasicBlock* predecessor = block->GetPredecessors().Get(i);
         size_t ssa_index = input->GetSsaIndex();
         BitVector* predecessor_kill = GetKillSet(*predecessor);
         BitVector* predecessor_live_in = GetLiveInSet(*predecessor);
@@ -147,8 +147,8 @@
   BitVector* live_out = GetLiveOutSet(block);
   bool changed = false;
   // The live_out set of a block is the union of live_in sets of its successors.
-  for (size_t i = 0, e = block.GetSuccessors()->Size(); i < e; ++i) {
-    HBasicBlock* successor = block.GetSuccessors()->Get(i);
+  for (size_t i = 0, e = block.GetSuccessors().Size(); i < e; ++i) {
+    HBasicBlock* successor = block.GetSuccessors().Get(i);
     if (live_out->Union(GetLiveInSet(*successor))) {
       changed = true;
     }
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index e4aafb7..9be2197 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -98,15 +98,18 @@
     "BasicBlock 0, succ: 1\n"
     "  0: IntConstant 0 [2, 2]\n"
     "  1: Goto\n"
-    "BasicBlock 1, pred: 0, succ: 3, 2\n"
+    "BasicBlock 1, pred: 0, succ: 2, 5\n"
     "  2: Equal(0, 0) [3]\n"
     "  3: If(2)\n"
     "BasicBlock 2, pred: 1, succ: 3\n"
     "  4: Goto\n"
-    "BasicBlock 3, pred: 1, 2, succ: 4\n"
+    "BasicBlock 3, pred: 2, 5, succ: 4\n"
     "  5: ReturnVoid\n"
     "BasicBlock 4, pred: 3\n"
-    "  6: Exit\n";
+    "  6: Exit\n"
+    // Synthesized block to avoid a critical edge.
+    "BasicBlock 5, pred: 1, succ: 3\n"
+    "  7: Goto\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -125,16 +128,19 @@
     "  0: IntConstant 0 [6, 3, 3]\n"
     "  1: IntConstant 4 [6]\n"
     "  2: Goto\n"
-    "BasicBlock 1, pred: 0, succ: 3, 2\n"
+    "BasicBlock 1, pred: 0, succ: 2, 5\n"
     "  3: Equal(0, 0) [4]\n"
     "  4: If(3)\n"
     "BasicBlock 2, pred: 1, succ: 3\n"
     "  5: Goto\n"
-    "BasicBlock 3, pred: 1, 2, succ: 4\n"
-    "  6: Phi(0, 1) [7]\n"
+    "BasicBlock 3, pred: 2, 5, succ: 4\n"
+    "  6: Phi(1, 0) [7]\n"
     "  7: Return(6)\n"
     "BasicBlock 4, pred: 3\n"
-    "  8: Exit\n";
+    "  8: Exit\n"
+    // Synthesized block to avoid a critical edge.
+    "BasicBlock 5, pred: 1, succ: 3\n"
+    "  9: Goto\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -184,16 +190,21 @@
     "BasicBlock 0, succ: 1\n"
     "  0: IntConstant 0 [6, 4, 2, 2]\n"
     "  1: Goto\n"
-    "BasicBlock 1, pred: 0, succ: 3, 2\n"
+    "BasicBlock 1, pred: 0, succ: 5, 6\n"
     "  2: Equal(0, 0) [3]\n"
     "  3: If(2)\n"
-    "BasicBlock 2, pred: 1, 3, succ: 3\n"
-    "  4: Phi(0, 6) [6]\n"
+    "BasicBlock 2, pred: 3, 6, succ: 3\n"
+    "  4: Phi(6, 0) [6]\n"
     "  5: Goto\n"
-    "BasicBlock 3, pred: 1, 2, succ: 2\n"
-    "  6: Phi(0, 4) [4]\n"
+    "BasicBlock 3, pred: 2, 5, succ: 2\n"
+    "  6: Phi(4, 0) [4]\n"
     "  7: Goto\n"
-    "BasicBlock 4\n";
+    "BasicBlock 4\n"
+    // Synthesized blocks to avoid critical edges.
+    "BasicBlock 5, pred: 1, succ: 3\n"
+    "  8: Goto\n"
+    "BasicBlock 6, pred: 1, succ: 2\n"
+    "  9: Goto\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -349,26 +360,30 @@
   const char* expected =
     "BasicBlock 0, succ: 1\n"
     "  0: IntConstant 0 [5]\n"
-    "  1: IntConstant 4 [5, 8, 8]\n"
-    "  2: IntConstant 5 [5]\n"
+    "  1: IntConstant 4 [14, 8, 8]\n"
+    "  2: IntConstant 5 [14]\n"
     "  3: Goto\n"
     "BasicBlock 1, pred: 0, succ: 2\n"
     "  4: Goto\n"
-    "BasicBlock 2, pred: 1, 4, 5, succ: 6, 3\n"
-    "  5: Phi(0, 2, 1) [12, 6, 6]\n"
+    "BasicBlock 2, pred: 1, 8, succ: 6, 3\n"
+    "  5: Phi(0, 14) [12, 6, 6]\n"
     "  6: Equal(5, 5) [7]\n"
     "  7: If(6)\n"
     "BasicBlock 3, pred: 2, succ: 5, 4\n"
     "  8: Equal(1, 1) [9]\n"
     "  9: If(8)\n"
-    "BasicBlock 4, pred: 3, succ: 2\n"
+    "BasicBlock 4, pred: 3, succ: 8\n"
     "  10: Goto\n"
-    "BasicBlock 5, pred: 3, succ: 2\n"
+    "BasicBlock 5, pred: 3, succ: 8\n"
     "  11: Goto\n"
     "BasicBlock 6, pred: 2, succ: 7\n"
     "  12: Return(5)\n"
     "BasicBlock 7, pred: 6\n"
-    "  13: Exit\n";
+    "  13: Exit\n"
+    // Synthesized single back edge of the loop.
+    "BasicBlock 8, pred: 5, 4, succ: 2\n"
+    "  14: Phi(1, 2) [5]\n"
+    "  15: Goto\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
@@ -393,7 +408,7 @@
     "  3: Goto\n"
     "BasicBlock 1, pred: 0, succ: 2\n"
     "  4: Goto\n"
-    "BasicBlock 2, pred: 1, 5, succ: 6, 3\n"
+    "BasicBlock 2, pred: 1, 5, succ: 3, 8\n"
     "  5: Phi(0, 1) [12, 6, 6]\n"
     "  6: Equal(5, 5) [7]\n"
     "  7: If(6)\n"
@@ -404,11 +419,13 @@
     "  10: Goto\n"
     "BasicBlock 5, pred: 3, succ: 2\n"
     "  11: Goto\n"
-    "BasicBlock 6, pred: 2, 4, succ: 7\n"
-    "  12: Phi(5, 2) [13]\n"
+    "BasicBlock 6, pred: 4, 8, succ: 7\n"
+    "  12: Phi(2, 5) [13]\n"
     "  13: Return(12)\n"
     "BasicBlock 7, pred: 6\n"
-    "  14: Exit\n";
+    "  14: Exit\n"
+    "BasicBlock 8, pred: 2, succ: 6\n"
+    "  15: Goto\n";
 
   const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
     Instruction::CONST_4 | 0 | 0,
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e90006e..e0bfc6b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -444,10 +444,11 @@
       return false;
     }
     Runtime* runtime = Runtime::Current();
+    runtime->SetInstructionSet(instruction_set);
     for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
       Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
       if (!runtime->HasCalleeSaveMethod(type)) {
-        runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(instruction_set, type), type);
+        runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(type), type);
       }
     }
     runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index c285088..45ff21f 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -17,7 +17,8 @@
 #include <stdint.h>
 
 #include "common_runtime_test.h"
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "quick/quick_method_frame_info.h"
 
 namespace art {
 
@@ -30,10 +31,13 @@
     Thread* t = Thread::Current();
     t->TransitionFromSuspendedToRunnable();  // So we can create callee-save methods.
 
-    mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(isa, type);
-    EXPECT_EQ(save_method->GetFrameSizeInBytes(), save_size) << "Expected and real size differs for "
-        << type << " core spills=" << std::hex << save_method->GetCoreSpillMask() << " fp spills="
-        << save_method->GetFpSpillMask() << std::dec;
+    r->SetInstructionSet(isa);
+    mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+    r->SetCalleeSaveMethod(save_method, type);
+    QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+    EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real sizes differ for "
+        << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
+        << frame_info.FpSpillMask() << std::dec;
 
     t->TransitionFromRunnableToSuspended(ThreadState::kNative);  // So we can shut down.
   }
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 0e1b25e..6a337b3 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -16,8 +16,9 @@
 
 #include "context_arm.h"
 
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
 #include "stack.h"
 #include "thread.h"
 
@@ -42,17 +43,15 @@
 
 void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
   mirror::ArtMethod* method = fr.GetMethod();
-  uint32_t core_spills = method->GetCoreSpillMask();
-  uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = POPCOUNT(core_spills);
-  size_t fp_spill_count = POPCOUNT(fp_core_spills);
-  size_t frame_size = method->GetFrameSizeInBytes();
+  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+  size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context
     int j = 1;
     for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
-      if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+      if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
         j++;
       }
     }
@@ -61,8 +60,9 @@
     // Lowest number spill is farthest away, walk registers and fill into context
     int j = 1;
     for (size_t i = 0; i < kNumberOfSRegisters; i++) {
-      if (((fp_core_spills >> i) & 1) != 0) {
-        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+      if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+                                        frame_info.FrameSizeInBytes());
         j++;
       }
     }
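
The mask walk above is easier to follow with concrete numbers. A standalone, compilable sketch (not ART code) of the same slot assignment, where the lowest-numbered spilled register lands farthest from the top of the frame:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t core_spills = (1u << 5) | (1u << 6) | (1u << 14);  // say R5, R6, LR.
      const int spill_count = __builtin_popcount(core_spills);          // 3, like POPCOUNT.
      int j = 1;
      for (int i = 0; i < 16; ++i) {
        if (((core_spills >> i) & 1) != 0) {
          // R5 -> slot 2 (farthest), R6 -> slot 1, LR -> slot 0 (nearest the top).
          std::printf("reg %d -> callee-save slot %d\n", i, spill_count - j);
          ++j;
        }
      }
      return 0;
    }
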
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
new file mode 100644
index 0000000..8d08190
--- /dev/null
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_arm.h"
+#include "runtime.h"  // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace arm {
+
+static constexpr uint32_t kArmCalleeSaveRefSpills =
+    (1 << art::arm::R5) | (1 << art::arm::R6)  | (1 << art::arm::R7) | (1 << art::arm::R8) |
+    (1 << art::arm::R10) | (1 << art::arm::R11);
+static constexpr uint32_t kArmCalleeSaveArgSpills =
+    (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
+static constexpr uint32_t kArmCalleeSaveAllSpills =
+    (1 << art::arm::R4) | (1 << art::arm::R9);
+static constexpr uint32_t kArmCalleeSaveFpAllSpills =
+    (1 << art::arm::S0)  | (1 << art::arm::S1)  | (1 << art::arm::S2)  | (1 << art::arm::S3)  |
+    (1 << art::arm::S4)  | (1 << art::arm::S5)  | (1 << art::arm::S6)  | (1 << art::arm::S7)  |
+    (1 << art::arm::S8)  | (1 << art::arm::S9)  | (1 << art::arm::S10) | (1 << art::arm::S11) |
+    (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) | (1 << art::arm::S15) |
+    (1 << art::arm::S16) | (1 << art::arm::S17) | (1 << art::arm::S18) | (1 << art::arm::S19) |
+    (1 << art::arm::S20) | (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
+    (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) | (1 << art::arm::S27) |
+    (1 << art::arm::S28) | (1 << art::arm::S29) | (1 << art::arm::S30) | (1 << art::arm::S31);
+
+constexpr uint32_t ArmCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+  return kArmCalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+      (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0) | (1 << art::arm::LR);
+}
+
+constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+  return type == Runtime::kSaveAll ? kArmCalleeSaveFpAllSpills : 0;
+}
+
+constexpr uint32_t ArmCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+  return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
+                  POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
+                  1 /* Method* */) * kArmPointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+  return QuickMethodFrameInfo(ArmCalleeSaveFrameSize(type),
+                              ArmCalleeSaveCoreSpills(type),
+                              ArmCalleeSaveFpSpills(type));
+}
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
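
Since everything in this header is constexpr, the resulting sizes can be pinned down at compile time. Working the arithmetic from the masks above (and assuming kStackAlignment == 16 and kArmPointerSize == 4, their usual runtime values), a sketch of such checks:

    static_assert(ArmCalleeSaveFrameSize(Runtime::kSaveAll) == 176u,
                  "9 GPRs + 32 FPRs + Method* = 42 slots * 4 = 168, rounded up to 176");
    static_assert(ArmCalleeSaveFrameSize(Runtime::kRefsOnly) == 32u,
                  "7 GPRs + Method* = 8 slots * 4 = 32");
    static_assert(ArmCalleeSaveFrameSize(Runtime::kRefsAndArgs) == 48u,
                  "10 GPRs + Method* = 11 slots * 4 = 44, rounded up to 48");
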
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 0890fa9..fae44af 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -18,8 +18,9 @@
 
 #include "context_arm64.h"
 
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
 #include "stack.h"
 #include "thread.h"
 
@@ -45,18 +46,15 @@
 
 void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
   mirror::ArtMethod* method = fr.GetMethod();
-  uint32_t core_spills = method->GetCoreSpillMask();
-  uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = POPCOUNT(core_spills);
-  size_t fp_spill_count = POPCOUNT(fp_core_spills);
-  size_t frame_size = method->GetFrameSizeInBytes();
-
+  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+  size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context.
     int j = 1;
     for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
-      if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.CalleeSaveAddress(spill_count  - j, frame_size);
+      if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
         j++;
       }
     }
@@ -66,8 +64,9 @@
     // Lowest number spill is farthest away, walk registers and fill into context.
     int j = 1;
     for (size_t i = 0; i < kNumberOfDRegisters; i++) {
-      if (((fp_core_spills >> i) & 1) != 0) {
-        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+      if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+                                        frame_info.FrameSizeInBytes());
         j++;
       }
     }
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
new file mode 100644
index 0000000..cb830ac
--- /dev/null
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_arm64.h"
+#include "runtime.h"  // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace arm64 {
+
+// Callee-saved registers.
+static constexpr uint32_t kArm64CalleeSaveRefSpills =
+    (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
+    (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
+    (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
+    (1 << art::arm64::X28);
+// X0 is the method pointer. Not saved.
+static constexpr uint32_t kArm64CalleeSaveArgSpills =
+    (1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
+    (1 << art::arm64::X4) | (1 << art::arm64::X5) | (1 << art::arm64::X6) |
+    (1 << art::arm64::X7);
+// TODO: This is conservative. Only kSaveAll should include the thread register.
+// The thread register is not preserved by AAPCS64.
+// LR is always saved.
+static constexpr uint32_t kArm64CalleeSaveAllSpills = 0;  // (1 << art::arm64::LR);
+
+// Save callee-saved floating point registers. Rest are scratch/parameters.
+static constexpr uint32_t kArm64CalleeSaveFpArgSpills =
+    (1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
+    (1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
+    (1 << art::arm64::D6) | (1 << art::arm64::D7);
+static constexpr uint32_t kArm64CalleeSaveFpRefSpills =
+    (1 << art::arm64::D8)  | (1 << art::arm64::D9)  | (1 << art::arm64::D10) |
+    (1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
+    (1 << art::arm64::D14) | (1 << art::arm64::D15);
+static constexpr uint32_t kArm64FpAllSpills =
+    kArm64CalleeSaveFpArgSpills |
+    (1 << art::arm64::D16) | (1 << art::arm64::D17) | (1 << art::arm64::D18) |
+    (1 << art::arm64::D19) | (1 << art::arm64::D20) | (1 << art::arm64::D21) |
+    (1 << art::arm64::D22) | (1 << art::arm64::D23) | (1 << art::arm64::D24) |
+    (1 << art::arm64::D25) | (1 << art::arm64::D26) | (1 << art::arm64::D27) |
+    (1 << art::arm64::D28) | (1 << art::arm64::D29) | (1 << art::arm64::D30) |
+    (1 << art::arm64::D31);
+
+constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+  return kArm64CalleeSaveRefSpills |
+      (type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+      (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) | (1 << art::arm64::FP) |
+      (1 << art::arm64::X18) | (1 << art::arm64::LR);
+}
+
+constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+  return kArm64CalleeSaveFpRefSpills |
+      (type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
+      (type == Runtime::kSaveAll ? kArm64FpAllSpills : 0);
+}
+
+constexpr uint32_t Arm64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+  return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
+                  POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
+                  1 /* Method* */) * kArm64PointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+  return QuickMethodFrameInfo(Arm64CalleeSaveFrameSize(type),
+                              Arm64CalleeSaveCoreSpills(type),
+                              Arm64CalleeSaveFpSpills(type));
+}
+
+}  // namespace arm64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
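
The same exercise for arm64, which always pins FP, X18 and LR on top of the reference spills and uses 8-byte slots. Derived by hand from the masks above (again assuming kStackAlignment == 16, with kArm64PointerSize == 8):

    static_assert(Arm64CalleeSaveFrameSize(Runtime::kSaveAll) == 368u,
                  "13 GPRs + 32 FPRs + Method* = 46 slots * 8 = 368");
    static_assert(Arm64CalleeSaveFrameSize(Runtime::kRefsOnly) == 176u,
                  "13 GPRs + 8 FPRs + Method* = 22 slots * 8 = 176");
    static_assert(Arm64CalleeSaveFrameSize(Runtime::kRefsAndArgs) == 304u,
                  "20 GPRs + 16 FPRs + Method* = 37 slots * 8 = 296, rounded up to 304");
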
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 0950e71..ad28891 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -16,8 +16,9 @@
 
 #include "context_mips.h"
 
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
 #include "stack.h"
 
 namespace art {
@@ -41,17 +42,15 @@
 
 void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
   mirror::ArtMethod* method = fr.GetMethod();
-  uint32_t core_spills = method->GetCoreSpillMask();
-  uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = POPCOUNT(core_spills);
-  size_t fp_spill_count = POPCOUNT(fp_core_spills);
-  size_t frame_size = method->GetFrameSizeInBytes();
+  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+  size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context.
     int j = 1;
     for (size_t i = 0; i < kNumberOfCoreRegisters; i++) {
-      if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+      if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
         j++;
       }
     }
@@ -60,8 +59,9 @@
     // Lowest number spill is farthest away, walk registers and fill into context.
     int j = 1;
     for (size_t i = 0; i < kNumberOfFRegisters; i++) {
-      if (((fp_core_spills >> i) & 1) != 0) {
-        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+      if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+                                        frame_info.FrameSizeInBytes());
         j++;
       }
     }
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/quick_method_frame_info_mips.h
new file mode 100644
index 0000000..2a8bcf0
--- /dev/null
+++ b/runtime/arch/mips/quick_method_frame_info_mips.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_mips.h"
+#include "runtime.h"  // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace mips {
+
+static constexpr uint32_t kMipsCalleeSaveRefSpills =
+    (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) | (1 << art::mips::S5) |
+    (1 << art::mips::S6) | (1 << art::mips::S7) | (1 << art::mips::GP) | (1 << art::mips::FP);
+static constexpr uint32_t kMipsCalleeSaveArgSpills =
+    (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
+static constexpr uint32_t kMipsCalleeSaveAllSpills =
+    (1 << art::mips::S0) | (1 << art::mips::S1);
+
+constexpr uint32_t MipsCalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+  return kMipsCalleeSaveRefSpills |
+      (type == Runtime::kRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+      (type == Runtime::kSaveAll ? kMipsCalleeSaveAllSpills : 0) | (1 << art::mips::RA);
+}
+
+constexpr uint32_t MipsCalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+  return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
+                  (type == Runtime::kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
+                 kMipsPointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+  return QuickMethodFrameInfo(MipsCalleeSaveFrameSize(type),
+                              MipsCalleeSaveCoreSpills(type),
+                              0u);
+}
+
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
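
MIPS has no FP spills here, and the (type == Runtime::kRefsAndArgs ? 0 : 3) padding words make every callee-save frame come out identical: (11 + 3 + 1) * 4 = 60 for kSaveAll, (9 + 3 + 1) * 4 = 52 for kRefsOnly, and (12 + 0 + 1) * 4 = 52 for kRefsAndArgs, all rounding up to 64. As a compile-time sketch (assuming kStackAlignment == 16 and kMipsPointerSize == 4):

    static_assert(MipsCalleeSaveFrameSize(Runtime::kSaveAll) == 64u, "60 rounded up to 64");
    static_assert(MipsCalleeSaveFrameSize(Runtime::kRefsOnly) == 64u, "52 rounded up to 64");
    static_assert(MipsCalleeSaveFrameSize(Runtime::kRefsAndArgs) == 64u, "52 rounded up to 64");
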
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 76d028d..6a2bfb5 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -33,10 +33,11 @@
     {
       // Create callee-save methods
       ScopedObjectAccess soa(Thread::Current());
+      runtime_->SetInstructionSet(kRuntimeISA);
       for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
         Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
         if (!runtime_->HasCalleeSaveMethod(type)) {
-          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(kRuntimeISA, type), type);
+          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
         }
       }
     }
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index c68d76a..8c98d91 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,8 +16,9 @@
 
 #include "context_x86.h"
 
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
 #include "stack.h"
 
 namespace art {
@@ -37,16 +38,15 @@
 
 void X86Context::FillCalleeSaves(const StackVisitor& fr) {
   mirror::ArtMethod* method = fr.GetMethod();
-  uint32_t core_spills = method->GetCoreSpillMask();
-  size_t spill_count = POPCOUNT(core_spills);
-  DCHECK_EQ(method->GetFpSpillMask(), 0u);
-  size_t frame_size = method->GetFrameSizeInBytes();
+  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+  DCHECK_EQ(frame_info.FpSpillMask(), 0u);
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context.
     int j = 2;  // Offset j to skip return address spill.
     for (int i = 0; i < kNumberOfCpuRegisters; i++) {
-      if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+      if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
         j++;
       }
     }
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
new file mode 100644
index 0000000..b9dc0d8
--- /dev/null
+++ b/runtime/arch/x86/quick_method_frame_info_x86.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
+#define ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86.h"
+#include "runtime.h"  // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace x86 {
+
+static constexpr uint32_t kX86CalleeSaveRefSpills =
+    (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
+static constexpr uint32_t kX86CalleeSaveArgSpills =
+    (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
+constexpr uint32_t X86CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+  return kX86CalleeSaveRefSpills | (type == Runtime::kRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+      (1 << art::x86::kNumberOfCpuRegisters);  // fake return address callee save
+}
+
+constexpr uint32_t X86CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+  return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
+                  1 /* Method* */) * kX86PointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+  return QuickMethodFrameInfo(X86CalleeSaveFrameSize(type),
+                              X86CalleeSaveCoreSpills(type),
+                              0u);
+}
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
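
On x86 the three types also collapse to one size: kSaveAll and kRefsOnly spill four "registers" (three callee-saves plus the fake return-address bit), kRefsAndArgs seven, giving RoundUp(5 * 4, 16) = 32 and RoundUp(8 * 4, 16) = 32. A sketch, assuming kStackAlignment == 16 and kX86PointerSize == 4:

    static_assert(X86CalleeSaveFrameSize(Runtime::kSaveAll) == 32u, "20 rounded up to 32");
    static_assert(X86CalleeSaveFrameSize(Runtime::kRefsOnly) == 32u, "20 rounded up to 32");
    static_assert(X86CalleeSaveFrameSize(Runtime::kRefsAndArgs) == 32u, "exactly 32");
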
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 29a7065..810ef94 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,8 +16,9 @@
 
 #include "context_x86_64.h"
 
-#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
 #include "mirror/object-inl.h"
+#include "quick/quick_method_frame_info.h"
 #include "stack.h"
 
 namespace art {
@@ -40,17 +41,15 @@
 
 void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
   mirror::ArtMethod* method = fr.GetMethod();
-  uint32_t core_spills = method->GetCoreSpillMask();
-  uint32_t fp_core_spills = method->GetFpSpillMask();
-  size_t spill_count = POPCOUNT(core_spills);
-  size_t fp_spill_count = POPCOUNT(fp_core_spills);
-  size_t frame_size = method->GetFrameSizeInBytes();
+  const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+  size_t spill_count = POPCOUNT(frame_info.CoreSpillMask());
+  size_t fp_spill_count = POPCOUNT(frame_info.FpSpillMask());
   if (spill_count > 0) {
     // Lowest number spill is farthest away, walk registers and fill into context.
     size_t j = 2;  // Offset j to skip return address spill.
     for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
-      if (((core_spills >> i) & 1) != 0) {
-        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_size);
+      if (((frame_info.CoreSpillMask() >> i) & 1) != 0) {
+        gprs_[i] = fr.CalleeSaveAddress(spill_count - j, frame_info.FrameSizeInBytes());
         j++;
       }
     }
@@ -59,8 +58,9 @@
     // Lowest number spill is farthest away, walk registers and fill into context.
     size_t j = 2;  // Offset j to skip return address spill.
     for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
-      if (((fp_core_spills >> i) & 1) != 0) {
-        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j, frame_size);
+      if (((frame_info.FpSpillMask() >> i) & 1) != 0) {
+        fprs_[i] = fr.CalleeSaveAddress(spill_count + fp_spill_count - j,
+                                        frame_info.FrameSizeInBytes());
         j++;
       }
     }
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
new file mode 100644
index 0000000..6183909
--- /dev/null
+++ b/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86_64.h"
+#include "runtime.h"  // for Runtime::CalleeSaveType.
+
+namespace art {
+namespace x86_64 {
+
+static constexpr uint32_t kX86_64CalleeSaveRefSpills =
+    (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
+    (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
+static constexpr uint32_t kX86_64CalleeSaveArgSpills =
+    (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
+    (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
+static constexpr uint32_t kX86_64CalleeSaveFpArgSpills =
+    (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
+    (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
+    (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
+
+constexpr uint32_t X86_64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
+  return kX86_64CalleeSaveRefSpills |
+      (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+      (1 << art::x86_64::kNumberOfCpuRegisters);  // fake return address callee save
+}
+
+constexpr uint32_t X86_64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
+  return (type == Runtime::kRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0);
+}
+
+constexpr uint32_t X86_64CalleeSaveFrameSize(Runtime::CalleeSaveType type) {
+  return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
+                  POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
+                  1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
+}
+
+constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
+  return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
+                              X86_64CalleeSaveCoreSpills(type),
+                              X86_64CalleeSaveFpSpills(type));
+}
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
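
With these per-ISA headers in place, Runtime::SetInstructionSet (the new call sites appear in dex2oat, stub_test and image_space above) can precompute the frame info for each CalleeSaveType instead of reading it from an ArtMethod. A sketch of the idea, not the literal implementation; the callee_save_method_frame_infos_ array name is assumed:

    void Runtime::SetInstructionSet(InstructionSet instruction_set) {
      instruction_set_ = instruction_set;
      if (instruction_set_ == kThumb2 || instruction_set_ == kArm) {
        for (int i = 0; i != kLastCalleeSaveType; ++i) {
          CalleeSaveType type = static_cast<CalleeSaveType>(i);
          callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
        }
      } else if (instruction_set_ == kX86_64) {
        for (int i = 0; i != kLastCalleeSaveType; ++i) {
          CalleeSaveType type = static_cast<CalleeSaveType>(i);
          callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
        }
      }  // ... and likewise for arm64, mips and x86.
    }
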
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 0e01dc2..a3e2b15 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -399,13 +399,13 @@
   return count;
 }
 
-void BitVector::Dump(std::ostream& os, const char *prefix) {
+void BitVector::Dump(std::ostream& os, const char *prefix) const {
   std::ostringstream buffer;
   DumpHelper(buffer, prefix);
   os << buffer.str() << std::endl;
 }
 
-void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) {
+void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) const {
   std::ostringstream buffer;
   Dump(buffer, prefix);
 
@@ -421,7 +421,7 @@
   fprintf(file, "\\\n");
 }
 
-void BitVector::DumpHelper(std::ostringstream& buffer, const char* prefix) {
+void BitVector::DumpHelper(std::ostringstream& buffer, const char* prefix) const {
   // Initialize it.
   if (prefix != nullptr) {
     buffer << prefix;
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 6ee6b00..2a68396 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -148,11 +148,11 @@
 
     bool EnsureSizeAndClear(unsigned int num);
 
-    void Dump(std::ostream& os, const char* prefix);
-    void DumpDot(FILE* file, const char* prefix, bool last_entry = false);
+    void Dump(std::ostream& os, const char* prefix) const;
+    void DumpDot(FILE* file, const char* prefix, bool last_entry = false) const;
 
   protected:
-    void DumpHelper(std::ostringstream& buffer, const char* prefix);
+    void DumpHelper(std::ostringstream& buffer, const char* prefix) const;
 
   private:
     Allocator* const allocator_;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e3c162b..b2d8b37 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1828,27 +1828,6 @@
       DCHECK(method->GetEntryPointFromQuickCompiledCode() ==
           GetQuickResolutionTrampoline(runtime->GetClassLinker())
           || method->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline());
-
-      DCHECK_EQ(method->GetFrameSizeInBytes<false>(), 0U);
-
-      // Fix up method metadata if necessary.
-      uint32_t s_len;
-      const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(dex_method_index), &s_len);
-      uint32_t refs = 1;    // Native method always has "this" or class.
-      for (uint32_t i = 1; i < s_len; ++i) {
-        if (shorty[i] == 'L') {
-          refs++;
-        }
-      }
-      size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(refs);
-
-      // Get the generic spill masks and base frame size.
-      mirror::ArtMethod* callee_save_method =
-          Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
-      method->SetFrameSizeInBytes(callee_save_method->GetFrameSizeInBytes() + sirt_size);
-      method->SetCoreSpillMask(callee_save_method->GetCoreSpillMask());
-      method->SetFpSpillMask(callee_save_method->GetFpSpillMask());
     }
   }
 
@@ -3027,11 +3006,6 @@
 
   // At runtime the method looks like a reference and argument saving method, clone the code
   // related parameters from this method.
-  mirror::ArtMethod* refs_and_args =
-      Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-  method->SetCoreSpillMask(refs_and_args->GetCoreSpillMask());
-  method->SetFpSpillMask(refs_and_args->GetFpSpillMask());
-  method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes());
   method->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler());
   method->SetEntryPointFromPortableCompiledCode(GetPortableProxyInvokeHandler());
   method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 1218357..b68ab4a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -491,9 +491,6 @@
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_),           "dexCodeItemOffset"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_),               "dexMethodIndex"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_),                   "methodIndex"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_core_spill_mask_),          "quickCoreSpillMask"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_fp_spill_mask_),            "quickFpSpillMask"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_frame_size_in_bytes_),      "quickFrameSizeInBytes"));
   };
 };
 
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 97a8367..feb2331 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -72,9 +72,10 @@
 
     const std::vector<uint8_t>& fake_vmap_table_data = fake_vmap_table_data_.GetData();
     const std::vector<uint8_t>& fake_mapping_data = fake_mapping_data_.GetData();
-    uint32_t vmap_table_offset = sizeof(OatMethodHeader) + fake_vmap_table_data.size();
+    uint32_t vmap_table_offset = sizeof(OatQuickMethodHeader) + fake_vmap_table_data.size();
     uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size();
-    OatMethodHeader method_header(vmap_table_offset, mapping_table_offset, code_size);
+    OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset,
+                                       4 * kPointerSize, 0u, 0u, code_size);
     fake_header_code_and_maps_.resize(sizeof(method_header));
     memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header));
     fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
@@ -91,13 +92,11 @@
 
     method_f_ = my_klass_->FindVirtualMethod("f", "()I");
     ASSERT_TRUE(method_f_ != NULL);
-    method_f_->SetFrameSizeInBytes(4 * kPointerSize);
     method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
     method_f_->SetNativeGcMap(&fake_gc_map_[0]);
 
     method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
     ASSERT_TRUE(method_g_ != NULL);
-    method_g_->SetFrameSizeInBytes(4 * kPointerSize);
     method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
     method_g_->SetNativeGcMap(&fake_gc_map_[0]);
   }
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a7795e6..446f898 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -245,21 +245,6 @@
     return nullptr;
   }
 
-  Runtime* runtime = Runtime::Current();
-  mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
-  runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
-  mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
-  runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
-  mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
-  runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
-
-  mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kSaveAll);
-  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsOnly);
-  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsAndArgs);
-
   UniquePtr<ImageSpace> space(new ImageSpace(image_filename, image_location,
                                              map.release(), bitmap.release()));
   if (kIsDebugBuild) {
@@ -277,6 +262,23 @@
     return nullptr;
   }
 
+  Runtime* runtime = Runtime::Current();
+  runtime->SetInstructionSet(space->oat_file_->GetOatHeader().GetInstructionSet());
+
+  mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+  runtime->SetResolutionMethod(down_cast<mirror::ArtMethod*>(resolution_method));
+  mirror::Object* imt_conflict_method = image_header.GetImageRoot(ImageHeader::kImtConflictMethod);
+  runtime->SetImtConflictMethod(down_cast<mirror::ArtMethod*>(imt_conflict_method));
+  mirror::Object* default_imt = image_header.GetImageRoot(ImageHeader::kDefaultImt);
+  runtime->SetDefaultImt(down_cast<mirror::ObjectArray<mirror::ArtMethod>*>(default_imt));
+
+  mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kSaveAll);
+  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsOnly);
+  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::ArtMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time)
              << ") " << *space.get();
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index fb9a09a..91753df 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -23,7 +23,8 @@
 #include "entrypoints/entrypoint_utils.h"
 #include "object_array.h"
 #include "oat.h"
-#include "runtime.h"
+#include "quick/quick_method_frame_info.h"
+#include "runtime-inl.h"
 
 namespace art {
 namespace mirror {
@@ -81,7 +82,7 @@
   if (code == nullptr) {
     return 0u;
   }
-  return reinterpret_cast<const OatMethodHeader*>(code)[-1].code_size_;
+  return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
 }
 
 inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
@@ -201,6 +202,40 @@
       OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
 }
 
+inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
+  if (UNLIKELY(IsPortableCompiled())) {
+    // Portable compiled dex bytecode or jni stub.
+    return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
+  }
+  Runtime* runtime = Runtime::Current();
+  if (UNLIKELY(IsAbstract()) || UNLIKELY(IsProxyMethod())) {
+    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+  }
+  if (UNLIKELY(IsRuntimeMethod())) {
+    return runtime->GetRuntimeMethodFrameInfo(this);
+  }
+
+  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+  // On failure, instead of nullptr we get the quick-generic-jni-trampoline for a native
+  // method (indicating generic JNI), or the quick-to-interpreter-bridge (but not the
+  // trampoline) for non-native methods. We really shouldn't see a failure for non-native
+  // methods here.
+  DCHECK(entry_point != GetQuickToInterpreterBridgeTrampoline(runtime->GetClassLinker()));
+  CHECK(entry_point != GetQuickToInterpreterBridge());
+
+  if (UNLIKELY(entry_point == GetQuickGenericJniTrampoline())) {
+    // Generic JNI frame.
+    DCHECK(IsNative());
+    uint32_t sirt_refs = MethodHelper(this).GetNumberOfReferenceArgsWithoutReceiver() + 1;
+    size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(sirt_refs);
+    QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+    return QuickMethodFrameInfo(callee_info.FrameSizeInBytes() + sirt_size,
+                                callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+  }
+
+  const void* code_pointer = EntryPointToCodePointer(entry_point);
+  return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+}
+
 }  // namespace mirror
 }  // namespace art
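
GetQuickFrameInfo() is now the single source of frame layout for stack walking; e.g. the return-PC slot is derived rather than stored per method. An illustrative consumer (the helper name and the raw-pointer walk are hypothetical):

    uintptr_t GetCallerPc(mirror::ArtMethod* method, uintptr_t frame_base) {
      QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
      // The return address sits in the last slot of the fixed frame.
      uintptr_t return_pc_addr = frame_base + frame_info.FrameSizeInBytes() - kPointerSize;
      return *reinterpret_cast<uintptr_t*>(return_pc_addr);
    }
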
 
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 4275f25..eef60f7 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -389,7 +389,7 @@
   if (code == nullptr) {
     return nullptr;
   }
-  uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_;
+  uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_;
   if (UNLIKELY(offset == 0u)) {
     return nullptr;
   }
@@ -401,7 +401,7 @@
   if (code == nullptr) {
     return nullptr;
   }
-  uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_;
+  uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].vmap_table_offset_;
   if (UNLIKELY(offset == 0u)) {
     return nullptr;
   }
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 71f0210..49d22ab 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -23,6 +23,7 @@
 #include "modifiers.h"
 #include "object.h"
 #include "object_callbacks.h"
+#include "quick/quick_method_frame_info.h"
 
 namespace art {
 
@@ -318,19 +319,14 @@
 
   template <bool kCheckFrameSize = true>
   uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    uint32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_));
+    uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
     if (kCheckFrameSize) {
       DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
     }
     return result;
   }
 
-  void SetFrameSizeInBytes(size_t new_frame_size_in_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // Not called within a transaction.
-    SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_),
-                      new_frame_size_in_bytes);
-  }
+  QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   size_t GetReturnPcOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return GetFrameSizeInBytes() - kPointerSize;
@@ -362,26 +358,6 @@
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
   }
 
-  uint32_t GetCoreSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_));
-  }
-
-  void SetCoreSpillMask(uint32_t core_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // Computed during compilation.
-    // Not called within a transaction.
-    SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), core_spill_mask);
-  }
-
-  uint32_t GetFpSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_));
-  }
-
-  void SetFpSpillMask(uint32_t fp_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    // Computed during compilation.
-    // Not called within a transaction.
-    SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), fp_spill_mask);
-  }
-
   // Is this a CalleeSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
   // conventions for a method of managed code. Returns false for Proxy methods.
   bool IsRuntimeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -474,20 +450,6 @@
   // ifTable.
   uint32_t method_index_;
 
-  // --- Quick compiler meta-data. ---
-  // TODO: merge and place in native heap, such as done with the code size.
-
-  // Bit map of spilled machine registers.
-  uint32_t quick_core_spill_mask_;
-
-  // Bit map of spilled floating point machine registers.
-  uint32_t quick_fp_spill_mask_;
-
-  // Fixed frame size for this method when executed.
-  uint32_t quick_frame_size_in_bytes_;
-
-  // --- End of quick compiler meta-data. ---
-
   static Class* java_lang_reflect_ArtMethod_;
 
  private:
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index f541633..7490e6a 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -58,12 +58,12 @@
     Runtime* runtime = Runtime::Current();
     JavaVMExt* vm = runtime->GetJavaVM();
     if (!vm->check_jni) {
-      VLOG(jni) << "Late-enabling -Xcheck:jni";
+      LOG(INFO) << "Late-enabling -Xcheck:jni";
       vm->SetCheckJniEnabled(true);
       // There's only one thread running at this point, so only one JNIEnv to fix up.
       Thread::Current()->GetJniEnv()->SetCheckJniEnabled(true);
     } else {
-      VLOG(jni) << "Not late-enabling -Xcheck:jni (already on)";
+      LOG(INFO) << "Not late-enabling -Xcheck:jni (already on)";
     }
     debug_flags &= ~DEBUG_ENABLE_CHECKJNI;
   }
diff --git a/runtime/oat.cc b/runtime/oat.cc
index a1f4fd0..cb9334a 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '2', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '2', '8', '\0' };
 
 OatHeader::OatHeader() {
   memset(this, 0, sizeof(*this));
@@ -345,40 +345,34 @@
 
 OatMethodOffsets::OatMethodOffsets()
   : code_offset_(0),
-    frame_size_in_bytes_(0),
-    core_spill_mask_(0),
-    fp_spill_mask_(0),
     gc_map_offset_(0)
 {}
 
 OatMethodOffsets::OatMethodOffsets(uint32_t code_offset,
-                                   uint32_t frame_size_in_bytes,
-                                   uint32_t core_spill_mask,
-                                   uint32_t fp_spill_mask,
                                    uint32_t gc_map_offset
                                    )
   : code_offset_(code_offset),
-    frame_size_in_bytes_(frame_size_in_bytes),
-    core_spill_mask_(core_spill_mask),
-    fp_spill_mask_(fp_spill_mask),
     gc_map_offset_(gc_map_offset)
 {}
 
 OatMethodOffsets::~OatMethodOffsets() {}
 
-OatMethodHeader::OatMethodHeader()
+OatQuickMethodHeader::OatQuickMethodHeader()
   : mapping_table_offset_(0),
     vmap_table_offset_(0),
+    frame_info_(0, 0, 0),
     code_size_(0)
 {}
 
-OatMethodHeader::OatMethodHeader(uint32_t vmap_table_offset, uint32_t mapping_table_offset,
-                                 uint32_t code_size)
+OatQuickMethodHeader::OatQuickMethodHeader(
+    uint32_t mapping_table_offset, uint32_t vmap_table_offset, uint32_t frame_size_in_bytes,
+    uint32_t core_spill_mask, uint32_t fp_spill_mask, uint32_t code_size)
   : mapping_table_offset_(mapping_table_offset),
     vmap_table_offset_(vmap_table_offset),
+    frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
     code_size_(code_size)
 {}
 
-OatMethodHeader::~OatMethodHeader() {}
+OatQuickMethodHeader::~OatQuickMethodHeader() {}
 
 }  // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index e9dfae9..7be768c 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -22,6 +22,7 @@
 #include "base/macros.h"
 #include "dex_file.h"
 #include "instruction_set.h"
+#include "quick/quick_method_frame_info.h"
 
 namespace art {
 
@@ -137,34 +138,31 @@
   OatMethodOffsets();
 
   OatMethodOffsets(uint32_t code_offset,
-                   uint32_t frame_size_in_bytes,
-                   uint32_t core_spill_mask,
-                   uint32_t fp_spill_mask,
                    uint32_t gc_map_offset);
 
   ~OatMethodOffsets();
 
   uint32_t code_offset_;
-  uint32_t frame_size_in_bytes_;
-  uint32_t core_spill_mask_;
-  uint32_t fp_spill_mask_;
   uint32_t gc_map_offset_;
 };
 
-// OatMethodHeader precedes the raw code chunk generated by the Quick compiler.
-class PACKED(4) OatMethodHeader {
+// OatQuickMethodHeader precedes the raw code chunk generated by the Quick compiler.
+class PACKED(4) OatQuickMethodHeader {
  public:
-  OatMethodHeader();
+  OatQuickMethodHeader();
 
-  explicit OatMethodHeader(uint32_t mapping_table_offset, uint32_t vmap_table_offset,
-                           uint32_t code_size);
+  explicit OatQuickMethodHeader(uint32_t mapping_table_offset, uint32_t vmap_table_offset,
+                                uint32_t frame_size_in_bytes, uint32_t core_spill_mask,
+                                uint32_t fp_spill_mask, uint32_t code_size);
 
-  ~OatMethodHeader();
+  ~OatQuickMethodHeader();
 
   // The offset in bytes from the start of the mapping table to the end of the header.
   uint32_t mapping_table_offset_;
   // The offset in bytes from the start of the vmap table to the end of the header.
   uint32_t vmap_table_offset_;
+  // The stack frame information.
+  QuickMethodFrameInfo frame_info_;
   // The code size in bytes.
   uint32_t code_size_;
 };
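
The enlarged header stays PACKED(4), so its size is exactly the sum of its fields:
two table offsets (8 bytes), the 12-byte QuickMethodFrameInfo, and the code size
(4 bytes), 24 bytes in all, placed immediately before the method's machine code.
A sketch of the layout as the test harness in this change arranges it, plus a
compile-time check one could add (illustrative; not part of this change):

    //  | mapping table | vmap table | OatQuickMethodHeader | machine code |
    //                                ^ code_ptr - sizeof(header)          ^ code_ptr
    //
    // The offset_ fields store the distance in bytes from the start of each
    // table to the end of the header, which coincides with the code start.
    static_assert(sizeof(OatQuickMethodHeader) == 6 * sizeof(uint32_t),
                  "Unexpected OatQuickMethodHeader size");
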
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 00ae797..97ca6b2 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -21,6 +21,30 @@
 
 namespace art {
 
+inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
+  const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  if (code == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FrameSizeInBytes();
+}
+
+inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
+  const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  if (code == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.CoreSpillMask();
+}
+
+inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
+  const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+  if (code == nullptr) {
+    return 0u;
+  }
+  return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FpSpillMask();
+}
+
 inline uint32_t OatFile::OatMethod::GetMappingTableOffset() const {
   const uint8_t* mapping_table = GetMappingTable();
   return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
@@ -36,7 +60,7 @@
   if (code == nullptr) {
     return nullptr;
   }
-  uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_;
+  uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].mapping_table_offset_;
   if (UNLIKELY(offset == 0u)) {
     return nullptr;
   }
@@ -48,7 +72,7 @@
   if (code == nullptr) {
     return nullptr;
   }
-  uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_;
+  uint32_t offset = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].vmap_table_offset_;
   if (UNLIKELY(offset == 0u)) {
     return nullptr;
   }
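
The three new accessors and the two table getters all repeat the same
back-up-one-header read. A hedged refactoring sketch (hypothetical helper, not
introduced by this change):

    // Equivalent to the [-1] indexing above: step back sizeof(OatQuickMethodHeader)
    // bytes from the code start to reach the header emitted just before it.
    inline const OatQuickMethodHeader* GetOatQuickMethodHeader(const void* code) {
      DCHECK(code != nullptr);
      return reinterpret_cast<const OatQuickMethodHeader*>(code) - 1;
    }
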
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 56e1f05..7976f6a 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -464,7 +464,7 @@
   // NOTE: We don't keep the number of methods and cannot do a bounds check for method_index.
   if (methods_pointer_ == NULL) {
     CHECK_EQ(kOatClassNoneCompiled, type_);
-    return OatMethod(NULL, 0, 0, 0, 0, 0);
+    return OatMethod(NULL, 0, 0);
   }
   size_t methods_pointer_index;
   if (bitmap_ == NULL) {
@@ -473,7 +473,7 @@
   } else {
     CHECK_EQ(kOatClassSomeCompiled, type_);
     if (!BitVector::IsBitSet(bitmap_, method_index)) {
-      return OatMethod(NULL, 0, 0, 0, 0, 0);
+      return OatMethod(NULL, 0, 0);
     }
     size_t num_set_bits = BitVector::NumSetBits(bitmap_, method_index);
     methods_pointer_index = num_set_bits;
@@ -482,23 +482,14 @@
   return OatMethod(
       oat_file_->Begin(),
       oat_method_offsets.code_offset_,
-      oat_method_offsets.frame_size_in_bytes_,
-      oat_method_offsets.core_spill_mask_,
-      oat_method_offsets.fp_spill_mask_,
       oat_method_offsets.gc_map_offset_);
 }
 
 OatFile::OatMethod::OatMethod(const byte* base,
                               const uint32_t code_offset,
-                              const size_t frame_size_in_bytes,
-                              const uint32_t core_spill_mask,
-                              const uint32_t fp_spill_mask,
                               const uint32_t gc_map_offset)
   : begin_(base),
     code_offset_(code_offset),
-    frame_size_in_bytes_(frame_size_in_bytes),
-    core_spill_mask_(core_spill_mask),
-    fp_spill_mask_(fp_spill_mask),
     native_gc_map_offset_(gc_map_offset) {
 }
 
@@ -519,9 +510,6 @@
   CHECK(method != NULL);
   method->SetEntryPointFromPortableCompiledCode(GetPortableCode());
   method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
-  method->SetFrameSizeInBytes(frame_size_in_bytes_);
-  method->SetCoreSpillMask(core_spill_mask_);
-  method->SetFpSpillMask(fp_spill_mask_);
   method->SetNativeGcMap(GetNativeGcMap());  // Used by native methods in work around JNI mode.
 }
 
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index b358a00..8477723 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -78,15 +78,6 @@
     uint32_t GetCodeOffset() const {
       return code_offset_;
     }
-    size_t GetFrameSizeInBytes() const {
-      return frame_size_in_bytes_;
-    }
-    uint32_t GetCoreSpillMask() const {
-      return core_spill_mask_;
-    }
-    uint32_t GetFpSpillMask() const {
-      return fp_spill_mask_;
-    }
     uint32_t GetNativeGcMapOffset() const {
       return native_gc_map_offset_;
     }
@@ -120,6 +111,9 @@
       return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
     }
 
+    size_t GetFrameSizeInBytes() const;
+    uint32_t GetCoreSpillMask() const;
+    uint32_t GetFpSpillMask() const;
     uint32_t GetMappingTableOffset() const;
     uint32_t GetVmapTableOffset() const;
     const uint8_t* GetMappingTable() const;
@@ -130,9 +124,6 @@
     // Create an OatMethod with offsets relative to the given base address
     OatMethod(const byte* base,
               const uint32_t code_offset,
-              const size_t frame_size_in_bytes,
-              const uint32_t core_spill_mask,
-              const uint32_t fp_spill_mask,
               const uint32_t gc_map_offset);
 
    private:
@@ -147,9 +138,6 @@
     const byte* begin_;
 
     uint32_t code_offset_;
-    size_t frame_size_in_bytes_;
-    uint32_t core_spill_mask_;
-    uint32_t fp_spill_mask_;
     uint32_t native_gc_map_offset_;
 
     friend class OatClass;
diff --git a/runtime/quick/quick_method_frame_info.h b/runtime/quick/quick_method_frame_info.h
new file mode 100644
index 0000000..684d4da
--- /dev/null
+++ b/runtime/quick/quick_method_frame_info.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_QUICK_QUICK_METHOD_FRAME_INFO_H_
+#define ART_RUNTIME_QUICK_QUICK_METHOD_FRAME_INFO_H_
+
+#include <stdint.h>
+
+#include "base/macros.h"
+
+namespace art {
+
+class PACKED(4) QuickMethodFrameInfo {
+ public:
+  constexpr QuickMethodFrameInfo()
+    : frame_size_in_bytes_(0u),
+      core_spill_mask_(0u),
+      fp_spill_mask_(0u) {
+  }
+
+  constexpr QuickMethodFrameInfo(uint32_t frame_size_in_bytes, uint32_t core_spill_mask,
+                                 uint32_t fp_spill_mask)
+    : frame_size_in_bytes_(frame_size_in_bytes),
+      core_spill_mask_(core_spill_mask),
+      fp_spill_mask_(fp_spill_mask) {
+  }
+
+  uint32_t FrameSizeInBytes() const {
+    return frame_size_in_bytes_;
+  }
+
+  uint32_t CoreSpillMask() const {
+    return core_spill_mask_;
+  }
+
+  uint32_t FpSpillMask() const {
+    return fp_spill_mask_;
+  }
+
+ private:
+  uint32_t frame_size_in_bytes_;
+  uint32_t core_spill_mask_;
+  uint32_t fp_spill_mask_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_QUICK_QUICK_METHOD_FRAME_INFO_H_
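
Both constructors are constexpr, so an architecture header can precompute its
callee-save frame infos at compile time. A usage sketch with made-up values (the
mask and size here are illustrative, not any real ISA's):

    // 64-byte frame, two callee-save GPRs (bits 5 and 14), no FP spills.
    // Frame sizes are expected to stay kStackAlignment-aligned.
    constexpr QuickMethodFrameInfo kExampleFrameInfo(
        64u, (1u << 5) | (1u << 14), 0u);
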
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
new file mode 100644
index 0000000..29ddd1d
--- /dev/null
+++ b/runtime/runtime-inl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_INL_H_
+#define ART_RUNTIME_RUNTIME_INL_H_
+
+#include "runtime.h"
+
+namespace art {
+
+inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) const {
+  DCHECK(method != nullptr);
+  // Cannot be imt-conflict-method or resolution-method.
+  DCHECK(method != GetImtConflictMethod());
+  DCHECK(method != GetResolutionMethod());
+  // Don't use GetCalleeSaveMethod(); some tests don't set all callee-save methods.
+  if (method == callee_save_methods_[Runtime::kRefsAndArgs]) {
+    return GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+  } else if (method == callee_save_methods_[Runtime::kSaveAll]) {
+    return GetCalleeSaveMethodFrameInfo(Runtime::kSaveAll);
+  } else {
+    DCHECK(method == callee_save_methods_[Runtime::kRefsOnly]);
+    return GetCalleeSaveMethodFrameInfo(Runtime::kRefsOnly);
+  }
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_RUNTIME_INL_H_
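
Runtime methods (the callee-save trampolines) have no compiled code, so there is
no OatQuickMethodHeader to read; their frame info comes from the tables filled in
by SetInstructionSet() below. A sketch of the resulting split at a call site
(hypothetical helper; the real walkers in stack.cc and thread.cc may fold this
into GetQuickFrameInfo() itself):

    QuickMethodFrameInfo GetFrameInfo(mirror::ArtMethod* m)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      if (m->IsRuntimeMethod()) {
        return Runtime::Current()->GetRuntimeMethodFrameInfo(m);
      }
      return m->GetQuickFrameInfo();  // Reads the header before the code.
    }
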
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a390ac6..99d43f4 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -30,10 +30,15 @@
 #include <vector>
 #include <fcntl.h>
 
+#include "arch/arm/quick_method_frame_info_arm.h"
 #include "arch/arm/registers_arm.h"
+#include "arch/arm64/quick_method_frame_info_arm64.h"
 #include "arch/arm64/registers_arm64.h"
+#include "arch/mips/quick_method_frame_info_mips.h"
 #include "arch/mips/registers_mips.h"
+#include "arch/x86/quick_method_frame_info_x86.h"
 #include "arch/x86/registers_x86.h"
+#include "arch/x86_64/quick_method_frame_info_x86_64.h"
 #include "arch/x86_64/registers_x86_64.h"
 #include "atomic.h"
 #include "class_linker.h"
@@ -55,6 +60,7 @@
 #include "monitor.h"
 #include "parsed_options.h"
 #include "oat_file.h"
+#include "quick/quick_method_frame_info.h"
 #include "reflection.h"
 #include "ScopedLocalRef.h"
 #include "scoped_thread_state_change.h"
@@ -88,6 +94,7 @@
       resolution_method_(nullptr),
       imt_conflict_method_(nullptr),
       default_imt_(nullptr),
+      instruction_set_(kNone),
       compiler_callbacks_(nullptr),
       is_zygote_(false),
       is_concurrent_gc_enabled_(true),
@@ -202,6 +209,8 @@
     Thread* self = Thread::Current();
     if (self == nullptr) {
       os << "(Aborting thread was not attached to runtime!)\n";
+      DumpKernelStack(os, GetTid(), "  kernel: ", false);
+      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
     } else {
       os << "Aborting thread:\n";
       if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
@@ -981,8 +990,7 @@
   return method.get();
 }
 
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(InstructionSet instruction_set,
-                                                   CalleeSaveType type) {
+mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
   Thread* self = Thread::Current();
   Runtime* runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
@@ -992,118 +1000,7 @@
   method->SetDexMethodIndex(DexFile::kDexNoIndex);
   method->SetEntryPointFromPortableCompiledCode(nullptr);
   method->SetEntryPointFromQuickCompiledCode(nullptr);
-  if ((instruction_set == kThumb2) || (instruction_set == kArm)) {
-    uint32_t ref_spills = (1 << art::arm::R5) | (1 << art::arm::R6)  | (1 << art::arm::R7) |
-                          (1 << art::arm::R8) | (1 << art::arm::R10) | (1 << art::arm::R11);
-    uint32_t arg_spills = (1 << art::arm::R1) | (1 << art::arm::R2) | (1 << art::arm::R3);
-    uint32_t all_spills = (1 << art::arm::R4) | (1 << art::arm::R9);
-    uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
-                           (type == kSaveAll ? all_spills : 0) | (1 << art::arm::LR);
-    uint32_t fp_all_spills = (1 << art::arm::S0)  | (1 << art::arm::S1)  | (1 << art::arm::S2) |
-                             (1 << art::arm::S3)  | (1 << art::arm::S4)  | (1 << art::arm::S5) |
-                             (1 << art::arm::S6)  | (1 << art::arm::S7)  | (1 << art::arm::S8) |
-                             (1 << art::arm::S9)  | (1 << art::arm::S10) | (1 << art::arm::S11) |
-                             (1 << art::arm::S12) | (1 << art::arm::S13) | (1 << art::arm::S14) |
-                             (1 << art::arm::S15) | (1 << art::arm::S16) | (1 << art::arm::S17) |
-                             (1 << art::arm::S18) | (1 << art::arm::S19) | (1 << art::arm::S20) |
-                             (1 << art::arm::S21) | (1 << art::arm::S22) | (1 << art::arm::S23) |
-                             (1 << art::arm::S24) | (1 << art::arm::S25) | (1 << art::arm::S26) |
-                             (1 << art::arm::S27) | (1 << art::arm::S28) | (1 << art::arm::S29) |
-                             (1 << art::arm::S30) | (1 << art::arm::S31);
-    uint32_t fp_spills = type == kSaveAll ? fp_all_spills : 0;
-    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
-                                 POPCOUNT(fp_spills) /* fprs */ +
-                                 1 /* Method* */) * kArmPointerSize, kStackAlignment);
-    method->SetFrameSizeInBytes(frame_size);
-    method->SetCoreSpillMask(core_spills);
-    method->SetFpSpillMask(fp_spills);
-  } else if (instruction_set == kMips) {
-    uint32_t ref_spills = (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) |
-                          (1 << art::mips::S5) | (1 << art::mips::S6) | (1 << art::mips::S7) |
-                          (1 << art::mips::GP) | (1 << art::mips::FP);
-    uint32_t arg_spills = (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3);
-    uint32_t all_spills = (1 << art::mips::S0) | (1 << art::mips::S1);
-    uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
-                           (type == kSaveAll ? all_spills : 0) | (1 << art::mips::RA);
-    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
-                                (type == kRefsAndArgs ? 0 : 3) + 1 /* Method* */) *
-                                kMipsPointerSize, kStackAlignment);
-    method->SetFrameSizeInBytes(frame_size);
-    method->SetCoreSpillMask(core_spills);
-    method->SetFpSpillMask(0);
-  } else if (instruction_set == kX86) {
-    uint32_t ref_spills = (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
-    uint32_t arg_spills = (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-    uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
-                         (1 << art::x86::kNumberOfCpuRegisters);  // fake return address callee save
-    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
-                                 1 /* Method* */) * kX86PointerSize, kStackAlignment);
-    method->SetFrameSizeInBytes(frame_size);
-    method->SetCoreSpillMask(core_spills);
-    method->SetFpSpillMask(0);
-  } else if (instruction_set == kX86_64) {
-    uint32_t ref_spills =
-        (1 << art::x86_64::RBX) | (1 << art::x86_64::RBP) | (1 << art::x86_64::R12) |
-        (1 << art::x86_64::R13) | (1 << art::x86_64::R14) | (1 << art::x86_64::R15);
-    uint32_t arg_spills =
-        (1 << art::x86_64::RSI) | (1 << art::x86_64::RDX) | (1 << art::x86_64::RCX) |
-        (1 << art::x86_64::R8) | (1 << art::x86_64::R9);
-    uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
-        (1 << art::x86_64::kNumberOfCpuRegisters);  // fake return address callee save
-    uint32_t fp_arg_spills =
-        (1 << art::x86_64::XMM0) | (1 << art::x86_64::XMM1) | (1 << art::x86_64::XMM2) |
-        (1 << art::x86_64::XMM3) | (1 << art::x86_64::XMM4) | (1 << art::x86_64::XMM5) |
-        (1 << art::x86_64::XMM6) | (1 << art::x86_64::XMM7);
-    uint32_t fp_spills = (type == kRefsAndArgs ? fp_arg_spills : 0);
-    size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
-                                 POPCOUNT(fp_spills) /* fprs */ +
-                                 1 /* Method* */) * kX86_64PointerSize, kStackAlignment);
-    method->SetFrameSizeInBytes(frame_size);
-    method->SetCoreSpillMask(core_spills);
-    method->SetFpSpillMask(fp_spills);
-  } else if (instruction_set == kArm64) {
-      // Callee saved registers
-      uint32_t ref_spills = (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
-                            (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
-                            (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
-                            (1 << art::arm64::X28);
-      // X0 is the method pointer. Not saved.
-      uint32_t arg_spills = (1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
-                            (1 << art::arm64::X4) | (1 << art::arm64::X5) | (1 << art::arm64::X6) |
-                            (1 << art::arm64::X7);
-      // TODO  This is conservative. Only ALL should include the thread register.
-      // The thread register is not preserved by the aapcs64.
-      // LR is always saved.
-      uint32_t all_spills =  0;  // (1 << art::arm64::LR);
-      uint32_t core_spills = ref_spills | (type == kRefsAndArgs ? arg_spills : 0) |
-                             (type == kSaveAll ? all_spills : 0) | (1 << art::arm64::FP)
-                             | (1 << art::arm64::X18) | (1 << art::arm64::LR);
-
-      // Save callee-saved floating point registers. Rest are scratch/parameters.
-      uint32_t fp_arg_spills = (1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
-                            (1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
-                            (1 << art::arm64::D6) | (1 << art::arm64::D7);
-      uint32_t fp_ref_spills = (1 << art::arm64::D8)  | (1 << art::arm64::D9)  | (1 << art::arm64::D10) |
-                               (1 << art::arm64::D11)  | (1 << art::arm64::D12)  | (1 << art::arm64::D13) |
-                               (1 << art::arm64::D14)  | (1 << art::arm64::D15);
-      uint32_t fp_all_spills = fp_arg_spills |
-                          (1 << art::arm64::D16)  | (1 << art::arm64::D17) | (1 << art::arm64::D18) |
-                          (1 << art::arm64::D19)  | (1 << art::arm64::D20) | (1 << art::arm64::D21) |
-                          (1 << art::arm64::D22)  | (1 << art::arm64::D23) | (1 << art::arm64::D24) |
-                          (1 << art::arm64::D25)  | (1 << art::arm64::D26) | (1 << art::arm64::D27) |
-                          (1 << art::arm64::D28)  | (1 << art::arm64::D29) | (1 << art::arm64::D30) |
-                          (1 << art::arm64::D31);
-      uint32_t fp_spills = fp_ref_spills | (type == kRefsAndArgs ? fp_arg_spills: 0)
-                          | (type == kSaveAll ? fp_all_spills : 0);
-      size_t frame_size = RoundUp((POPCOUNT(core_spills) /* gprs */ +
-                                   POPCOUNT(fp_spills) /* fprs */ +
-                                   1 /* Method* */) * kArm64PointerSize, kStackAlignment);
-      method->SetFrameSizeInBytes(frame_size);
-      method->SetCoreSpillMask(core_spills);
-      method->SetFpSpillMask(fp_spills);
-  } else {
-    UNIMPLEMENTED(FATAL) << instruction_set;
-  }
+  DCHECK_NE(instruction_set_, kNone);
   return method.get();
 }
 
@@ -1121,6 +1018,38 @@
   Dbg::AllowNewObjectRegistryObjects();
 }
 
+void Runtime::SetInstructionSet(InstructionSet instruction_set) {
+  instruction_set_ = instruction_set;
+  if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
+    for (int i = 0; i != kLastCalleeSaveType; ++i) {
+      CalleeSaveType type = static_cast<CalleeSaveType>(i);
+      callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
+    }
+  } else if (instruction_set_ == kMips) {
+    for (int i = 0; i != kLastCalleeSaveType; ++i) {
+      CalleeSaveType type = static_cast<CalleeSaveType>(i);
+      callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
+    }
+  } else if (instruction_set_ == kX86) {
+    for (int i = 0; i != kLastCalleeSaveType; ++i) {
+      CalleeSaveType type = static_cast<CalleeSaveType>(i);
+      callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
+    }
+  } else if (instruction_set_ == kX86_64) {
+    for (int i = 0; i != kLastCalleeSaveType; ++i) {
+      CalleeSaveType type = static_cast<CalleeSaveType>(i);
+      callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
+    }
+  } else if (instruction_set_ == kArm64) {
+    for (int i = 0; i != kLastCalleeSaveType; ++i) {
+      CalleeSaveType type = static_cast<CalleeSaveType>(i);
+      callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
+    }
+  } else {
+    UNIMPLEMENTED(FATAL) << instruction_set_;
+  }
+}
+
 void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
   DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
   callee_save_methods_[type] = method;
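
The per-ISA spill-mask arithmetic deleted above moves into the new
arch/*/quick_method_frame_info_*.h headers included at the top of this file.
Reconstructed from the deleted ARM block, the helper presumably looks roughly like
this (constant names are illustrative; the real header is not shown in this diff):

    QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveType type) {
      // Refs are always spilled; args only for kRefsAndArgs; the remaining GPRs
      // and the FP registers only for kSaveAll. LR is always saved.
      uint32_t core_spills = kArmCalleeSaveRefSpills |
          (type == Runtime::kRefsAndArgs ? kArmCalleeSaveArgSpills : 0u) |
          (type == Runtime::kSaveAll ? kArmCalleeSaveAllSpills : 0u) |
          (1u << art::arm::LR);
      uint32_t fp_spills = (type == Runtime::kSaveAll) ? kArmCalleeSaveFpAllSpills : 0u;
      // One slot per spilled GPR and FPR plus one for the Method*, rounded up to
      // the stack alignment.
      size_t frame_size = RoundUp(
          (POPCOUNT(core_spills) + POPCOUNT(fp_spills) + 1u) * kArmPointerSize,
          kStackAlignment);
      return QuickMethodFrameInfo(frame_size, core_spills, fp_spills);
    }
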
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 1ee0b1a..07b47c3 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -34,6 +34,7 @@
 #include "instrumentation.h"
 #include "jobject_comparator.h"
 #include "object_callbacks.h"
+#include "quick/quick_method_frame_info.h"
 #include "runtime_stats.h"
 #include "safe_map.h"
 #include "fault_handler.h"
@@ -325,20 +326,25 @@
     return callee_save_methods_[type];
   }
 
+  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
+    return callee_save_method_frame_infos_[type];
+  }
+
+  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(mirror::ArtMethod* method) const;
+
   static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
     return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
   }
 
+  InstructionSet GetInstructionSet() const {
+    return instruction_set_;
+  }
+
+  void SetInstructionSet(InstructionSet instruction_set);
+
   void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
 
-  mirror::ArtMethod* CreateCalleeSaveMethod(InstructionSet instruction_set,
-                                                 CalleeSaveType type)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  mirror::ArtMethod* CreateRefOnlyCalleeSaveMethod(InstructionSet instruction_set)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  mirror::ArtMethod* CreateRefAndArgsCalleeSaveMethod(InstructionSet instruction_set)
+  mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   int32_t GetStat(int kind);
@@ -468,6 +474,9 @@
   mirror::ArtMethod* imt_conflict_method_;
   mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
 
+  InstructionSet instruction_set_;
+  QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];
+
   CompilerCallbacks* compiler_callbacks_;
   bool is_zygote_;
   bool is_concurrent_gc_enabled_;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 6667419..fd31ec6 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -23,6 +23,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "object_utils.h"
+#include "quick/quick_method_frame_info.h"
 #include "runtime.h"
 #include "thread.h"
 #include "thread_list.h"
@@ -142,18 +143,17 @@
     DCHECK(m == GetMethod());
     const VmapTable vmap_table(m->GetVmapTable());
     uint32_t vmap_offset;
+    QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo();
     // TODO: IsInContext stops before spotting floating point registers.
     if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
       bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
-      uint32_t spill_mask = is_float ? m->GetFpSpillMask()
-                                     : m->GetCoreSpillMask();
+      uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
       return GetGPR(vmap_table.ComputeRegister(spill_mask, vmap_offset, kind));
     } else {
       const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
       DCHECK(code_item != NULL) << PrettyMethod(m);  // Can't be NULL or how would we compile its instructions?
-      size_t frame_size = m->GetFrameSizeInBytes();
-      return *GetVRegAddr(cur_quick_frame_, code_item, m->GetCoreSpillMask(), m->GetFpSpillMask(),
-                          frame_size, vreg);
+      return *GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+                          frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
     }
   } else {
     return cur_shadow_frame_->GetVReg(vreg);
@@ -167,19 +167,18 @@
     DCHECK(m == GetMethod());
     const VmapTable vmap_table(m->GetVmapTable());
     uint32_t vmap_offset;
+    QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo();
     // TODO: IsInContext stops before spotting floating point registers.
     if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
       bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
-      uint32_t spill_mask = is_float ? m->GetFpSpillMask() : m->GetCoreSpillMask();
+      uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
       const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kReferenceVReg);
       SetGPR(reg, new_value);
     } else {
       const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
       DCHECK(code_item != NULL) << PrettyMethod(m);  // Can't be NULL or how would we compile its instructions?
-      uint32_t core_spills = m->GetCoreSpillMask();
-      uint32_t fp_spills = m->GetFpSpillMask();
-      size_t frame_size = m->GetFrameSizeInBytes();
-      int offset = GetVRegOffset(code_item, core_spills, fp_spills, frame_size, vreg, kRuntimeISA);
+      int offset = GetVRegOffset(code_item, frame_info.CoreSpillMask(), frame_info.FpSpillMask(),
+                                 frame_info.FrameSizeInBytes(), vreg, kRuntimeISA);
       byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
       *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
     }
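
Both paths above pick a spill mask by vreg kind and then either map the vreg to a
machine register through the vmap table or fall back to a stack slot. A behavioral
approximation of the register lookup the ComputeRegister() calls rely on (not the
actual VmapTable implementation): vmap_offset indexes the set bits of the chosen
spill mask, and the matching bit position names the register.

    uint32_t NthSetBitPosition(uint32_t spill_mask, uint32_t n) {
      for (uint32_t reg = 0; spill_mask != 0u; ++reg, spill_mask >>= 1) {
        if ((spill_mask & 1u) != 0u && n-- == 0u) {
          return reg;  // The n-th spilled register, counting from bit 0.
        }
      }
      LOG(FATAL) << "vmap offset beyond spilled registers";
      return 0u;
    }
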
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 00a66d7..7ed0cb4 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -55,6 +55,7 @@
 #include "monitor.h"
 #include "object_utils.h"
 #include "quick_exception_handler.h"
+#include "quick/quick_method_frame_info.h"
 #include "reflection.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
@@ -2019,9 +2020,7 @@
         const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
         DCHECK(reg_bitmap != nullptr);
         const VmapTable vmap_table(m->GetVmapTable());
-        uint32_t core_spills = m->GetCoreSpillMask();
-        uint32_t fp_spills = m->GetFpSpillMask();
-        size_t frame_size = m->GetFrameSizeInBytes();
+        QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo();
         // For all dex registers in the bitmap
         mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
         DCHECK(cur_quick_frame != nullptr);
@@ -2030,7 +2029,8 @@
           if (TestBitmap(reg, reg_bitmap)) {
             uint32_t vmap_offset;
             if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
-              int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+              int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
+                                                        kReferenceVReg);
              // This is sound as spilled GPRs will be word sized (i.e. 32 or 64 bits).
               mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
               if (*ref_addr != nullptr) {
@@ -2039,8 +2039,8 @@
             } else {
               StackReference<mirror::Object>* ref_addr =
                   reinterpret_cast<StackReference<mirror::Object>*>(
-                      GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
-                                  reg));
+                      GetVRegAddr(cur_quick_frame, code_item, frame_info.CoreSpillMask(),
+                                  frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
               mirror::Object* ref = ref_addr->AsMirrorPtr();
               if (ref != nullptr) {
                 mirror::Object* new_ref = ref;