Merge "Use canonical paths when searching for dex files"
diff --git a/Android.mk b/Android.mk
index c5e90f2..e536a71 100644
--- a/Android.mk
+++ b/Android.mk
@@ -389,8 +389,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags ""
-	adb shell setprop dalvik.vm.image-dex2oat-flags ""
+	adb shell setprop dalvik.vm.dex2oat-filter ""
+	adb shell setprop dalvik.vm.image-dex2oat-filter ""
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
@@ -399,8 +399,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags ""
-	adb shell setprop dalvik.vm.image-dex2oat-flags ""
+	adb shell setprop dalvik.vm.dex2oat-filter ""
+	adb shell setprop dalvik.vm.image-dex2oat-filter ""
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
 	adb shell start
 
@@ -409,8 +409,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-flags ""
+	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	adb shell setprop dalvik.vm.image-dex2oat-filter ""
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
@@ -419,8 +419,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
+	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
@@ -429,8 +429,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=interpret-only"
-	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=interpret-only"
+	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
+	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
 	adb shell start
 
@@ -439,8 +439,8 @@
 	adb root && sleep 3
 	adb shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
-	adb shell setprop dalvik.vm.dex2oat-flags "--compiler-filter=verify-none"
-	adb shell setprop dalvik.vm.image-dex2oat-flags "--compiler-filter=verify-none"
+	adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
+	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
 	adb shell start
 
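Note: the Android.mk targets above stop passing a full dex2oat flag string and instead set only the filter name; the device side is expected to expand the property into the actual flag. A minimal sketch of that expansion, assuming a hypothetical helper (the property values match the Makefile; the function itself is illustrative only):

#include <string>

// Hypothetical helper: expands the dalvik.vm.dex2oat-filter property value
// into the flag dex2oat actually understands. An empty value means "use the
// compiler's default filter".
std::string BuildCompilerFilterFlag(const std::string& filter_value) {
  if (filter_value.empty()) {
    return "";
  }
  return "--compiler-filter=" + filter_value;  // e.g. "interpret-only".
}
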
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 8d0a5a3..6aee563 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -69,6 +69,7 @@
 
 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
     : reg_location_(NULL),
+      block_id_map_(std::less<unsigned int>(), arena->Adapter()),
       cu_(cu),
       ssa_base_vregs_(NULL),
       ssa_subscripts_(NULL),
@@ -101,11 +102,14 @@
       num_blocks_(0),
       current_code_item_(NULL),
       dex_pc_to_block_map_(arena, 0, kGrowableArrayMisc),
+      m_units_(arena->Adapter()),
+      method_stack_(arena->Adapter()),
       current_method_(kInvalidEntry),
       current_offset_(kInvalidEntry),
       def_count_(0),
       opcode_count_(NULL),
       num_ssa_regs_(0),
+      extended_basic_blocks_(arena->Adapter()),
       method_sreg_(0),
       attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
       checkstats_(NULL),
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 768ae21..491d72e 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -27,6 +27,7 @@
 #include "mir_method_info.h"
 #include "utils/arena_bit_vector.h"
 #include "utils/growable_array.h"
+#include "utils/arena_containers.h"
 #include "utils/scoped_arena_containers.h"
 #include "reg_location.h"
 #include "reg_storage.h"
@@ -1051,8 +1052,8 @@
   std::set<uint32_t> catches_;
 
   // TODO: make these private.
-  RegLocation* reg_location_;                         // Map SSA names to location.
-  SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
+  RegLocation* reg_location_;                               // Map SSA names to location.
+  ArenaSafeMap<unsigned int, unsigned int> block_id_map_;   // Block collapse lookup cache.
 
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
   static const uint32_t analysis_attributes_[kMirOpLast];
@@ -1171,15 +1172,15 @@
   unsigned int num_blocks_;
   const DexFile::CodeItem* current_code_item_;
   GrowableArray<uint16_t> dex_pc_to_block_map_;  // FindBlock lookup cache.
-  std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
+  ArenaVector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
-  std::vector<MIRLocation> method_stack_;        // Include stack
+  ArenaVector<MIRLocation> method_stack_;        // Include stack
   int current_method_;
   DexOffset current_offset_;                     // Offset in code units
   int def_count_;                                // Used to estimate size of ssa name storage.
   int* opcode_count_;                            // Dex opcode coverage stats.
   int num_ssa_regs_;                             // Number of names following SSA transformation.
-  std::vector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
+  ArenaVector<BasicBlockId> extended_basic_blocks_;  // Heads of block "traces".
   int method_sreg_;
   unsigned int attributes_;
   Checkstats* checkstats_;
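Note: the two mir_graph hunks above move MIRGraph's bookkeeping containers off the global heap and onto the compilation arena, so their memory is reclaimed in one shot when the arena is destroyed. The C++17 standard-library analogue of the same pattern (a sketch only; ART's ArenaVector/ArenaSafeMap plus ArenaAllocator::Adapter() are the in-tree equivalents, not shown here):

#include <map>
#include <memory_resource>
#include <vector>

int main() {
  std::pmr::monotonic_buffer_resource arena;  // Bulk-freed region, like ArenaAllocator.
  std::pmr::vector<int> m_units(&arena);      // Cf. m_units_(arena->Adapter()).
  std::pmr::map<unsigned int, unsigned int> block_id_map(&arena);
  m_units.push_back(42);
  block_id_map[1] = 2;
  return 0;  // All container memory is released with 'arena', no per-node frees.
}
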
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index e0b8ec6..21322a6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -84,6 +84,8 @@
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
     // Required for target - Dalvik-level generators.
+    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2) OVERRIDE;
     void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2);
     void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
@@ -92,12 +94,6 @@
                      RegLocation rl_src, int scale, bool card_mark);
     void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_shift);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
     void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2);
     void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -112,16 +108,6 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
-    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                   RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2, bool is_div);
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -201,6 +187,9 @@
     size_t GetInstructionOffset(LIR* lir);
 
   private:
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
     void GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, int64_t val,
                                   ConditionCode ccode);
     LIR* LoadFPConstantValue(int r_dest, int value);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index dd14ed9..6711ab3 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1039,15 +1039,6 @@
 #endif
 }
 
-void ArmMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  LOG(FATAL) << "Unexpected use GenNotLong()";
-}
-
-void ArmMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2, bool is_div) {
-  LOG(FATAL) << "Unexpected use GenDivRemLong()";
-}
-
 void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -1173,29 +1164,23 @@
     StoreValueWide(rl_dest, rl_result);
 }
 
-void ArmMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
-}
+void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                RegLocation rl_src2) {
+  switch (opcode) {
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
 
-void ArmMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
-}
+    default:
+      break;
+  }
 
-void ArmMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
-}
-
-void ArmMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
-}
-
-void ArmMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of genXoLong for Arm";
+  // Fallback for all other ops.
+  Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
 }
 
 /*
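Note: taken together, the codegen_arm.h and int_arm.cc hunks collapse seven per-opcode virtual entry points (GenAddLong, GenSubLong, GenAndLong, GenOrLong, GenXorLong, GenNotLong, GenDivRemLong), most of which only logged FATAL on ARM, into a single GenArithOpLong override that specializes multiply and negate and defers everything else to the portable Mir2Lir path. A toy model of that dispatch shape (stand-in classes, not the real Mir2Lir hierarchy):

#include <cstdio>

struct GenericCodegen {
  virtual void GenArithOpLong(int opcode) { std::printf("portable path, op 0x%x\n", opcode); }
  virtual ~GenericCodegen() {}
};

struct ArmCodegen : GenericCodegen {
  static const int kMulLong = 0x9d;  // Dalvik MUL_LONG, hard-coded for the toy.

  void GenArithOpLong(int opcode) override {
    if (opcode == kMulLong) {
      std::printf("ARM-specific mul-long\n");  // Cf. the now-private GenMulLong().
      return;
    }
    GenericCodegen::GenArithOpLong(opcode);  // Fallback for all other ops.
  }
};

int main() {
  ArmCodegen cg;
  cg.GenArithOpLong(ArmCodegen::kMulLong);  // ARM-specific mul-long
  cg.GenArithOpLong(0x90);                  // portable path, op 0x90
  return 0;
}
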
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 3a8ea3f..a449cbd 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -267,6 +267,8 @@
   kA64Fcvtzs2xf,     // fcvtzs [100111100s111000000000] rn[9-5] rd[4-0].
   kA64Fcvt2Ss,       // fcvt   [0001111000100010110000] rn[9-5] rd[4-0].
   kA64Fcvt2sS,       // fcvt   [0001111001100010010000] rn[9-5] rd[4-0].
+  kA64Fcvtms2ws,     // fcvtms [0001111000110000000000] rn[9-5] rd[4-0].
+  kA64Fcvtms2xS,     // fcvtms [1001111001110000000000] rn[9-5] rd[4-0].
   kA64Fdiv3fff,      // fdiv[000111100s1] rm[20-16] [000110] rn[9-5] rd[4-0].
   kA64Fmax3fff,      // fmax[000111100s1] rm[20-16] [010010] rn[9-5] rd[4-0].
   kA64Fmin3fff,      // fmin[000111100s1] rm[20-16] [010110] rn[9-5] rd[4-0].
@@ -278,6 +280,9 @@
   kA64Fmov2xS,       // fmov[1001111001101111000000] rn[9-5] rd[4-0].
   kA64Fmul3fff,      // fmul[000111100s1] rm[20-16] [000010] rn[9-5] rd[4-0].
   kA64Fneg2ff,       // fneg[000111100s100001010000] rn[9-5] rd[4-0].
+  kA64Frintp2ff,     // frintp [000111100s100100110000] rn[9-5] rd[4-0].
+  kA64Frintm2ff,     // frintm [000111100s100101010000] rn[9-5] rd[4-0].
+  kA64Frintn2ff,     // frintn [000111100s100100010000] rn[9-5] rd[4-0].
   kA64Frintz2ff,     // frintz [000111100s100101110000] rn[9-5] rd[4-0].
   kA64Fsqrt2ff,      // fsqrt[000111100s100001110000] rn[9-5] rd[4-0].
   kA64Fsub3fff,      // fsub[000111100s1] rm[20-16] [001110] rn[9-5] rd[4-0].
@@ -331,6 +336,7 @@
   kA64Stp4ffXD,      // stp [0s10110100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64Stp4rrXD,      // stp [s010100100] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64StpPost4rrXD,  // stp [s010100010] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
+  kA64StpPre4ffXD,   // stp [0s10110110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64StpPre4rrXD,   // stp [s010100110] imm_7[21-15] rt2[14-10] rn[9-5] rt[4-0].
   kA64Str3fXD,       // str [1s11110100] imm_12[21-10] rn[9-5] rt[4-0].
   kA64Str4fXxG,      // str [1s111100001] rm[20-16] [011] S[12] [10] rn[9-5] rt[4-0].
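Note: the new arm64_lir.h opcodes back the Math intrinsics added later in this change: frintp/frintm/frintn round a scalar FP value toward +infinity, toward -infinity, and to nearest (ties to even); fcvtms converts to integer rounding toward -infinity; and the pre-indexed FP stp form lets the prologue spill register pairs while moving SP. Their semantics, expressed as plain C++ rather than emitted A64 (a sketch; the real instructions' saturation and NaN handling are glossed over):

#include <cmath>
#include <cstdint>

double frintp(double x) { return std::ceil(x); }       // Round toward +infinity.
double frintm(double x) { return std::floor(x); }      // Round toward -infinity.
double frintn(double x) { return std::nearbyint(x); }  // Nearest, assuming default ties-to-even mode.
int64_t fcvtms(double x) {                             // Convert, rounding toward -infinity.
  return static_cast<int64_t>(std::floor(x));          // Real insn also saturates and maps NaN to 0.
}
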
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 1d7cdab..15c89f2 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -260,6 +260,14 @@
                  kFmtRegS, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fcvt", "!0s, !1S", kFixupNone),
+    ENCODING_MAP(kA64Fcvtms2ws, NO_VARIANTS(0x1e300000),
+                 kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "fcvtms", "!0w, !1s", kFixupNone),
+    ENCODING_MAP(kA64Fcvtms2xS, NO_VARIANTS(0x9e700000),
+                 kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "fcvtms", "!0x, !1S", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Fdiv3fff), FLOAT_VARIANTS(0x1e201800),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtRegF, 20, 16,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -304,6 +312,18 @@
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
                  "fneg", "!0f, !1f", kFixupNone),
+    ENCODING_MAP(FWIDE(kA64Frintp2ff), FLOAT_VARIANTS(0x1e24c000),
+                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "frintp", "!0f, !1f", kFixupNone),
+    ENCODING_MAP(FWIDE(kA64Frintm2ff), FLOAT_VARIANTS(0x1e254000),
+                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "frintm", "!0f, !1f", kFixupNone),
+    ENCODING_MAP(FWIDE(kA64Frintn2ff), FLOAT_VARIANTS(0x1e244000),
+                 kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+                 "frintn", "!0f, !1f", kFixupNone),
     ENCODING_MAP(FWIDE(kA64Frintz2ff), FLOAT_VARIANTS(0x1e25c000),
                  kFmtRegF, 4, 0, kFmtRegF, 9, 5, kFmtUnused, -1, -1,
                  kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -518,6 +538,10 @@
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
                  "stp", "!0r, !1r, [!2X], #!3D", kFixupNone),
+    ENCODING_MAP(WIDE(kA64StpPre4ffXD), CUSTOM_VARIANTS(0x2d800000, 0x6d800000),
+                 kFmtRegF, 4, 0, kFmtRegF, 14, 10, kFmtRegXOrSp, 9, 5,
+                 kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
+                 "stp", "!0f, !1f, [!2X, #!3D]!!", kFixupNone),
     ENCODING_MAP(WIDE(kA64StpPre4rrXD), CUSTOM_VARIANTS(0x29800000, 0xa9800000),
                  kFmtRegR, 4, 0, kFmtRegR, 14, 10, kFmtRegXOrSp, 9, 5,
                  kFmtBitBlt, 21, 15, IS_QUAD_OP | REG_DEF2 | REG_USE012 | IS_STORE,
@@ -723,6 +747,7 @@
                              << " @ 0x" << std::hex << lir->dalvik_offset;
                 if (kFailOnSizeError) {
                   LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+                             << "(" << UNWIDE(encoder->opcode) << ", " << encoder->fmt << ")"
                              << ". Expected " << expected << ", got 0x" << std::hex << operand;
                 } else {
                   LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
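Note: each new ENCODING_MAP entry fixes the instruction bits in [31:10] and leaves rn[9-5]/rd[4-0] to the operand formatters, so the base constants can be cross-checked against the bit-pattern comments in arm64_lir.h. A throwaway decoder sketch over those constants (hypothetical helper, not part of the assembler):

#include <cstdint>
#include <cstdio>

const char* DecodeFpOneSource(uint32_t insn) {
  switch (insn & 0xfffffc00u) {  // Strip rn[9:5] and rd[4:0].
    case 0x1e24c000u: return "frintp (single)";
    case 0x1e254000u: return "frintm (single)";
    case 0x1e244000u: return "frintn (single)";
    case 0x1e300000u: return "fcvtms w, s";
    case 0x9e700000u: return "fcvtms x, d";
    default:          return "unknown";
  }
}

int main() {
  std::printf("%s\n", DecodeFpOneSource(0x1e254041u));  // frintm s1, s2 -> "frintm (single)".
  return 0;
}
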
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index e584548..6fa8a4a 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -330,19 +330,14 @@
 
   NewLIR0(kPseudoMethodEntry);
 
-  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64) -
-      Thread::kStackOverflowSignalReservedBytes;
-  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
   const int spill_count = num_core_spills_ + num_fp_spills_;
   const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
   const int frame_size_without_spills = frame_size_ - spill_size;
 
   if (!skip_overflow_check) {
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
-      if (!large_frame) {
-        // Load stack limit
-        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
-      }
+      // Load stack limit
+      LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP1);
     } else {
       // TODO(Arm64) Implement implicit checks.
       // Implicit stack overflow check.
@@ -350,24 +345,21 @@
       // redzone we will get a segmentation fault.
       // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
       // MarkPossibleStackOverflowException();
+      //
+      // TODO: If the frame size is small enough, is it possible to make this a pre-indexed load,
+      //       so that we can avoid the following "sub sp" when spilling?
       LOG(FATAL) << "Implicit stack overflow checks not implemented.";
     }
   }
 
-  if (frame_size_ > 0) {
-    OpRegImm64(kOpSub, rs_sp, spill_size);
+  int spilled_already = 0;
+  if (spill_size > 0) {
+    spilled_already = SpillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
+    DCHECK(spill_size == spilled_already || frame_size_ == spilled_already);
   }
 
-  /* Need to spill any FP regs? */
-  if (fp_spill_mask_) {
-    int spill_offset = spill_size - kArm64PointerSize*(num_fp_spills_ + num_core_spills_);
-    SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
-  }
-
-  /* Spill core callee saves. */
-  if (core_spill_mask_) {
-    int spill_offset = spill_size - kArm64PointerSize*num_core_spills_;
-    SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
+  if (spilled_already != frame_size_) {
+    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   if (!skip_overflow_check) {
@@ -396,29 +388,9 @@
         const size_t sp_displace_;
       };
 
-      if (large_frame) {
-        // Compare Expected SP against bottom of stack.
-        // Branch to throw target if there is not enough room.
-        OpRegRegImm(kOpSub, rs_xIP1, rs_sp, frame_size_without_spills);
-        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_xIP0);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_xIP1, rs_xIP0, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
-        OpRegCopy(rs_sp, rs_xIP1);  // Establish stack after checks.
-      } else {
-        /*
-         * If the frame is small enough we are guaranteed to have enough space that remains to
-         * handle signals on the user stack.
-         * Establishes stack before checks.
-         */
-        OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
-        LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
-        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
-      }
-    } else {
-      OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
+      LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_xIP1, nullptr);
+      AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
     }
-  } else {
-    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
   }
 
   FlushIns(ArgLocs, rl_method);
@@ -445,57 +417,7 @@
 
   NewLIR0(kPseudoMethodExit);
 
-  // Restore saves and drop stack frame.
-  // 2 versions:
-  //
-  // 1. (Original): Try to address directly, then drop the whole frame.
-  //                Limitation: ldp is a 7b signed immediate. There should have been a DCHECK!
-  //
-  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
-  //           in range. Then drop the rest.
-  //
-  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
-  //       in variant 1.
-
-  if (frame_size_ <= 504) {
-    // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
-    // Could be tighter, as the last load is below frame_size_ offset.
-    if (fp_spill_mask_) {
-      int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-      UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
-    }
-    if (core_spill_mask_) {
-      int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
-      UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
-    }
-
-    OpRegImm64(kOpAdd, rs_sp, frame_size_);
-  } else {
-    // Second variant. Drop the frame part.
-    int drop = 0;
-    // TODO: Always use the first formula, as num_fp_spills would be zero?
-    if (fp_spill_mask_) {
-      drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-    } else {
-      drop = frame_size_ - kArm64PointerSize * num_core_spills_;
-    }
-
-    // Drop needs to be 16B aligned, so that SP keeps aligned.
-    drop = RoundDown(drop, 16);
-
-    OpRegImm64(kOpAdd, rs_sp, drop);
-
-    if (fp_spill_mask_) {
-      int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
-      UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
-    }
-    if (core_spill_mask_) {
-      int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
-      UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
-    }
-
-    OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
-  }
+  UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
 
   // Finally return.
   NewLIR0(kA64Ret);
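Note: the rewritten prologue and epilogue delegate the spill bookkeeping to the new SpillRegs/UnspillRegs helpers declared in codegen_arm64.h below. The contract visible in the prologue hunk: SpillRegs returns how far it already moved SP, either just the 16-byte-aligned spill area or, when it can fold the allocation into a pre-indexed stp, the whole frame, and only the remainder needs an explicit sub. A standalone model of that contract with concrete numbers (hypothetical; it mirrors the DCHECK above, not the real emitter):

#include <cassert>

// Toy model of the SpillRegs contract: returns the amount SP was already
// decremented by the spill stores.
int SpillRegsModel(int spill_size, int frame_size, bool folds_whole_frame) {
  return folds_whole_frame ? frame_size : spill_size;
}

int main() {
  // Small frame: the pre-indexed stp absorbs the whole allocation; no sub needed.
  assert(SpillRegsModel(32, 96, true) == 96);
  // Large frame: only the aligned spill area is covered; the caller emits
  // "sub sp, sp, #frame_size_without_spills" for the remaining 464 bytes.
  assert(SpillRegsModel(32, 496, false) == 32);
  return 0;
}
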
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index fd2f541..b182cc0 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -59,330 +59,340 @@
     bool initialized_;
   };
 
-  public:
-    Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+ public:
+  Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
-    // Required for target - codegen helpers.
-    bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                            RegLocation rl_dest, int lit) OVERRIDE;
-    bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                              RegLocation rl_dest, int64_t lit);
-    bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
-                          RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-    bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
-                            RegLocation rl_src, RegLocation rl_dest, int64_t lit);
-    bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
-    LIR* CheckSuspendUsingLoad() OVERRIDE;
-    RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
-    LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                      OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
-                     VolatileKind is_volatile)
-        OVERRIDE;
-    LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
-                         OpSize size) OVERRIDE;
-    LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
-        OVERRIDE;
-    LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
-    LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
-    LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                       OpSize size, VolatileKind is_volatile) OVERRIDE;
-    LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
-                      VolatileKind is_volatile) OVERRIDE;
-    LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
-                          OpSize size) OVERRIDE;
-    LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale)
-        OVERRIDE;
-    void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
-    LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
-                           int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
+  // Required for target - codegen helpers.
+  bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+                          RegLocation rl_dest, int lit) OVERRIDE;
+  bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
+                        RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+  bool HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
+                          RegLocation rl_src, RegLocation rl_dest, int64_t lit);
+  bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+  LIR* CheckSuspendUsingLoad() OVERRIDE;
+  RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+  LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+                    OpSize size, VolatileKind is_volatile) OVERRIDE;
+  LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+                   VolatileKind is_volatile) OVERRIDE;
+  LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+                       OpSize size) OVERRIDE;
+  LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
+      OVERRIDE;
+  LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
+  LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
+  LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+                     VolatileKind is_volatile) OVERRIDE;
+  LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, VolatileKind is_volatile)
+      OVERRIDE;
+  LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+                        OpSize size) OVERRIDE;
+  LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
+  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+  LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
+                         int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
 
-    // Required for target - register utilities.
-    RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-    RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
-      if (wide_kind == kWide || wide_kind == kRef) {
-        return As64BitReg(TargetReg(symbolic_reg));
-      } else {
-        return Check32BitReg(TargetReg(symbolic_reg));
-      }
-    }
-    RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+  // Required for target - register utilities.
+  RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
+    if (wide_kind == kWide || wide_kind == kRef) {
       return As64BitReg(TargetReg(symbolic_reg));
+    } else {
+      return Check32BitReg(TargetReg(symbolic_reg));
     }
-    RegStorage GetArgMappingToPhysicalReg(int arg_num);
-    RegLocation GetReturnAlt();
-    RegLocation GetReturnWideAlt();
-    RegLocation LocCReturn();
-    RegLocation LocCReturnRef();
-    RegLocation LocCReturnDouble();
-    RegLocation LocCReturnFloat();
-    RegLocation LocCReturnWide();
-    ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-    void AdjustSpillMask();
-    void ClobberCallerSave();
-    void FreeCallTemps();
-    void LockCallTemps();
-    void CompilerInitializeRegAlloc();
+  }
+  RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+    return As64BitReg(TargetReg(symbolic_reg));
+  }
+  RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+  RegLocation GetReturnAlt() OVERRIDE;
+  RegLocation GetReturnWideAlt() OVERRIDE;
+  RegLocation LocCReturn() OVERRIDE;
+  RegLocation LocCReturnRef() OVERRIDE;
+  RegLocation LocCReturnDouble() OVERRIDE;
+  RegLocation LocCReturnFloat() OVERRIDE;
+  RegLocation LocCReturnWide() OVERRIDE;
+  ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+  void AdjustSpillMask() OVERRIDE;
+  void ClobberCallerSave() OVERRIDE;
+  void FreeCallTemps() OVERRIDE;
+  void LockCallTemps() OVERRIDE;
+  void CompilerInitializeRegAlloc() OVERRIDE;
 
-    // Required for target - miscellaneous.
-    void AssembleLIR();
-    uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
-    int AssignInsnOffsets();
-    void AssignOffsets();
-    uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
-    void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
-    void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
-                                  ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-    const char* GetTargetInstFmt(int opcode);
-    const char* GetTargetInstName(int opcode);
-    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
-    ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-    uint64_t GetTargetInstFlags(int opcode);
-    size_t GetInsnSize(LIR* lir) OVERRIDE;
-    bool IsUnconditionalBranch(LIR* lir);
+  // Required for target - miscellaneous.
+  void AssembleLIR() OVERRIDE;
+  void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+  void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
+                                ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
+  const char* GetTargetInstFmt(int opcode) OVERRIDE;
+  const char* GetTargetInstName(int opcode) OVERRIDE;
+  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
+  ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
+  size_t GetInsnSize(LIR* lir) OVERRIDE;
+  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
 
-    // Get the register class for load/store of a field.
-    RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+  // Get the register class for load/store of a field.
+  RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
-    // Required for target - Dalvik-level generators.
-    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation lr_shift);
-    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2);
-    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
-                     RegLocation rl_index, RegLocation rl_dest, int scale);
-    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                     RegLocation rl_src, int scale, bool card_mark);
-    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_shift);
-    void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                          RegLocation rl_src2);
-    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2);
-    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-    bool GenInlinedReverseBits(CallInfo* info, OpSize size);
-    bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
-    bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-    bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-    bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-    bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
-    bool GenInlinedSqrt(CallInfo* info);
-    bool GenInlinedPeek(CallInfo* info, OpSize size);
-    bool GenInlinedPoke(CallInfo* info, OpSize size);
-    bool GenInlinedAbsLong(CallInfo* info);
-    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                   RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2, bool is_div);
-    RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-    void GenDivZeroCheckWide(RegStorage reg);
-    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-    void GenExitSequence();
-    void GenSpecialExitSequence();
-    void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
-    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-    void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
-    void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                          int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                          int dest_reg_class) OVERRIDE;
-    // Helper used in the above two.
-    void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
-                   int result_reg_class);
+  // Required for target - Dalvik-level generators.
+  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation lr_shift) OVERRIDE;
+  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2) OVERRIDE;
+  void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_dest, int scale) OVERRIDE;
+  void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+                   RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_shift) OVERRIDE;
+  void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2) OVERRIDE;
+  void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                       RegLocation rl_src2) OVERRIDE;
+  void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                RegLocation rl_src2) OVERRIDE;
+  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+  bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
+  bool GenInlinedCeil(CallInfo* info) OVERRIDE;
+  bool GenInlinedFloor(CallInfo* info) OVERRIDE;
+  bool GenInlinedRint(CallInfo* info) OVERRIDE;
+  bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
+  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
+  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation rl_src2) OVERRIDE;
+  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+      OVERRIDE;
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div)
+      OVERRIDE;
+  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+  void GenExitSequence() OVERRIDE;
+  void GenSpecialExitSequence() OVERRIDE;
+  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                        int dest_reg_class) OVERRIDE;
 
-    bool GenMemBarrier(MemBarrierKind barrier_kind);
-    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
-    void GenMonitorExit(int opt_flags, RegLocation rl_src);
-    void GenMoveException(RegLocation rl_dest);
-    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                       int first_bit, int second_bit);
-    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-    void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-    void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+  void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
+  void GenMonitorExit(int opt_flags, RegLocation rl_src) OVERRIDE;
+  void GenMoveException(RegLocation rl_dest) OVERRIDE;
+  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                     int first_bit, int second_bit) OVERRIDE;
+  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
 
-    uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2);
-    void UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
-    void SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask);
+  // Required for target - single operation generators.
+  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+  void OpEndIT(LIR* it) OVERRIDE;
+  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+  LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpTestSuspend(LIR* target) OVERRIDE;
+  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
 
-    // Required for target - single operation generators.
-    LIR* OpUnconditionalBranch(LIR* target);
-    LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-    LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-    LIR* OpCondBranch(ConditionCode cc, LIR* target);
-    LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-    LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpIT(ConditionCode cond, const char* guide);
-    void OpEndIT(LIR* it);
-    LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-    LIR* OpPcRelLoad(RegStorage reg, LIR* target);
-    LIR* OpReg(OpKind op, RegStorage r_dest_src);
-    void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
-    LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-    LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-    LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-    LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-    LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-    LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
-    LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-    LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-    LIR* OpTestSuspend(LIR* target);
-    LIR* OpVldm(RegStorage r_base, int count);
-    LIR* OpVstm(RegStorage r_base, int count);
-    void OpRegCopyWide(RegStorage dest, RegStorage src);
+  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+  bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) OVERRIDE;
+  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
 
-    LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
-    LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
-    LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                          int shift);
-    LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
-                           A64RegExtEncodings ext, uint8_t amount);
-    LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-    LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
-    static const ArmEncodingMap EncodingMap[kA64Last];
-    int EncodeShift(int code, int amount);
-    int EncodeExtend(int extend_type, int amount);
-    bool IsExtendEncoding(int encoded_value);
-    int EncodeLogicalImmediate(bool is_wide, uint64_t value);
-    uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
 
-    ArmConditionCode ArmConditionEncoding(ConditionCode code);
-    bool InexpensiveConstantInt(int32_t value);
-    bool InexpensiveConstantFloat(int32_t value);
-    bool InexpensiveConstantLong(int64_t value);
-    bool InexpensiveConstantDouble(int64_t value);
-
-    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
-
-    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
-                             NextCallInsn next_call_insn,
-                             const MethodReference& target_method,
-                             uint32_t vtable_idx,
-                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                             bool skip_this);
-
-    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+  int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                           bool skip_this);
-    InToRegStorageMapping in_to_reg_storage_mapping_;
+                           bool skip_this) OVERRIDE;
 
-    bool WideGPRsAreAliases() OVERRIDE {
-      return true;  // 64b architecture.
-    }
-    bool WideFPRsAreAliases() OVERRIDE {
-      return true;  // 64b architecture.
-    }
-    size_t GetInstructionOffset(LIR* lir);
+  int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+                         NextCallInsn next_call_insn,
+                         const MethodReference& target_method,
+                         uint32_t vtable_idx,
+                         uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+                         bool skip_this) OVERRIDE;
 
-    LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+  bool WideGPRsAreAliases() OVERRIDE {
+    return true;  // 64b architecture.
+  }
+  bool WideFPRsAreAliases() OVERRIDE {
+    return true;  // 64b architecture.
+  }
 
-  private:
-    /**
-     * @brief Given register xNN (dNN), returns register wNN (sNN).
-     * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
-     * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
-     * @see As64BitReg
-     */
-    RegStorage As32BitReg(RegStorage reg) {
-      DCHECK(!reg.IsPair());
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Expected 64b register";
-        } else {
-          LOG(WARNING) << "Expected 64b register";
-          return reg;
-        }
+  size_t GetInstructionOffset(LIR* lir) OVERRIDE;
+
+  LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+ private:
+  /**
+   * @brief Given register xNN (dNN), returns register wNN (sNN).
+   * @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
+   * @return A Solo32 with the same register number as the @p reg (e.g. @c w1 or @c s2).
+   * @see As64BitReg
+   */
+  RegStorage As32BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 64b register";
+      } else {
+        LOG(WARNING) << "Expected 64b register";
+        return reg;
       }
-      RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
-                                      reg.GetRawBits() & RegStorage::kRegTypeMask);
-      DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
-                               ->GetReg().GetReg(),
-                ret_val.GetReg());
-      return ret_val;
     }
+    RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
 
-    RegStorage Check32BitReg(RegStorage reg) {
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Checked for 32b register";
-        } else {
-          LOG(WARNING) << "Checked for 32b register";
-          return As32BitReg(reg);
-        }
+  RegStorage Check32BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 32b register";
+      } else {
+        LOG(WARNING) << "Checked for 32b register";
+        return As32BitReg(reg);
       }
-      return reg;
     }
+    return reg;
+  }
 
-    /**
-     * @brief Given register wNN (sNN), returns register xNN (dNN).
-     * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
-     * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
-     * @see As32BitReg
-     */
-    RegStorage As64BitReg(RegStorage reg) {
-      DCHECK(!reg.IsPair());
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Expected 32b register";
-        } else {
-          LOG(WARNING) << "Expected 32b register";
-          return reg;
-        }
+  /**
+   * @brief Given register wNN (sNN), returns register xNN (dNN).
+   * @param reg #RegStorage containing a Solo32 input register (e.g. @c w1 or @c s2).
+   * @return A Solo64 with the same register number as the @p reg (e.g. @c x1 or @c d2).
+   * @see As32BitReg
+   */
+  RegStorage As64BitReg(RegStorage reg) {
+    DCHECK(!reg.IsPair());
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Expected 32b register";
+      } else {
+        LOG(WARNING) << "Expected 32b register";
+        return reg;
       }
-      RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
-                                      reg.GetRawBits() & RegStorage::kRegTypeMask);
-      DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
-                               ->GetReg().GetReg(),
-                ret_val.GetReg());
-      return ret_val;
     }
+    RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+                                    reg.GetRawBits() & RegStorage::kRegTypeMask);
+    DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+              ->GetReg().GetReg(),
+              ret_val.GetReg());
+    return ret_val;
+  }
 
-    RegStorage Check64BitReg(RegStorage reg) {
-      if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
-        if (kFailOnSizeError) {
-          LOG(FATAL) << "Checked for 64b register";
-        } else {
-          LOG(WARNING) << "Checked for 64b register";
-          return As64BitReg(reg);
-        }
+  RegStorage Check64BitReg(RegStorage reg) {
+    if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+      if (kFailOnSizeError) {
+        LOG(FATAL) << "Checked for 64b register";
+      } else {
+        LOG(WARNING) << "Checked for 64b register";
+        return As64BitReg(reg);
       }
-      return reg;
     }
+    return reg;
+  }
 
-    LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
-    LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
-    void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-    void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
-    void AssignDataOffsets();
-    RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
-                          bool is_div, bool check_zero);
-    RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
-    size_t GetLoadStoreSize(LIR* lir);
+  int32_t EncodeImmSingle(uint32_t bits);
+  int32_t EncodeImmDouble(uint64_t bits);
+  LIR* LoadFPConstantValue(RegStorage r_dest, int32_t value);
+  LIR* LoadFPConstantValueWide(RegStorage r_dest, int64_t value);
+  void ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+  void InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir);
+  void AssignDataOffsets();
+  RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+                        bool is_div, bool check_zero);
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div);
+  size_t GetLoadStoreSize(LIR* lir);
+
+  bool SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+                            RegLocation rl_dest, int64_t lit);
+
+  uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
+  int AssignInsnOffsets();
+  void AssignOffsets();
+  uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir);
+
+  // Spill core and FP registers. Returns the SP difference: either spill size, or whole
+  // frame size.
+  int SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+  // Unspill core and FP registers.
+  void UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask, int frame_size);
+
+  void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+  LIR* OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value);
+  LIR* OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value);
+
+  LIR* OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift);
+  LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+                        int shift);
+  int EncodeShift(int code, int amount);
+
+  LIR* OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+                      A64RegExtEncodings ext, uint8_t amount);
+  LIR* OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2,
+                         A64RegExtEncodings ext, uint8_t amount);
+  int EncodeExtend(int extend_type, int amount);
+  bool IsExtendEncoding(int encoded_value);
+
+  LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+  LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+
+  int EncodeLogicalImmediate(bool is_wide, uint64_t value);
+  uint64_t DecodeLogicalImmediate(bool is_wide, int value);
+  ArmConditionCode ArmConditionEncoding(ConditionCode code);
+
+  // Helper used in the two GenSelect variants.
+  void GenSelect(int32_t left, int32_t right, ConditionCode code, RegStorage rs_dest,
+                 int result_reg_class);
+
+  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                     RegLocation rl_src2, bool is_div);
+
+  InToRegStorageMapping in_to_reg_storage_mapping_;
+  static const ArmEncodingMap EncodingMap[kA64Last];
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index ed13c04..d0b2636 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -17,6 +17,7 @@
 #include "arm64_lir.h"
 #include "codegen_arm64.h"
 #include "dex/quick/mir_to_lir-inl.h"
+#include "utils.h"
 
 namespace art {
 
@@ -386,6 +387,52 @@
   return true;
 }
 
+bool Arm64Mir2Lir::GenInlinedCeil(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(FWIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(FWIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
+bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = InlineTargetWide(info);
+  rl_src = LoadValueWide(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
+  NewLIR2(FWIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  StoreValueWide(rl_dest, rl_result);
+  return true;
+}
+
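The three intrinsics above each lower to a single A64 round-to-integral instruction: frintp rounds toward plus infinity (ceil), frintm toward minus infinity (floor), and frintn to nearest with ties to even (rint under the default FP environment). A minimal standalone sketch of those semantics in plain C++ (illustration only, not ART code):

#include <cassert>
#include <cmath>

int main() {
  assert(std::ceil(2.5) == 3.0);    // frintp: round toward +infinity
  assert(std::floor(2.5) == 2.0);   // frintm: round toward -infinity
  assert(std::rint(2.5) == 2.0);    // frintn: ties round to the even neighbor
  assert(std::rint(3.5) == 4.0);
  return 0;
}
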
+bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+  int32_t encoded_imm = EncodeImmSingle(bit_cast<float, uint32_t>(0.5f));
+  ArmOpcode wide = (is_double) ? FWIDE(0) : FUNWIDE(0);
+  RegLocation rl_src = info->args[0];
+  RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
+  rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
+  // 0.5f and 0.5d are encoded in the same way.
+  NewLIR2(kA64Fmov2fI | wide, r_tmp.GetReg(), encoded_imm);
+  NewLIR3(kA64Fadd3fff | wide, rl_src.reg.GetReg(), rl_src.reg.GetReg(), r_tmp.GetReg());
+  NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
+  return true;
+}
+
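GenInlinedRound implements Java's round-half-up semantics by adding 0.5 and converting with fcvtms, which rounds toward minus infinity. Setting aside the saturating and NaN behavior that fcvtms provides for free, the computation is equivalent to the sketch below (RoundFloat/RoundDouble are illustrative names, not ART functions):

#include <cmath>
#include <cstdint>

// floor(x + 0.5): the fadd above followed by fcvtms (convert toward -inf).
int32_t RoundFloat(float x) {
  return static_cast<int32_t>(std::floor(x + 0.5f));
}

int64_t RoundDouble(double x) {
  return static_cast<int64_t>(std::floor(x + 0.5));
}

int main() {
  // Half-up also holds for negatives: Java's Math.round(-2.5f) is -2.
  return (RoundFloat(-2.5f) == -2 && RoundDouble(2.5) == 3) ? 0 : 1;
}
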
 bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
   DCHECK_EQ(cu_->instruction_set, kArm64);
   int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 360acd5..147fee8 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -22,6 +22,7 @@
 #include "dex/reg_storage_eq.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "mirror/array.h"
+#include "utils.h"
 
 namespace art {
 
@@ -930,34 +931,52 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
-                              RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                              RegLocation rl_src2) {
-  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) {
-  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
-}
-
-void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
-  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2) {
+  switch (opcode) {
+    case Instruction::NOT_LONG:
+      GenNotLong(rl_dest, rl_src2);
+      return;
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+      return;
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+      return;
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::AND_LONG:
+      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG: {
+      GenNegLong(rl_dest, rl_src2);
+      return;
+    }
+    default:
+      LOG(FATAL) << "Invalid long arith op";
+      return;
+  }
 }
 
 /*
@@ -1191,22 +1210,7 @@
 
 void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                      RegLocation rl_src1, RegLocation rl_src2) {
-  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
-    if (!rl_src2.is_const) {
-      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
-    }
-  } else {
-    // Associativity.
-    if (!rl_src2.is_const) {
-      DCHECK(rl_src1.is_const);
-      std::swap(rl_src1, rl_src2);
-    }
-  }
-  DCHECK(rl_src2.is_const);
-
   OpKind op = kOpBkpt;
-  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-
   switch (opcode) {
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
@@ -1232,12 +1236,34 @@
       LOG(FATAL) << "Unexpected opcode";
   }
 
+  if (op == kOpSub) {
+    if (!rl_src2.is_const) {
+      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+    }
+  } else {
+    // Associativity.
+    if (!rl_src2.is_const) {
+      DCHECK(rl_src1.is_const);
+      std::swap(rl_src1, rl_src2);
+    }
+  }
+  DCHECK(rl_src2.is_const);
+  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
+
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
   OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
   StoreValueWide(rl_dest, rl_result);
 }
 
+static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
+  // Find first register.
+  int first_bit_set = CTZ(reg_mask) + 1;
+  *reg = *reg + first_bit_set;
+  reg_mask >>= first_bit_set;
+  return reg_mask;
+}
+
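ExtractReg peels the lowest set bit off a register mask while carrying a running register number across calls, so repeated calls enumerate the mask from the least significant bit upwards. A self-contained usage sketch, with CTZ replaced by the GCC/Clang builtin (illustration only):

#include <cstdint>
#include <cstdio>

// Standalone copy of the helper above; CTZ == count trailing zeros.
static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  *reg = *reg + first_bit_set;
  reg_mask >>= first_bit_set;
  return reg_mask;
}

int main() {
  uint32_t mask = 0x29;  // Registers 0, 3 and 5.
  int reg = -1;          // Callers seed the running register number with -1.
  while (mask != 0) {
    mask = ExtractReg(mask, &reg);
    std::printf("reg %d\n", reg);  // Prints 0, then 3, then 5.
  }
  return 0;
}
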
 /**
 * @brief Split a register list into register pairs or single registers.
  *
@@ -1254,15 +1280,15 @@
  *   }
  * @endcode
  */
-uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
+static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
   // Find first register.
-  int first_bit_set = __builtin_ctz(reg_mask) + 1;
+  int first_bit_set = CTZ(reg_mask) + 1;
   int reg = *reg1 + first_bit_set;
   reg_mask >>= first_bit_set;
 
   if (LIKELY(reg_mask)) {
     // Save the first register, find the second and use the pair opcode.
-    int second_bit_set = __builtin_ctz(reg_mask) + 1;
+    int second_bit_set = CTZ(reg_mask) + 1;
     *reg2 = reg;
     reg_mask >>= second_bit_set;
     *reg1 = reg + second_bit_set;
@@ -1275,68 +1301,274 @@
   return reg_mask;
 }
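
GenPairWise extends the same idea to pairs: each call peels one or two registers off the mask and leaves reg2 negative when only a single register remained, which is exactly what the stp-vs-str decisions below key on. A standalone sketch, assuming the elided tail of the function sets reg2 to -1 in the single-register case (the condition every caller tests):

#include <cstdint>
#include <cstdio>

static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;
  if (reg_mask != 0) {
    // A second register exists: report a pair.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
  } else {
    *reg2 = -1;  // Single register left: signal "no pair" to the caller.
    *reg1 = reg;
  }
  return reg_mask;
}

int main() {
  uint32_t mask = 0x16;  // 0b10110: registers 1, 2 and 4.
  int reg1 = -1, reg2 = -1;
  mask = GenPairWise(mask, &reg1, &reg2);
  std::printf("pair: %d, %d\n", reg2, reg1);     // 1, 2 -> one stp
  mask = GenPairWise(mask, &reg1, &reg2);
  std::printf("single: %d (%d)\n", reg1, reg2);  // 4 (-1) -> one str
  return 0;
}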
 
-void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    } else {
-      DCHECK_LE(offset, 63);
-      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    }
-  }
-}
-
-void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
   int reg1 = -1, reg2 = -1;
   const int reg_log2_size = 3;
 
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
     } else {
-      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
-              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
-    }
-  }
-}
-
-void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
-  int reg1 = -1, reg2 = -1;
-  const int reg_log2_size = 3;
-
-  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
-     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
-    if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
-    } else {
-      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
     }
   }
 }
 
 // TODO(Arm64): consider using ld1 and st1?
-void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
+static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
   int reg1 = -1, reg2 = -1;
   const int reg_log2_size = 3;
 
   for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
     if (UNLIKELY(reg2 < 0)) {
-      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                   offset);
     } else {
-      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
-              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
     }
   }
 }
 
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+                           uint32_t fp_reg_mask, int frame_size) {
+  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
+
+  int core_count = POPCOUNT(core_reg_mask);
+
+  if (fp_reg_mask != 0) {
+    // Spill FP regs.
+    int fp_count = POPCOUNT(fp_reg_mask);
+    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
+    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
+  }
+
+  if (core_reg_mask != 0) {
+    // Spill core regs.
+    int spill_offset = frame_size - (core_count * kArm64PointerSize);
+    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
+  }
+
+  return frame_size;
+}
+
+static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
+                               uint32_t fp_reg_mask, int frame_size) {
+  // Spill both core and fp regs at the same time.
+  // The very first instruction will be an stp with a pre-indexed address, moving the stack
+  // pointer down. From then on, we fill upwards. This generates overall the same number of
+  // instructions as the specialized code above in most cases (the exception being an odd number
+  // of core spills combined with an even, non-zero number of fp spills), but is more flexible,
+  // as the offsets are guaranteed to be small.
+  //
+  // Some demonstrative fill cases : (c) = core, (f) = fp
+  // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
+  // fc => 23   fc => 23   ff => 11   ff => 22
+  // ff    11    f    11               f    11
+  //
+  int reg1 = -1, reg2 = -1;
+  int core_count = POPCOUNT(core_reg_mask);
+  int fp_count = POPCOUNT(fp_reg_mask);
+
+  int combined = fp_count + core_count;
+  int all_offset = RoundUp(combined, 2);  // Needs to be 16B = 2-reg aligned.
+
+  int cur_offset = 2;  // Starting offset after the first stp: we expect the base slot to be
+                       // filled.
+
+  // First figure out whether the bottom is FP or core.
+  if (fp_count > 0) {
+    // Some FP spills.
+    //
+    // Four cases: (d0 is dummy to fill up stp)
+    // 1) Single FP, even number of core -> stp d0, fp_reg
+    // 2) Single FP, odd number of core -> stp fp_reg, d0
+    // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
+    // 4) More FP, odd number combined -> stp d0, fp_reg
+    if (fp_count == 1) {
+      fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+      DCHECK_EQ(fp_reg_mask, 0U);
+      if (core_count % 2 == 0) {
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+      } else {
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+        cur_offset = 0;  // That core reg needs to go into the upper half.
+      }
+    } else {
+      if (combined % 2 == 0) {
+        fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
+      } else {
+        fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
+        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
+                     base.GetReg(), -all_offset);
+      }
+    }
+  } else {
+    // No FP spills.
+    //
+    // Two cases:
+    // 1) Even number of core -> stp core1, core2
+    // 2) Odd number of core -> stp xzr, core1
+    if (core_count % 2 == 1) {
+      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+    } else {
+      core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
+    }
+  }
+
+  if (fp_count != 0) {
+    for (; fp_reg_mask != 0;) {
+      // Have some FP regs to do.
+      fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
+      if (UNLIKELY(reg2 < 0)) {
+        m2l->NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                     cur_offset);
+        // Do not increment offset here, as the second half will be filled by a core reg.
+      } else {
+        m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
+        cur_offset += 2;
+      }
+    }
+
+    // Reset counting.
+    reg1 = -1;
+
+    // If there is an odd number of core registers, we need to store the bottom now.
+    if (core_count % 2 == 1) {
+      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
+      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
+                   cur_offset + 1);
+      cur_offset += 2;  // Half-slot filled now.
+    }
+  }
+
+  // Spill the rest of the core regs. They are guaranteed to be even in number.
+  DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
+  for (; core_reg_mask != 0; cur_offset += 2) {
+    core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
+    m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
+  }
+
+  DCHECK_EQ(cur_offset, all_offset);
+
+  return all_offset * 8;
+}
+
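To make the bookkeeping concrete: as written, cases 1 and 2 above emit an identical stp (the single fp register is stored into both halves, acting as its own dummy); the branches differ only in which half cur_offset treats as already filled. The hand-trace below checks the offset arithmetic for a hypothetical spill set of 3 core and 2 fp registers (case 4, more FP with an odd combined count); it mirrors the arithmetic only, not the emitted LIR:

#include <cassert>
#include <cstdio>

int main() {
  const int core_count = 3, fp_count = 2;
  const int combined = fp_count + core_count;  // 5 spill slots needed
  const int all_offset = (combined + 1) & ~1;  // RoundUp(5, 2) == 6, 16B aligned

  int cur_offset = 2;  // Pre-indexed stp (d0 dummy + one fp reg) fills slots 0/1.
  // The remaining single fp reg is stored at slot 2; the offset is not bumped,
  // since the odd core reg fills slot 3 via str at cur_offset + 1.
  cur_offset += 2;     // Half-slots 2/3 are now both filled.
  cur_offset += 2;     // The last core pair goes to slots 4/5 with one stp.
  assert(cur_offset == all_offset);                    // mirrors the DCHECK above
  std::printf("SP difference: %d\n", all_offset * 8);  // 48 bytes
  return 0;
}
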
+int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                            int frame_size) {
+  // If the frame size is small enough that all offsets fit into the str/stp immediates, use the
+  // pre-sub setup, as it decrements sp early (a simple form of instruction scheduling) and is no
+  // worse in instruction count than the more complicated pre-indexed code below.
+  //
+  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
+  // number of fp spills.
+  if (RoundUp(frame_size, 8) / 8 <= 63) {
+    return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+  } else {
+    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+  }
+}
+
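The 63-slot threshold matches the reach of the ldp/stp signed scaled 7-bit immediate: 63 slots of 8 bytes each, i.e. frames up to 504 bytes. A quick boundary check (RoundUp reimplemented locally for the sketch):

#include <cassert>

constexpr int RoundUp(int x, int n) { return (x + n - 1) / n * n; }

int main() {
  assert(RoundUp(504, 8) / 8 <= 63);  // 504-byte frame: pre-sub variant
  assert(RoundUp(505, 8) / 8 > 63);   // anything larger: pre-indexed variant
  return 0;
}
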
+static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+  int reg1 = -1, reg2 = -1;
+  const int reg_log2_size = 3;
+
+  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
+    if (UNLIKELY(reg2 < 0)) {
+      m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+    } else {
+      DCHECK_LE(offset, 63);
+      m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
+                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
+    }
+  }
+}
+
+static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
+  int reg1 = -1, reg2 = -1;
+  const int reg_log2_size = 3;
+
+  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
+    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
+    if (UNLIKELY(reg2 < 0)) {
+      m2l->NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
+                   offset);
+    } else {
+      m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
+                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
+    }
+  }
+}
+
+void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+                               int frame_size) {
+  // Restore saves and drop stack frame.
+  // Two variants:
+  //
+  // 1. (Original): Try to address the saves directly, then drop the whole frame.
+  //                Limitation: the ldp offset is a 7-bit signed (scaled) immediate.
+  //
+  // 2. (New): Drop the non-save part of the frame first. Then proceed as in variant 1, which is
+  //           now guaranteed to be in range. Then drop the rest.
+  //
+  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
+  //       in variant 1.
+
+  // "Magic" constant, 63 (max signed 7b) * 8.
+  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;
+
+  const int num_core_spills = POPCOUNT(core_reg_mask);
+  const int num_fp_spills = POPCOUNT(fp_reg_mask);
+
+  int early_drop = 0;
+
+  if (frame_size > kMaxFramesizeForOffset) {
+    // Second variant. Drop the frame part.
+
+    // TODO: Always use the first formula, as num_fp_spills would be zero?
+    if (fp_reg_mask != 0) {
+      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
+    } else {
+      early_drop = frame_size - kArm64PointerSize * num_core_spills;
+    }
+
+    // Drop needs to be 16B aligned, so that SP stays aligned.
+    early_drop = RoundDown(early_drop, 16);
+
+    OpRegImm64(kOpAdd, rs_sp, early_drop);
+  }
+
+  // Unspill.
+  if (fp_reg_mask != 0) {
+    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
+    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
+  }
+  if (core_reg_mask != 0) {
+    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
+    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
+  }
+
+  // Drop the (rest of) the frame.
+  OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
+}
+
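A numeric check of the early-drop arithmetic for a hypothetical 1000-byte frame with 4 core and 2 fp spills (values invented for illustration; kArm64PointerSize is 8). After the early drop, every remaining offset is comfortably within the 504-byte ldp range:

#include <cassert>
#include <cstdio>

int main() {
  const int frame_size = 1000, num_core_spills = 4, num_fp_spills = 2;
  const int kMaxFramesizeForOffset = 63 * 8;  // 504

  int early_drop = 0;
  if (frame_size > kMaxFramesizeForOffset) {
    early_drop = frame_size - 8 * (num_fp_spills + num_core_spills);  // 952
    early_drop &= ~15;  // RoundDown(., 16) == 944: SP stays 16B aligned.
  }
  const int fp_offset = frame_size - early_drop - 8 * (num_fp_spills + num_core_spills);
  const int core_offset = frame_size - early_drop - 8 * num_core_spills;
  assert(fp_offset == 8 && core_offset == 24);  // both far below 504
  std::printf("early drop %d, final drop %d\n",
              early_drop, frame_size - early_drop);  // 944, 56
  return 0;
}
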
 bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
   ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src_i = info->args[0];
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 221dbfa..5326e74 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -23,7 +23,7 @@
 
 /* This file contains codegen for the A64 ISA. */
 
-static int32_t EncodeImmSingle(uint32_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmSingle(uint32_t bits) {
   /*
    * Valid values will have the form:
    *
@@ -55,7 +55,7 @@
   return (bit7 | bit6 | bit5_to_0);
 }
 
-static int32_t EncodeImmDouble(uint64_t bits) {
+int32_t Arm64Mir2Lir::EncodeImmDouble(uint64_t bits) {
   /*
    * Valid values will have the form:
    *
@@ -269,8 +269,47 @@
   return (n << 12 | imm_r << 6 | imm_s);
 }
 
+// Maximum number of instructions to use for encoding the immediate.
+static const int max_num_ops_per_const_load = 2;
+
+/**
+ * @brief Return the number of fast halfwords in the given uint64_t integer.
+ * @details The input integer is split into 4 halfwords (bits 0-15, 16-31, 32-47, 48-63). The
+ *   number of fast halfwords (halfwords that are either 0 or 0xffff) is returned. See the
+ *   @return description for the precise encoding.
+ * @param value The input 64-bit integer.
+ * @return A value @c retval such that (retval & 0x7) is the maximum of n and m, where n is
+ *   the number of halfwords with all bits unset (0) and m is the number of halfwords with all
+ *   bits set (0xffff). Additionally, (retval & 0x8) is set when m > n.
+ */
+static int GetNumFastHalfWords(uint64_t value) {
+  unsigned int num_0000_halfwords = 0;
+  unsigned int num_ffff_halfwords = 0;
+  for (int shift = 0; shift < 64; shift += 16) {
+    uint16_t halfword = static_cast<uint16_t>(value >> shift);
+    if (halfword == 0)
+      num_0000_halfwords++;
+    else if (halfword == UINT16_C(0xffff))
+      num_ffff_halfwords++;
+  }
+  if (num_0000_halfwords >= num_ffff_halfwords) {
+    DCHECK_LE(num_0000_halfwords, 4U);
+    return num_0000_halfwords;
+  } else {
+    DCHECK_LE(num_ffff_halfwords, 4U);
+    return num_ffff_halfwords | 0x8;
+  }
+}
+
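A standalone copy of the helper with a few sanity checks of the encoding described in the doc comment (plain C++, illustration only):

#include <cassert>
#include <cstdint>

static int GetNumFastHalfWords(uint64_t value) {
  unsigned int num_0000 = 0, num_ffff = 0;
  for (int shift = 0; shift < 64; shift += 16) {
    uint16_t halfword = static_cast<uint16_t>(value >> shift);
    if (halfword == 0) {
      num_0000++;
    } else if (halfword == UINT16_C(0xffff)) {
      num_ffff++;
    }
  }
  return (num_0000 >= num_ffff) ? num_0000 : (num_ffff | 0x8);
}

int main() {
  assert(GetNumFastHalfWords(UINT64_C(0x0000000000001234)) == 3);          // three 0x0000 halves
  assert(GetNumFastHalfWords(UINT64_C(0xffffffffffff1234)) == (3 | 0x8));  // three 0xffff halves
  assert(GetNumFastHalfWords(UINT64_C(0x1234ffff00005678)) == 1);          // tie: zeros win, no 0x8
  return 0;
}
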
+// The InexpensiveConstantXXX variants below are used by the promotion algorithm to decide how
+// heavily a constant should be weighted for promotion. If the constant is "inexpensive" to
+// materialize, the promotion algorithm gives it a low priority, even when it is referenced many
+// times in the code.
+
 bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
-  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
+  // A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
+  // We therefore return true and give it a low priority for promotion.
+  return true;
 }
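
The two-instruction claim follows from splitting the value into halfwords: movz materializes one half and movk patches in the other, with no literal-pool load. A sketch of the bit manipulation involved:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t value = 0xdeadbeef;
  uint32_t w0 = value & 0xffff;  // movz w0, #0xbeef
  w0 |= (value >> 16) << 16;     // movk w0, #0xdead, lsl #16
  assert(w0 == value);
  return 0;
}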
 
 bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
@@ -278,13 +317,70 @@
 }
 
 bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
-  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
+  int num_slow_halfwords = 4 - (GetNumFastHalfWords(value) & 0x7);
+  if (num_slow_halfwords <= max_num_ops_per_const_load) {
+    return true;
+  }
+  return (EncodeLogicalImmediate(/*is_wide=*/true, value) >= 0);
 }
 
 bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
   return EncodeImmDouble(value) >= 0;
 }
 
+// The InexpensiveConstantXXX variants below are used to determine which A64 instructions to use
+// when one of the operands is an immediate (e.g. the register vs. the immediate version of add).
+
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+  switch (opcode) {
+  case Instruction::IF_EQ:
+  case Instruction::IF_NE:
+  case Instruction::IF_LT:
+  case Instruction::IF_GE:
+  case Instruction::IF_GT:
+  case Instruction::IF_LE:
+  case Instruction::ADD_INT:
+  case Instruction::ADD_INT_2ADDR:
+  case Instruction::SUB_INT:
+  case Instruction::SUB_INT_2ADDR:
+    // The code below is consistent with the implementation of OpRegRegImm().
+    {
+      int32_t abs_value = std::abs(value);
+      if (abs_value < 0x1000) {
+        return true;
+      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
+        return true;
+      }
+      return false;
+    }
+  case Instruction::SHL_INT:
+  case Instruction::SHL_INT_2ADDR:
+  case Instruction::SHR_INT:
+  case Instruction::SHR_INT_2ADDR:
+  case Instruction::USHR_INT:
+  case Instruction::USHR_INT_2ADDR:
+    return true;
+  case Instruction::AND_INT:
+  case Instruction::AND_INT_2ADDR:
+  case Instruction::AND_INT_LIT16:
+  case Instruction::AND_INT_LIT8:
+  case Instruction::OR_INT:
+  case Instruction::OR_INT_2ADDR:
+  case Instruction::OR_INT_LIT16:
+  case Instruction::OR_INT_LIT8:
+  case Instruction::XOR_INT:
+  case Instruction::XOR_INT_2ADDR:
+  case Instruction::XOR_INT_LIT16:
+  case Instruction::XOR_INT_LIT8:
+    if (value == 0 || value == INT32_C(-1)) {
+      return true;
+    }
+    return (EncodeLogicalImmediate(/*is_wide=*/false, value) >= 0);
+  default:
+    return false;
+  }
+}
+
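The add/sub arm of the switch above mirrors the A64 arithmetic-immediate encoding: a 12-bit immediate, optionally shifted left by 12 (negative values are handled by swapping add and sub). A standalone restatement of the predicate with boundary checks (FitsA64AddSubImm is an illustrative name):

#include <cassert>
#include <cstdlib>

static bool FitsA64AddSubImm(int32_t value) {
  int32_t abs_value = std::abs(value);
  if (abs_value < 0x1000) {
    return true;  // plain imm12
  }
  return (abs_value & 0xfff) == 0 && (abs_value >> 12) < 0x1000;  // imm12, lsl #12
}

int main() {
  assert(FitsA64AddSubImm(4095));       // 0xfff: largest unshifted immediate
  assert(FitsA64AddSubImm(-4096));      // 0x1000: imm12 == 1, lsl #12, ops swapped
  assert(!FitsA64AddSubImm(0x123456));  // low 12 bits set and too large: needs a temp
  return 0;
}
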
 /*
  * Load an immediate using a single instruction when possible; otherwise
  * use a pair of movz and movk instructions.
@@ -358,9 +454,6 @@
 
 // TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
 LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  // Maximum number of instructions to use for encoding the immediate.
-  const int max_num_ops = 2;
-
   if (r_dest.IsFloat()) {
     return LoadFPConstantValueWide(r_dest, value);
   }
@@ -378,19 +471,12 @@
   }
 
   // At least one in value's halfwords is not 0x0, nor 0xffff: find out how many.
-  int num_0000_halfwords = 0;
-  int num_ffff_halfwords = 0;
   uint64_t uvalue = static_cast<uint64_t>(value);
-  for (int shift = 0; shift < 64; shift += 16) {
-    uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
-    if (halfword == 0)
-      num_0000_halfwords++;
-    else if (halfword == UINT16_C(0xffff))
-      num_ffff_halfwords++;
-  }
-  int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);
+  int num_fast_halfwords = GetNumFastHalfWords(uvalue);
+  int num_slow_halfwords = 4 - (num_fast_halfwords & 0x7);
+  bool more_ffff_halfwords = (num_fast_halfwords & 0x8) != 0;
 
-  if (num_fast_halfwords < 3) {
+  if (num_slow_halfwords > 1) {
     // A single movz/movn is not enough. Try the logical immediate route.
     int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
     if (log_imm >= 0) {
@@ -398,19 +484,19 @@
     }
   }
 
-  if (num_fast_halfwords >= 4 - max_num_ops) {
+  if (num_slow_halfwords <= max_num_ops_per_const_load) {
     // We can encode the number using a movz/movn followed by one or more movk.
     ArmOpcode op;
     uint16_t background;
     LIR* res = nullptr;
 
     // Decide whether to use a movz or a movn.
-    if (num_0000_halfwords >= num_ffff_halfwords) {
-      op = WIDE(kA64Movz3rdM);
-      background = 0;
-    } else {
+    if (more_ffff_halfwords) {
       op = WIDE(kA64Movn3rdM);
       background = 0xffff;
+    } else {
+      op = WIDE(kA64Movz3rdM);
+      background = 0;
     }
 
     // Emit the first instruction (movz, movn).
@@ -545,7 +631,8 @@
   return NULL;
 }
 
-LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
+LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
+                                  A64RegExtEncodings ext, uint8_t amount) {
   ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
   ArmOpcode opcode = kA64Brk1d;
 
@@ -556,6 +643,11 @@
     case kOpCmp:
       opcode = kA64Cmp3Rre;
       break;
+    case kOpAdd:
+      // Note: intentional fallthrough
+    case kOpSub:
+      return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
     default:
       LOG(FATAL) << "Bad Opcode: " << opcode;
       break;
@@ -565,7 +657,8 @@
   if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
     ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
     if (kind == kFmtExtend) {
-      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
+      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
+                     EncodeExtend(ext, amount));
     }
   }
 
@@ -575,10 +668,10 @@
 
 LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
   /* RegReg operations with SP in first parameter need extended register instruction form.
-   * Only CMN and CMP instructions are implemented.
+   * Only CMN, CMP, ADD & SUB instructions are implemented.
    */
   if (r_dest_src1 == rs_sp) {
-    return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
+    return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
   } else {
     return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
   }
@@ -719,7 +812,7 @@
   int64_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kA64Brk1d;
   ArmOpcode alt_opcode = kA64Brk1d;
-  int32_t log_imm = -1;
+  bool is_logical = false;
   bool is_wide = r_dest.Is64Bit();
   ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
   int info = 0;
@@ -754,65 +847,89 @@
         opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
         return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
       } else {
-        log_imm = -1;
         alt_opcode = (op == kOpAdd) ? kA64Add4RRre : kA64Sub4RRre;
         info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
       }
       break;
-    // case kOpRsub:
-    //   opcode = kThumb2RsubRRI8M;
-    //   alt_opcode = kThumb2RsubRRR;
-    //   break;
     case kOpAdc:
-      log_imm = -1;
       alt_opcode = kA64Adc3rrr;
       break;
     case kOpSbc:
-      log_imm = -1;
       alt_opcode = kA64Sbc3rrr;
       break;
     case kOpOr:
-      log_imm = EncodeLogicalImmediate(is_wide, value);
+      is_logical = true;
       opcode = kA64Orr3Rrl;
       alt_opcode = kA64Orr4rrro;
       break;
     case kOpAnd:
-      log_imm = EncodeLogicalImmediate(is_wide, value);
+      is_logical = true;
       opcode = kA64And3Rrl;
       alt_opcode = kA64And4rrro;
       break;
     case kOpXor:
-      log_imm = EncodeLogicalImmediate(is_wide, value);
+      is_logical = true;
       opcode = kA64Eor3Rrl;
       alt_opcode = kA64Eor4rrro;
       break;
     case kOpMul:
       // TUNING: power of 2, shift & add
-      log_imm = -1;
       alt_opcode = kA64Mul3rrr;
       break;
     default:
       LOG(FATAL) << "Bad opcode: " << op;
   }
 
-  if (log_imm >= 0) {
-    return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
-  } else {
-    RegStorage r_scratch;
-    if (is_wide) {
-      r_scratch = AllocTempWide();
-      LoadConstantWide(r_scratch, value);
+  if (is_logical) {
+    int log_imm = EncodeLogicalImmediate(is_wide, value);
+    if (log_imm >= 0) {
+      return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
     } else {
-      r_scratch = AllocTemp();
-      LoadConstant(r_scratch, value);
+      // When the immediate is either 0 or ~0, the logical operation can be trivially reduced
+      // to a (possibly negated) assignment.
+      if (value == 0) {
+        switch (op) {
+          case kOpOr:
+          case kOpXor:
+            // Or/Xor by zero reduces to an assignment.
+            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+          default:
+            // And by zero reduces to a `mov rdest, xzr'.
+            DCHECK(op == kOpAnd);
+            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
+        }
+      } else if (value == INT64_C(-1)
+                 || (!is_wide && static_cast<uint32_t>(value) == ~UINT32_C(0))) {
+        switch (op) {
+          case kOpAnd:
+            // And by -1 reduces to an assignment.
+            return NewLIR2(kA64Mov2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+          case kOpXor:
+            // Xor by -1 reduces to an `mvn rdest, rsrc'.
+            return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), r_src1.GetReg());
+          default:
+            // Or by -1 reduces to a `mvn rdest, xzr'.
+            DCHECK(op == kOpOr);
+            return NewLIR2(kA64Mvn2rr | wide, r_dest.GetReg(), (is_wide) ? rxzr : rwzr);
+        }
+      }
     }
-    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
-      res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
-    else
-      res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
-    FreeTemp(r_scratch);
-    return res;
   }
+
+  RegStorage r_scratch;
+  if (is_wide) {
+    r_scratch = AllocTempWide();
+    LoadConstantWide(r_scratch, value);
+  } else {
+    r_scratch = AllocTemp();
+    LoadConstant(r_scratch, value);
+  }
+  if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+    res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
+  else
+    res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+  FreeTemp(r_scratch);
+  return res;
 }
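
The reductions above rest on the standard identities of and/orr/eor against all-zeros and all-ones (64-bit shown; the 32-bit case is analogous):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t x = UINT64_C(0x1234567890abcdef);
  assert((x | 0) == x);                        // orr by 0  -> mov rdest, rsrc
  assert((x ^ 0) == x);                        // eor by 0  -> mov rdest, rsrc
  assert((x & 0) == 0);                        // and by 0  -> mov rdest, xzr
  assert((x & ~UINT64_C(0)) == x);             // and by ~0 -> mov rdest, rsrc
  assert((x ^ ~UINT64_C(0)) == ~x);            // eor by ~0 -> mvn rdest, rsrc
  assert((x | ~UINT64_C(0)) == ~UINT64_C(0));  // orr by ~0 -> mvn rdest, xzr
  return 0;
}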
 
 LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
@@ -845,15 +962,6 @@
     }
     OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
     return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
-  } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
-    // Note: "sub sp, sp, Xm" is not correct on arm64.
-    // We need special instructions for SP.
-    // Also operation on 32-bit SP should be avoided.
-    DCHECK(IS_WIDE(wide));
-    RegStorage r_tmp = AllocTempWide();
-    OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
-    OpRegImm64(op, r_tmp, value);
-    return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
   } else {
     RegStorage r_tmp;
     LIR* res;
@@ -898,10 +1006,14 @@
 }
 
 int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
+  DCHECK_EQ(shift_type & 0x3, shift_type);
+  DCHECK_EQ(amount & 0x3f, amount);
   return ((shift_type & 0x3) << 7) | (amount & 0x3f);
 }
 
 int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
+  DCHECK_EQ(extend_type & 0x7, extend_type);
+  DCHECK_EQ(amount & 0x7, amount);
   return  (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
 }
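
These DCHECKs pin down the packed layouts: a shift keeps bit 6 clear and stores its type in bits 8:7 with the amount in bits 5:0, while an extend sets bit 6 and packs the type into bits 5:3 with the amount in bits 2:0; bit 6 is therefore what IsExtendEncoding() can test. A small round-trip check (the extend-type value 2 is an arbitrary stand-in, not necessarily a real A64RegExtEncodings enumerator):

#include <cassert>

static int EncodeShift(int shift_type, int amount) {
  return ((shift_type & 0x3) << 7) | (amount & 0x3f);
}

static int EncodeExtend(int extend_type, int amount) {
  return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
}

int main() {
  const int ext = EncodeExtend(2, 3);
  assert((ext & (1 << 6)) != 0);  // IsExtendEncoding() would return true
  assert(((ext >> 3) & 0x7) == 2 && (ext & 0x7) == 3);

  const int shift = EncodeShift(1, 42);
  assert((shift & (1 << 6)) == 0);  // shift encodings never set bit 6
  assert(((shift >> 7) & 0x3) == 1 && (shift & 0x3f) == 42);
  return 0;
}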
 
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 463f277..2a51b49 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -983,6 +983,8 @@
       estimated_native_code_size_(0),
       reg_pool_(NULL),
       live_sreg_(0),
+      core_vmap_table_(mir_graph->GetArena()->Adapter()),
+      fp_vmap_table_(mir_graph->GetArena()->Adapter()),
       num_core_spills_(0),
       num_fp_spills_(0),
       frame_size_(0),
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0e46c96..7abf3e7 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -48,6 +48,11 @@
     true,   // kIntrinsicMinMaxFloat
     true,   // kIntrinsicMinMaxDouble
     true,   // kIntrinsicSqrt
+    true,   // kIntrinsicCeil
+    true,   // kIntrinsicFloor
+    true,   // kIntrinsicRint
+    true,   // kIntrinsicRoundFloat
+    true,   // kIntrinsicRoundDouble
     false,  // kIntrinsicGet
     false,  // kIntrinsicCharAt
     false,  // kIntrinsicCompareTo
@@ -75,6 +80,11 @@
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCeil], Ceil_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
@@ -155,6 +165,10 @@
     "max",                   // kNameCacheMax
     "min",                   // kNameCacheMin
     "sqrt",                  // kNameCacheSqrt
+    "ceil",                  // kNameCacheCeil
+    "floor",                 // kNameCacheFloor
+    "rint",                  // kNameCacheRint
+    "round",                 // kNameCacheRound
     "get",                   // kNameCacheGet
     "charAt",                // kNameCacheCharAt
     "compareTo",             // kNameCacheCompareTo
@@ -314,6 +328,17 @@
     INTRINSIC(JavaLangMath,       Sqrt, D_D, kIntrinsicSqrt, 0),
     INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0),
 
+    INTRINSIC(JavaLangMath,       Ceil, D_D, kIntrinsicCeil, 0),
+    INTRINSIC(JavaLangStrictMath, Ceil, D_D, kIntrinsicCeil, 0),
+    INTRINSIC(JavaLangMath,       Floor, D_D, kIntrinsicFloor, 0),
+    INTRINSIC(JavaLangStrictMath, Floor, D_D, kIntrinsicFloor, 0),
+    INTRINSIC(JavaLangMath,       Rint, D_D, kIntrinsicRint, 0),
+    INTRINSIC(JavaLangStrictMath, Rint, D_D, kIntrinsicRint, 0),
+    INTRINSIC(JavaLangMath,       Round, F_I, kIntrinsicRoundFloat, 0),
+    INTRINSIC(JavaLangStrictMath, Round, F_I, kIntrinsicRoundFloat, 0),
+    INTRINSIC(JavaLangMath,       Round, D_J, kIntrinsicRoundDouble, 0),
+    INTRINSIC(JavaLangStrictMath, Round, D_J, kIntrinsicRoundDouble, 0),
+
     INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
 
     INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
@@ -436,6 +461,16 @@
       return backend->GenInlinedMinMaxFP(info, intrinsic.d.data & kIntrinsicFlagMin, true /* is_double */);
     case kIntrinsicSqrt:
       return backend->GenInlinedSqrt(info);
+    case kIntrinsicCeil:
+      return backend->GenInlinedCeil(info);
+    case kIntrinsicFloor:
+      return backend->GenInlinedFloor(info);
+    case kIntrinsicRint:
+      return backend->GenInlinedRint(info);
+    case kIntrinsicRoundFloat:
+      return backend->GenInlinedRound(info, false /* is_double */);
+    case kIntrinsicRoundDouble:
+      return backend->GenInlinedRound(info, true /* is_double */);
     case kIntrinsicGet:
       return backend->GenInlinedGet(info);
     case kIntrinsicCharAt:
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index cb8c165..1bd3c48 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -141,6 +141,10 @@
       kNameCacheMax,
       kNameCacheMin,
       kNameCacheSqrt,
+      kNameCacheCeil,
+      kNameCacheFloor,
+      kNameCacheRint,
+      kNameCacheRound,
       kNameCacheGet,
       kNameCacheCharAt,
       kNameCacheCompareTo,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index aae9155..0054f34 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -256,7 +256,7 @@
     RegLocation rl_temp = UpdateLoc(rl_src2);
     int32_t constant_value = mir_graph_->ConstantValue(rl_src2);
     if ((rl_temp.location == kLocDalvikFrame) &&
-        InexpensiveConstantInt(constant_value)) {
+        InexpensiveConstantInt(constant_value, opcode)) {
       // OK - convert this to a compare immediate and branch
       OpCmpImmBranch(cond, rl_src1.reg, mir_graph_->ConstantValue(rl_src2), taken);
       return;
@@ -1808,10 +1808,6 @@
 
   switch (opcode) {
     case Instruction::NOT_LONG:
-      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-        GenNotLong(rl_dest, rl_src2);
-        return;
-      }
       rl_src2 = LoadValueWide(rl_src2, kCoreReg);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       // Check for destructive overlap
@@ -1829,39 +1825,22 @@
       return;
     case Instruction::ADD_LONG:
     case Instruction::ADD_LONG_2ADDR:
-      if (cu_->instruction_set != kThumb2) {
-        GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpAdd;
       second_op = kOpAdc;
       break;
     case Instruction::SUB_LONG:
     case Instruction::SUB_LONG_2ADDR:
-      if (cu_->instruction_set != kThumb2) {
-        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpSub;
       second_op = kOpSbc;
       break;
     case Instruction::MUL_LONG:
     case Instruction::MUL_LONG_2ADDR:
-      if (cu_->instruction_set != kMips) {
-        GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      } else {
-        call_out = true;
-        TargetReg(kRet0, kNotWide).GetReg();
-        target = kQuickLmul;
-      }
+      call_out = true;
+      ret_reg = TargetReg(kRet0, kNotWide).GetReg();
+      target = kQuickLmul;
       break;
     case Instruction::DIV_LONG:
     case Instruction::DIV_LONG_2ADDR:
-      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
-        return;
-      }
       call_out = true;
       check_zero = true;
       ret_reg = TargetReg(kRet0, kNotWide).GetReg();
@@ -1869,10 +1848,6 @@
       break;
     case Instruction::REM_LONG:
     case Instruction::REM_LONG_2ADDR:
-      if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
-        GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
-        return;
-      }
       call_out = true;
       check_zero = true;
       target = kQuickLmod;
@@ -1882,37 +1857,19 @@
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
-      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
-          cu_->instruction_set == kArm64) {
-        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
-      }
       first_op = kOpAnd;
       second_op = kOpAnd;
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
-          cu_->instruction_set == kArm64) {
-        GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpOr;
       second_op = kOpOr;
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 ||
-          cu_->instruction_set == kArm64) {
-        GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
-        return;
-      }
       first_op = kOpXor;
       second_op = kOpXor;
       break;
-    case Instruction::NEG_LONG: {
-      GenNegLong(rl_dest, rl_src2);
-      return;
-    }
     default:
       LOG(FATAL) << "Invalid long arith op";
   }
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index a0a2ed0..8e7f6a6 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1427,6 +1427,22 @@
   return false;
 }
 
+bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+  return false;
+}
+
+bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+  return false;
+}
+
+bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+  return false;
+}
+
+bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+  return false;
+}
+
 bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
   if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
@@ -1568,16 +1584,6 @@
                   kNotVolatile);
       break;
 
-    case kX86:
-      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
-                                                          Thread::PeerOffset<4>());
-      break;
-
-    case kX86_64:
-      reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg,
-                                                          Thread::PeerOffset<8>());
-      break;
-
     default:
       LOG(FATAL) << "Unexpected isa " << cu_->instruction_set;
   }
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 4bd2748..bd0c020 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -92,12 +92,6 @@
                      RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
     void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_shift);
-    void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
     void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2);
     void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -112,16 +106,8 @@
     bool GenInlinedSqrt(CallInfo* info);
     bool GenInlinedPeek(CallInfo* info, OpSize size);
     bool GenInlinedPoke(CallInfo* info, OpSize size);
-    void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-    void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                   RegLocation rl_src2);
-    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                    RegLocation rl_src2);
-    void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2, bool is_div);
+    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                        RegLocation rl_src2) OVERRIDE;
     RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
     RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
     void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -196,6 +182,12 @@
     LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
 
   private:
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+    void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                    RegLocation rl_src2);
+
     void ConvertShortToLongBranch(LIR* lir);
     RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2, bool is_div, bool check_zero);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index d727615..ea56989 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -392,11 +392,6 @@
 }
 
 
-void MipsMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenMulLong for Mips";
-}
-
 void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -441,13 +436,27 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
-  LOG(FATAL) << "Unexpected use GenNotLong()";
-}
+void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                 RegLocation rl_src2) {
+  switch (opcode) {
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
 
-void MipsMir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2, bool is_div) {
-  LOG(FATAL) << "Unexpected use GenDivRemLong()";
+    default:
+      break;
+  }
+
+  // Fall back to the generic implementation for all other ops.
+  Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
 }
 
 void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
@@ -470,22 +479,6 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void MipsMir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1,
-                             RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenAndLong for Mips";
-}
-
-void MipsMir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenOrLong for Mips";
-}
-
-void MipsMir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
-                             RegLocation rl_src1, RegLocation rl_src2) {
-  LOG(FATAL) << "Unexpected use of GenXorLong for Mips";
-}
-
 /*
  * Generate array load
  */
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 4d8b91e..e519011 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -926,11 +926,11 @@
     case Instruction::XOR_INT:
     case Instruction::XOR_INT_2ADDR:
       if (rl_src[0].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]))) {
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
         GenArithOpIntLit(opcode, rl_dest, rl_src[1],
                              mir_graph_->ConstantValue(rl_src[0].orig_sreg));
       } else if (rl_src[1].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+                 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
         GenArithOpIntLit(opcode, rl_dest, rl_src[0],
                              mir_graph_->ConstantValue(rl_src[1].orig_sreg));
       } else {
@@ -951,7 +951,7 @@
     case Instruction::USHR_INT:
     case Instruction::USHR_INT_2ADDR:
       if (rl_src[1].is_const &&
-          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]))) {
+          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
         GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
       } else {
         GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index d03b859..4ed9929 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -32,6 +32,7 @@
 #include "safe_map.h"
 #include "utils/array_ref.h"
 #include "utils/arena_allocator.h"
+#include "utils/arena_containers.h"
 #include "utils/growable_array.h"
 #include "utils/stack_checks.h"
 
@@ -867,8 +868,8 @@
                         RegLocation rl_src1, RegLocation rl_shift);
     void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                           RegLocation rl_src, int lit);
-    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                        RegLocation rl_src1, RegLocation rl_src2);
+    virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                RegLocation rl_src1, RegLocation rl_src2);
     void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
     virtual void GenSuspendTest(int opt_flags);
     virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
@@ -965,10 +966,14 @@
     virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
     bool GenInlinedFloatCvt(CallInfo* info);
     bool GenInlinedDoubleCvt(CallInfo* info);
+    virtual bool GenInlinedCeil(CallInfo* info);
+    virtual bool GenInlinedFloor(CallInfo* info);
+    virtual bool GenInlinedRint(CallInfo* info);
+    virtual bool GenInlinedRound(CallInfo* info, bool is_double);
     virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
     virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
     bool GenInlinedStringCompareTo(CallInfo* info);
-    bool GenInlinedCurrentThread(CallInfo* info);
+    virtual bool GenInlinedCurrentThread(CallInfo* info);
     bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
     bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                              bool is_volatile, bool is_ordered);
@@ -1246,15 +1251,6 @@
     // Required for target - Dalvik-level generators.
     virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                    RegLocation rl_src1, RegLocation rl_src2) = 0;
-    virtual void GenMulLong(Instruction::Code,
-                            RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenAddLong(Instruction::Code,
-                            RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenAndLong(Instruction::Code,
-                            RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
     virtual void GenArithOpDouble(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_src2) = 0;
@@ -1282,16 +1278,6 @@
     virtual bool GenInlinedSqrt(CallInfo* info) = 0;
     virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
     virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
-    virtual void GenNotLong(RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
-    virtual void GenOrLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2) = 0;
-    virtual void GenSubLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) = 0;
-    virtual void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2, bool is_div) = 0;
     virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                   bool is_div) = 0;
     virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
@@ -1441,6 +1427,9 @@
     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
     virtual bool InexpensiveConstantLong(int64_t value) = 0;
     virtual bool InexpensiveConstantDouble(int64_t value) = 0;
+    virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+      return InexpensiveConstantInt(value);
+    }
 
     // May be optimized by targets.
     virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
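A minimal sketch of the overload pattern introduced above, with a stand-in enum since only the shape matters here (the names below are illustrative, not ART's):

  #include <cstdint>

  enum class Op { kAddInt, kShlInt };  // stand-in for Instruction::Code

  struct CodegenSketch {
    virtual ~CodegenSketch() {}
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    // Default: the answer ignores the opcode, so existing back ends keep their
    // behavior; a target can override this to treat, say, a shift amount as
    // cheap while the same constant is expensive as an add immediate.
    virtual bool InexpensiveConstantInt(int32_t value, Op opcode) {
      return InexpensiveConstantInt(value);
    }
  };
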
@@ -1711,8 +1700,8 @@
     CodeBuffer code_buffer_;
     // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
     std::vector<uint8_t> encoded_mapping_table_;
-    std::vector<uint32_t> core_vmap_table_;
-    std::vector<uint32_t> fp_vmap_table_;
+    ArenaVector<uint32_t> core_vmap_table_;
+    ArenaVector<uint32_t> fp_vmap_table_;
     std::vector<uint8_t> native_gc_map_;
     int num_core_spills_;
     int num_fp_spills_;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 45244e1..be966e1 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1171,12 +1171,13 @@
       } else {
         counts[p_map_idx].count += use_count;
       }
-    } else if (!IsInexpensiveConstant(loc)) {
+    } else {
       if (loc.wide && WideGPRsAreAliases()) {
-        // Longs and doubles can be counted together.
         i++;
       }
-      counts[p_map_idx].count += use_count;
+      if (!IsInexpensiveConstant(loc)) {
+        counts[p_map_idx].count += use_count;
+      }
     }
   }
 }
@@ -1185,9 +1186,10 @@
 static int SortCounts(const void *val1, const void *val2) {
   const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1);
   const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2);
-  // Note that we fall back to sorting on reg so we get stable output
-  // on differing qsort implementations (such as on host and target or
-  // between local host and build servers).
+  // Note that we fall back to sorting on reg so we get stable output on differing qsort
+  // implementations (such as on host and target or between local host and build servers).
+  // Note also that if a wide val1 and a non-wide val2 have the same count, then val1 always
+  // "loses" (as STARTING_WIDE_SREG is or-ed into val1->s_reg).
   return (op1->count == op2->count)
           ? (op1->s_reg - op2->s_reg)
           : (op1->count < op2->count ? 1 : -1);
@@ -1230,8 +1232,8 @@
    * TUNING: replace with linear scan once we have the ability
    * to describe register live ranges for GC.
    */
-  size_t core_reg_count_size = cu_->target64 ? num_regs * 2 : num_regs;
-  size_t fp_reg_count_size = num_regs * 2;
+  size_t core_reg_count_size = WideGPRsAreAliases() ? num_regs : num_regs * 2;
+  size_t fp_reg_count_size = WideFPRsAreAliases() ? num_regs : num_regs * 2;
   RefCounts *core_regs =
       static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * core_reg_count_size,
                                             kArenaAllocRegAlloc));
@@ -1261,7 +1263,6 @@
   // Sum use counts of SSA regs by original Dalvik vreg.
   CountRefs(core_regs, fp_regs, num_regs);
 
-
   // Sort the count arrays
   qsort(core_regs, core_reg_count_size, sizeof(RefCounts), SortCounts);
   qsort(fp_regs, fp_reg_count_size, sizeof(RefCounts), SortCounts);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 0a46f2e..266191a 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -65,7 +65,7 @@
 
   // Required for target - codegen helpers.
   bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
-                          RegLocation rl_dest, int lit);
+                          RegLocation rl_dest, int lit) OVERRIDE;
   bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
   LIR* CheckSuspendUsingLoad() OVERRIDE;
   RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
@@ -73,22 +73,17 @@
                     OpSize size, VolatileKind is_volatile) OVERRIDE;
   LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
                        OpSize size) OVERRIDE;
-  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                           RegStorage r_dest, OpSize size);
   LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
   LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
   LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                      OpSize size, VolatileKind is_volatile) OVERRIDE;
   LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
                         OpSize size) OVERRIDE;
-  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                            RegStorage r_src, OpSize size);
-  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
-  void GenImplicitNullCheck(RegStorage reg, int opt_flags);
+  void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+  void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
 
   // Required for target - register utilities.
   RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
-  RegStorage TargetReg32(SpecialTargetRegister reg);
   RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
     if (wide_kind == kWide) {
       if (cu_->target64) {
@@ -110,111 +105,78 @@
   RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
     return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
   }
-  RegStorage GetArgMappingToPhysicalReg(int arg_num);
-  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
-  RegLocation GetReturnAlt();
-  RegLocation GetReturnWideAlt();
-  RegLocation LocCReturn();
-  RegLocation LocCReturnRef();
-  RegLocation LocCReturnDouble();
-  RegLocation LocCReturnFloat();
-  RegLocation LocCReturnWide();
+
+  RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+
+  RegLocation GetReturnAlt() OVERRIDE;
+  RegLocation GetReturnWideAlt() OVERRIDE;
+  RegLocation LocCReturn() OVERRIDE;
+  RegLocation LocCReturnRef() OVERRIDE;
+  RegLocation LocCReturnDouble() OVERRIDE;
+  RegLocation LocCReturnFloat() OVERRIDE;
+  RegLocation LocCReturnWide() OVERRIDE;
+
   ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
-  void AdjustSpillMask();
-  void ClobberCallerSave();
-  void FreeCallTemps();
-  void LockCallTemps();
-  void CompilerInitializeRegAlloc();
-  int VectorRegisterSize();
-  int NumReservableVectorRegisters(bool fp_used);
+  void AdjustSpillMask() OVERRIDE;
+  void ClobberCallerSave() OVERRIDE;
+  void FreeCallTemps() OVERRIDE;
+  void LockCallTemps() OVERRIDE;
+
+  void CompilerInitializeRegAlloc() OVERRIDE;
+  int VectorRegisterSize() OVERRIDE;
+  int NumReservableVectorRegisters(bool fp_used) OVERRIDE;
 
   // Required for target - miscellaneous.
-  void AssembleLIR();
-  int AssignInsnOffsets();
-  void AssignOffsets();
-  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+  void AssembleLIR() OVERRIDE;
   void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
   void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                 ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
-  const char* GetTargetInstFmt(int opcode);
-  const char* GetTargetInstName(int opcode);
-  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+  const char* GetTargetInstFmt(int opcode) OVERRIDE;
+  const char* GetTargetInstName(int opcode) OVERRIDE;
+  std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) OVERRIDE;
   ResourceMask GetPCUseDefEncoding() const OVERRIDE;
-  uint64_t GetTargetInstFlags(int opcode);
+  uint64_t GetTargetInstFlags(int opcode) OVERRIDE;
   size_t GetInsnSize(LIR* lir) OVERRIDE;
-  bool IsUnconditionalBranch(LIR* lir);
+  bool IsUnconditionalBranch(LIR* lir) OVERRIDE;
 
   // Get the register class for load/store of a field.
   RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
   // Required for target - Dalvik-level generators.
-  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                         RegLocation rl_src2);
   void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
-                   RegLocation rl_dest, int scale);
+                   RegLocation rl_dest, int scale) OVERRIDE;
   void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
-                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
-  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                         RegLocation rl_src1, RegLocation rl_shift);
-  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
+                   RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) OVERRIDE;
+
   void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                        RegLocation rl_src2);
+                        RegLocation rl_src2) OVERRIDE;
   void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                       RegLocation rl_src2);
-  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
+                       RegLocation rl_src2) OVERRIDE;
   void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                RegLocation rl_src2);
-  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
-  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
-  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
-  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
-  bool GenInlinedSqrt(CallInfo* info);
+                RegLocation rl_src2) OVERRIDE;
+  void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+
+  bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
+  bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
+  bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+  bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
-  bool GenInlinedPeek(CallInfo* info, OpSize size);
-  bool GenInlinedPoke(CallInfo* info, OpSize size);
+  bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
   bool GenInlinedCharAt(CallInfo* info) OVERRIDE;
-  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
-  void GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                 RegLocation rl_src2);
-  void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
-                  RegLocation rl_src2);
-  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                     RegLocation rl_src2, bool is_div);
-  // TODO: collapse reg_lo, reg_hi
-  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
-  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
-  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
-  void GenDivZeroCheckWide(RegStorage reg);
-  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
-  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
-  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
-  void GenExitSequence();
-  void GenSpecialExitSequence();
-  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
-  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
-  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
-  void GenSelect(BasicBlock* bb, MIR* mir);
-  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
-                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
-                        int dest_reg_class) OVERRIDE;
-  bool GenMemBarrier(MemBarrierKind barrier_kind);
-  void GenMoveException(RegLocation rl_dest);
-  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
-                                     int first_bit, int second_bit);
-  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
-  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
-  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
-  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
+
+  // Long instructions.
+  void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                      RegLocation rl_src2) OVERRIDE;
+  void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                         RegLocation rl_src2) OVERRIDE;
+  void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                         RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
+  void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) OVERRIDE;
+  void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                      RegLocation rl_src1, RegLocation rl_shift) OVERRIDE;
 
   /*
    * @brief Generate a two address long operation with a constant value
@@ -224,6 +186,7 @@
    * @return success or not
    */
   bool GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
+
   /*
    * @brief Generate a three address long operation with a constant value
    * @param rl_dest location of result
@@ -234,7 +197,6 @@
    */
   bool GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
                       Instruction::Code op);
-
   /**
    * @brief Generate a long arithmetic operation.
    * @param rl_dest The destination.
@@ -262,6 +224,31 @@
    */
   virtual void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op);
 
+
+  // TODO: collapse reg_lo, reg_hi
+  RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div)
+      OVERRIDE;
+  RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) OVERRIDE;
+  void GenDivZeroCheckWide(RegStorage reg) OVERRIDE;
+  void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
+  void GenExitSequence() OVERRIDE;
+  void GenSpecialExitSequence() OVERRIDE;
+  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
+  void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
+  void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+                        int32_t true_val, int32_t false_val, RegStorage rs_dest,
+                        int dest_reg_class) OVERRIDE;
+  bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
+  void GenMoveException(RegLocation rl_dest) OVERRIDE;
+  void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+                                     int first_bit, int second_bit) OVERRIDE;
+  void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+
   /**
    * @brief Implement instanceof a final class with x86 specific code.
    * @param use_declaring_class 'true' if we can use the class itself.
@@ -270,56 +257,39 @@
    * @param rl_src Object to be tested.
    */
   void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
-                          RegLocation rl_src);
-
-  void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
-                      RegLocation rl_src1, RegLocation rl_shift);
+                          RegLocation rl_src) OVERRIDE;
 
   // Single operation generators.
-  LIR* OpUnconditionalBranch(LIR* target);
-  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
-  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
-  LIR* OpCondBranch(ConditionCode cc, LIR* target);
-  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
-  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
-  LIR* OpIT(ConditionCode cond, const char* guide);
-  void OpEndIT(LIR* it);
-  LIR* OpMem(OpKind op, RegStorage r_base, int disp);
-  LIR* OpPcRelLoad(RegStorage reg, LIR* target);
-  LIR* OpReg(OpKind op, RegStorage r_dest_src);
-  void OpRegCopy(RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
-  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
-  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
-  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
-  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
-  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
-  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
-  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
-  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
-  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
-  LIR* OpTestSuspend(LIR* target);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
-  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
-  LIR* OpVldm(RegStorage r_base, int count);
-  LIR* OpVstm(RegStorage r_base, int count);
-  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
-  void OpRegCopyWide(RegStorage dest, RegStorage src);
-  void OpTlsCmp(ThreadOffset<4> offset, int val);
-  void OpTlsCmp(ThreadOffset<8> offset, int val);
+  LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
+  LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) OVERRIDE;
+  LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) OVERRIDE;
+  LIR* OpCondBranch(ConditionCode cc, LIR* target) OVERRIDE;
+  LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpIT(ConditionCode cond, const char* guide) OVERRIDE;
+  void OpEndIT(LIR* it) OVERRIDE;
+  LIR* OpMem(OpKind op, RegStorage r_base, int disp) OVERRIDE;
+  LIR* OpPcRelLoad(RegStorage reg, LIR* target) OVERRIDE;
+  LIR* OpReg(OpKind op, RegStorage r_dest_src) OVERRIDE;
+  void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) OVERRIDE;
+  LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) OVERRIDE;
+  LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
+  LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
+  LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) OVERRIDE;
+  LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) OVERRIDE;
+  LIR* OpTestSuspend(LIR* target) OVERRIDE;
+  LIR* OpVldm(RegStorage r_base, int count) OVERRIDE;
+  LIR* OpVstm(RegStorage r_base, int count) OVERRIDE;
+  void OpRegCopyWide(RegStorage dest, RegStorage src) OVERRIDE;
+  bool GenInlinedCurrentThread(CallInfo* info) OVERRIDE;
 
-  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
-  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
-  void SpillCoreRegs();
-  void UnSpillCoreRegs();
-  void UnSpillFPRegs();
-  void SpillFPRegs();
-  static const X86EncodingMap EncodingMap[kX86Last];
-  bool InexpensiveConstantInt(int32_t value);
-  bool InexpensiveConstantFloat(int32_t value);
-  bool InexpensiveConstantLong(int64_t value);
-  bool InexpensiveConstantDouble(int64_t value);
+  bool InexpensiveConstantInt(int32_t value) OVERRIDE;
+  bool InexpensiveConstantFloat(int32_t value) OVERRIDE;
+  bool InexpensiveConstantLong(int64_t value) OVERRIDE;
+  bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
 
   /*
    * @brief Should try to optimize for two address instructions?
@@ -335,13 +305,7 @@
    * @param rl_rhs Right hand operand.
    */
   void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_lhs,
-                     RegLocation rl_rhs);
-
-  /*
-   * @brief Dump a RegLocation using printf
-   * @param loc Register location to dump
-   */
-  static void DumpRegLocation(RegLocation loc);
+                     RegLocation rl_rhs) OVERRIDE;
 
   /*
    * @brief Load the Method* of a dex method into the register.
@@ -351,7 +315,7 @@
    * @note register will be passed to TargetReg to get physical register.
    */
   void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
-                         SpecialTargetRegister symbolic_reg);
+                         SpecialTargetRegister symbolic_reg) OVERRIDE;
 
   /*
    * @brief Load the Class* of a Dex Class type into the register.
@@ -359,23 +323,23 @@
    * @param register that will contain the code address.
    * @note register will be passed to TargetReg to get physical register.
    */
-  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
+  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) OVERRIDE;
 
-  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
+  void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
 
   int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                           bool skip_this);
+                           bool skip_this) OVERRIDE;
 
   int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                          NextCallInsn next_call_insn,
                          const MethodReference& target_method,
                          uint32_t vtable_idx,
                          uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
-                         bool skip_this);
+                         bool skip_this) OVERRIDE;
 
   /*
    * @brief Generate a relative call to the method that will be patched at link time.
@@ -388,7 +352,7 @@
   /*
    * @brief Handle x86 specific literals
    */
-  void InstallLiteralPools();
+  void InstallLiteralPools() OVERRIDE;
 
   /*
    * @brief Generate the debug_frame CFI information.
@@ -400,11 +364,12 @@
    * @brief Generate the debug_frame FDE information.
    * @returns pointer to vector containing CFE information
    */
-  std::vector<uint8_t>* ReturnCallFrameInformation();
+  std::vector<uint8_t>* ReturnCallFrameInformation() OVERRIDE;
 
   LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
 
  protected:
+  RegStorage TargetReg32(SpecialTargetRegister reg);
   // Casting of RegStorage
   RegStorage As32BitReg(RegStorage reg) {
     DCHECK(!reg.IsPair());
@@ -442,6 +407,17 @@
     return ret_val;
   }
 
+  LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+                           RegStorage r_dest, OpSize size);
+  LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
+                            RegStorage r_src, OpSize size);
+
+  RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
+
+  int AssignInsnOffsets();
+  void AssignOffsets();
+  AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+
   size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
                      int32_t raw_base, int32_t displacement);
   void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
@@ -528,6 +504,9 @@
    * @returns true if a register is byte addressable.
    */
   bool IsByteRegister(RegStorage reg);
+
+  void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
+
   bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
 
   /*
@@ -736,8 +715,9 @@
    * @param divisor divisor number for calculation
    * @param magic hold calculated magic number
    * @param shift hold calculated shift
+   * @param is_long 'true' if divisor is jlong, 'false' for jint.
    */
-  void CalculateMagicAndShift(int divisor, int& magic, int& shift);
+  void CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long);
 
   /*
    * @brief Generate an integer div or rem operation.
@@ -800,6 +780,8 @@
   LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                          int offset, int check_value, LIR* target, LIR** compare);
 
+  void GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_double);
+
   /*
    * Can this operation be using core registers without temporaries?
    * @param rl_lhs Left hand operand.
@@ -816,6 +798,36 @@
    */
   virtual void GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_double);
 
+  void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
+  void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
+
+  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
+  LIR* OpRegMem(OpKind op, RegStorage r_dest, RegLocation value);
+  LIR* OpMemReg(OpKind op, RegLocation rl_dest, int value);
+  LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
+  LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset);
+  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
+  void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
+  void OpTlsCmp(ThreadOffset<4> offset, int val);
+  void OpTlsCmp(ThreadOffset<8> offset, int val);
+
+  void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
+
+  // Try to do a long multiplication where rl_src2 is a constant. This simplified setup might fail,
+  // in which case false will be returned.
+  bool GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val);
+  void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                  RegLocation rl_src2);
+  void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+  void GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                     RegLocation rl_src2, bool is_div);
+
+  void SpillCoreRegs();
+  void UnSpillCoreRegs();
+  void UnSpillFPRegs();
+  void SpillFPRegs();
+
   /*
    * @brief Perform MIR analysis before compiling method.
    * @note Invokes Mir2LiR::Materialize after analysis.
@@ -938,6 +950,14 @@
     return true;  // xmm registers have 64b views even on x86.
   }
 
+  /*
+   * @brief Dump a RegLocation using printf
+   * @param loc Register location to dump
+   */
+  static void DumpRegLocation(RegLocation loc);
+
+  static const X86EncodingMap EncodingMap[kX86Last];
+
  private:
   // The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
   int num_reserved_vector_regs_;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b9abdbf..fdc46e2 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -513,7 +513,7 @@
   OpCondBranch(ccode, taken);
 }
 
-void X86Mir2Lir::CalculateMagicAndShift(int divisor, int& magic, int& shift) {
+void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& shift, bool is_long) {
   // It does not make sense to calculate magic and shift for zero divisor.
   DCHECK_NE(divisor, 0);
 
@@ -525,8 +525,8 @@
    * Let nc be the most negative value of numerator(n) such that nc = kd + 1,
    * where divisor(d) <= -2.
    * Thus nc can be calculated like:
-   * nc = 2^31 + 2^31 % d - 1, where d >= 2
-   * nc = -2^31 + (2^31 + 1) % d, where d >= 2.
+   * nc = exp + exp % d - 1, where d >= 2 and exp = 2^31 for int or 2^63 for long
+   * nc = -exp + (exp + 1) % d, where d >= 2 and exp = 2^31 for int or 2^63 for long
    *
    * So the shift p is the smallest p satisfying
    * 2^p > nc * (d - 2^p % d), where d >= 2
@@ -536,27 +536,28 @@
    * M = (2^p + d - 2^p % d) / d, where d >= 2
    * M = (2^p - d - 2^p % d) / d, where d <= -2.
    *
-   * Notice that p is always bigger than or equal to 32, so we just return 32-p as
+   * Notice that p is always at least 32 (or 64 for long), so we just return p - 32 (or p - 64) as
    * the shift number S.
    */
 
-  int32_t p = 31;
-  const uint32_t two31 = 0x80000000U;
+  int64_t p = (is_long) ? 63 : 31;
+  const uint64_t exp = (is_long) ? 0x8000000000000000ULL : 0x80000000U;
 
   // Initialize the computations.
-  uint32_t abs_d = (divisor >= 0) ? divisor : -divisor;
-  uint32_t tmp = two31 + (static_cast<uint32_t>(divisor) >> 31);
-  uint32_t abs_nc = tmp - 1 - tmp % abs_d;
-  uint32_t quotient1 = two31 / abs_nc;
-  uint32_t remainder1 = two31 % abs_nc;
-  uint32_t quotient2 = two31 / abs_d;
-  uint32_t remainder2 = two31 % abs_d;
+  uint64_t abs_d = (divisor >= 0) ? divisor : -divisor;
+  uint64_t tmp = exp + ((is_long) ? static_cast<uint64_t>(divisor) >> 63 :
+                                    static_cast<uint32_t>(divisor) >> 31);
+  uint64_t abs_nc = tmp - 1 - tmp % abs_d;
+  uint64_t quotient1 = exp / abs_nc;
+  uint64_t remainder1 = exp % abs_nc;
+  uint64_t quotient2 = exp / abs_d;
+  uint64_t remainder2 = exp % abs_d;
 
   /*
    * To avoid handling both positive and negative divisor, Hacker's Delight
    * introduces a method to handle these 2 cases together to avoid duplication.
    */
-  uint32_t delta;
+  uint64_t delta;
   do {
     p++;
     quotient1 = 2 * quotient1;
@@ -575,7 +576,12 @@
   } while (quotient1 < delta || (quotient1 == delta && remainder1 == 0));
 
   magic = (divisor > 0) ? (quotient2 + 1) : (-quotient2 - 1);
-  shift = p - 32;
+
+  if (!is_long) {
+    magic = static_cast<int>(magic);
+  }
+
+  shift = (is_long) ? p - 64 : p - 32;
 }
 
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
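For reference, the widened computation in one compilable piece — a sketch of the Hacker's Delight (ch. 10) routine the hunks above modify, where is_long selecting exp = 2^63 is the substance of the change (the INT64_MIN divisor edge is ignored here, as in the code itself):

  #include <cstdint>

  void CalcMagicAndShift(int64_t divisor, int64_t* magic, int* shift, bool is_long) {
    int64_t p = is_long ? 63 : 31;
    const uint64_t exp = is_long ? 0x8000000000000000ULL : 0x80000000ULL;
    uint64_t abs_d = (divisor >= 0) ? divisor : -divisor;
    uint64_t tmp = exp + (is_long ? static_cast<uint64_t>(divisor) >> 63
                                  : static_cast<uint32_t>(divisor) >> 31);
    uint64_t abs_nc = tmp - 1 - tmp % abs_d;  // most negative numerator to handle
    uint64_t q1 = exp / abs_nc, r1 = exp % abs_nc;
    uint64_t q2 = exp / abs_d, r2 = exp % abs_d;
    uint64_t delta;
    do {  // find the smallest p with 2^p > nc * (d - 2^p % d)
      p++;
      q1 = 2 * q1; r1 = 2 * r1;
      if (r1 >= abs_nc) { q1++; r1 -= abs_nc; }
      q2 = 2 * q2; r2 = 2 * r2;
      if (r2 >= abs_d) { q2++; r2 -= abs_d; }
      delta = abs_d - r2;
    } while (q1 < delta || (q1 == delta && r1 == 0));
    *magic = (divisor > 0) ? static_cast<int64_t>(q2 + 1)
                           : -static_cast<int64_t>(q2) - 1;
    if (!is_long) *magic = static_cast<int32_t>(*magic);
    *shift = static_cast<int>(is_long ? p - 64 : p - 32);
  }

  // E.g. divisor 7 with is_long == false yields magic 0x92492493 (as a 32-bit
  // pattern) and shift 2, the classic divide-by-7 constants.
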
@@ -586,52 +592,57 @@
 RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
                                      int imm, bool is_div) {
   // Use a multiply (and fixup) to perform an int div/rem by a constant.
+  RegLocation rl_result;
 
-  // We have to use fixed registers, so flush all the temps.
-  FlushAllRegs();
-  LockCallTemps();  // Prepare for explicit register usage.
-
-  // Assume that the result will be in EDX.
-  RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
-
-  // handle div/rem by 1 special case.
   if (imm == 1) {
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
     if (is_div) {
       // x / 1 == x.
-      StoreValue(rl_result, rl_src);
+      LoadValueDirectFixed(rl_src, rl_result.reg);
     } else {
       // x % 1 == 0.
-      LoadConstantNoClobber(rs_r0, 0);
-      // For this case, return the result in EAX.
-      rl_result.reg.SetReg(r0);
+      LoadConstantNoClobber(rl_result.reg, 0);
     }
   } else if (imm == -1) {  // handle 0x80000000 / -1 special case.
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
     if (is_div) {
-      LIR *minint_branch = 0;
-      LoadValueDirectFixed(rl_src, rs_r0);
-      OpRegImm(kOpCmp, rs_r0, 0x80000000);
-      minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+      LoadValueDirectFixed(rl_src, rl_result.reg);
+      OpRegImm(kOpCmp, rl_result.reg, 0x80000000);
+      LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
 
       // for x != MIN_INT, x / -1 == -x.
-      NewLIR1(kX86Neg32R, r0);
+      NewLIR1(kX86Neg32R, rl_result.reg.GetReg());
 
-      LIR* branch_around = NewLIR1(kX86Jmp8, 0);
-      // The target for cmp/jmp above.
-      minint_branch->target = NewLIR0(kPseudoTargetLabel);
       // rl_result.reg already contains the right value (0x80000000).
-      branch_around->target = NewLIR0(kPseudoTargetLabel);
+      minint_branch->target = NewLIR0(kPseudoTargetLabel);
     } else {
       // x % -1 == 0.
-      LoadConstantNoClobber(rs_r0, 0);
+      LoadConstantNoClobber(rl_result.reg, 0);
     }
-    // For this case, return the result in EAX.
-    rl_result.reg.SetReg(r0);
+  } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+    // Division using shifting.
+    rl_src = LoadValue(rl_src, kCoreReg);
+    rl_result = EvalLoc(rl_dest, kCoreReg, true);
+    if (IsSameReg(rl_result.reg, rl_src.reg)) {
+      RegStorage rs_temp = AllocTypedTemp(false, kCoreReg);
+      rl_result.reg.SetReg(rs_temp.GetReg());
+    }
+    NewLIR3(kX86Lea32RM, rl_result.reg.GetReg(), rl_src.reg.GetReg(), std::abs(imm) - 1);
+    NewLIR2(kX86Test32RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
+    OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
+    int shift_amount = LowestSetBit(imm);
+    OpRegImm(kOpAsr, rl_result.reg, shift_amount);
+    if (imm < 0) {
+      OpReg(kOpNeg, rl_result.reg);
+    }
   } else {
     CHECK(imm <= -2 || imm >= 2);
+
     // Use H. S. Warren's Hacker's Delight Chapter 10 and
     // T. Granlund, P. L. Montgomery's Division by invariant integers using multiplication.
-    int magic, shift;
-    CalculateMagicAndShift(imm, magic, shift);
+    int64_t magic;
+    int shift;
+    CalculateMagicAndShift((int64_t)imm, magic, shift, false /* is_long */);
 
     /*
      * For imm >= 2,
@@ -649,18 +660,22 @@
      * 5. Thus, EDX is the quotient
      */
 
+    FlushReg(rs_r0);
+    Clobber(rs_r0);
+    LockTemp(rs_r0);
+    FlushReg(rs_r2);
+    Clobber(rs_r2);
+    LockTemp(rs_r2);
+
+    // Assume that the result will be in EDX.
+    rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r2, INVALID_SREG, INVALID_SREG};
+
     // Numerator into EAX.
     RegStorage numerator_reg;
     if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
       // We will need the value later.
-      if (rl_src.location == kLocPhysReg) {
-        // We can use it directly.
-        DCHECK(rl_src.reg.GetReg() != rs_r0.GetReg() && rl_src.reg.GetReg() != rs_r2.GetReg());
-        numerator_reg = rl_src.reg;
-      } else {
-        numerator_reg = rs_r1;
-        LoadValueDirectFixed(rl_src, numerator_reg);
-      }
+      rl_src = LoadValue(rl_src, kCoreReg);
+      numerator_reg = rl_src.reg;
       OpRegCopy(rs_r0, numerator_reg);
     } else {
       // Only need this once.  Just put it into EAX.
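The new power-of-two branch above is branch-free signed division by +/-2^k. A portable sketch of the same arithmetic; __builtin_ctz is a GCC/Clang builtin standing in for LowestSetBit, and the signed-overflow edge at INT32_MAX that the hardware lea simply wraps is ignored:

  #include <cstdint>

  int32_t DivByPowerOfTwo(int32_t n, int32_t d) {  // d == +/-2^k, |d| >= 2
    int32_t abs_d = (d < 0) ? -d : d;
    int k = __builtin_ctz(abs_d);       // LowestSetBit
    int32_t biased = n + (abs_d - 1);   // kX86Lea32RM: src + (abs(imm) - 1)
    int32_t t = (n >= 0) ? n : biased;  // test + cmov on kCondPl keeps n when n >= 0
    int32_t q = t >> k;                 // kOpAsr; the bias makes it round toward zero
    return (d < 0) ? -q : q;            // kOpNeg when imm < 0
  }

  // DivByPowerOfTwo(-7, 4) == -1 and DivByPowerOfTwo(7, -4) == -1, matching
  // Java's truncating division.
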
@@ -1268,91 +1283,113 @@
   }
 }
 
-void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                            RegLocation rl_src2) {
+void X86Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                                RegLocation rl_src2) {
+  if (!cu_->target64) {
+    // Some x86 32-bit ops fall back to the generic implementation.
+    switch (opcode) {
+      case Instruction::NOT_LONG:
+      case Instruction::DIV_LONG:
+      case Instruction::DIV_LONG_2ADDR:
+      case Instruction::REM_LONG:
+      case Instruction::REM_LONG_2ADDR:
+        Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+        return;
+
+      default:
+        // Everything else we can handle.
+        break;
+    }
+  }
+
+  switch (opcode) {
+    case Instruction::NOT_LONG:
+      GenNotLong(rl_dest, rl_src2);
+      return;
+
+    case Instruction::ADD_LONG:
+    case Instruction::ADD_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::SUB_LONG:
+    case Instruction::SUB_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
+      return;
+
+    case Instruction::MUL_LONG:
+    case Instruction::MUL_LONG_2ADDR:
+      GenMulLong(opcode, rl_dest, rl_src1, rl_src2);
+      return;
+
+    case Instruction::DIV_LONG:
+    case Instruction::DIV_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true);
+      return;
+
+    case Instruction::REM_LONG:
+    case Instruction::REM_LONG_2ADDR:
+      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false);
+      return;
+
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::AND_LONG:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::OR_LONG:
+    case Instruction::OR_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::XOR_LONG:
+    case Instruction::XOR_LONG_2ADDR:
+      GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
+      return;
+
+    case Instruction::NEG_LONG:
+      GenNegLong(rl_dest, rl_src2);
+      return;
+
+    default:
+      LOG(FATAL) << "Invalid long arith op";
+      return;
+  }
+}
+
+bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64_t val) {
   // All memory accesses below reference dalvik regs.
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
 
-  if (cu_->target64) {
-    if (rl_src1.is_const) {
-      std::swap(rl_src1, rl_src2);
-    }
-    // Are we multiplying by a constant?
-    if (rl_src2.is_const) {
-      int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-      if (val == 0) {
-        RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-        OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
-        StoreValueWide(rl_dest, rl_result);
-        return;
-      } else if (val == 1) {
-        StoreValueWide(rl_dest, rl_src1);
-        return;
-      } else if (val == 2) {
-        GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
-        return;
-      } else if (IsPowerOfTwo(val)) {
-        int shift_amount = LowestSetBit(val);
-        if (!BadOverlap(rl_src1, rl_dest)) {
-          rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-          RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
-                                                    rl_src1, shift_amount);
-          StoreValueWide(rl_dest, rl_result);
-          return;
-        }
-      }
-    }
-    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+  if (val == 0) {
     RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-    if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
-        rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
-    } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
-               rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
-    } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
-               rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    if (cu_->target64) {
+      OpRegReg(kOpXor, rl_result.reg, rl_result.reg);
     } else {
-      OpRegCopy(rl_result.reg, rl_src1.reg);
-      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
-    }
-    StoreValueWide(rl_dest, rl_result);
-    return;
-  }
-
-  if (rl_src1.is_const) {
-    std::swap(rl_src1, rl_src2);
-  }
-  // Are we multiplying by a constant?
-  if (rl_src2.is_const) {
-    // Do special compare/branch against simple const operand
-    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
-    if (val == 0) {
-      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
       OpRegReg(kOpXor, rl_result.reg.GetLow(), rl_result.reg.GetLow());
       OpRegReg(kOpXor, rl_result.reg.GetHigh(), rl_result.reg.GetHigh());
-      StoreValueWide(rl_dest, rl_result);
-      return;
-    } else if (val == 1) {
-      StoreValueWide(rl_dest, rl_src1);
-      return;
-    } else if (val == 2) {
-      GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
-      return;
-    } else if (IsPowerOfTwo(val)) {
-      int shift_amount = LowestSetBit(val);
-      if (!BadOverlap(rl_src1, rl_dest)) {
-        rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-        RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest,
-                                                  rl_src1, shift_amount);
-        StoreValueWide(rl_dest, rl_result);
-        return;
-      }
     }
+    StoreValueWide(rl_dest, rl_result);
+    return true;
+  } else if (val == 1) {
+    StoreValueWide(rl_dest, rl_src1);
+    return true;
+  } else if (val == 2) {
+    GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src1, rl_src1);
+    return true;
+  } else if (IsPowerOfTwo(val)) {
+    int shift_amount = LowestSetBit(val);
+    if (!BadOverlap(rl_src1, rl_dest)) {
+      rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+      RegLocation rl_result = GenShiftImmOpLong(Instruction::SHL_LONG, rl_dest, rl_src1,
+                                                shift_amount);
+      StoreValueWide(rl_dest, rl_result);
+      return true;
+    }
+  }
 
-    // Okay, just bite the bullet and do it.
+  // Okay, on 32-bit just bite the bullet and do it; still better than the general case.
+  if (!cu_->target64) {
     int32_t val_lo = Low32Bits(val);
     int32_t val_hi = High32Bits(val);
     FlushAllRegs();
@@ -1393,10 +1430,48 @@
     RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
                              RegStorage::MakeRegPair(rs_r0, rs_r2), INVALID_SREG, INVALID_SREG};
     StoreValueWide(rl_dest, rl_result);
+    return true;
+  }
+  return false;
+}
+
+void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2) {
+  if (rl_src1.is_const) {
+    std::swap(rl_src1, rl_src2);
+  }
+
+  if (rl_src2.is_const) {
+    if (GenMulLongConst(rl_dest, rl_src1, mir_graph_->ConstantValueWide(rl_src2))) {
+      return;
+    }
+  }
+
+  // All memory accesses below reference dalvik regs.
+  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+
+  if (cu_->target64) {
+    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+    if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
+        rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+    } else if (rl_result.reg.GetReg() != rl_src1.reg.GetReg() &&
+               rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+    } else if (rl_result.reg.GetReg() == rl_src1.reg.GetReg() &&
+               rl_result.reg.GetReg() != rl_src2.reg.GetReg()) {
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    } else {
+      OpRegCopy(rl_result.reg, rl_src1.reg);
+      NewLIR2(kX86Imul64RR, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+    }
+    StoreValueWide(rl_dest, rl_result);
     return;
   }
 
-  // Nope.  Do it the hard way
+  // Not multiplying by a constant. Do it the hard way.
   // Check for V*V.  We can eliminate a multiply in that case, as 2L*1H == 2H*1L.
   bool is_square = mir_graph_->SRegToVReg(rl_src1.s_reg_low) ==
                    mir_graph_->SRegToVReg(rl_src2.s_reg_low);
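The constant cases factored into GenMulLongConst are plain strength reduction (val == 0, 1, 2, 2^k become xor, move, add, shift). For the 32-bit general path, only the low 64 bits of the product are kept, which is why the V*V check can drop one multiply; a sketch of the identity, assuming nothing beyond <cstdint>:

  #include <cstdint>

  // (aH:aL) * (bH:bL) mod 2^64 == ((aH*bL + aL*bH) << 32) + aL*bL; the aH*bH
  // term only feeds bits >= 64. When both operands are the same vreg (V*V),
  // aH*bL == aL*bH, so one cross multiply suffices: 2L*1H == 2H*1L.
  uint64_t MulLong32(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi) {
    uint64_t lo = static_cast<uint64_t>(a_lo) * b_lo;
    uint32_t cross = a_hi * b_lo + a_lo * b_hi;  // only the low 32 bits matter
    return lo + (static_cast<uint64_t>(cross) << 32);
  }
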
@@ -1666,31 +1741,6 @@
   StoreFinalValueWide(rl_dest, rl_src1);
 }
 
-void X86Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, false);
-}
-
-void X86Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest,
-                           RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
-void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
-                            RegLocation rl_src1, RegLocation rl_src2) {
-  GenLongArith(rl_dest, rl_src1, rl_src2, opcode, true);
-}
-
 void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
   if (cu_->target64) {
     rl_src = LoadValueWide(rl_src, kCoreReg);
@@ -1704,13 +1754,191 @@
   }
 }
 
+void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
+                                  int64_t imm, bool is_div) {
+  if (imm == 0) {
+    GenDivZeroException();
+  } else if (imm == 1) {
+    if (is_div) {
+      // x / 1 == x.
+      StoreValueWide(rl_dest, rl_src);
+    } else {
+      // x % 1 == 0.
+      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+      LoadConstantWide(rl_result.reg, 0);
+      StoreValueWide(rl_dest, rl_result);
+    }
+  } else if (imm == -1) {  // handle 0x8000000000000000 / -1 special case.
+    if (is_div) {
+      rl_src = LoadValueWide(rl_src, kCoreReg);
+      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+      RegStorage rs_temp = AllocTempWide();
+
+      OpRegCopy(rl_result.reg, rl_src.reg);
+      LoadConstantWide(rs_temp, 0x8000000000000000);
+
+      // If x == MIN_LONG, return MIN_LONG.
+      OpRegReg(kOpCmp, rl_src.reg, rs_temp);
+      LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+
+      // For x != MIN_LONG, x / -1 == -x.
+      OpReg(kOpNeg, rl_result.reg);
+
+      minint_branch->target = NewLIR0(kPseudoTargetLabel);
+      FreeTemp(rs_temp);
+      StoreValueWide(rl_dest, rl_result);
+    } else {
+      // x % -1 == 0.
+      RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+      LoadConstantWide(rl_result.reg, 0);
+      StoreValueWide(rl_dest, rl_result);
+    }
+  } else if (is_div && IsPowerOfTwo(std::abs(imm))) {
+    // Division using shifting.
+    rl_src = LoadValueWide(rl_src, kCoreReg);
+    RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+    if (IsSameReg(rl_result.reg, rl_src.reg)) {
+      RegStorage rs_temp = AllocTypedTempWide(false, kCoreReg);
+      rl_result.reg.SetReg(rs_temp.GetReg());
+    }
+    LoadConstantWide(rl_result.reg, std::abs(imm) - 1);
+    OpRegReg(kOpAdd, rl_result.reg, rl_src.reg);
+    NewLIR2(kX86Test64RR, rl_src.reg.GetReg(), rl_src.reg.GetReg());
+    OpCondRegReg(kOpCmov, kCondPl, rl_result.reg, rl_src.reg);
+    int shift_amount = LowestSetBit(imm);
+    OpRegImm(kOpAsr, rl_result.reg, shift_amount);
+    if (imm < 0) {
+      OpReg(kOpNeg, rl_result.reg);
+    }
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    CHECK(imm <= -2 || imm >= 2);
+
+    FlushReg(rs_r0q);
+    Clobber(rs_r0q);
+    LockTemp(rs_r0q);
+    FlushReg(rs_r2q);
+    Clobber(rs_r2q);
+    LockTemp(rs_r2q);
+
+    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rs_r2q, INVALID_SREG, INVALID_SREG};
+
+    // Use H. S. Warren's Hacker's Delight Chapter 10 and
+    // T. Granlund, P. L. Montgomery's Division by invariant integers using multiplication.
+    int64_t magic;
+    int shift;
+    CalculateMagicAndShift(imm, magic, shift, true /* is_long */);
+
+    /*
+     * For imm >= 2,
+     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n > 0
+     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) +1, while n < 0.
+     * For imm <= -2,
+     *     int(n/imm) = ceil(n/imm) = floor(M*n/2^S) +1 , while n > 0
+     *     int(n/imm) = floor(n/imm) = floor(M*n/2^S), while n < 0.
+     * We implement this algorithm in the following way:
+     * 1. multiply magic number m and numerator n, get the high 64 bits of the result in RDX
+     * 2. if imm > 0 and magic < 0, add numerator to RDX
+     *    if imm < 0 and magic > 0, sub numerator from RDX
+     * 3. if S !=0, SAR S bits for RDX
+     * 4. add 1 to RDX if RDX < 0
+     * 5. Thus, RDX is the quotient
+     */
+
+    // Numerator into RAX.
+    RegStorage numerator_reg;
+    if (!is_div || (imm > 0 && magic < 0) || (imm < 0 && magic > 0)) {
+      // We will need the value later.
+      rl_src = LoadValueWide(rl_src, kCoreReg);
+      numerator_reg = rl_src.reg;
+      OpRegCopyWide(rs_r0q, numerator_reg);
+    } else {
+      // Only need this once.  Just put it into RAX.
+      LoadValueDirectWideFixed(rl_src, rs_r0q);
+    }
+
+    // RDX = magic.
+    LoadConstantWide(rs_r2q, magic);
+
+    // RDX:RAX = magic & dividend.
+    NewLIR1(kX86Imul64DaR, rs_r2q.GetReg());
+
+    if (imm > 0 && magic < 0) {
+      // Add numerator to RDX.
+      DCHECK(numerator_reg.Valid());
+      OpRegReg(kOpAdd, rs_r2q, numerator_reg);
+    } else if (imm < 0 && magic > 0) {
+      DCHECK(numerator_reg.Valid());
+      OpRegReg(kOpSub, rs_r2q, numerator_reg);
+    }
+
+    // Do we need the shift?
+    if (shift != 0) {
+      // Shift RDX by 'shift' bits.
+      OpRegImm(kOpAsr, rs_r2q, shift);
+    }
+
+    // Move RDX to RAX.
+    OpRegCopyWide(rs_r0q, rs_r2q);
+
+    // Move sign bit to bit 0, zeroing the rest.
+    OpRegImm(kOpLsr, rs_r2q, 63);
+
+    // RDX = RDX + RAX.
+    OpRegReg(kOpAdd, rs_r2q, rs_r0q);
+
+    // Quotient is in RDX.
+    if (!is_div) {
+      // We need to compute the remainder.
+      // Remainder is numerator - (quotient * imm).
+      DCHECK(numerator_reg.Valid());
+      OpRegCopyWide(rs_r0q, numerator_reg);
+
+      // Imul doesn't support 64-bit imms.
+      if (imm > std::numeric_limits<int32_t>::max() ||
+          imm < std::numeric_limits<int32_t>::min()) {
+        RegStorage rs_temp = AllocTempWide();
+        LoadConstantWide(rs_temp, imm);
+
+        // RDX = quotient * imm.
+        NewLIR2(kX86Imul64RR, rs_r2q.GetReg(), rs_temp.GetReg());
+
+        FreeTemp(rs_temp);
+      } else {
+        // RDX = quotient * imm.
+        int short_imm = static_cast<int>(imm);
+        NewLIR3(kX86Imul64RRI, rs_r2q.GetReg(), rs_r2q.GetReg(), short_imm);
+      }
+
+      // RAX -= RDX, leaving the remainder in RAX.
+      OpRegReg(kOpSub, rs_r0q, rs_r2q);
+
+      // Store result.
+      OpRegCopyWide(rl_result.reg, rs_r0q);
+    } else {
+      // Store result.
+      OpRegCopyWide(rl_result.reg, rs_r2q);
+    }
+    StoreValueWide(rl_dest, rl_result);
+    FreeTemp(rs_r0q);
+    FreeTemp(rs_r2q);
+  }
+}
+
 void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
-                           RegLocation rl_src2, bool is_div) {
+                               RegLocation rl_src2, bool is_div) {
   if (!cu_->target64) {
     LOG(FATAL) << "Unexpected use GenDivRemLong()";
     return;
   }
 
+  if (rl_src2.is_const) {
+    DCHECK(rl_src2.wide);
+    int64_t imm = mir_graph_->ConstantValueWide(rl_src2);
+    GenDivRemLongLit(rl_dest, rl_src1, imm, is_div);
+    return;
+  }
+
   // We have to use fixed registers, so flush all the temps.
   FlushAllRegs();
   LockCallTemps();  // Prepare for explicit register usage.
@@ -1734,7 +1962,7 @@
   // RHS is -1.
   LoadConstantWide(rs_r6q, 0x8000000000000000);
   NewLIR2(kX86Cmp64RR, rs_r0q.GetReg(), rs_r6q.GetReg());
-  LIR * minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
+  LIR *minint_branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
 
   // In 0x8000000000000000/-1 case.
   if (!is_div) {
@@ -2021,7 +2249,7 @@
   } else if (shift_amount == 1 &&
             (opcode ==  Instruction::SHL_LONG || opcode == Instruction::SHL_LONG_2ADDR)) {
     // Need to handle this here to avoid calling StoreValueWide twice.
-    GenAddLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
+    GenArithOpLong(Instruction::ADD_LONG, rl_dest, rl_src, rl_src);
     return;
   }
   if (BadOverlap(rl_src, rl_dest)) {
@@ -2053,7 +2281,7 @@
       if (rl_src2.is_const) {
         isConstSuccess = GenLongLongImm(rl_dest, rl_src1, rl_src2, opcode);
       } else {
-        GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
+        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
         isConstSuccess = true;
       }
       break;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 1bda738..a72d94a 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2868,4 +2868,24 @@
   return true;
 }
 
+bool X86Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
+  RegLocation rl_dest = InlineTarget(info);
+
+  // Early exit if the result is unused.
+  if (rl_dest.orig_sreg < 0) {
+    return true;
+  }
+
+  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+
+  if (cu_->target64) {
+    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<8>());
+  } else {
+    OpRegThreadMem(kOpMov, rl_result.reg, Thread::PeerOffset<4>());
+  }
+
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
 }  // namespace art
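Conceptually, the new intrinsic replaces a runtime call with a single thread-local load; roughly (segment registers illustrative of the Linux/Android x86 conventions):

  // 32-bit: mov <result>, fs:[Thread::PeerOffset<4>()]
  // 64-bit: mov <result>, gs:[Thread::PeerOffset<8>()]
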
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index f4bcb1d..7bfbb6f 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -24,6 +24,7 @@
 #include "base/mutex.h"
 #include "mem_map.h"
 #include "utils.h"
+#include "utils/debug_stack.h"
 
 namespace art {
 
@@ -34,6 +35,9 @@
 class ScopedArenaAllocator;
 class MemStats;
 
+template <typename T>
+class ArenaAllocatorAdapter;
+
 static constexpr bool kArenaAllocatorCountAllocations = false;
 
 // Type of allocation for memory tuning.
@@ -147,11 +151,14 @@
   DISALLOW_COPY_AND_ASSIGN(ArenaPool);
 };
 
-class ArenaAllocator : private ArenaAllocatorStats {
+class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats {
  public:
   explicit ArenaAllocator(ArenaPool* pool);
   ~ArenaAllocator();
 
+  // Get an adapter for use in STL containers. See arena_containers.h.
+  ArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
+
   // Returns zeroed memory.
   void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
     if (UNLIKELY(running_on_valgrind_)) {
@@ -190,6 +197,9 @@
   Arena* arena_head_;
   bool running_on_valgrind_;
 
+  template <typename U>
+  friend class ArenaAllocatorAdapter;
+
   DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
 };  // ArenaAllocator
 
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
new file mode 100644
index 0000000..c48b0c8
--- /dev/null
+++ b/compiler/utils/arena_containers.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+#define ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+
+#include <deque>
+#include <queue>
+#include <set>
+#include <vector>
+
+#include "utils/arena_allocator.h"
+#include "safe_map.h"
+
+namespace art {
+
+// Adapter for use of ArenaAllocator in STL containers.
+// Use ArenaAllocator::Adapter() to create an adapter to pass to container constructors.
+// For example,
+//   struct Foo {
+//     explicit Foo(ArenaAllocator* allocator)
+//         : foo_vector(allocator->Adapter(kArenaAllocMisc)),
+//           foo_map(std::less<int>(), allocator->Adapter()) {
+//     }
+//     ArenaVector<int> foo_vector;
+//     ArenaSafeMap<int, int> foo_map;
+//   };
+template <typename T>
+class ArenaAllocatorAdapter;
+
+template <typename T>
+using ArenaDeque = std::deque<T, ArenaAllocatorAdapter<T>>;
+
+template <typename T>
+using ArenaQueue = std::queue<T, ArenaDeque<T>>;
+
+template <typename T>
+using ArenaVector = std::vector<T, ArenaAllocatorAdapter<T>>;
+
+template <typename T, typename Comparator = std::less<T>>
+using ArenaSet = std::set<T, Comparator, ArenaAllocatorAdapter<T>>;
+
+template <typename K, typename V, typename Comparator = std::less<K>>
+using ArenaSafeMap =
+    SafeMap<K, V, Comparator, ArenaAllocatorAdapter<std::pair<const K, V>>>;
+
+// Implementation details below.
+
+template <bool kCount>
+class ArenaAllocatorAdapterKindImpl;
+
+template <>
+class ArenaAllocatorAdapterKindImpl<false> {
+ public:
+  // Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
+  explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+  ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+  ArenaAllocKind Kind() { return kArenaAllocSTL; }
+};
+
+template <bool kCount>
+class ArenaAllocatorAdapterKindImpl {
+ public:
+  explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) : kind_(kind) { }
+  ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
+  ArenaAllocKind Kind() { return kind_; }
+
+ private:
+  ArenaAllocKind kind_;
+};
+
+typedef ArenaAllocatorAdapterKindImpl<kArenaAllocatorCountAllocations> ArenaAllocatorAdapterKind;
+
+template <>
+class ArenaAllocatorAdapter<void>
+    : private DebugStackReference, private ArenaAllocatorAdapterKind {
+ public:
+  typedef void value_type;
+  typedef void* pointer;
+  typedef const void* const_pointer;
+
+  template <typename U>
+  struct rebind {
+    typedef ArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator,
+                                 ArenaAllocKind kind = kArenaAllocSTL)
+      : DebugStackReference(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_allocator_(arena_allocator) {
+  }
+  template <typename U>
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_allocator_(other.arena_allocator_) {
+  }
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
+  ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+  ~ArenaAllocatorAdapter() = default;
+
+ private:
+  ArenaAllocator* arena_allocator_;
+
+  template <typename U>
+  friend class ArenaAllocatorAdapter;
+};
+
+template <typename T>
+class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocatorAdapterKind {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef T& reference;
+  typedef const T* const_pointer;
+  typedef const T& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  template <typename U>
+  struct rebind {
+    typedef ArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind)
+      : DebugStackReference(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_allocator_(arena_allocator) {
+  }
+  template <typename U>
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_allocator_(other.arena_allocator_) {
+  }
+  ArenaAllocatorAdapter(const ArenaAllocatorAdapter& other) = default;
+  ArenaAllocatorAdapter& operator=(const ArenaAllocatorAdapter& other) = default;
+  ~ArenaAllocatorAdapter() = default;
+
+  size_type max_size() const {
+    return static_cast<size_type>(-1) / sizeof(T);
+  }
+
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
+
+  pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+    DCHECK_LE(n, max_size());
+    return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
+                                                        ArenaAllocatorAdapterKind::Kind()));
+  }
+  void deallocate(pointer p, size_type n) {
+  }
+
+  void construct(pointer p, const_reference val) {
+    new (static_cast<void*>(p)) value_type(val);
+  }
+  void destroy(pointer p) {
+    p->~value_type();
+  }
+
+ private:
+  ArenaAllocator* arena_allocator_;
+
+  template <typename U>
+  friend class ArenaAllocatorAdapter;
+
+  template <typename U>
+  friend bool operator==(const ArenaAllocatorAdapter<U>& lhs,
+                         const ArenaAllocatorAdapter<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const ArenaAllocatorAdapter<T>& lhs,
+                       const ArenaAllocatorAdapter<T>& rhs) {
+  return lhs.arena_allocator_ == rhs.arena_allocator_;
+}
+
+template <typename T>
+inline bool operator!=(const ArenaAllocatorAdapter<T>& lhs,
+                       const ArenaAllocatorAdapter<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+inline ArenaAllocatorAdapter<void> ArenaAllocator::Adapter(ArenaAllocKind kind) {
+  return ArenaAllocatorAdapter<void>(this, kind);
+}
+
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
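
The adapter above satisfies the standard allocator requirements, so the
ArenaVector/ArenaSet/ArenaSafeMap aliases behave like their std counterparts;
a minimal usage sketch, assuming only what this header and SafeMap declare:

    #include <functional>
    #include "utils/arena_containers.h"

    void BuildWorklist(art::ArenaAllocator* arena) {
      // All backing memory comes from the arena; deallocate() is a no-op.
      art::ArenaVector<int> worklist(arena->Adapter(art::kArenaAllocMisc));
      worklist.push_back(42);
      art::ArenaSafeMap<int, int> index(std::less<int>(), arena->Adapter());
      index.Put(1, 2);  // SafeMap's insert-new-key helper
    }
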
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 9f33f2d..62ea330 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -120,8 +120,8 @@
     return arena_stack_->Alloc(bytes, kind);
   }
 
-  // ScopedArenaAllocatorAdapter is incomplete here, we need to define this later.
-  ScopedArenaAllocatorAdapter<void> Adapter();
+  // Get an adapter for use in STL containers. See scoped_arena_containers.h.
+  ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
 
   // Allow a delete-expression to destroy but not deallocate allocators created by Create().
   static void operator delete(void* ptr) { UNUSED(ptr); }
@@ -138,125 +138,6 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedArenaAllocator);
 };
 
-template <>
-class ScopedArenaAllocatorAdapter<void>
-    : private DebugStackReference, private DebugStackIndirectTopRef {
- public:
-  typedef void value_type;
-  typedef void* pointer;
-  typedef const void* const_pointer;
-
-  template <typename U>
-  struct rebind {
-    typedef ScopedArenaAllocatorAdapter<U> other;
-  };
-
-  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
-      : DebugStackReference(arena_allocator),
-        DebugStackIndirectTopRef(arena_allocator),
-        arena_stack_(arena_allocator->arena_stack_) {
-  }
-  template <typename U>
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
-      : DebugStackReference(other),
-        DebugStackIndirectTopRef(other),
-        arena_stack_(other.arena_stack_) {
-  }
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
-  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
-  ~ScopedArenaAllocatorAdapter() = default;
-
- private:
-  ArenaStack* arena_stack_;
-
-  template <typename U>
-  friend class ScopedArenaAllocatorAdapter;
-};
-
-// Adapter for use of ScopedArenaAllocator in STL containers.
-template <typename T>
-class ScopedArenaAllocatorAdapter : private DebugStackReference, private DebugStackIndirectTopRef {
- public:
-  typedef T value_type;
-  typedef T* pointer;
-  typedef T& reference;
-  typedef const T* const_pointer;
-  typedef const T& const_reference;
-  typedef size_t size_type;
-  typedef ptrdiff_t difference_type;
-
-  template <typename U>
-  struct rebind {
-    typedef ScopedArenaAllocatorAdapter<U> other;
-  };
-
-  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
-      : DebugStackReference(arena_allocator),
-        DebugStackIndirectTopRef(arena_allocator),
-        arena_stack_(arena_allocator->arena_stack_) {
-  }
-  template <typename U>
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
-      : DebugStackReference(other),
-        DebugStackIndirectTopRef(other),
-        arena_stack_(other.arena_stack_) {
-  }
-  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
-  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
-  ~ScopedArenaAllocatorAdapter() = default;
-
-  size_type max_size() const {
-    return static_cast<size_type>(-1) / sizeof(T);
-  }
-
-  pointer address(reference x) const { return &x; }
-  const_pointer address(const_reference x) const { return &x; }
-
-  pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
-    DCHECK_LE(n, max_size());
-    DebugStackIndirectTopRef::CheckTop();
-    return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T), kArenaAllocSTL));
-  }
-  void deallocate(pointer p, size_type n) {
-    DebugStackIndirectTopRef::CheckTop();
-  }
-
-  void construct(pointer p, const_reference val) {
-    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
-    new (static_cast<void*>(p)) value_type(val);
-  }
-  void destroy(pointer p) {
-    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
-    p->~value_type();
-  }
-
- private:
-  ArenaStack* arena_stack_;
-
-  template <typename U>
-  friend class ScopedArenaAllocatorAdapter;
-
-  template <typename U>
-  friend bool operator==(const ScopedArenaAllocatorAdapter<U>& lhs,
-                         const ScopedArenaAllocatorAdapter<U>& rhs);
-};
-
-template <typename T>
-inline bool operator==(const ScopedArenaAllocatorAdapter<T>& lhs,
-                       const ScopedArenaAllocatorAdapter<T>& rhs) {
-  return lhs.arena_stack_ == rhs.arena_stack_;
-}
-
-template <typename T>
-inline bool operator!=(const ScopedArenaAllocatorAdapter<T>& lhs,
-                       const ScopedArenaAllocatorAdapter<T>& rhs) {
-  return !(lhs == rhs);
-}
-
-inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter() {
-  return ScopedArenaAllocatorAdapter<void>(this);
-}
-
 }  // namespace art
 
 #endif  // ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 6728565..0de7403 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -22,11 +22,23 @@
 #include <set>
 #include <vector>
 
+#include "utils/arena_containers.h"  // For ArenaAllocatorAdapterKind.
 #include "utils/scoped_arena_allocator.h"
 #include "safe_map.h"
 
 namespace art {
 
+// Adapter for use of ScopedArenaAllocator in STL containers.
+// Use ScopedArenaAllocator::Adapter() to create an adapter to pass to container constructors.
+// For example,
+//   void foo(ScopedArenaAllocator* allocator) {
+//     ScopedArenaVector<int> foo_vector(allocator->Adapter(kArenaAllocMisc));
+//     ScopedArenaSafeMap<int, int> foo_map(std::less<int>(), allocator->Adapter());
+//     // Use foo_vector and foo_map...
+//   }
+template <typename T>
+class ScopedArenaAllocatorAdapter;
+
 template <typename T>
 using ScopedArenaDeque = std::deque<T, ScopedArenaAllocatorAdapter<T>>;
 
@@ -43,6 +55,136 @@
 using ScopedArenaSafeMap =
     SafeMap<K, V, Comparator, ScopedArenaAllocatorAdapter<std::pair<const K, V>>>;
 
+// Implementation details below.
+
+template <>
+class ScopedArenaAllocatorAdapter<void>
+    : private DebugStackReference, private DebugStackIndirectTopRef,
+      private ArenaAllocatorAdapterKind {
+ public:
+  typedef void value_type;
+  typedef void* pointer;
+  typedef const void* const_pointer;
+
+  template <typename U>
+  struct rebind {
+    typedef ScopedArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+                                       ArenaAllocKind kind = kArenaAllocSTL)
+      : DebugStackReference(arena_allocator),
+        DebugStackIndirectTopRef(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_stack_(arena_allocator->arena_stack_) {
+  }
+  template <typename U>
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        DebugStackIndirectTopRef(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_stack_(other.arena_stack_) {
+  }
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+  ~ScopedArenaAllocatorAdapter() = default;
+
+ private:
+  ArenaStack* arena_stack_;
+
+  template <typename U>
+  friend class ScopedArenaAllocatorAdapter;
+};
+
+template <typename T>
+class ScopedArenaAllocatorAdapter
+    : private DebugStackReference, private DebugStackIndirectTopRef,
+      private ArenaAllocatorAdapterKind {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef T& reference;
+  typedef const T* const_pointer;
+  typedef const T& const_reference;
+  typedef size_t size_type;
+  typedef ptrdiff_t difference_type;
+
+  template <typename U>
+  struct rebind {
+    typedef ScopedArenaAllocatorAdapter<U> other;
+  };
+
+  explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator,
+                                       ArenaAllocKind kind = kArenaAllocSTL)
+      : DebugStackReference(arena_allocator),
+        DebugStackIndirectTopRef(arena_allocator),
+        ArenaAllocatorAdapterKind(kind),
+        arena_stack_(arena_allocator->arena_stack_) {
+  }
+  template <typename U>
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+      : DebugStackReference(other),
+        DebugStackIndirectTopRef(other),
+        ArenaAllocatorAdapterKind(other),
+        arena_stack_(other.arena_stack_) {
+  }
+  ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+  ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+  ~ScopedArenaAllocatorAdapter() = default;
+
+  size_type max_size() const {
+    return static_cast<size_type>(-1) / sizeof(T);
+  }
+
+  pointer address(reference x) const { return &x; }
+  const_pointer address(const_reference x) const { return &x; }
+
+  pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+    DCHECK_LE(n, max_size());
+    DebugStackIndirectTopRef::CheckTop();
+    return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
+                                                    ArenaAllocatorAdapterKind::Kind()));
+  }
+  void deallocate(pointer p, size_type n) {
+    DebugStackIndirectTopRef::CheckTop();
+  }
+
+  void construct(pointer p, const_reference val) {
+    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
+    new (static_cast<void*>(p)) value_type(val);
+  }
+  void destroy(pointer p) {
+    // Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
+    p->~value_type();
+  }
+
+ private:
+  ArenaStack* arena_stack_;
+
+  template <typename U>
+  friend class ScopedArenaAllocatorAdapter;
+
+  template <typename U>
+  friend bool operator==(const ScopedArenaAllocatorAdapter<U>& lhs,
+                         const ScopedArenaAllocatorAdapter<U>& rhs);
+};
+
+template <typename T>
+inline bool operator==(const ScopedArenaAllocatorAdapter<T>& lhs,
+                       const ScopedArenaAllocatorAdapter<T>& rhs) {
+  return lhs.arena_stack_ == rhs.arena_stack_;
+}
+
+template <typename T>
+inline bool operator!=(const ScopedArenaAllocatorAdapter<T>& lhs,
+                       const ScopedArenaAllocatorAdapter<T>& rhs) {
+  return !(lhs == rhs);
+}
+
+inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter(ArenaAllocKind kind) {
+  return ScopedArenaAllocatorAdapter<void>(this, kind);
+}
+
 }  // namespace art
 
 #endif  // ART_COMPILER_UTILS_SCOPED_ARENA_CONTAINERS_H_
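
Because both Adapter() overloads now default their kind to kArenaAllocSTL,
existing call sites keep compiling while new ones can tag their allocations;
a short sketch of both forms:

    #include "utils/scoped_arena_containers.h"

    void Analyze(art::ScopedArenaAllocator* allocator) {
      art::ScopedArenaVector<int> untagged(allocator->Adapter());  // kArenaAllocSTL
      art::ScopedArenaVector<int> tagged(allocator->Adapter(art::kArenaAllocMisc));
      untagged.push_back(1);
      tagged.push_back(2);
    }
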
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a78d3f7..0437f30 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -430,8 +430,7 @@
       t2.NewTiming("Patching ELF");
       std::string error_msg;
       if (!PatchOatCode(driver.get(), oat_file, oat_location, &error_msg)) {
-        LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
-        LOG(ERROR) << "Error was: " << error_msg;
+        LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath() << ": " << error_msg;
         return nullptr;
       }
     }
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 594c65f..6179b5e 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -837,6 +837,7 @@
     }
   }
 
+  bool reserved = false;
   for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
     Elf32_Phdr& program_header = GetProgramHeader(i);
 
@@ -853,10 +854,8 @@
 
     // Found something to load.
 
-    // If p_vaddr is zero, it must be the first loadable segment,
-    // since they must be in order.  Since it is zero, there isn't a
-    // specific address requested, so first request a contiguous chunk
-    // of required size for all segments, but with no
+    // Before loading the actual segments, reserve a contiguous chunk
+    // of the required size and address for all segments, but with no
     // permissions. We'll then carve that up with the proper
     // permissions as we load the actual segments. If p_vaddr is
     // non-zero, the segments require the specific address specified,
@@ -870,18 +869,24 @@
       return false;
     }
     size_t file_length = static_cast<size_t>(temp_file_length);
-    if (program_header.p_vaddr == 0) {
+    if (!reserved) {
+      byte* reserve_base = ((program_header.p_vaddr != 0) ?
+                            reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
       std::string reservation_name("ElfFile reservation for ");
       reservation_name += file_->GetPath();
       std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
-                                                     nullptr, GetLoadedSize(), PROT_NONE, false,
-                                                     error_msg));
+                                                           reserve_base,
+                                                           GetLoadedSize(), PROT_NONE, false,
+                                                           error_msg));
       if (reserve.get() == nullptr) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
                                   reservation_name.c_str(), error_msg->c_str());
         return false;
       }
-      base_address_ = reserve->Begin();
+      reserved = true;
+      if (reserve_base == nullptr) {
+        base_address_ = reserve->Begin();
+      }
       segments_.push_back(reserve.release());
     }
     // empty segment, nothing to map
@@ -1335,7 +1340,8 @@
   const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
   Elf32_Shdr* text_sec = all.FindSectionByName(".text");
   if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
-      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
+      symtab_sec == nullptr) {
     return;
   }
   // We need to add in a strtab and symtab to the image.
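
The load path above now makes one PROT_NONE reservation (at p_vaddr when the
ELF demands a fixed address) and carves the real segments out of it. A
stripped-down sketch of that reserve-then-carve mmap pattern, with hypothetical
sizes and no error handling:

    #include <sys/mman.h>
    #include <cstddef>

    void* ReserveThenCarve(void* wanted_base, size_t total_size,
                           size_t seg_offset, size_t seg_size) {
      // One reservation covering every PT_LOAD segment, no permissions yet.
      void* base = mmap(wanted_base, total_size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      // Carve one segment over the reservation with its real permissions.
      mmap(static_cast<char*>(base) + seg_offset, seg_size,
           PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      return base;
    }
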
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index ae60b97..fc6d2ef 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -101,10 +101,7 @@
             << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
   arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
 
-  if (kIsTargetBuild) {
-    arg_vector.push_back("--image-classes-zip=/system/framework/framework.jar");
-    arg_vector.push_back("--image-classes=preloaded-classes");
-  } else {
+  if (!kIsTargetBuild) {
     arg_vector.push_back("--host");
   }
 
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index d5b90f2..43b9912 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -677,13 +677,15 @@
     return soa.AddLocalReference<jclass>(c->GetSuperClass());
   }
 
+  // Note: returns JNI_TRUE iff an instance of java_class1 can be
+  // safely cast to java_class2, not the other way around.
   static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
     CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
     CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
     ScopedObjectAccess soa(env);
     mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
     mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
-    return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE;
+    return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
   }
 
   static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
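
The corrected argument order matches the JNI contract: IsAssignableFrom(c1, c2)
answers whether an instance of c1 can be used where c2 is expected. A minimal
native check built on the standard call:

    #include <jni.h>

    // True iff an object of class `from` may be assigned to a field or
    // parameter of class `to` (e.g. from = String, to = Object).
    bool CanAssign(JNIEnv* env, jclass from, jclass to) {
      return env->IsAssignableFrom(from, to) == JNI_TRUE;
    }
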
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 7c7e60c..da3080f 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -950,8 +950,28 @@
   jclass string_class = env_->FindClass("java/lang/String");
   ASSERT_NE(string_class, nullptr);
 
-  ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
-  ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+  // A superclass is assignable from an instance of its
+  // subclass but not vice versa.
+  ASSERT_TRUE(env_->IsAssignableFrom(string_class, object_class));
+  ASSERT_FALSE(env_->IsAssignableFrom(object_class, string_class));
+
+  jclass charsequence_interface = env_->FindClass("java/lang/CharSequence");
+  ASSERT_NE(charsequence_interface, nullptr);
+
+  // An interface is assignable from an instance of an implementing
+  // class but not vice versa.
+  ASSERT_TRUE(env_->IsAssignableFrom(string_class, charsequence_interface));
+  ASSERT_FALSE(env_->IsAssignableFrom(charsequence_interface, string_class));
+
+  // Check that arrays are covariant.
+  jclass string_array_class = env_->FindClass("[Ljava/lang/String;");
+  ASSERT_NE(string_array_class, nullptr);
+  jclass object_array_class = env_->FindClass("[Ljava/lang/Object;");
+  ASSERT_NE(object_array_class, nullptr);
+  ASSERT_TRUE(env_->IsAssignableFrom(string_array_class, object_array_class));
+  ASSERT_FALSE(env_->IsAssignableFrom(object_array_class, string_array_class));
+
+  // Primitive types are tested in 004-JniTest.
 
   // Null as either class should fail.
   CheckJniAbortCatcher jni_abort_catcher;
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1074253..6c7ee5b 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -130,8 +130,67 @@
 uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
 #endif
 
+// Return true if the address range is contained in a single /proc/self/maps entry.
+static bool CheckOverlapping(uintptr_t begin,
+                             uintptr_t end,
+                             std::string* error_msg) {
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+  if (!map->Build()) {
+    *error_msg = StringPrintf("Failed to build process map");
+    return false;
+  }
+  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+    if ((begin >= it->start && begin < it->end)  // start of new within old
+        && (end > it->start && end <= it->end)) {  // end of new within old
+      return true;
+    }
+  }
+  std::string maps;
+  ReadFileToString("/proc/self/maps", &maps);
+  *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
+                            "any existing map:\n%s\n",
+                            begin, end, maps.c_str());
+  return false;
+}
+
+// Return true if the address range does not conflict with any /proc/self/maps entry.
+static bool CheckNonOverlapping(uintptr_t begin,
+                                uintptr_t end,
+                                std::string* error_msg) {
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+  if (!map->Build()) {
+    *error_msg = StringPrintf("Failed to build process map");
+    return false;
+  }
+  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+    if ((begin >= it->start && begin < it->end)      // start of new within old
+        || (end > it->start && end < it->end)        // end of new within old
+        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
+      std::ostringstream map_info;
+      map_info << std::make_pair(it, map->end());
+      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
+                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
+                                begin, end,
+                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
+                                it->name.c_str(),
+                                map_info.str().c_str());
+      return false;
+    }
+  }
+  return true;
+}
+
+// CheckMapRequest validates a non-MAP_FAILED mmap result against the
+// expected value, calling munmap and reporting the reason in error_msg
+// if validation fails.
+//
+// If expected_ptr is nullptr, nothing is checked beyond the fact that
+// the actual_ptr is not MAP_FAILED. However, if expected_ptr is
+// non-null, we check that actual_ptr == expected_ptr and, if not,
+// report in error_msg the conflicting mapping if one is found, or a
+// generic error otherwise.
 static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
-                            std::ostringstream* error_msg) {
+                            std::string* error_msg) {
   // Handled first by caller for more specific error messages.
   CHECK(actual_ptr != MAP_FAILED);
 
@@ -139,6 +198,10 @@
     return true;
   }
 
+  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
+  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+  uintptr_t limit = expected + byte_count;
+
   if (expected_ptr == actual_ptr) {
     return true;
   }
@@ -149,40 +212,19 @@
     PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
   }
 
-  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
-  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
-  uintptr_t limit = expected + byte_count;
-
-  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
-  if (!map->Build()) {
-    *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
-                               "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
-
+  if (!CheckNonOverlapping(expected, limit, error_msg)) {
     return false;
   }
-  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
-    if ((expected >= it->start && expected < it->end)  // start of new within old
-        || (limit > it->start && limit < it->end)      // end of new within old
-        || (expected <= it->start && limit > it->end)) {  // start/end of new includes all of old
-      *error_msg
-          << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
-                          "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
-                          expected, limit,
-                          static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
-                          it->name.c_str())
-          << std::make_pair(it, map->end());
-      return false;
-    }
-  }
-  *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
-                             "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+
+  *error_msg = StringPrintf("Failed to mmap at expected address, mapped at "
+                            "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
   return false;
 }
 
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
                              bool low_4gb, std::string* error_msg) {
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
+    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
   }
   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
 
@@ -222,11 +264,11 @@
   // 4GB.
   if (low_4gb && (
       // Start out of bounds.
-      (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+      (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
       // End out of bounds. For simplicity, this will fail for the last page of memory.
-      (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+      (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
     *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
-                              expected, expected + page_aligned_byte_count);
+                              expected_ptr, expected_ptr + page_aligned_byte_count);
     return nullptr;
   }
 #endif
@@ -238,7 +280,7 @@
 #if USE_ART_LOW_4G_ALLOCATOR
   // MAP_32BIT only available on x86_64.
   void* actual = MAP_FAILED;
-  if (low_4gb && expected == nullptr) {
+  if (low_4gb && expected_ptr == nullptr) {
     bool first_run = true;
 
     for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -294,18 +336,18 @@
       saved_errno = ENOMEM;
     }
   } else {
-    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+    actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
     saved_errno = errno;
   }
 
 #else
 #if defined(__LP64__)
-  if (low_4gb && expected == nullptr) {
+  if (low_4gb && expected_ptr == nullptr) {
     flags |= MAP_32BIT;
   }
 #endif
 
-  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+  void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
   saved_errno = errno;
 #endif
 
@@ -314,44 +356,51 @@
     ReadFileToString("/proc/self/maps", &maps);
 
     *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
-                              expected, page_aligned_byte_count, prot, flags, fd.get(),
+                              expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
                               strerror(saved_errno), maps.c_str());
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
-  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
-    *error_msg = check_map_request_error_msg.str();
+  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
   return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
-                    page_aligned_byte_count, prot);
+                    page_aligned_byte_count, prot, false);
 }
 
-MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
                                  off_t start, bool reuse, const char* filename,
                                  std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
+  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+  uintptr_t limit = expected + byte_count;
   if (reuse) {
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
-    CHECK(expected != nullptr);
+    CHECK(expected_ptr != nullptr);
+    if (!CheckOverlapping(expected, limit, error_msg)) {
+      return nullptr;
+    }
     flags |= MAP_FIXED;
   } else {
     CHECK_EQ(0, flags & MAP_FIXED);
+    if (expected_ptr != nullptr && !CheckNonOverlapping(expected, limit, error_msg)) {
+      return nullptr;
+    }
   }
 
   if (byte_count == 0) {
-    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
+    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
   }
   // Adjust 'offset' to be page-aligned as required by mmap.
   int page_offset = start % kPageSize;
   off_t page_aligned_offset = start - page_offset;
   // Adjust 'byte_count' to be page-aligned as we will map this anyway.
   size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
-  // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
-  // necessarily to virtual memory. mmap will page align 'expected' for us.
-  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
+  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file
+  // but not necessarily to virtual memory. mmap will page align 'expected_ptr' for us.
+  byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
 
   byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                               page_aligned_byte_count,
@@ -373,21 +422,22 @@
     return nullptr;
   }
   std::ostringstream check_map_request_error_msg;
-  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
-    *error_msg = check_map_request_error_msg.str();
+  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
     return nullptr;
   }
   return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
-                    prot);
+                    prot, reuse);
 }
 
 MemMap::~MemMap() {
   if (base_begin_ == nullptr && base_size_ == 0) {
     return;
   }
-  int result = munmap(base_begin_, base_size_);
-  if (result == -1) {
-    PLOG(FATAL) << "munmap failed";
+  if (!reuse_) {
+    int result = munmap(base_begin_, base_size_);
+    if (result == -1) {
+      PLOG(FATAL) << "munmap failed";
+    }
   }
 
   // Remove it from maps_.
@@ -405,9 +455,9 @@
 }
 
 MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
-               size_t base_size, int prot)
+               size_t base_size, int prot, bool reuse)
     : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
-      prot_(prot) {
+      prot_(prot), reuse_(reuse) {
   if (size_ == 0) {
     CHECK(begin_ == nullptr);
     CHECK(base_begin_ == nullptr);
@@ -437,7 +487,7 @@
   byte* new_base_end = new_end;
   DCHECK_LE(new_base_end, old_base_end);
   if (new_base_end == old_base_end) {
-    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
+    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
   }
   size_ = new_end - reinterpret_cast<byte*>(begin_);
   base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
@@ -489,7 +539,7 @@
                               maps.c_str());
     return nullptr;
   }
-  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
+  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
 void MemMap::MadviseDontNeedAndZero() {
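
The three conditions in CheckNonOverlapping above expand the usual half-open
interval overlap test; for comparison, the canonical single predicate is shown
below (note it also catches the boundary case end == it->end with
begin < it->start, which the expanded form lets through):

    #include <cstdint>

    // True iff [b1, e1) and [b2, e2) share at least one address.
    bool RangesOverlap(uintptr_t b1, uintptr_t e1, uintptr_t b2, uintptr_t e2) {
      return b1 < e2 && b2 < e1;
    }
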
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index defa6a5..872c63b 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -73,7 +73,9 @@
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative. This version allows
-  // requesting a specific address for the base of the mapping.
+  // requesting a specific address for the base of the
+  // mapping. "reuse" allows us to create a view into an existing
+  // mapping where we do not take ownership of the memory.
   //
  // On success, returns a MemMap instance.  On failure, returns NULL.
   static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
@@ -134,7 +136,7 @@
 
  private:
   MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
-         int prot) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+         int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
 
   static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
       LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -145,7 +147,7 @@
   static MemMap* GetLargestMemMapAt(void* address)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
 
-  std::string name_;
+  const std::string name_;
   byte* const begin_;  // Start of data.
   size_t size_;  // Length of data.
 
@@ -153,6 +155,11 @@
   size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.
 
+  // When reuse_ is true, this is just a view of an existing mapping
+  // and we do not take ownership and are not responsible for
+  // unmapping.
+  const bool reuse_;
+
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
 #endif
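
With the reuse_ flag, a MemMap can act as a non-owning view: the destructor
skips munmap for such maps. A sketch of the intended calling pattern, with a
hypothetical reservation pointer and file descriptor (MapFileAtAddress as
declared above):

    #include <sys/mman.h>
    #include <string>
    #include "mem_map.h"

    // Hypothetical caller: map a file over part of a reservation we own.
    art::MemMap* MapViewIntoReservation(art::byte* reservation_begin, size_t size,
                                        int fd, std::string* error_msg) {
      return art::MemMap::MapFileAtAddress(
          reservation_begin, size, PROT_READ, MAP_PRIVATE, fd, /*start=*/0,
          /*reuse=*/true,  // view only: ~MemMap() will not munmap this range
          "file view", error_msg);
    }
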
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 496a1b2..fb708a2 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -38,6 +38,7 @@
 }
 
 static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+  LOG(INFO) << "System.exit called, status: " << status;
   Runtime::Current()->CallExitHook(status);
   exit(status);
 }
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 982553d..23b9aed 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,6 +48,11 @@
   kIntrinsicMinMaxFloat,
   kIntrinsicMinMaxDouble,
   kIntrinsicSqrt,
+  kIntrinsicCeil,
+  kIntrinsicFloor,
+  kIntrinsicRint,
+  kIntrinsicRoundFloat,
+  kIntrinsicRoundDouble,
   kIntrinsicGet,
   kIntrinsicCharAt,
   kIntrinsicCompareTo,
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 18f7626..329b4dc 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3111,7 +3111,7 @@
       } else {
         // Check whether the name of the called method is "<init>"
         const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
-        if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "init") != 0) {
+        if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "<init>") != 0) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
           return nullptr;
         }
diff --git a/test/003-omnibus-opcodes/src/IntMath.java b/test/003-omnibus-opcodes/src/IntMath.java
index 2e2962a..ad540fd 100644
--- a/test/003-omnibus-opcodes/src/IntMath.java
+++ b/test/003-omnibus-opcodes/src/IntMath.java
@@ -335,8 +335,8 @@
                        special = (start+i) / 15;
                        break;
                }
+               Main.assertTrue(normal == special);
            }
-           Main.assertTrue(normal == special);
        }
     }
 
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 4909a4a..554712a 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -286,3 +286,8 @@
 
   return char_returns[c1];
 }
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* env, jclass,
+                                                                       jclass from, jclass to) {
+  return env->IsAssignableFrom(from, to);
+}
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 11c80f5..ae133be 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -29,6 +29,7 @@
         testShortMethod();
         testBooleanMethod();
         testCharMethod();
+        testIsAssignableFromOnPrimitiveTypes();
     }
 
     private static native void testFindClassOnAttachedNativeThread();
@@ -151,4 +152,19 @@
         }
       }
     }
+
+    // http://b/16531674
+    private static void testIsAssignableFromOnPrimitiveTypes() {
+      if (!nativeIsAssignableFrom(int.class, Integer.TYPE)) {
+        System.out.println("IsAssignableFrom(int.class, Integer.TYPE) returned false, expected true");
+        throw new AssertionError();
+      }
+
+      if (!nativeIsAssignableFrom(Integer.TYPE, int.class)) {
+        System.out.println("IsAssignableFrom(Integer.TYPE, int.class) returned false, expected true");
+        throw new AssertionError();
+      }
+    }
+
+    native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
 }
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 9ecc0a0..56972ff 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -34,6 +34,11 @@
     test_Math_max_F();
     test_Math_min_D();
     test_Math_max_D();
+    test_Math_ceil();
+    test_Math_floor();
+    test_Math_rint();
+    test_Math_round_D();
+    test_Math_round_F();
     test_Short_reverseBytes();
     test_Integer_reverseBytes();
     test_Long_reverseBytes();
@@ -49,6 +54,11 @@
     test_StrictMath_max_F();
     test_StrictMath_min_D();
     test_StrictMath_max_D();
+    test_StrictMath_ceil();
+    test_StrictMath_floor();
+    test_StrictMath_rint();
+    test_StrictMath_round_D();
+    test_StrictMath_round_F();
     test_String_charAt();
     test_String_compareTo();
     test_String_indexOf();
@@ -376,6 +386,104 @@
     Assert.assertEquals(Math.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
   }
 
+  public static void test_Math_ceil() {
+    Assert.assertEquals(Math.ceil(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-0.9), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-0.5), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.1), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.5), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-2.9), -2.0d, 0.0);
+    Assert.assertEquals(Math.ceil(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(Math.ceil(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(Math.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(Math.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_Math_floor() {
+    Assert.assertEquals(Math.floor(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.floor(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+2.9), +2.0d, 0.0);
+    Assert.assertEquals(Math.floor(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.1), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.5), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(Math.floor(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(Math.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(Math.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_Math_rint() {
+    Assert.assertEquals(Math.rint(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(Math.rint(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(Math.rint(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(Math.rint(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(Math.rint(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(Math.rint(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(Math.rint(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(Math.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(Math.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_Math_round_D() {
+    Assert.assertEquals(Math.round(+0.0d), (long)+0.0);
+    Assert.assertEquals(Math.round(-0.0d), (long)+0.0);
+    Assert.assertEquals(Math.round(2.0d), 2l);
+    Assert.assertEquals(Math.round(2.1d), 2l);
+    Assert.assertEquals(Math.round(2.5d), 3l);
+    Assert.assertEquals(Math.round(2.9d), 3l);
+    Assert.assertEquals(Math.round(3.0d), 3l);
+    Assert.assertEquals(Math.round(-2.0d), -2l);
+    Assert.assertEquals(Math.round(-2.1d), -2l);
+    Assert.assertEquals(Math.round(-2.5d), -2l);
+    Assert.assertEquals(Math.round(-2.9d), -3l);
+    Assert.assertEquals(Math.round(-3.0d), -3l);
+    Assert.assertEquals(Math.round(0.49999999999999994d), 1l);
+    Assert.assertEquals(Math.round(Double.NaN), (long)+0.0d);
+    Assert.assertEquals(Math.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+    Assert.assertEquals(Math.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+    Assert.assertEquals(Math.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+    Assert.assertEquals(Math.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+  }
+
+  public static void test_Math_round_F() {
+    Assert.assertEquals(Math.round(+0.0f), (int)+0.0);
+    Assert.assertEquals(Math.round(-0.0f), (int)+0.0);
+    Assert.assertEquals(Math.round(2.0f), 2);
+    Assert.assertEquals(Math.round(2.1f), 2);
+    Assert.assertEquals(Math.round(2.5f), 3);
+    Assert.assertEquals(Math.round(2.9f), 3);
+    Assert.assertEquals(Math.round(3.0f), 3);
+    Assert.assertEquals(Math.round(-2.0f), -2);
+    Assert.assertEquals(Math.round(-2.1f), -2);
+    Assert.assertEquals(Math.round(-2.5f), -2);
+    Assert.assertEquals(Math.round(-2.9f), -3);
+    Assert.assertEquals(Math.round(-3.0f), -3);
+    Assert.assertEquals(Math.round(Float.NaN), (int)+0.0f);
+    Assert.assertEquals(Math.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+    Assert.assertEquals(Math.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+    Assert.assertEquals(Math.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+    Assert.assertEquals(Math.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
+  }
+
   public static void test_StrictMath_abs_I() {
     Assert.assertEquals(StrictMath.abs(0), 0);
     Assert.assertEquals(StrictMath.abs(123), 123);
@@ -487,6 +595,104 @@
     Assert.assertEquals(StrictMath.max(Double.MIN_VALUE, Double.MAX_VALUE), Double.MAX_VALUE);
   }
 
+  public static void test_StrictMath_ceil() {
+    Assert.assertEquals(StrictMath.ceil(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-0.9), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-0.5), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.1), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.5), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-2.9), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.ceil(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(StrictMath.ceil(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(StrictMath.ceil(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_StrictMath_floor() {
+    Assert.assertEquals(StrictMath.floor(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+2.9), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.1), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.5), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.floor(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(StrictMath.floor(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(StrictMath.floor(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_StrictMath_rint() {
+    Assert.assertEquals(StrictMath.rint(+0.0), +0.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-0.0), -0.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.0), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.1), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.5), +2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+2.9), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(+3.0), +3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.0), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.1), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.5), -2.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-2.9), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(-3.0), -3.0d, 0.0);
+    Assert.assertEquals(StrictMath.rint(Double.NaN), Double.NaN, 0.0);
+    Assert.assertEquals(StrictMath.rint(Double.POSITIVE_INFINITY), Double.POSITIVE_INFINITY, 0.0);
+    Assert.assertEquals(StrictMath.rint(Double.NEGATIVE_INFINITY), Double.NEGATIVE_INFINITY, 0.0);
+  }
+
+  public static void test_StrictMath_round_D() {
+    Assert.assertEquals(StrictMath.round(+0.0d), (long)+0.0);
+    Assert.assertEquals(StrictMath.round(-0.0d), (long)+0.0);
+    Assert.assertEquals(StrictMath.round(2.0d), 2l);
+    Assert.assertEquals(StrictMath.round(2.1d), 2l);
+    Assert.assertEquals(StrictMath.round(2.5d), 3l);
+    Assert.assertEquals(StrictMath.round(2.9d), 3l);
+    Assert.assertEquals(StrictMath.round(3.0d), 3l);
+    Assert.assertEquals(StrictMath.round(-2.0d), -2l);
+    Assert.assertEquals(StrictMath.round(-2.1d), -2l);
+    Assert.assertEquals(StrictMath.round(-2.5d), -2l);
+    Assert.assertEquals(StrictMath.round(-2.9d), -3l);
+    Assert.assertEquals(StrictMath.round(-3.0d), -3l);
+    Assert.assertEquals(StrictMath.round(0.49999999999999994d), 1l);
+    Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
+    Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
+    Assert.assertEquals(StrictMath.round(Double.POSITIVE_INFINITY), Long.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Double.NEGATIVE_INFINITY), Long.MIN_VALUE);
+  }
+
+  public static void test_StrictMath_round_F() {
+    Assert.assertEquals(StrictMath.round(+0.0f), (int)+0.0);
+    Assert.assertEquals(StrictMath.round(-0.0f), (int)+0.0);
+    Assert.assertEquals(StrictMath.round(2.0f), 2);
+    Assert.assertEquals(StrictMath.round(2.1f), 2);
+    Assert.assertEquals(StrictMath.round(2.5f), 3);
+    Assert.assertEquals(StrictMath.round(2.9f), 3);
+    Assert.assertEquals(StrictMath.round(3.0f), 3);
+    Assert.assertEquals(StrictMath.round(-2.0f), -2);
+    Assert.assertEquals(StrictMath.round(-2.1f), -2);
+    Assert.assertEquals(StrictMath.round(-2.5f), -2);
+    Assert.assertEquals(StrictMath.round(-2.9f), -3);
+    Assert.assertEquals(StrictMath.round(-3.0f), -3);
+    Assert.assertEquals(StrictMath.round(Float.NaN), (int)+0.0f);
+    Assert.assertEquals(StrictMath.round(Integer.MAX_VALUE + 1.0f), Integer.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Integer.MIN_VALUE - 1.0f), Integer.MIN_VALUE);
+    Assert.assertEquals(StrictMath.round(Float.POSITIVE_INFINITY), Integer.MAX_VALUE);
+    Assert.assertEquals(StrictMath.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE);
+  }
+
   public static void test_Float_floatToRawIntBits() {
     Assert.assertEquals(Float.floatToRawIntBits(-1.0f), 0xbf800000);
     Assert.assertEquals(Float.floatToRawIntBits(0.0f), 0);
diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt
index f852620..5b41606 100644
--- a/test/115-native-bridge/expected.txt
+++ b/test/115-native-bridge/expected.txt
@@ -1,13 +1,55 @@
 Ready for native bridge tests.
 Native bridge initialized.
 Checking for support.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
-Getting trampoline.
+Getting trampoline for JNI_OnLoad with shorty (null).
+Test ART callbacks: all JNI function number is 9.
+    name:booleanMethod, signature:(ZZZZZZZZZZ)Z, shorty:ZZZZZZZZZZZ.
+    name:byteMethod, signature:(BBBBBBBBBB)B, shorty:BBBBBBBBBBB.
+    name:charMethod, signature:(CCCCCCCCCC)C, shorty:CCCCCCCCCCC.
+    name:shortMethod, signature:(SSSSSSSSSS)S, shorty:SSSSSSSSSSS.
+    name:testCallStaticVoidMethodOnSubClassNative, signature:()V, shorty:V.
+    name:testFindClassOnAttachedNativeThread, signature:()V, shorty:V.
+    name:testFindFieldOnAttachedNativeThreadNative, signature:()V, shorty:V.
+    name:testGetMirandaMethodNative, signature:()Ljava/lang/reflect/Method;, shorty:L.
+    name:testZeroLengthByteBuffers, signature:()V, shorty:V.
+trampoline_JNI_OnLoad called!
+Getting trampoline for Java_Main_testFindClassOnAttachedNativeThread with shorty V.
+trampoline_Java_Main_testFindClassOnAttachedNativeThread called!
+Getting trampoline for Java_Main_testFindFieldOnAttachedNativeThreadNative with shorty V.
+trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative called!
+Getting trampoline for Java_Main_testCallStaticVoidMethodOnSubClassNative with shorty V.
+trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative called!
+Getting trampoline for Java_Main_testGetMirandaMethodNative with shorty L.
+trampoline_Java_Main_testGetMirandaMethodNative called!
+Getting trampoline for Java_Main_testZeroLengthByteBuffers with shorty V.
+trampoline_Java_Main_testZeroLengthByteBuffers called!
+Getting trampoline for Java_Main_byteMethod with shorty BBBBBBBBBBB.
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+trampoline_Java_Main_byteMethod called!
+Getting trampoline for Java_Main_shortMethod with shorty SSSSSSSSSSS.
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+trampoline_Java_Main_shortMethod called!
+Getting trampoline for Java_Main_booleanMethod with shorty ZZZZZZZZZZZ.
+trampoline_Java_Main_booleanMethod called!
+trampoline_Java_Main_booleanMethod called!
+Getting trampoline for Java_Main_charMethod with shorty CCCCCCCCCCC.
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
+trampoline_Java_Main_charMethod called!
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index bd3ae13..82211a5 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -44,13 +44,192 @@
   bool (*isSupported)(const char* libpath);
 };
 
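+// Describes one native method the test bridge can intercept: its JNI name and
+// signature, whether it is static, the real implementation resolved via dlsym
+// (fnPtr), and the logging trampoline handed back to the runtime.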
+struct NativeBridgeMethod {
+  const char* name;
+  const char* signature;
+  bool static_method;
+  void* fnPtr;
+  void* trampoline;
+};
 
+static NativeBridgeMethod* find_native_bridge_method(const char *name);
+static NativeBridgeArtCallbacks* gNativeBridgeArtCallbacks;
 
-static std::vector<void*> symbols;
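+// Trampoline for JNI_OnLoad: exercises the ART callbacks (native method count,
+// registered methods, and method shorties) before delegating to the library's
+// real JNI_OnLoad.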
+static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
+  JNIEnv* env = nullptr;
+  typedef jint (*FnPtr_t)(JavaVM*, void*);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("JNI_OnLoad")->fnPtr);
+
+  vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6);
+  if (env == nullptr) {
+    return 0;
+  }
+
+  jclass klass = env->FindClass("Main");
+  if (klass != nullptr) {
+    int count1 = gNativeBridgeArtCallbacks->getNativeMethodCount(env, klass);
+    // operator new throws on failure, so no null check is needed here.
+    std::unique_ptr<JNINativeMethod[]> methods(new JNINativeMethod[count1]);
+    int count2 = gNativeBridgeArtCallbacks->getNativeMethods(env, klass, methods.get(), count1);
+    if (count1 == count2) {
+      printf("Test ART callbacks: total number of JNI methods is %d.\n", count1);
+    }
+
+    for (int i = 0; i < count1; i++) {
+      NativeBridgeMethod* nb_method = find_native_bridge_method(methods[i].name);
+      if (nb_method != nullptr) {
+        jmethodID mid = nullptr;
+        if (nb_method->static_method) {
+          mid = env->GetStaticMethodID(klass, methods[i].name, nb_method->signature);
+        } else {
+          mid = env->GetMethodID(klass, methods[i].name, nb_method->signature);
+        }
+        if (mid != nullptr) {
+          const char* shorty = gNativeBridgeArtCallbacks->getMethodShorty(env, mid);
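+          // For this test, getNativeMethods reports each method's shorty in the
+          // signature field, so the two strings are expected to match.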
+          if (strcmp(shorty, methods[i].signature) == 0) {
+            printf("    name:%s, signature:%s, shorty:%s.\n",
+                   methods[i].name, nb_method->signature, shorty);
+          }
+        }
+      }
+    }
+    // methods[] is freed automatically when the unique_ptr goes out of scope.
+  }
+
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(vm, reserved);
+}
+
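+// Each trampoline below resolves the real native implementation by name and
+// forwards its arguments unchanged, logging the call on the way through.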
+static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env,
+                                                                     jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testFindClassOnAttachedNativeThread")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv* env,
+                                                                           jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testFindFieldOnAttachedNativeThreadNative")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
+                                                                          jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testCallStaticVoidMethodOnSubClassNative")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static jobject trampoline_Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass klass) {
+  typedef jobject (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testGetMirandaMethodNative")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static void trampoline_Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass klass) {
+  typedef void (*FnPtr_t)(JNIEnv*, jclass);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
+    (find_native_bridge_method("testZeroLengthByteBuffers")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass);
+}
+
+static jbyte trampoline_Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+                                             jbyte b3, jbyte b4, jbyte b5, jbyte b6,
+                                             jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
+  typedef jbyte (*FnPtr_t)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte,
+                           jbyte, jbyte, jbyte, jbyte, jbyte);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("byteMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
+}
+
+static jshort trampoline_Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+                                               jshort s3, jshort s4, jshort s5, jshort s6,
+                                               jshort s7, jshort s8, jshort s9, jshort s10) {
+  typedef jshort (*FnPtr_t)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort,
+                            jshort, jshort, jshort, jshort, jshort);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("shortMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10);
+}
+
+static jboolean trampoline_Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+                                                   jboolean b2, jboolean b3, jboolean b4,
+                                                   jboolean b5, jboolean b6, jboolean b7,
+                                                   jboolean b8, jboolean b9, jboolean b10) {
+  typedef jboolean (*FnPtr_t)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
+                              jboolean, jboolean, jboolean, jboolean, jboolean);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("booleanMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
+}
+
+static jchar trampoline_Java_Main_charMethod(JNIEnv* env, jclass klass, jchar c1, jchar c2,
+                                             jchar c3, jchar c4, jchar c5, jchar c6,
+                                             jchar c7, jchar c8, jchar c9, jchar c10) {
+  typedef jchar (*FnPtr_t)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar,
+                           jchar, jchar, jchar, jchar, jchar);
+  FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("charMethod")->fnPtr);
+  printf("%s called!\n", __FUNCTION__);
+  return fnPtr(env, klass, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10);
+}
+
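+// Table of intercepted methods; fnPtr starts out null and is filled in by
+// native_bridge_getTrampoline once dlsym resolves the real symbol.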
+NativeBridgeMethod gNativeBridgeMethods[] = {
+  { "JNI_OnLoad", "", true, nullptr,
+    reinterpret_cast<void*>(trampoline_JNI_OnLoad) },
+  { "booleanMethod", "(ZZZZZZZZZZ)Z", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_booleanMethod) },
+  { "byteMethod", "(BBBBBBBBBB)B", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_byteMethod) },
+  { "charMethod", "(CCCCCCCCCC)C", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_charMethod) },
+  { "shortMethod", "(SSSSSSSSSS)S", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_shortMethod) },
+  { "testCallStaticVoidMethodOnSubClassNative", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative) },
+  { "testFindClassOnAttachedNativeThread", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testFindClassOnAttachedNativeThread) },
+  { "testFindFieldOnAttachedNativeThreadNative", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative) },
+  { "testGetMirandaMethodNative", "()Ljava/lang/reflect/Method;", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testGetMirandaMethodNative) },
+  { "testZeroLengthByteBuffers", "()V", true, nullptr,
+    reinterpret_cast<void*>(trampoline_Java_Main_testZeroLengthByteBuffers) },
+};
+
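+// Finds the table entry for a method, accepting either the bare name or the
+// full JNI name (the "Java_Main_" prefix is stripped before the comparison).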
+static NativeBridgeMethod* find_native_bridge_method(const char *name) {
+  const char* pname = name;
+  if (strncmp(name, "Java_Main_", 10) == 0) {
+    pname += 10;
+  }
+
+  for (size_t i = 0; i < sizeof(gNativeBridgeMethods) / sizeof(gNativeBridgeMethods[0]); i++) {
+    if (strcmp(pname, gNativeBridgeMethods[i].name) == 0) {
+      return &gNativeBridgeMethods[i];
+    }
+  }
+  return nullptr;
+}
 
 // NativeBridgeCallbacks implementations
 extern "C" bool native_bridge_initialize(NativeBridgeArtCallbacks* art_cbs) {
-  printf("Native bridge initialized.\n");
+  if (art_cbs != nullptr) {
+    gNativeBridgeArtCallbacks = art_cbs;
+    printf("Native bridge initialized.\n");
+  }
   return true;
 }
 
@@ -80,17 +259,16 @@
 
 extern "C" void* native_bridge_getTrampoline(void* handle, const char* name, const char* shorty,
                                              uint32_t len) {
-  printf("Getting trampoline.\n");
+  printf("Getting trampoline for %s with shorty %s.\n", name, shorty);
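+  // Note: shorty is null for JNI_OnLoad; printing it with %s relies on the libc
+  // rendering "(null)", which the expected output depends on.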
 
   // The name here is actually the JNI name, so we can directly do the lookup.
   void* sym = dlsym(handle, name);
-  if (sym != nullptr) {
-    symbols.push_back(sym);
-  }
+  NativeBridgeMethod* method = find_native_bridge_method(name);
+  // Bail out if the method is unknown or the real symbol could not be resolved,
+  // so the trampoline never forwards to a null function pointer.
+  if (method == nullptr || sym == nullptr) {
+    return nullptr;
+  }
+  method->fnPtr = sym;
 
-  // As libarttest is the same arch as the host, we can actually directly use the code and do not
-  // need to create a trampoline. :-)
-  return sym;
+  return method->trampoline;
 }
 
 extern "C" bool native_bridge_isSupported(const char* libpath) {
@@ -109,6 +287,3 @@
   .getTrampoline = &native_bridge_getTrampoline,
   .isSupported = &native_bridge_isSupported
 };
-
-
-