Merge "ART: Extract JNI macro assembler for arm"
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 7a64678..0ede30d 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -115,6 +115,7 @@
 	optimizing/instruction_simplifier_shared.cc \
 	optimizing/intrinsics_arm64.cc \
 	utils/arm64/assembler_arm64.cc \
+	utils/arm64/jni_macro_assembler_arm64.cc \
 	utils/arm64/managed_register_arm64.cc \
 
 LIBART_COMPILER_SRC_FILES_mips := \
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 53685bf..22221e7 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -32,9 +32,6 @@
 #endif
 
 void Arm64Assembler::FinalizeCode() {
-  for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
-    EmitExceptionPoll(exception.get());
-  }
   ___ FinalizeCode();
 }
 
@@ -52,254 +49,6 @@
   region.CopyFrom(0, from);
 }
 
-void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
-  ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
-}
-
-void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
-  StoreToOffset(TR, SP, offset.Int32Value());
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  AddConstant(SP, -adjust);
-  cfi().AdjustCFAOffset(adjust);
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  AddConstant(SP, adjust);
-  cfi().AdjustCFAOffset(-adjust);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
-  AddConstant(rd, rd, value, cond);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value,
-                                 Condition cond) {
-  if ((cond == al) || (cond == nv)) {
-    // VIXL macro-assembler handles all variants.
-    ___ Add(reg_x(rd), reg_x(rn), value);
-  } else {
-    // temp = rd + value
-    // rd = cond ? temp : rn
-    UseScratchRegisterScope temps(&vixl_masm_);
-    temps.Exclude(reg_x(rd), reg_x(rn));
-    Register temp = temps.AcquireX();
-    ___ Add(temp, reg_x(rn), value);
-    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
-  }
-}
-
-void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
-                                    XRegister base, int32_t offset) {
-  switch (type) {
-    case kStoreByte:
-      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
-      break;
-    case kStoreHalfword:
-      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
-      break;
-    case kStoreWord:
-      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
-      break;
-    default:
-      LOG(FATAL) << "UNREACHABLE";
-  }
-}
-
-void Arm64Assembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
-  CHECK_NE(source, SP);
-  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
-  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
-  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
-  Arm64ManagedRegister src = m_src.AsArm64();
-  if (src.IsNoRegister()) {
-    CHECK_EQ(0u, size);
-  } else if (src.IsWRegister()) {
-    CHECK_EQ(4u, size);
-    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
-  } else if (src.IsXRegister()) {
-    CHECK_EQ(8u, size);
-    StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
-  } else if (src.IsSRegister()) {
-    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
-  } else {
-    CHECK(src.IsDRegister()) << src;
-    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
-  }
-}
-
-void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
-  Arm64ManagedRegister src = m_src.AsArm64();
-  CHECK(src.IsXRegister()) << src;
-  StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
-                 offs.Int32Value());
-}
-
-void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
-  Arm64ManagedRegister src = m_src.AsArm64();
-  CHECK(src.IsXRegister()) << src;
-  StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
-                                           ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  LoadImmediate(scratch.AsXRegister(), imm);
-  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
-                 offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
-                                              FrameOffset fr_offs,
-                                              ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
-  UseScratchRegisterScope temps(&vixl_masm_);
-  Register temp = temps.AcquireX();
-  ___ Mov(temp, reg_x(SP));
-  ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-}
-
-void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
-                                   FrameOffset in_off, ManagedRegister m_scratch) {
-  Arm64ManagedRegister source = m_source.AsArm64();
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
-  LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
-  StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
-}
-
-// Load routines.
-void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value,
-                                   Condition cond) {
-  if ((cond == al) || (cond == nv)) {
-    ___ Mov(reg_x(dest), value);
-  } else {
-    // temp = value
-    // rd = cond ? temp : rd
-    if (value != 0) {
-      UseScratchRegisterScope temps(&vixl_masm_);
-      temps.Exclude(reg_x(dest));
-      Register temp = temps.AcquireX();
-      ___ Mov(temp, value);
-      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
-    } else {
-      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
-    }
-  }
-}
-
-void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
-                                     XRegister base, int32_t offset) {
-  switch (type) {
-    case kLoadSignedByte:
-      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
-      break;
-    case kLoadSignedHalfword:
-      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
-      break;
-    case kLoadUnsignedByte:
-      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
-      break;
-    case kLoadUnsignedHalfword:
-      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
-      break;
-    case kLoadWord:
-      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
-      break;
-    default:
-        LOG(FATAL) << "UNREACHABLE";
-  }
-}
-
-// Note: We can extend this member by adding load type info - see
-// sign extended A64 load variants.
-void Arm64Assembler::LoadFromOffset(XRegister dest, XRegister base,
-                                    int32_t offset) {
-  CHECK_NE(dest, SP);
-  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadSFromOffset(SRegister dest, XRegister base,
-                                     int32_t offset) {
-  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadDFromOffset(DRegister dest, XRegister base,
-                                     int32_t offset) {
-  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Load(Arm64ManagedRegister dest, XRegister base,
-                          int32_t offset, size_t size) {
-  if (dest.IsNoRegister()) {
-    CHECK_EQ(0u, size) << dest;
-  } else if (dest.IsWRegister()) {
-    CHECK_EQ(4u, size) << dest;
-    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
-  } else if (dest.IsXRegister()) {
-    CHECK_NE(dest.AsXRegister(), SP) << dest;
-    if (size == 4u) {
-      ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
-    } else {
-      CHECK_EQ(8u, size) << dest;
-      ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
-    }
-  } else if (dest.IsSRegister()) {
-    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
-  } else {
-    CHECK(dest.IsDRegister()) << dest;
-    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
-  }
-}
-
-void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
-  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
-  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  CHECK(dst.IsXRegister()) << dst;
-  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
-                             bool unpoison_reference) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  Arm64ManagedRegister base = m_base.AsArm64();
-  CHECK(dst.IsXRegister() && base.IsXRegister());
-  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
-                  offs.Int32Value());
-  if (unpoison_reference) {
-    WRegister ref_reg = dst.AsOverlappingWRegister();
-    MaybeUnpoisonHeapReference(reg_w(ref_reg));
-  }
-}
-
 void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   Arm64ManagedRegister base = m_base.AsArm64();
@@ -310,209 +59,6 @@
   ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
 }
 
-void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  CHECK(dst.IsXRegister()) << dst;
-  LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
-}
-
-// Copying routines.
-void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  Arm64ManagedRegister src = m_src.AsArm64();
-  if (!dst.Equals(src)) {
-    if (dst.IsXRegister()) {
-      if (size == 4) {
-        CHECK(src.IsWRegister());
-        ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
-      } else {
-        if (src.IsXRegister()) {
-          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
-        } else {
-          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
-        }
-      }
-    } else if (dst.IsWRegister()) {
-      CHECK(src.IsWRegister()) << src;
-      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
-    } else if (dst.IsSRegister()) {
-      CHECK(src.IsSRegister()) << src;
-      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
-    } else {
-      CHECK(dst.IsDRegister()) << dst;
-      CHECK(src.IsDRegister()) << src;
-      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
-    }
-  }
-}
-
-void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
-                                          ThreadOffset64 tr_offs,
-                                          ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-  StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
-                                        FrameOffset fr_offs,
-                                        ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
-                             ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
-                  SP, src.Int32Value());
-  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
-                 SP, dest.Int32Value());
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
-                          ManagedRegister m_scratch, size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
-                          ManagedRegister m_scratch, size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64ManagedRegister base = src_base.AsArm64();
-  CHECK(base.IsXRegister()) << base;
-  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
-                   src_offset.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
-                          ManagedRegister m_scratch, size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64ManagedRegister base = m_dest_base.AsArm64();
-  CHECK(base.IsXRegister()) << base;
-  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
-                   dest_offs.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
-                          ManagedRegister /*mscratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
-                          ManagedRegister m_src, Offset src_offset,
-                          ManagedRegister m_scratch, size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64ManagedRegister src = m_src.AsArm64();
-  Arm64ManagedRegister dest = m_dest.AsArm64();
-  CHECK(dest.IsXRegister()) << dest;
-  CHECK(src.IsXRegister()) << src;
-  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    if (scratch.IsWRegister()) {
-      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
-                    src_offset.Int32Value());
-      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
-                   dest_offset.Int32Value());
-    } else {
-      LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
-                    src_offset.Int32Value());
-      StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
-                   dest_offset.Int32Value());
-    }
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
-                          FrameOffset /*src*/, Offset /*src_offset*/,
-                          ManagedRegister /*scratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
-  // TODO: Should we check that m_scratch is IP? - see arm.
-  ___ Dmb(InnerShareable, BarrierAll);
-}
-
-void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
-  Arm64ManagedRegister reg = mreg.AsArm64();
-  CHECK(size == 1 || size == 2) << size;
-  CHECK(reg.IsWRegister()) << reg;
-  if (size == 1) {
-    ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
-  } else {
-    ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
-  }
-}
-
-void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
-  Arm64ManagedRegister reg = mreg.AsArm64();
-  CHECK(size == 1 || size == 2) << size;
-  CHECK(reg.IsWRegister()) << reg;
-  if (size == 1) {
-    ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
-  } else {
-    ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
-  }
-}
-
-void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
-  // TODO: not validating references.
-}
-
-void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
-  // TODO: not validating references.
-}
-
-void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
-  Arm64ManagedRegister base = m_base.AsArm64();
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(base.IsXRegister()) << base;
-  CHECK(scratch.IsXRegister()) << scratch;
-  LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
-  ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
 void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
   Arm64ManagedRegister base = m_base.AsArm64();
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
@@ -525,114 +71,6 @@
   ___ Br(reg_x(scratch.AsXRegister()));
 }
 
-void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  // Call *(*(SP + base) + offset)
-  LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
-  LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
-  ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
-void Arm64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
-                                    ManagedRegister scratch ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(
-    ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
-    bool null_allowed) {
-  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
-  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
-  // For now we only hold stale handle scope entries in x registers.
-  CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
-  CHECK(out_reg.IsXRegister()) << out_reg;
-  if (null_allowed) {
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
-    if (in_reg.IsNoRegister()) {
-      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
-                      handle_scope_offs.Int32Value());
-      in_reg = out_reg;
-    }
-    ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
-    if (!out_reg.Equals(in_reg)) {
-      LoadImmediate(out_reg.AsXRegister(), 0, eq);
-    }
-    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
-  } else {
-    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
-  }
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
-                                            ManagedRegister m_scratch, bool null_allowed) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  if (null_allowed) {
-    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
-                    handle_scope_offset.Int32Value());
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
-    ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
-    // Move this logic in add constants with flags.
-    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
-  } else {
-    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
-  }
-  StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
-}
-
-void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
-                                                  ManagedRegister m_in_reg) {
-  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
-  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
-  CHECK(out_reg.IsXRegister()) << out_reg;
-  CHECK(in_reg.IsXRegister()) << in_reg;
-  vixl::aarch64::Label exit;
-  if (!out_reg.Equals(in_reg)) {
-    // FIXME: Who sets the flags here?
-    LoadImmediate(out_reg.AsXRegister(), 0, eq);
-  }
-  ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
-  LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
-  ___ Bind(&exit);
-}
-
-void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
-  CHECK_ALIGNED(stack_adjust, kStackAlignment);
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
-  LoadFromOffset(scratch.AsXRegister(),
-                 TR,
-                 Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
-  ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
-}
-
-void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
-  UseScratchRegisterScope temps(&vixl_masm_);
-  temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
-  Register temp = temps.AcquireX();
-
-  // Bind exception poll entry.
-  ___ Bind(exception->Entry());
-  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
-    DecreaseFrameSize(exception->stack_adjust_);
-  }
-  // Pass exception object as argument.
-  // Don't care about preserving X0 as this won't return.
-  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
-  ___ Ldr(temp,
-          MEM_OP(reg_x(TR),
-                 QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
-
-  ___ Blr(temp);
-  // Call should never return.
-  ___ Brk();
-}
-
 static inline dwarf::Reg DWARFReg(CPURegister reg) {
   if (reg.IsFPRegister()) {
     return dwarf::Reg::Arm64Fp(reg.GetCode());
@@ -696,105 +134,6 @@
   DCHECK(registers.IsEmpty());
 }
 
-void Arm64Assembler::BuildFrame(size_t frame_size,
-                                ManagedRegister method_reg,
-                                ArrayRef<const ManagedRegister> callee_save_regs,
-                                const ManagedRegisterEntrySpills& entry_spills) {
-  // Setup VIXL CPURegList for callee-saves.
-  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
-  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
-  for (auto r : callee_save_regs) {
-    Arm64ManagedRegister reg = r.AsArm64();
-    if (reg.IsXRegister()) {
-      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
-    } else {
-      DCHECK(reg.IsDRegister());
-      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
-    }
-  }
-  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
-  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
-  // Increase frame to required size.
-  DCHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
-  IncreaseFrameSize(frame_size);
-
-  // Save callee-saves.
-  SpillRegisters(core_reg_list, frame_size - core_reg_size);
-  SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
-  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
-  // Write ArtMethod*
-  DCHECK(X0 == method_reg.AsArm64().AsXRegister());
-  StoreToOffset(X0, SP, 0);
-
-  // Write out entry spills
-  int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
-  for (size_t i = 0; i < entry_spills.size(); ++i) {
-    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
-    if (reg.IsNoRegister()) {
-      // only increment stack offset.
-      ManagedRegisterSpill spill = entry_spills.at(i);
-      offset += spill.getSize();
-    } else if (reg.IsXRegister()) {
-      StoreToOffset(reg.AsXRegister(), SP, offset);
-      offset += 8;
-    } else if (reg.IsWRegister()) {
-      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
-      offset += 4;
-    } else if (reg.IsDRegister()) {
-      StoreDToOffset(reg.AsDRegister(), SP, offset);
-      offset += 8;
-    } else if (reg.IsSRegister()) {
-      StoreSToOffset(reg.AsSRegister(), SP, offset);
-      offset += 4;
-    }
-  }
-}
-
-void Arm64Assembler::RemoveFrame(size_t frame_size,
-                                 ArrayRef<const ManagedRegister> callee_save_regs) {
-  // Setup VIXL CPURegList for callee-saves.
-  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
-  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
-  for (auto r : callee_save_regs) {
-    Arm64ManagedRegister reg = r.AsArm64();
-    if (reg.IsXRegister()) {
-      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
-    } else {
-      DCHECK(reg.IsDRegister());
-      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
-    }
-  }
-  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
-  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
-  // For now we only check that the size of the frame is large enough to hold spills and method
-  // reference.
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
-  DCHECK_ALIGNED(frame_size, kStackAlignment);
-
-  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
-  cfi_.RememberState();
-
-  // Restore callee-saves.
-  UnspillRegisters(core_reg_list, frame_size - core_reg_size);
-  UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
-  // Decrease frame size to start of callee saved regs.
-  DecreaseFrameSize(frame_size);
-
-  // Pop callee saved and return to LR.
-  ___ Ret();
-
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size);
-}
-
 void Arm64Assembler::PoisonHeapReference(Register reg) {
   DCHECK(reg.IsW());
   // reg = -reg.
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index d7084da..4e88e64 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -22,11 +22,9 @@
 #include <vector>
 
 #include "base/arena_containers.h"
-#include "base/enums.h"
 #include "base/logging.h"
 #include "utils/arm64/managed_register_arm64.h"
 #include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
 #include "offsets.h"
 
 // TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
@@ -63,38 +61,14 @@
   kStoreDWord
 };
 
-class Arm64Exception {
- private:
-  Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
-      : scratch_(scratch), stack_adjust_(stack_adjust) {
-    }
-
-  vixl::aarch64::Label* Entry() { return &exception_entry_; }
-
-  // Register used for passing Thread::Current()->exception_ .
-  const Arm64ManagedRegister scratch_;
-
-  // Stack adjust for ExceptionPool.
-  const size_t stack_adjust_;
-
-  vixl::aarch64::Label exception_entry_;
-
-  friend class Arm64Assembler;
-  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
-class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Arm64Assembler FINAL : public Assembler {
  public:
-  explicit Arm64Assembler(ArenaAllocator* arena)
-      : Assembler(arena),
-        exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+  explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
 
   virtual ~Arm64Assembler() {}
 
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
 
-  DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
-
   // Finalize the code.
   void FinalizeCode() OVERRIDE;
 
@@ -105,110 +79,14 @@
   // Copy instructions out of assembly buffer into the given region of memory.
   void FinalizeInstructions(const MemoryRegion& region);
 
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
+
   void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
   void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
 
-  // Emit code that will create an activation on the stack.
-  void BuildFrame(size_t frame_size,
-                  ManagedRegister method_reg,
-                  ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
-  // Emit code that will remove an activation from the stack.
-  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
-      OVERRIDE;
-
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
-  // Store routines.
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
-                                FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
-  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
-
-  // Load routines.
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
-  // Copying routines.
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-  void CopyRawPtrFromThread(FrameOffset fr_offs,
-                            ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
-  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
-  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
-  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
-  // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
-  // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
-  // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
-
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
-
-  // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
-  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
-  // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
-  // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
   // Jump to address (not setting link register)
   void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
 
-  // Generate code to check if Thread::Current()->exception_ is non-null
-  // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
   //
   // Heap poisoning.
   //
@@ -227,7 +105,6 @@
     UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
   }
 
- private:
   static vixl::aarch64::Register reg_x(int code) {
     CHECK(code < kNumberOfXRegisters) << code;
     if (code == SP) {
@@ -256,37 +133,7 @@
     return vixl::aarch64::FPRegister::GetSRegFromCode(code);
   }
 
-  // Emits Exception block.
-  void EmitExceptionPoll(Arm64Exception *exception);
-
-  void StoreWToOffset(StoreOperandType type, WRegister source,
-                      XRegister base, int32_t offset);
-  void StoreToOffset(XRegister source, XRegister base, int32_t offset);
-  void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
-  void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
-
-  void LoadImmediate(XRegister dest,
-                     int32_t value,
-                     vixl::aarch64::Condition cond = vixl::aarch64::al);
-  void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
-  void LoadWFromOffset(LoadOperandType type,
-                       WRegister dest,
-                       XRegister base,
-                       int32_t offset);
-  void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
-  void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
-  void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
-  void AddConstant(XRegister rd,
-                   int32_t value,
-                   vixl::aarch64::Condition cond = vixl::aarch64::al);
-  void AddConstant(XRegister rd,
-                   XRegister rn,
-                   int32_t value,
-                   vixl::aarch64::Condition cond = vixl::aarch64::al);
-
-  // List of exception blocks to generate at the end of the code cache.
-  ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
-
+ private:
   // VIXL assembler.
   vixl::aarch64::MacroAssembler vixl_masm_;
 
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
new file mode 100644
index 0000000..dfdcd11
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm64.h"
+
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "managed_register_arm64.h"
+#include "offsets.h"
+#include "thread.h"
+
+using namespace vixl::aarch64;  // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm64 {
+
+#ifdef ___
+#error "ARM64 Assembler macro already defined."
+#else
+#define ___   asm_.GetVIXLAssembler()->
+#endif
+
+#define reg_x(X) Arm64Assembler::reg_x(X)
+#define reg_w(W) Arm64Assembler::reg_w(W)
+#define reg_d(D) Arm64Assembler::reg_d(D)
+#define reg_s(S) Arm64Assembler::reg_s(S)
+
+Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
+}
+
+void Arm64JNIMacroAssembler::FinalizeCode() {
+  for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
+    EmitExceptionPoll(exception.get());
+  }
+  ___ FinalizeCode();
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+  ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
+  StoreToOffset(TR, SP, offset.Int32Value());
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  AddConstant(SP, -adjust);
+  cfi().AdjustCFAOffset(adjust);
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  AddConstant(SP, adjust);
+  cfi().AdjustCFAOffset(-adjust);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
+  AddConstant(rd, rd, value, cond);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd,
+                                         XRegister rn,
+                                         int32_t value,
+                                         Condition cond) {
+  if ((cond == al) || (cond == nv)) {
+    // VIXL macro-assembler handles all variants.
+    ___ Add(reg_x(rd), reg_x(rn), value);
+  } else {
+    // temp = rd + value
+    // rd = cond ? temp : rn
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    temps.Exclude(reg_x(rd), reg_x(rn));
+    Register temp = temps.AcquireX();
+    ___ Add(temp, reg_x(rn), value);
+    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
+  }
+}
+
+void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type,
+                                            WRegister source,
+                                            XRegister base,
+                                            int32_t offset) {
+  switch (type) {
+    case kStoreByte:
+      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
+      break;
+    case kStoreHalfword:
+      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
+      break;
+    case kStoreWord:
+      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
+  CHECK_NE(source, SP);
+  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
+  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
+  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
+  Arm64ManagedRegister src = m_src.AsArm64();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsWRegister()) {
+    CHECK_EQ(4u, size);
+    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
+  } else if (src.IsXRegister()) {
+    CHECK_EQ(8u, size);
+    StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+  } else if (src.IsSRegister()) {
+    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
+  } else {
+    CHECK(src.IsDRegister()) << src;
+    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
+  }
+}
+
+void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
+  Arm64ManagedRegister src = m_src.AsArm64();
+  CHECK(src.IsXRegister()) << src;
+  StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
+                 offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
+  Arm64ManagedRegister src = m_src.AsArm64();
+  CHECK(src.IsXRegister()) << src;
+  StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs,
+                                                   uint32_t imm,
+                                                   ManagedRegister m_scratch) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  LoadImmediate(scratch.AsXRegister(), imm);
+  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
+                 offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
+                                                      FrameOffset fr_offs,
+                                                      ManagedRegister m_scratch) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  Register temp = temps.AcquireX();
+  ___ Mov(temp, reg_x(SP));
+  ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off,
+                                           ManagedRegister m_source,
+                                           FrameOffset in_off,
+                                           ManagedRegister m_scratch) {
+  Arm64ManagedRegister source = m_source.AsArm64();
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
+  LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
+  StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
+}
+
+// Load routines.
+void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
+  if ((cond == al) || (cond == nv)) {
+    ___ Mov(reg_x(dest), value);
+  } else {
+    // temp = value
+    // rd = cond ? temp : rd
+    if (value != 0) {
+      UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+      temps.Exclude(reg_x(dest));
+      Register temp = temps.AcquireX();
+      ___ Mov(temp, value);
+      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
+    } else {
+      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
+    }
+  }
+}
+
+void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type,
+                                             WRegister dest,
+                                             XRegister base,
+                                             int32_t offset) {
+  switch (type) {
+    case kLoadSignedByte:
+      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
+      break;
+    case kLoadSignedHalfword:
+      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
+      break;
+    case kLoadUnsignedByte:
+      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
+      break;
+    case kLoadUnsignedHalfword:
+      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
+      break;
+    case kLoadWord:
+      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Note: We can extend this member by adding load type info - see
+// sign extended A64 load variants.
+void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) {
+  CHECK_NE(dest, SP);
+  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) {
+  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) {
+  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
+                                  XRegister base,
+                                  int32_t offset,
+                                  size_t size) {
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size) << dest;
+  } else if (dest.IsWRegister()) {
+    CHECK_EQ(4u, size) << dest;
+    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
+  } else if (dest.IsXRegister()) {
+    CHECK_NE(dest.AsXRegister(), SP) << dest;
+    if (size == 4u) {
+      ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
+    } else {
+      CHECK_EQ(8u, size) << dest;
+      ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
+    }
+  } else if (dest.IsSRegister()) {
+    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
+  } else {
+    CHECK(dest.IsDRegister()) << dest;
+    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
+  }
+}
+
+void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
+                                            ThreadOffset64 src,
+                                            size_t size) {
+  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
+  Arm64ManagedRegister dst = m_dst.AsArm64();
+  CHECK(dst.IsXRegister()) << dst;
+  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
+                                     ManagedRegister m_base,
+                                     MemberOffset offs,
+                                     bool unpoison_reference) {
+  Arm64ManagedRegister dst = m_dst.AsArm64();
+  Arm64ManagedRegister base = m_base.AsArm64();
+  CHECK(dst.IsXRegister() && base.IsXRegister());
+  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
+                  offs.Int32Value());
+  if (unpoison_reference) {
+    WRegister ref_reg = dst.AsOverlappingWRegister();
+    asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
+  }
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
+                                        ManagedRegister m_base,
+                                        Offset offs) {
+  Arm64ManagedRegister dst = m_dst.AsArm64();
+  Arm64ManagedRegister base = m_base.AsArm64();
+  CHECK(dst.IsXRegister() && base.IsXRegister());
+  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
+  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
+  Arm64ManagedRegister dst = m_dst.AsArm64();
+  CHECK(dst.IsXRegister()) << dst;
+  LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
+}
+
+// Copying routines.
+void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
+  Arm64ManagedRegister dst = m_dst.AsArm64();
+  Arm64ManagedRegister src = m_src.AsArm64();
+  if (!dst.Equals(src)) {
+    if (dst.IsXRegister()) {
+      if (size == 4) {
+        CHECK(src.IsWRegister());
+        ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
+      } else {
+        if (src.IsXRegister()) {
+          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
+        } else {
+          ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
+        }
+      }
+    } else if (dst.IsWRegister()) {
+      CHECK(src.IsWRegister()) << src;
+      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
+    } else if (dst.IsSRegister()) {
+      CHECK(src.IsSRegister()) << src;
+      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
+    } else {
+      CHECK(dst.IsDRegister()) << dst;
+      CHECK(src.IsDRegister()) << src;
+      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
+    }
+  }
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                                  ThreadOffset64 tr_offs,
+                                                  ManagedRegister m_scratch) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+  StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
+                                                FrameOffset fr_offs,
+                                                ManagedRegister m_scratch) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
+                  SP, src.Int32Value());
+  StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
+                 SP, dest.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+                                  FrameOffset src,
+                                  ManagedRegister m_scratch,
+                                  size_t size) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  if (size == 4) {
+    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
+    StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
+  } else if (size == 8) {
+    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+  } else {
+    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+  }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+                                  ManagedRegister src_base,
+                                  Offset src_offset,
+                                  ManagedRegister m_scratch,
+                                  size_t size) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  Arm64ManagedRegister base = src_base.AsArm64();
+  CHECK(base.IsXRegister()) << base;
+  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  if (size == 4) {
+    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
+                   src_offset.Int32Value());
+    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
+  } else if (size == 8) {
+    LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
+    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+  } else {
+    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+  }
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
+                                  Offset dest_offs,
+                                  FrameOffset src,
+                                  ManagedRegister m_scratch,
+                                  size_t size) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  Arm64ManagedRegister base = m_dest_base.AsArm64();
+  CHECK(base.IsXRegister()) << base;
+  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  if (size == 4) {
+    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
+    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
+                   dest_offs.Int32Value());
+  } else if (size == 8) {
+    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+    StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
+  } else {
+    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+  }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+                                  FrameOffset /*src_base*/,
+                                  Offset /*src_offset*/,
+                                  ManagedRegister /*mscratch*/,
+                                  size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
+                                  Offset dest_offset,
+                                  ManagedRegister m_src,
+                                  Offset src_offset,
+                                  ManagedRegister m_scratch,
+                                  size_t size) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  Arm64ManagedRegister src = m_src.AsArm64();
+  Arm64ManagedRegister dest = m_dest.AsArm64();
+  CHECK(dest.IsXRegister()) << dest;
+  CHECK(src.IsXRegister()) << src;
+  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+  CHECK(size == 4 || size == 8) << size;
+  if (size == 4) {
+    if (scratch.IsWRegister()) {
+      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
+                      src_offset.Int32Value());
+      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
+                     dest_offset.Int32Value());
+    } else {
+      LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
+                      src_offset.Int32Value());
+      StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
+                     dest_offset.Int32Value());
+    }
+  } else if (size == 8) {
+    LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
+    StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
+  } else {
+    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+  }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+                                  Offset /*dest_offset*/,
+                                  FrameOffset /*src*/,
+                                  Offset /*src_offset*/,
+                                  ManagedRegister /*scratch*/,
+                                  size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
+  // TODO: Should we check that m_scratch is IP? See the arm implementation.
+  ___ Dmb(InnerShareable, BarrierAll);
+}
+
+void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+  Arm64ManagedRegister reg = mreg.AsArm64();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsWRegister()) << reg;
+  if (size == 1) {
+    ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  } else {
+    ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  }
+}
+
+void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+  Arm64ManagedRegister reg = mreg.AsArm64();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsWRegister()) << reg;
+  if (size == 1) {
+    ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  } else {
+    ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  }
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
+  Arm64ManagedRegister base = m_base.AsArm64();
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(base.IsXRegister()) << base;
+  CHECK(scratch.IsXRegister()) << scratch;
+  LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+  ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  // Call *(*(SP + base) + offset)
+  LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
+  LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
+  ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+                                            ManagedRegister scratch ATTRIBUTE_UNUSED) {
+  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
+                                                    FrameOffset handle_scope_offs,
+                                                    ManagedRegister m_in_reg,
+                                                    bool null_allowed) {
+  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+  // For now we only hold stale handle scope entries in x registers.
+  CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
+  CHECK(out_reg.IsXRegister()) << out_reg;
+  if (null_allowed) {
+    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
+    // the address in the handle scope holding the reference.
+    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+    if (in_reg.IsNoRegister()) {
+      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
+                      handle_scope_offs.Int32Value());
+      in_reg = out_reg;
+    }
+    ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
+    if (!out_reg.Equals(in_reg)) {
+      LoadImmediate(out_reg.AsXRegister(), 0, eq);
+    }
+    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
+  } else {
+    AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
+  }
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+                                                    FrameOffset handle_scope_offset,
+                                                    ManagedRegister m_scratch,
+                                                    bool null_allowed) {
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(scratch.IsXRegister()) << scratch;
+  if (null_allowed) {
+    LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
+                    handle_scope_offset.Int32Value());
+    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
+    // the address in the handle scope holding the reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+    ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
+    // TODO: Move this logic into the flag-aware AddConstant() variants.
+    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
+  } else {
+    AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
+  }
+  StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
+                                                          ManagedRegister m_in_reg) {
+  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+  CHECK(out_reg.IsXRegister()) << out_reg;
+  CHECK(in_reg.IsXRegister()) << in_reg;
+  vixl::aarch64::Label exit;
+  if (!out_reg.Equals(in_reg)) {
+    // FIXME: Who sets the flags here?
+    LoadImmediate(out_reg.AsXRegister(), 0, eq);
+  }
+  ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
+  LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
+  ___ Bind(&exit);
+}
+
+void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+  CHECK_ALIGNED(stack_adjust, kStackAlignment);
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
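+  // Queue the out-of-line slow path; its code is emitted later by EmitExceptionPoll().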
+  exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
+  LoadFromOffset(scratch.AsXRegister(),
+                 TR,
+                 Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
+  ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
+}
+
+void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception* exception) {
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
+  Register temp = temps.AcquireX();
+
+  // Bind exception poll entry.
+  ___ Bind(exception->Entry());
+  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
+    DecreaseFrameSize(exception->stack_adjust_);
+  }
+  // Pass exception object as argument.
+  // Don't care about preserving X0 as this won't return.
+  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
+  ___ Ldr(temp,
+          MEM_OP(reg_x(TR),
+                 QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
+
+  ___ Blr(temp);
+  // Call should never return.
+  ___ Brk();
+}
+
+void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
+                                        ManagedRegister method_reg,
+                                        ArrayRef<const ManagedRegister> callee_save_regs,
+                                        const ManagedRegisterEntrySpills& entry_spills) {
+  // Setup VIXL CPURegList for callee-saves.
+  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+  for (auto r : callee_save_regs) {
+    Arm64ManagedRegister reg = r.AsArm64();
+    if (reg.IsXRegister()) {
+      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+    } else {
+      DCHECK(reg.IsDRegister());
+      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+    }
+  }
+  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+  // Increase frame to required size.
+  DCHECK_ALIGNED(frame_size, kStackAlignment);
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+  IncreaseFrameSize(frame_size);
+
+  // Save callee-saves.
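+  // Core registers are spilled at the top of the frame, FP registers immediately below them.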
+  asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
+  asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+  // Write ArtMethod*
+  DCHECK(X0 == method_reg.AsArm64().AsXRegister());
+  StoreToOffset(X0, SP, 0);
+
+  // Write out entry spills
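+  // Spills go above the new frame, starting one pointer size past its top; each spill
+  // advances the offset by its size.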
+  int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
+  for (size_t i = 0; i < entry_spills.size(); ++i) {
+    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
+    if (reg.IsNoRegister()) {
+      // Only increment the stack offset.
+      ManagedRegisterSpill spill = entry_spills.at(i);
+      offset += spill.getSize();
+    } else if (reg.IsXRegister()) {
+      StoreToOffset(reg.AsXRegister(), SP, offset);
+      offset += 8;
+    } else if (reg.IsWRegister()) {
+      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
+      offset += 4;
+    } else if (reg.IsDRegister()) {
+      StoreDToOffset(reg.AsDRegister(), SP, offset);
+      offset += 8;
+    } else if (reg.IsSRegister()) {
+      StoreSToOffset(reg.AsSRegister(), SP, offset);
+      offset += 4;
+    }
+  }
+}
+
+void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+                                         ArrayRef<const ManagedRegister> callee_save_regs) {
+  // Setup VIXL CPURegList for callee-saves.
+  CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+  for (auto r : callee_save_regs) {
+    Arm64ManagedRegister reg = r.AsArm64();
+    if (reg.IsXRegister()) {
+      core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+    } else {
+      DCHECK(reg.IsDRegister());
+      fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+    }
+  }
+  size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+  size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+  // For now we only check that the size of the frame is large enough to hold spills and method
+  // reference.
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+  DCHECK_ALIGNED(frame_size, kStackAlignment);
+
+  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+  cfi().RememberState();
+
+  // Restore callee-saves.
+  asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
+  asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+  // Decrease frame size to start of callee saved regs.
+  DecreaseFrameSize(frame_size);
+
+  // Pop callee saved and return to LR.
+  ___ Ret();
+
+  // The CFI should be restored for any code that follows the exit block.
+  cfi().RestoreState();
+  cfi().DefCFAOffset(frame_size);
+}
+
+#undef ___
+
+}  // namespace arm64
+}  // namespace art
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
new file mode 100644
index 0000000..79ee441
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+#define ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+
+#include <stdint.h>
+#include <memory>
+#include <vector>
+
+#include "assembler_arm64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/logging.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
+
+namespace art {
+namespace arm64 {
+
+class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+ public:
+  explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
+      : JNIMacroAssemblerFwd(arena),
+        exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+
+  ~Arm64JNIMacroAssembler();
+
+  // Finalize the code.
+  void FinalizeCode() OVERRIDE;
+
+  // Emit code that will create an activation on the stack.
+  void BuildFrame(size_t frame_size,
+                  ManagedRegister method_reg,
+                  ArrayRef<const ManagedRegister> callee_save_regs,
+                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+  // Emit code that will remove an activation from the stack.
+  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+      OVERRIDE;
+
+  void IncreaseFrameSize(size_t adjust) OVERRIDE;
+  void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+  // Store routines.
+  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+                                FrameOffset fr_offs,
+                                ManagedRegister scratch) OVERRIDE;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+  void StoreSpanning(FrameOffset dest,
+                     ManagedRegister src,
+                     FrameOffset in_off,
+                     ManagedRegister scratch) OVERRIDE;
+
+  // Load routines.
+  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+  void LoadRef(ManagedRegister dest,
+               ManagedRegister base,
+               MemberOffset offs,
+               bool unpoison_reference) OVERRIDE;
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+  // Copying routines.
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+  void CopyRawPtrFromThread(FrameOffset fr_offs,
+                            ThreadOffset64 thr_offs,
+                            ManagedRegister scratch) OVERRIDE;
+  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+      OVERRIDE;
+  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+  void Copy(FrameOffset dest,
+            ManagedRegister src_base,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+  void Copy(ManagedRegister dest_base,
+            Offset dest_offset,
+            FrameOffset src,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+  void Copy(FrameOffset dest,
+            FrameOffset src_base,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+  void Copy(ManagedRegister dest,
+            Offset dest_offset,
+            ManagedRegister src,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+  void Copy(FrameOffset dest,
+            Offset dest_offset,
+            FrameOffset src,
+            Offset src_offset,
+            ManagedRegister scratch,
+            size_t size) OVERRIDE;
+  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+  // Sign extension.
+  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+  // Zero extension.
+  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+  // Exploit fast access in managed code to Thread::Current().
+  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the handle scope entry to see if the value is
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg,
+                              FrameOffset handlescope_offset,
+                              ManagedRegister in_reg,
+                              bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed.
+  void CreateHandleScopeEntry(FrameOffset out_off,
+                              FrameOffset handlescope_offset,
+                              ManagedRegister scratch,
+                              bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load it into dst.
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src may not be null.
+  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+  // Call to address held at [base+offset].
+  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to the exception slow path if it is.
+  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+  class Arm64Exception {
+   public:
+    Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+        : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+    vixl::aarch64::Label* Entry() { return &exception_entry_; }
+
+    // Register used for passing Thread::Current()->exception_.
+    const Arm64ManagedRegister scratch_;
+
+    // Stack adjustment for ExceptionPoll.
+    const size_t stack_adjust_;
+
+    vixl::aarch64::Label exception_entry_;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+  };
+
+  // Emits the exception poll slow-path block.
+  void EmitExceptionPoll(Arm64Exception* exception);
+
+  void StoreWToOffset(StoreOperandType type,
+                      WRegister source,
+                      XRegister base,
+                      int32_t offset);
+  void StoreToOffset(XRegister source, XRegister base, int32_t offset);
+  void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
+  void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
+
+  void LoadImmediate(XRegister dest,
+                     int32_t value,
+                     vixl::aarch64::Condition cond = vixl::aarch64::al);
+  void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
+  void LoadWFromOffset(LoadOperandType type,
+                       WRegister dest,
+                       XRegister base,
+                       int32_t offset);
+  void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
+  void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
+  void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+  void AddConstant(XRegister rd,
+                   int32_t value,
+                   vixl::aarch64::Condition cond = vixl::aarch64::al);
+  void AddConstant(XRegister rd,
+                   XRegister rn,
+                   int32_t value,
+                   vixl::aarch64::Condition cond = vixl::aarch64::al);
+
+  // List of exception blocks to generate at the end of the code cache.
+  ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
+};
+
+}  // namespace arm64
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 9b16b98..797a98c 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -23,7 +23,7 @@
 #include "arm/jni_macro_assembler_arm.h"
 #endif
 #ifdef ART_ENABLE_CODEGEN_arm64
-#include "arm64/assembler_arm64.h"
+#include "arm64/jni_macro_assembler_arm64.h"
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips
 #include "mips/assembler_mips.h"
@@ -88,7 +88,7 @@
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm64
     case kArm64:
-      return MacroAsm64UniquePtr(new (arena) arm64::Arm64Assembler(arena));
+      return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
 #endif
 #ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64:
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 415bb71..439f8d4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1788,7 +1788,20 @@
 ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
 
 // Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+// Comment out allocators that have arm64-specific asm.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB) implemented in asm
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
 
 // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
 ENTRY art_quick_alloc_object_rosalloc
@@ -1895,6 +1908,71 @@
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
 END art_quick_alloc_object_rosalloc
 
+
+// The common fast path code for art_quick_alloc_array_region_tlab.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+    // Check null class
+    cbz    \wClass, \slowPathLabel
+    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED \slowPathLabel, \xClass, \wClass, \xCount, \wCount, \xTemp0, \wTemp0, \xTemp1, \wTemp1, \xTemp2, \wTemp2
+.endm
+
+// The common fast path code for art_quick_alloc_array_region_tlab.
+.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
+    // Array classes are never finalizable or uninitialized, no need to check.
+    ldr    \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Load component type
+    UNPOISON_HEAP_REF \wTemp0
+    ldr    \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
+    lsr    \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT // Component size shift is in high 16
+                                                              // bits.
+                                                              // xCount holds a 32-bit value;
+                                                              // it cannot overflow.
+    lsl    \xTemp1, \xCount, \xTemp0                          // Calculate data size
+    // Add array data offset and alignment.
+    add    \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
+#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
+#error Long array data offset must be 4 greater than int array data offset.
+#endif
+
+    add    \xTemp0, \xTemp0, #1                               // Add 4 to the length only if the
+                                                              // component size shift is 3
+                                                              // (for 64 bit alignment).
+    and    \xTemp0, \xTemp0, #4
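+    // ((shift + 1) & 4) is non-zero only when the component size shift is 3 (8-byte elements).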
+    add    \xTemp1, \xTemp1, \xTemp0
+    and    \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED   // Round up the object size by the
+                                                              // object alignment. (addr + 7) & ~7.
+                                                              // Add by 7 is done above.
+
+    cmp    \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD               // Possibly a large object, go slow
+    bhs    \slowPathLabel                                     // path.
+
+    ldr    \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET]         // Check tlab for space, note that
+                                                              // we use (end - begin) to handle
+                                                              // negative size arrays. It is
+                                                              // assumed that a negative size will
+                                                              // always be greater unsigned than
+                                                              // region size.
+    ldr    \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
+    sub    \xTemp2, \xTemp2, \xTemp0
+    cmp    \xTemp1, \xTemp2
+    bhi    \slowPathLabel
+
+    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
+                                                              // Move old thread_local_pos to x0
+                                                              // for the return value.
+    mov    x0, \xTemp0
+    add    \xTemp0, \xTemp0, \xTemp1
+    str    \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET]         // Store new thread_local_pos.
+    ldr    \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]     // Increment thread_local_objects.
+    add    \xTemp0, \xTemp0, #1
+    str    \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
+    POISON_HEAP_REF \wClass
+    str    \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET]         // Store the class pointer.
+    str    \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]         // Store the array length.
+                                                              // Fence.
+    dmb    ishst
+    ret
+.endm
+
 // The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
 //
 // x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
@@ -1902,8 +1980,11 @@
 // Need to preserve x0 and x1 to the slow path.
 .macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
     cbz    x2, \slowPathLabel                                 // Check null class
-                                                              // Check class status.
-    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
+    ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
+.endm
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
+    ldr    w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]              // Check class status.
     cmp    x3, #MIRROR_CLASS_STATUS_INITIALIZED
     bne    \slowPathLabel
                                                               // Add a fake dependence from the
@@ -1916,6 +1997,10 @@
                                                               // a load-acquire for the status).
     eor    x3, x3, x3
     add    x2, x2, x3
+    ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
+.endm
+
+.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
                                                               // Check access flags has
                                                               // kAccClassIsFinalizable.
     ldr    w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
@@ -1977,32 +2062,37 @@
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
 END art_quick_alloc_object_tlab
 
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_region_tlab
+// The common code for art_quick_alloc_object_*region_tlab
+.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved
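+// name: generated entrypoint symbol, entrypoint: C++ slow-path runtime function,
+// fast_path: fast-path macro to expand, is_resolved: 1 if x0 already holds the resolved class.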
+ENTRY \name
     // Fast path region tlab allocation.
-    // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+    // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
+    // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
     // x2-x7: free.
 #if !defined(USE_READ_BARRIER)
     mvn    x0, xzr                                            // Read barrier must be enabled here.
     ret                                                       // Return -1.
 #endif
+.if \is_resolved
+    mov    x2, x0 // class is actually stored in x0 already
+.else
     ldr    x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
                                                               // Load the class (x2)
     ldr    w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
-
+.endif
     // Most common case: GC is not marking.
     ldr    w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
-    cbnz   x3, .Lart_quick_alloc_object_region_tlab_marking
-.Lart_quick_alloc_object_region_tlab_do_allocation:
-    ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_marking:
+    cbnz   x3, .Lmarking\name
+.Ldo_allocation\name:
+    \fast_path .Lslow_path\name
+.Lmarking\name:
     // GC is marking, check the lock word of the class for the mark bit.
     // If the class is null, go slow path. The check is required to read the lock word.
-    cbz    w2, .Lart_quick_alloc_object_region_tlab_slow_path
+    cbz    w2, .Lslow_path\name
     // Class is not null, check mark bit in lock word.
     ldr    w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
     // If the bit is not zero, do the allocation.
-    tbnz    w3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_alloc_object_region_tlab_do_allocation
+    tbnz    w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
                                                               // The read barrier slow path. Mark
                                                               // the class.
     stp    x0, x1, [sp, #-32]!                                // Save registers (x0, x1, lr).
@@ -2013,14 +2103,79 @@
     ldp    x0, x1, [sp, #0]                                   // Restore registers.
     ldr    xLR, [sp, #16]
     add    sp, sp, #32
-    b      .Lart_quick_alloc_object_region_tlab_do_allocation
-.Lart_quick_alloc_object_region_tlab_slow_path:
+    b      .Ldo_allocation\name
+.Lslow_path\name:
     SETUP_REFS_ONLY_CALLEE_SAVE_FRAME          // Save callee saves in case of GC.
     mov    x2, xSELF                           // Pass Thread::Current.
-    bl     artAllocObjectFromCodeRegionTLAB    // (uint32_t type_idx, Method* method, Thread*)
+    bl     \entrypoint    // (uint32_t type_idx, Method* method, Thread*)
     RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
     RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_region_tlab
+END \name
+.endm
+
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH, 0
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1
+
+// The common code for art_quick_alloc_array_*region_tlab
+.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved
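+// name: generated entrypoint symbol, entrypoint: C++ slow-path runtime function,
+// fast_path: fast-path macro to expand, is_resolved: 1 if x0 already holds the resolved class
+// (the component count is in x1, ArtMethod* in x2).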
+ENTRY \name
+    // Fast path array allocation for region tlab allocation.
+    // x0: uint32_t type_idx
+    // x1: int32_t component_count
+    // x2: ArtMethod* method
+    // x3-x7: free.
+#if !defined(USE_READ_BARRIER)
+    mvn    x0, xzr                                            // Read barrier must be enabled here.
+    ret                                                       // Return -1.
+#endif
+.if \is_resolved
+    mov    x3, x0
+    // If already resolved, class is stored in x0
+.else
+    ldr    x3, [x2, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64]    // Load dex cache resolved types array
+                                                              // Load the class (x3)
+    ldr    w3, [x3, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+.endif
+    // Most common case: GC is not marking.
+    ldr    w4, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+    cbnz   x4, .Lmarking\name
+.Ldo_allocation\name:
+    \fast_path .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
+.Lmarking\name:
+    // GC is marking, check the lock word of the class for the mark bit.
+    // If the class is null, go slow path. The check is required to read the lock word.
+    cbz    w3, .Lslow_path\name
+    // Class is not null, check mark bit in lock word.
+    ldr    w4, [x3, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+    // If the bit is not zero, do the allocation.
+    tbnz   w4, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
+                                                              // The read barrier slow path. Mark
+                                                              // the class.
+    stp    x0, x1, [sp, #-32]!                                // Save registers (x0, x1, x2, lr).
+    stp    x2, xLR, [sp, #16]
+    mov    x0, x3                                             // Pass the class as the first param.
+    bl     artReadBarrierMark
+    mov    x3, x0                                             // Get the (marked) class back.
+    ldp    x2, xLR, [sp, #16]
+    ldp    x0, x1, [sp], #32                                  // Restore registers.
+    b      .Ldo_allocation\name
+.Lslow_path\name:
+    // x0: uint32_t type_idx / mirror::Class* klass (if resolved)
+    // x1: int32_t component_count
+    // x2: ArtMethod* method
+    // x3: Thread* self
+    SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
+    mov    x3, xSELF                  // pass Thread::Current
+    bl     \entrypoint
+    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \name
+.endm
+
+GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_region_tlab, artAllocArrayFromCodeRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH, 0
+// TODO: art_quick_alloc_array_resolved_region_tlab seems to not get called. Investigate compiler.
+GENERATE_ALLOC_ARRAY_REGION_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, 1
 
     /*
      * Called by managed code when the thread has been asked to suspend.
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index b926bdf..9b24128 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -352,8 +352,11 @@
 
     // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
     bal 1f
+    .set push
+    .set noat
     sw     $at, 144($sp)
     .cfi_rel_offset 1, 144
+    .set pop
 1:
     .cpload $ra
 
@@ -460,8 +463,11 @@
     .cfi_restore 3
     lw     $v0, 148($sp)
     .cfi_restore 2
+    .set push
+    .set noat
     lw     $at, 144($sp)
     .cfi_restore 1
+    .set pop
 
     addiu  $sp, $sp, 256            # pop frame
     .cfi_adjust_cfa_offset -256
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 0a37909..3469de2 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -334,8 +334,6 @@
     .cfi_rel_offset 31, 488
     sd     $s8, 480($sp)
     .cfi_rel_offset 30, 480
-    sd     $gp, 472($sp)
-    .cfi_rel_offset 28, 472
     sd     $t9, 464($sp)
     .cfi_rel_offset 25, 464
     sd     $t8, 456($sp)
@@ -387,11 +385,13 @@
 
     // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
     bal 1f
+    .set push
+    .set noat
     sd     $at,  272($sp)
     .cfi_rel_offset 1, 272
+    .set pop
 1:
-    // TODO: Can we avoid the unnecessary move $t8<-$gp?
-    .cpsetup $ra, $t8, 1b
+    .cpsetup $ra, 472, 1b
 
     // Save FP registers.
     s.d    $f31, 264($sp)
@@ -472,12 +472,11 @@
     l.d    $f0, 16($sp)
 
     // Restore core registers.
+    .cpreturn
     ld     $ra, 488($sp)
     .cfi_restore 31
     ld     $s8, 480($sp)
     .cfi_restore 30
-    ld     $gp, 472($sp)
-    .cfi_restore 28
     ld     $t9, 464($sp)
     .cfi_restore 25
     ld     $t8, 456($sp)
@@ -526,10 +525,12 @@
     .cfi_restore 3
     ld     $v0,  280($sp)
     .cfi_restore 2
+    .set push
+    .set noat
     ld     $at,  272($sp)
     .cfi_restore 1
+    .set pop
 
-    .cpreturn
     daddiu $sp, $sp, 496
     .cfi_adjust_cfa_offset -496
 .endm
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 290769b..fa86bf4 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -87,6 +87,27 @@
   ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
 
 .macro GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+.endm
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
@@ -219,20 +240,6 @@
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
 
-// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
 GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 0619af8..d4cee44 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -20,6 +20,7 @@
 #if defined(__cplusplus)
 #include "art_method.h"
 #include "gc/allocator/rosalloc.h"
+#include "gc/heap.h"
 #include "jit/jit.h"
 #include "lock_word.h"
 #include "mirror/class.h"
@@ -174,10 +175,17 @@
 #define MIRROR_CLASS_OBJECT_SIZE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
 ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
             art::mirror::Class::ObjectSizeOffset().Int32Value())
+#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
+            art::mirror::Class::PrimitiveTypeOffset().Int32Value())
 #define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
 ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
             art::mirror::Class::StatusOffset().Int32Value())
 
+#define PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT 16
+ADD_TEST_EQ(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
+            static_cast<int>(art::mirror::Class::kPrimitiveTypeSizeShiftShift))
+
 // Array offsets.
 #define MIRROR_ARRAY_LENGTH_OFFSET      MIRROR_OBJECT_HEADER_SIZE
 ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h
index 19ad302..32283d0 100644
--- a/runtime/base/array_slice.h
+++ b/runtime/base/array_slice.h
@@ -129,6 +129,10 @@
     return element_size_;
   }
 
+  bool Contains(const T* element) const {
+    return &AtUnchecked(0) <= element && element < &AtUnchecked(size_);
+  }
+
  private:
   T& AtUnchecked(size_t index) {
     return *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array_) + index * element_size_);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3d7624d..a4e05bd 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -88,7 +88,6 @@
   kTracingUniqueMethodsLock,
   kTracingStreamingLock,
   kDeoptimizedMethodsLock,
-  kJitCodeCacheLock,
   kClassLoaderClassesLock,
   kDefaultMutexLevel,
   kMarkSweepLargeObjectLock,
@@ -99,6 +98,7 @@
   kMonitorPoolLock,
   kMethodVerifiersLock,
   kClassLinkerClassesLock,  // TODO rename.
+  kJitCodeCacheLock,
   kBreakpointLock,
   kMonitorLock,
   kMonitorListLock,
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 6f0e125..48e3ceb 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -132,14 +132,14 @@
 }
 
 bool FdFile::Open(const std::string& path, int flags, mode_t mode) {
+  static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
   CHECK_EQ(fd_, -1) << path;
-  read_only_mode_ = (flags & O_RDONLY) != 0;
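+  // O_RDONLY is 0, so mask with O_ACCMODE and compare rather than testing a bit.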
+  read_only_mode_ = ((flags & O_ACCMODE) == O_RDONLY);
   fd_ = TEMP_FAILURE_RETRY(open(path.c_str(), flags, mode));
   if (fd_ == -1) {
     return false;
   }
   file_path_ = path;
-  static_assert(O_RDONLY == 0, "Readonly flag has unexpected value.");
   if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
     // Start in the base state (not flushed, not closed).
     guard_state_ = GuardState::kBase;
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index db3a44f..99ef6f7 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -53,12 +53,14 @@
   ASSERT_TRUE(file.IsOpened());
   EXPECT_GE(file.Fd(), 0);
   EXPECT_TRUE(file.IsOpened());
+  EXPECT_FALSE(file.ReadOnlyMode());
   EXPECT_EQ(0, file.Flush());
   EXPECT_EQ(0, file.Close());
   EXPECT_EQ(-1, file.Fd());
   EXPECT_FALSE(file.IsOpened());
-  FdFile file2(good_path,  O_RDONLY, true);
+  FdFile file2(good_path, O_RDONLY, true);
   EXPECT_TRUE(file2.IsOpened());
+  EXPECT_TRUE(file2.ReadOnlyMode());
   EXPECT_GE(file2.Fd(), 0);
 
   ASSERT_EQ(file2.Close(), 0);
@@ -70,6 +72,7 @@
   art::ScratchFile tmp;
   FdFile file(tmp.GetFilename(), O_RDONLY, false);
   ASSERT_TRUE(file.IsOpened());
+  EXPECT_TRUE(file.ReadOnlyMode());
   EXPECT_GE(file.Fd(), 0);
   uint8_t buffer[16];
   EXPECT_FALSE(file.ReadFully(&buffer, 4));
@@ -86,6 +89,7 @@
   FdFile file(tmp.GetFilename(), O_RDWR, false);
   ASSERT_TRUE(file.IsOpened());
   EXPECT_GE(file.Fd(), 0);
+  EXPECT_FALSE(file.ReadOnlyMode());
 
   char ignore_prefix[20] = {'a', };
   NullTerminateCharArray(ignore_prefix);
@@ -114,6 +118,7 @@
   FdFile file(tmp.GetFilename(), O_RDWR, false);
   ASSERT_GE(file.Fd(), 0);
   EXPECT_TRUE(file.IsOpened());
+  EXPECT_FALSE(file.ReadOnlyMode());
 
   const char* test_string = "This is a test string";
   size_t length = strlen(test_string) + 1;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3c64c81..46722ec 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1501,11 +1501,8 @@
     SHARED_REQUIRES(Locks::mutator_lock_) {
   DCHECK(error_msg != nullptr);
   std::unique_ptr<const DexFile> dex_file;
-  const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr);
+  const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr, error_msg);
   if (oat_dex_file == nullptr) {
-    *error_msg = StringPrintf("Failed finding oat dex file for %s %s",
-                              oat_file->GetLocation().c_str(),
-                              location);
     return std::unique_ptr<const DexFile>();
   }
   std::string inner_error_msg;
@@ -3563,32 +3560,40 @@
     }
     LOG(INFO) << "Loaded class " << descriptor << source;
   }
-  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  mirror::ClassLoader* const class_loader = klass->GetClassLoader();
-  ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
-  mirror::Class* existing = class_table->Lookup(descriptor, hash);
-  if (existing != nullptr) {
-    return existing;
-  }
-  if (kIsDebugBuild &&
-      !klass->IsTemp() &&
-      class_loader == nullptr &&
-      dex_cache_boot_image_class_lookup_required_) {
-    // Check a class loaded with the system class loader matches one in the image if the class
-    // is in the image.
-    existing = LookupClassFromBootImage(descriptor);
+  {
+    WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+    mirror::ClassLoader* const class_loader = klass->GetClassLoader();
+    ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
+    mirror::Class* existing = class_table->Lookup(descriptor, hash);
     if (existing != nullptr) {
-      CHECK_EQ(klass, existing);
+      return existing;
+    }
+    if (kIsDebugBuild &&
+        !klass->IsTemp() &&
+        class_loader == nullptr &&
+        dex_cache_boot_image_class_lookup_required_) {
+      // Check a class loaded with the system class loader matches one in the image if the class
+      // is in the image.
+      existing = LookupClassFromBootImage(descriptor);
+      if (existing != nullptr) {
+        CHECK_EQ(klass, existing);
+      }
+    }
+    VerifyObject(klass);
+    class_table->InsertWithHash(klass, hash);
+    if (class_loader != nullptr) {
+      // This is necessary because we need to have the card dirtied for remembered sets.
+      Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+    }
+    if (log_new_class_table_roots_) {
+      new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
     }
   }
-  VerifyObject(klass);
-  class_table->InsertWithHash(klass, hash);
-  if (class_loader != nullptr) {
-    // This is necessary because we need to have the card dirtied for remembered sets.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
-  }
-  if (log_new_class_table_roots_) {
-    new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
+  if (kIsDebugBuild) {
+    // Check that copied methods can correctly find their holding class.
+    for (ArtMethod& method : klass->GetCopiedMethods(image_pointer_size_)) {
+      CHECK_EQ(GetHoldingClassOfCopiedMethod(&method), klass);
+    }
   }
   return nullptr;
 }
@@ -8108,19 +8113,27 @@
 
 void ClassLinker::CleanupClassLoaders() {
   Thread* const self = Thread::Current();
-  WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
-  for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
-    const ClassLoaderData& data = *it;
-    // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
-    auto* const class_loader = down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
-    if (class_loader != nullptr) {
-      ++it;
-    } else {
-      VLOG(class_linker) << "Freeing class loader";
-      DeleteClassLoader(self, data);
-      it = class_loaders_.erase(it);
+  std::vector<ClassLoaderData> to_delete;
+  // Do the deletion outside the lock to avoid a lock-order violation in the JIT code cache.
+  {
+    WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+    for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
+      const ClassLoaderData& data = *it;
+      // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+      auto* const class_loader =
+          down_cast<mirror::ClassLoader*>(self->DecodeJObject(data.weak_root));
+      if (class_loader != nullptr) {
+        ++it;
+      } else {
+        VLOG(class_linker) << "Freeing class loader";
+        to_delete.push_back(data);
+        it = class_loaders_.erase(it);
+      }
     }
   }
+  for (ClassLoaderData& data : to_delete) {
+    DeleteClassLoader(self, data);
+  }
 }
 
 std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
@@ -8239,6 +8252,33 @@
   return ret;
 }
 
+class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
+ public:
+  FindVirtualMethodHolderVisitor(const ArtMethod* method, PointerSize pointer_size)
+      : method_(method),
+        pointer_size_(pointer_size) {}
+
+  bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+    if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
+      holder_ = klass;
+    }
+    // Return false to stop searching if holder_ is not null.
+    return holder_ == nullptr;
+  }
+
+  mirror::Class* holder_ = nullptr;
+  const ArtMethod* const method_;
+  const PointerSize pointer_size_;
+};
+
+mirror::Class* ClassLinker::GetHoldingClassOfCopiedMethod(ArtMethod* method) {
+  ScopedTrace trace(__FUNCTION__);  // Since this function is slow, have a trace to notify people.
+  CHECK(method->IsCopied());
+  FindVirtualMethodHolderVisitor visitor(method, image_pointer_size_);
+  VisitClasses(&visitor);
+  return visitor.holder_;
+}
+
 // Instantiate ResolveMethod.
 template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::kForceICCECheck>(
     const DexFile& dex_file,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index fcc6b23..c3ab8c5 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -648,6 +648,10 @@
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!dex_lock_);
 
+  // Get the actual holding class for a copied method. Pretty slow, don't call often.
+  mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   struct DexCacheData {
     // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
     // not work properly.
@@ -676,7 +680,6 @@
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
-      REQUIRES(Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   void VisitClassLoaders(ClassLoaderVisitor* visitor) const
@@ -1168,6 +1171,7 @@
   // Image pointer size.
   PointerSize image_pointer_size_;
 
+  class FindVirtualMethodHolderVisitor;
   friend class ImageDumper;  // for DexLock
   friend class ImageWriter;  // for GetClassRoots
   friend class JniCompilerTest;  // for GetRuntimeQuickGenericJniStub
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5485cd2..88fbf78 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -257,6 +257,7 @@
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
+  CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
   ScopedTrace trace(__FUNCTION__);
   Runtime* const runtime = Runtime::Current();
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index bb0d11a..be8ed40 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -132,7 +132,8 @@
   static constexpr double kDefaultTargetUtilization = 0.5;
   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
   // Primitive arrays larger than this size are put in the large object space.
-  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
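+  // The assembly allocation fast paths compare against this minimum; Heap() checks that the
+  // configured threshold is never smaller.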
+  static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
+  static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
   // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
   static constexpr bool kDefaultEnableParallelGC = false;
 
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 3734bcc..0304d0d 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -195,7 +195,8 @@
     return root_.IsNull();
   }
 
-  ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_);
+  ALWAYS_INLINE GcRoot() {}
+  explicit ALWAYS_INLINE GcRoot(MirrorType* ref) SHARED_REQUIRES(Locks::mutator_lock_);
 
  private:
   // Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index c66029d..3d3cc4e 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -70,6 +70,8 @@
 DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
 #define ART_METHOD_QUICK_CODE_OFFSET_64 48
 DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod:: EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
+#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
+DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
 #define LOCK_WORD_STATE_SHIFT 30
 DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
 #define LOCK_WORD_STATE_MASK 0xc0000000
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d52030f..cff2354 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -692,9 +692,6 @@
   DCHECK(this_object != nullptr);
   ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize);
   if (info != nullptr) {
-    // Since the instrumentation is marked from the declaring class we need to mark the card so
-    // that mod-union tables and card rescanning know about the update.
-    Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(caller->GetDeclaringClass());
     info->AddInvokeInfo(dex_pc, this_object->GetClass());
   }
 }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 6dc1578..1938221 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -146,7 +146,6 @@
   // Remove all methods in our cache that were allocated by 'alloc'.
   void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
       REQUIRES(!lock_)
-      REQUIRES(Locks::classlinker_classes_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 07c8051..216df2f 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -25,10 +25,33 @@
 
 namespace art {
 
+ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
+      : number_of_inline_caches_(entries.size()),
+        method_(method),
+        is_method_being_compiled_(false),
+        is_osr_method_being_compiled_(false),
+        current_inline_uses_(0),
+        saved_entry_point_(nullptr) {
+  memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
+  for (size_t i = 0; i < number_of_inline_caches_; ++i) {
+    cache_[i].dex_pc_ = entries[i];
+  }
+  if (method->IsCopied()) {
+    // GetHoldingClassOfCopiedMethod is expensive, but creating a profiling info for a copied method
+    // appears to happen very rarely in practice.
+    holding_class_ = GcRoot<mirror::Class>(
+        Runtime::Current()->GetClassLinker()->GetHoldingClassOfCopiedMethod(method));
+  } else {
+    holding_class_ = GcRoot<mirror::Class>(method->GetDeclaringClass());
+  }
+  DCHECK(!holding_class_.IsNull());
+}
+
 bool ProfilingInfo::Create(Thread* self, ArtMethod* method, bool retry_allocation) {
   // Walk over the dex instructions of the method and keep track of
   // instructions we are interested in profiling.
   DCHECK(!method->IsNative());
+
   const DexFile::CodeItem& code_item = *method->GetCodeItem();
   const uint16_t* code_ptr = code_item.insns_;
   const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
@@ -93,6 +116,14 @@
         --i;
       } else {
         // We successfully set `cls`, just return.
+        // Since the instrumentation is marked from the declaring class, we need to mark the card
+        // so that mod-union tables and card rescanning know about the update.
+        // Note that the declaring class is not necessarily the holding class if the method is
+        // copied. The card mark needs to be on the holding class, since that is where the
+        // profiling info is visited from.
+        if (!holding_class_.IsNull()) {
+          Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(holding_class_.Read());
+        }
         return;
       }
     }
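
The card mark that jit.cc used to issue on the declaring class now happens here on the holding
class, because for copied methods the profiling info is reached through its holder. The sketch
below is a deliberately simplified, hypothetical model of a card-table write barrier (ART's real
card table and WriteBarrierEveryFieldOf differ in detail); it only illustrates why the card
covering the holding class must be dirtied after a reference is stored into the inline cache:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical card table: one dirty byte per kCardSize bytes of heap.
    constexpr size_t kCardSize = 128;

    class CardTable {
     public:
      explicit CardTable(size_t heap_bytes) : cards_(heap_bytes / kCardSize, 0) {}

      // Analogue of WriteBarrierEveryFieldOf(obj): dirty the card covering the object
      // so that mod-union tables / card rescanning revisit everything reachable from it.
      void MarkCardFor(uintptr_t obj_offset_in_heap) {
        cards_[obj_offset_in_heap / kCardSize] = 1;
      }

      bool IsDirty(uintptr_t obj_offset_in_heap) const {
        return cards_[obj_offset_in_heap / kCardSize] != 0;
      }

     private:
      std::vector<uint8_t> cards_;
    };
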
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index d04d2de..a890fbb 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -105,6 +105,7 @@
   // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
   template<typename RootVisitorType>
   void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS {
+    visitor.VisitRootIfNonNull(holding_class_.AddressWithoutBarrier());
     for (size_t i = 0; i < number_of_inline_caches_; ++i) {
       InlineCache* cache = &cache_[i];
       for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
@@ -166,18 +167,7 @@
   }
 
  private:
-  ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
-      : number_of_inline_caches_(entries.size()),
-        method_(method),
-        is_method_being_compiled_(false),
-        is_osr_method_being_compiled_(false),
-        current_inline_uses_(0),
-        saved_entry_point_(nullptr) {
-    memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache));
-    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
-      cache_[i].dex_pc_ = entries[i];
-    }
-  }
+  ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries);
 
   // Number of instructions we are profiling in the ArtMethod.
   const uint32_t number_of_inline_caches_;
@@ -185,6 +175,9 @@
   // Method this profiling info is for.
   ArtMethod* const method_;
 
+  // Holding class for the method; differs from the declaring class when the method is copied.
+  GcRoot<mirror::Class> holding_class_;
+
   // Whether the ArtMethod is currently being compiled. This flag
   // is implicitly guarded by the JIT code cache lock.
   // TODO: Make the JIT code cache lock global.
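
VisitRoots now reports holding_class_ (guarded by VisitRootIfNonNull) ahead of the inline-cache
entries, so the GC keeps the holder alive and can fix up the reference if it moves. A standalone
sketch of that visitor shape, with placeholder types instead of the real RootVisitor machinery:

    #include <functional>
    #include <vector>

    struct Class {};

    // Simplified ProfilingInfo-like holder: one class root plus cached classes.
    struct Info {
      Class* holding_class = nullptr;
      std::vector<Class*> inline_cache;

      // Models VisitRoots: hand the GC the address of every reference we own; the
      // nullable ones are guarded the same way VisitRootIfNonNull guards holding_class_.
      void VisitRoots(const std::function<void(Class**)>& visit_root) {
        if (holding_class != nullptr) {
          visit_root(&holding_class);
        }
        for (Class*& cached : inline_cache) {
          if (cached != nullptr) {
            visit_root(&cached);
          }
        }
      }
    };
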
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 8f5419c..8ad47eb 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -636,8 +636,9 @@
   static_assert(sizeof(Primitive::Type) == sizeof(int32_t),
                 "art::Primitive::Type and int32_t have different sizes.");
   int32_t v32 = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_));
-  Primitive::Type type = static_cast<Primitive::Type>(v32 & 0xFFFF);
-  DCHECK_EQ(static_cast<size_t>(v32 >> 16), Primitive::ComponentSizeShift(type));
+  Primitive::Type type = static_cast<Primitive::Type>(v32 & kPrimitiveTypeMask);
+  DCHECK_EQ(static_cast<size_t>(v32 >> kPrimitiveTypeSizeShiftShift),
+            Primitive::ComponentSizeShift(type));
   return type;
 }
 
@@ -646,8 +647,9 @@
   static_assert(sizeof(Primitive::Type) == sizeof(int32_t),
                 "art::Primitive::Type and int32_t have different sizes.");
   int32_t v32 = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_));
-  size_t size_shift = static_cast<Primitive::Type>(v32 >> 16);
-  DCHECK_EQ(size_shift, Primitive::ComponentSizeShift(static_cast<Primitive::Type>(v32 & 0xFFFF)));
+  size_t size_shift = static_cast<Primitive::Type>(v32 >> kPrimitiveTypeSizeShiftShift);
+  DCHECK_EQ(size_shift,
+            Primitive::ComponentSizeShift(static_cast<Primitive::Type>(v32 & kPrimitiveTypeMask)));
   return size_shift;
 }
 
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 5c490de..8f6ce44 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -64,6 +64,12 @@
   // 2 ref instance fields.]
   static constexpr uint32_t kClassWalkSuper = 0xC0000000;
 
+  // Shift the primitive type by kPrimitiveTypeSizeShiftShift to get the component type size shift.
+  // This is used for computing array sizes as follows:
+  // array_bytes = header_size + (elements << (primitive_type >> kPrimitiveTypeSizeShiftShift))
+  static constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
+  static constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;
+
   // Class Status
   //
   // kStatusRetired: Class that's temporarily used till class linking time
@@ -371,10 +377,10 @@
 
   void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
     DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
-    int32_t v32 = static_cast<int32_t>(new_type);
-    DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero";
+    uint32_t v32 = static_cast<uint32_t>(new_type);
+    DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
     // Store the component size shift in the upper 16 bits.
-    v32 |= Primitive::ComponentSizeShift(new_type) << 16;
+    v32 |= Primitive::ComponentSizeShift(new_type) << kPrimitiveTypeSizeShiftShift;
     SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), v32);
   }
 
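
The literal 16 and 0xFFFF are replaced by kPrimitiveTypeSizeShiftShift and kPrimitiveTypeMask,
which name the packing of the primitive type (low 16 bits) and its component size shift (high
16 bits) into one 32-bit field. A worked, self-contained example of the encoding, using
illustrative values rather than ART's Primitive enum:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
    constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;

    int main() {
      // Illustrative values: type id 10 standing in for an int-like primitive,
      // whose elements are 4 bytes, i.e. component size shift 2.
      const uint32_t type = 10;
      const uint32_t size_shift = 2;

      // Pack: the shift goes in the upper 16 bits, the type id in the lower 16 bits.
      const uint32_t packed = (size_shift << kPrimitiveTypeSizeShiftShift) | type;

      // Unpack, mirroring GetPrimitiveType() and GetPrimitiveTypeSizeShift().
      assert((packed & kPrimitiveTypeMask) == type);
      assert((packed >> kPrimitiveTypeSizeShiftShift) == size_shift);

      // Array size computation described in the header comment:
      // array_bytes = header_size + (elements << size_shift).
      const uint32_t header_size = 12;
      const uint32_t elements = 100;
      const uint32_t array_bytes =
          header_size + (elements << (packed >> kPrimitiveTypeSizeShiftShift));
      assert(array_bytes == 12 + 400);
      return 0;
    }
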
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 68610a7..5752fd9 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1073,7 +1073,7 @@
 
 const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
                                                   const uint32_t* dex_location_checksum,
-                                                  bool warn_if_not_found) const {
+                                                  std::string* error_msg) const {
   // NOTE: We assume here that the canonical location for a given dex_location never
   // changes. If it does (i.e. some symlink used by the filename changes) we may return
   // an incorrect OatDexFile. As long as we have a checksum to check, we shall return
@@ -1115,32 +1115,29 @@
       secondary_oat_dex_files_.PutBefore(secondary_lb, key_copy, oat_dex_file);
     }
   }
-  if (oat_dex_file != nullptr &&
-      (dex_location_checksum == nullptr ||
-       oat_dex_file->GetDexFileLocationChecksum() == *dex_location_checksum)) {
-    return oat_dex_file;
+
+  if (oat_dex_file == nullptr) {
+    if (error_msg != nullptr) {
+      std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+      *error_msg = "Failed to find OatDexFile for DexFile " + std::string(dex_location)
+          + " (canonical path " + dex_canonical_location + ") in OatFile " + GetLocation();
+    }
+    return nullptr;
   }
 
-  if (warn_if_not_found) {
-    std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
-    std::string checksum("<unspecified>");
-    if (dex_location_checksum != nullptr) {
-      checksum = StringPrintf("0x%08x", *dex_location_checksum);
+  if (dex_location_checksum != nullptr &&
+      oat_dex_file->GetDexFileLocationChecksum() != *dex_location_checksum) {
+    if (error_msg != nullptr) {
+      std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+      std::string checksum = StringPrintf("0x%08x", oat_dex_file->GetDexFileLocationChecksum());
+      std::string required_checksum = StringPrintf("0x%08x", *dex_location_checksum);
+      *error_msg = "OatDexFile for DexFile " + std::string(dex_location)
+          + " (canonical path " + dex_canonical_location + ") in OatFile " + GetLocation()
+          + " has checksum " + checksum + " but " + required_checksum + " was required";
     }
-    LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
-                 << " ( canonical path " << dex_canonical_location << ")"
-                 << " with checksum " << checksum << " in OatFile " << GetLocation();
-    if (kIsDebugBuild) {
-      for (const OatDexFile* odf : oat_dex_files_storage_) {
-        LOG(WARNING) << "OatFile " << GetLocation()
-                     << " contains OatDexFile " << odf->GetDexFileLocation()
-                     << " (canonical path " << odf->GetCanonicalDexFileLocation() << ")"
-                     << " with checksum 0x" << std::hex << odf->GetDexFileLocationChecksum();
-      }
-    }
+    return nullptr;
   }
-
-  return nullptr;
+  return oat_dex_file;
 }
 
 OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index aa727ff..f5ab9dc 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -213,9 +213,15 @@
 
     friend class art::OatDexFile;
   };
+
+  // Get the OatDexFile for the given dex_location within this oat file.
+  // If dex_location_checksum is non-null, the OatDexFile will only be
+  // returned if it has a matching checksum.
+  // If error_msg is non-null and no OatDexFile is returned, error_msg will
+  // be updated with a description of why no OatDexFile was returned.
   const OatDexFile* GetOatDexFile(const char* dex_location,
                                   const uint32_t* const dex_location_checksum,
-                                  bool exception_if_not_found = true) const
+                                  /*out*/std::string* error_msg = nullptr) const
       REQUIRES(!secondary_lookup_lock_);
 
   const std::vector<const OatDexFile*>& GetOatDexFiles() const {
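
With the warn_if_not_found flag gone, GetOatDexFile no longer logs on failure; callers that want
diagnostics pass an out std::string and choose the severity themselves, as the
oat_file_assistant.cc changes below show. A self-contained sketch of that API shape (the lookup
body is elided and all names are placeholders):

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Standalone model of the new API shape: the lookup either returns a result or,
    // on failure, fills an optional out-parameter with the reason instead of logging.
    struct OatDexFile {};

    const OatDexFile* GetOatDexFile(const std::string& dex_location,
                                    const uint32_t* dex_location_checksum,
                                    /*out*/ std::string* error_msg = nullptr) {
      (void)dex_location_checksum;  // Checksum matching elided in this sketch.
      // ... lookup elided; pretend it failed ...
      if (error_msg != nullptr) {
        *error_msg = "Failed to find OatDexFile for DexFile " + dex_location;
      }
      return nullptr;
    }

    void Caller() {
      std::string error_msg;
      const OatDexFile* found = GetOatDexFile("/data/app/example.jar", nullptr, &error_msg);
      if (found == nullptr) {
        // The caller now picks the severity (LOG(WARNING), VLOG(oat), or silence).
        std::cerr << error_msg << std::endl;
      }
    }
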
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index fd58907..2c2a2b8 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -277,10 +277,9 @@
   // Load the primary dex file.
   std::string error_msg;
   const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(
-      dex_location, nullptr, false);
+      dex_location, nullptr, &error_msg);
   if (oat_dex_file == nullptr) {
-    LOG(WARNING) << "Attempt to load out-of-date oat file "
-      << oat_file.GetLocation() << " for dex location " << dex_location;
+    LOG(WARNING) << error_msg;
     return std::vector<std::unique_ptr<const DexFile>>();
   }
 
@@ -294,7 +293,7 @@
   // Load secondary multidex files
   for (size_t i = 1; ; i++) {
     std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location);
-    oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+    oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr);
     if (oat_dex_file == nullptr) {
       // There are no more secondary dex files to load.
       break;
@@ -389,25 +388,25 @@
   // Verify the dex checksum.
   // Note: GetOatDexFile will return null if the dex checksum doesn't match
   // what we provide, which verifies the primary dex checksum for us.
+  std::string error_msg;
   const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
   const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
-      dex_location_.c_str(), dex_checksum_pointer, false);
+      dex_location_.c_str(), dex_checksum_pointer, &error_msg);
   if (oat_dex_file == nullptr) {
+    VLOG(oat) << error_msg;
     return kOatOutOfDate;
   }
 
   // Verify the dex checksums for any secondary multidex files
   for (size_t i = 1; ; i++) {
-    std::string secondary_dex_location
-      = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
+    std::string secondary_dex_location = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
     const OatFile::OatDexFile* secondary_oat_dex_file
-      = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+      = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr);
     if (secondary_oat_dex_file == nullptr) {
       // There are no more secondary dex files to check.
       break;
     }
 
-    std::string error_msg;
     uint32_t expected_secondary_checksum = 0;
     if (DexFile::GetChecksum(secondary_dex_location.c_str(),
           &expected_secondary_checksum, &error_msg)) {
@@ -429,7 +428,6 @@
   }
 
   CompilerFilter::Filter current_compiler_filter = file.GetCompilerFilter();
-  VLOG(oat) << "Compiler filter for " << file.GetLocation() << " is " << current_compiler_filter;
 
   // Verify the image checksum
   if (CompilerFilter::DependsOnImageChecksum(current_compiler_filter)) {
@@ -760,8 +758,8 @@
       // Get the checksum from the odex if we can.
       const OatFile* odex_file = odex_.GetFile();
       if (odex_file != nullptr) {
-        const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
-            dex_location_.c_str(), nullptr, false);
+        const OatFile::OatDexFile* odex_dex_file
+            = odex_file->GetOatDexFile(dex_location_.c_str(), nullptr);
         if (odex_dex_file != nullptr) {
           cached_required_dex_checksum_ = odex_dex_file->GetDexFileLocationChecksum();
           required_dex_checksum_found_ = true;
@@ -867,6 +865,8 @@
       status_ = kOatOutOfDate;
     } else {
       status_ = oat_file_assistant_->GivenOatFileStatus(*file);
+      VLOG(oat) << file->GetLocation() << " is " << status_
+          << " with filter " << file->GetCompilerFilter();
     }
   }
   return status_;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 39848b4..05c5a22 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -320,6 +320,34 @@
   EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
 }
 
+// Case: We have a DEX file and ODEX file for a different dex location.
+// Expect: The status is kDex2OatNeeded.
+TEST_F(OatFileAssistantTest, OatForDifferentDex) {
+  // Generate an odex file for OatForDifferentDex_A.jar
+  std::string dex_location_a = GetScratchDir() + "/OatForDifferentDex_A.jar";
+  std::string odex_location = GetOdexDir() + "/OatForDifferentDex.odex";
+  Copy(GetDexSrc1(), dex_location_a);
+  GenerateOdexForTest(dex_location_a, odex_location, CompilerFilter::kSpeed);
+
+  // Try to use that odex file for OatForDifferentDex.jar
+  std::string dex_location = GetScratchDir() + "/OatForDifferentDex.jar";
+  Copy(GetDexSrc1(), dex_location);
+
+  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+  EXPECT_EQ(OatFileAssistant::kDex2OatNeeded,
+      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+
+  EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+  EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+  EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+  EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+  EXPECT_FALSE(oat_file_assistant.OatFileExists());
+  EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+  EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+  EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
 // Case: We have a DEX file and speed-profile OAT file for it.
 // Expect: The status is kNoDexOptNeeded if the profile hasn't changed, but
 // kDex2Oat if the profile has changed.
diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc
index 1d1413b..1db09b4 100644
--- a/runtime/os_linux.cc
+++ b/runtime/os_linux.cc
@@ -53,7 +53,7 @@
 
 File* OS::OpenFileWithFlags(const char* name, int flags) {
   CHECK(name != nullptr);
-  bool read_only = (flags == O_RDONLY);
+  bool read_only = ((flags & O_ACCMODE) == O_RDONLY);
   std::unique_ptr<File> file(new File(name, flags, 0666, !read_only));
   if (!file->IsOpened()) {
     return nullptr;
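
The old test `flags == O_RDONLY` only recognized a read-only open when no other flags were OR'ed
in (O_RDONLY is zero on Linux, so any extra bit such as O_CLOEXEC defeats the comparison); masking
with O_ACCMODE extracts just the access-mode bits first. A small standalone illustration:

    #include <cassert>
    #include <fcntl.h>

    int main() {
      const int flags = O_RDONLY | O_CLOEXEC;

      // Buggy check: fails as soon as any non-access-mode flag is present.
      const bool read_only_buggy = (flags == O_RDONLY);

      // Fixed check: O_ACCMODE masks out everything but O_RDONLY/O_WRONLY/O_RDWR.
      const bool read_only_fixed = ((flags & O_ACCMODE) == O_RDONLY);

      assert(!read_only_buggy);
      assert(read_only_fixed);
      return 0;
    }
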
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7482d93..3326736 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1818,22 +1818,12 @@
   ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
   tlsPtr_.jni_env->ExceptionClear();
 
-  // If the thread has its own handler, use that.
-  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
-                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
-                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
-  if (handler.get() == nullptr) {
-    // Otherwise use the thread group's default handler.
-    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
-                                                  WellKnownClasses::java_lang_Thread_group));
-  }
+  // Call the Thread instance's dispatchUncaughtException(Throwable)
+  tlsPtr_.jni_env->CallVoidMethod(peer.get(),
+      WellKnownClasses::java_lang_Thread_dispatchUncaughtException,
+      exception.get());
 
-  // Call the handler.
-  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
-      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
-      peer.get(), exception.get());
-
-  // If the handler threw, clear that exception too.
+  // If the dispatchUncaughtException threw, clear that exception too.
   tlsPtr_.jni_env->ExceptionClear();
 }
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 355d552..48deb35 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -57,7 +57,6 @@
 jclass WellKnownClasses::java_lang_StringFactory;
 jclass WellKnownClasses::java_lang_System;
 jclass WellKnownClasses::java_lang_Thread;
-jclass WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler;
 jclass WellKnownClasses::java_lang_ThreadGroup;
 jclass WellKnownClasses::java_lang_Throwable;
 jclass WellKnownClasses::java_nio_DirectByteBuffer;
@@ -121,9 +120,9 @@
 jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints;
 jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder;
 jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
+jmethodID WellKnownClasses::java_lang_Thread_dispatchUncaughtException;
 jmethodID WellKnownClasses::java_lang_Thread_init;
 jmethodID WellKnownClasses::java_lang_Thread_run;
-jmethodID WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
 jmethodID WellKnownClasses::java_lang_ThreadGroup_removeThread;
 jmethodID WellKnownClasses::java_nio_DirectByteBuffer_init;
 jmethodID WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation;
@@ -141,7 +140,6 @@
 jfieldID WellKnownClasses::java_lang_Thread_lock;
 jfieldID WellKnownClasses::java_lang_Thread_name;
 jfieldID WellKnownClasses::java_lang_Thread_priority;
-jfieldID WellKnownClasses::java_lang_Thread_uncaughtHandler;
 jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
 jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
@@ -245,8 +243,6 @@
   java_lang_StringFactory = CacheClass(env, "java/lang/StringFactory");
   java_lang_System = CacheClass(env, "java/lang/System");
   java_lang_Thread = CacheClass(env, "java/lang/Thread");
-  java_lang_Thread__UncaughtExceptionHandler = CacheClass(env,
-      "java/lang/Thread$UncaughtExceptionHandler");
   java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
   java_lang_Throwable = CacheClass(env, "java/lang/Throwable");
   java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
@@ -273,9 +269,9 @@
   java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V");
 
   java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
+  java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
   java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
   java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
-  java_lang_Thread__UncaughtExceptionHandler_uncaughtException = CacheMethod(env, java_lang_Thread__UncaughtExceptionHandler, false, "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
   java_lang_ThreadGroup_removeThread = CacheMethod(env, java_lang_ThreadGroup, false, "threadTerminated", "(Ljava/lang/Thread;)V");
   java_nio_DirectByteBuffer_init = CacheMethod(env, java_nio_DirectByteBuffer, false, "<init>", "(JI)V");
   libcore_reflect_AnnotationFactory_createAnnotation = CacheMethod(env, libcore_reflect_AnnotationFactory, true, "createAnnotation", "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)Ljava/lang/annotation/Annotation;");
@@ -349,7 +345,6 @@
   java_lang_Thread_lock = CacheField(env, java_lang_Thread, false, "lock", "Ljava/lang/Object;");
   java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
   java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
-  java_lang_Thread_uncaughtHandler = CacheField(env, java_lang_Thread, false, "uncaughtExceptionHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
   java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
   java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
   java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index cc60b4d..c9faf69 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -69,7 +69,6 @@
   static jclass java_lang_System;
   static jclass java_lang_Thread;
   static jclass java_lang_ThreadGroup;
-  static jclass java_lang_Thread__UncaughtExceptionHandler;
   static jclass java_lang_Throwable;
   static jclass java_util_ArrayList;
   static jclass java_util_Collections;
@@ -132,9 +131,9 @@
   static jmethodID java_lang_StringFactory_newStringFromCodePoints;
   static jmethodID java_lang_StringFactory_newStringFromStringBuilder;
   static jmethodID java_lang_System_runFinalization;
+  static jmethodID java_lang_Thread_dispatchUncaughtException;
   static jmethodID java_lang_Thread_init;
   static jmethodID java_lang_Thread_run;
-  static jmethodID java_lang_Thread__UncaughtExceptionHandler_uncaughtException;
   static jmethodID java_lang_ThreadGroup_removeThread;
   static jmethodID java_nio_DirectByteBuffer_init;
   static jmethodID libcore_reflect_AnnotationFactory_createAnnotation;
@@ -154,7 +153,6 @@
   static jfieldID java_lang_Thread_lock;
   static jfieldID java_lang_Thread_name;
   static jfieldID java_lang_Thread_priority;
-  static jfieldID java_lang_Thread_uncaughtHandler;
   static jfieldID java_lang_Thread_nativePeer;
   static jfieldID java_lang_ThreadGroup_groups;
   static jfieldID java_lang_ThreadGroup_ngroups;
diff --git a/tools/cpp-define-generator/constant_heap.def b/tools/cpp-define-generator/constant_heap.def
new file mode 100644
index 0000000..dc76736
--- /dev/null
+++ b/tools/cpp-define-generator/constant_heap.def
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Export heap values.
+
+#if defined(DEFINE_INCLUDE_DEPENDENCIES)
+#include "gc/heap.h"
+#endif
+
+// Minimum byte size at which primitive arrays are allocated in the large object space.
+DEFINE_EXPR(MIN_LARGE_OBJECT_THRESHOLD, size_t, art::gc::Heap::kMinLargeObjectThreshold)
+
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
index 01e4d5b..d2d8777 100644
--- a/tools/cpp-define-generator/offsets_all.def
+++ b/tools/cpp-define-generator/offsets_all.def
@@ -48,6 +48,7 @@
 // TODO: MIRROR_*_ARRAY offsets (depends on header size)
 // TODO: MIRROR_STRING offsets (depends on header size)
 #include "offset_dexcache.def"
+#include "constant_heap.def"
 #include "constant_lockword.def"
 #include "constant_globals.def"
 #include "constant_rosalloc.def"