Fix ART test failures for MIPS.

This patch fixes the following ART test failures on MIPS:
003-omnibus-opcodes
030-bad-finalizer
041-narrowing
059-finalizer-throw
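
Volatile 64-bit loads and stores were previously unsupported on MIPS
(SupportsVolatileLoadStore() rejected k64/kDouble, and
RegClassForFieldLoadStore() steered such accesses to fp registers).
Implement them by calling the pA64Load/pA64Store quick entrypoints on
a core register pair, and remove the now-unneeded
SupportsVolatileLoadStore() override.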

Change-Id: I4e0e9ff75f949c92059dd6b8d579450dc15f4467
Signed-off-by: Douglas Leung <douglas@mips.com>
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 2c33377..4a06086 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -47,6 +47,8 @@
                           OpSize size) OVERRIDE;
     LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
                               RegStorage r_src, OpSize size) OVERRIDE;
+    LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+    LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
     void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
 
     // Required for target - register utilities.
@@ -83,8 +85,6 @@
     size_t GetInsnSize(LIR* lir) OVERRIDE;
     bool IsUnconditionalBranch(LIR* lir);
 
-    // Check support for volatile load/store of a given size.
-    bool SupportsVolatileLoadStore(OpSize size) OVERRIDE;
     // Get the register class for load/store of a field.
     RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
 
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index a5b7824..4ba94c4 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -496,6 +496,39 @@
   return inst;
 }
 
+LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+  DCHECK(!r_dest.IsFloat());  // See RegClassForFieldLoadStore().
+  DCHECK(r_dest.IsPair());
+  ClobberCallerSave();
+  LockCallTemps();  // Using fixed registers
+  RegStorage reg_ptr = TargetReg(kArg0);
+  OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Load));
+  LIR* ret = OpReg(kOpBlx, r_tgt);
+  RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+  OpRegCopyWide(r_dest, reg_ret);
+  return ret;
+}
+
+LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+  DCHECK(!r_src.IsFloat());  // See RegClassForFieldLoadStore().
+  DCHECK(r_src.IsPair());
+  ClobberCallerSave();
+  LockCallTemps();  // Using fixed registers
+  RegStorage temp_ptr = AllocTemp();
+  OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+  RegStorage temp_value = AllocTempWide();
+  OpRegCopyWide(temp_value, r_src);
+  RegStorage reg_ptr = TargetReg(kArg0);
+  OpRegCopy(reg_ptr, temp_ptr);
+  RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+  OpRegCopyWide(reg_value, temp_value);
+  FreeTemp(temp_ptr);
+  FreeTemp(temp_value);
+  RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pA64Store));
+  return OpReg(kOpBlx, r_tgt);
+}
+
 void MipsMir2Lir::SpillCoreRegs() {
   if (num_core_spills_ == 0) {
     return;
@@ -530,17 +563,12 @@
   return (lir->opcode == kMipsB);
 }
 
-bool MipsMir2Lir::SupportsVolatileLoadStore(OpSize size) {
-  // No support for 64-bit atomic load/store on mips.
-  return size != k64 && size != kDouble;
-}
-
 RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
   if (UNLIKELY(is_volatile)) {
-    // On Mips, atomic 64-bit load/store requires an fp register.
+    // On Mips, atomic 64-bit load/store requires a core register pair.
     // Smaller aligned load/store is atomic for both core and fp registers.
     if (size == k64 || size == kDouble) {
-      return kFPReg;
+      return kCoreReg;
     }
   }
   // TODO: Verify that both core and fp registers are suitable for smaller sizes.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 75d3c5d..0e8188b 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -551,8 +551,9 @@
 
 LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size, VolatileKind is_volatile) {
-  if (is_volatile == kVolatile) {
-    DCHECK(size != k64 && size != kDouble);
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+    // Do atomic 64-bit load.
+    return GenAtomic64Load(r_base, displacement, r_dest);
   }
 
   // TODO: base this on target.
@@ -654,17 +655,21 @@
 LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size, VolatileKind is_volatile) {
   if (is_volatile == kVolatile) {
-    DCHECK(size != k64 && size != kDouble);
     // Ensure that prior accesses become visible to other threads first.
     GenMemBarrier(kAnyStore);
   }
 
-  // TODO: base this on target.
-  if (size == kWord) {
-    size = k32;
-  }
   LIR* store;
-  store = StoreBaseDispBody(r_base, displacement, r_src, size);
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+    // Do atomic 64-bit store.
+    store = GenAtomic64Store(r_base, displacement, r_src);
+  } else {
+    // TODO: base this on target.
+    if (size == kWord) {
+      size = k32;
+    }
+    store = StoreBaseDispBody(r_base, displacement, r_src, size);
+  }
 
   if (UNLIKELY(is_volatile == kVolatile)) {
     // Preserve order with respect to any subsequent volatile loads.
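
For reference, a minimal sketch of the guarantee the pA64Load/pA64Store
entrypoints must provide: a 64-bit load/store that stays atomic on 32-bit
cores lacking 64-bit load-linked/store-conditional. This is an illustrative
stand-in, not ART's actual implementation; the helper names A64Load/A64Store
and the single global lock are assumptions made for the example.

#include <cstdint>
#include <mutex>

namespace {
// Assumption for illustration: one global lock serializing all 64-bit
// atomic accesses. A real runtime would typically shard locks by address.
std::mutex g_atomic64_lock;
}  // namespace

// Atomically load a 64-bit value that may be written concurrently.
extern "C" int64_t A64Load(volatile const int64_t* addr) {
  std::lock_guard<std::mutex> lock(g_atomic64_lock);
  return *addr;
}

// Atomically store a 64-bit value that may be read concurrently.
extern "C" void A64Store(volatile int64_t* addr, int64_t value) {
  std::lock_guard<std::mutex> lock(g_atomic64_lock);
  *addr = value;
}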