Remove LoadBaseDispWide and StoreBaseDispWide.

Just pass k64 or kDouble to the non-wide versions instead.
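
For illustration only (call shapes taken from the hunks below), the
rewrite at a typical call site is:

    // Before: dedicated wide entry points.
    LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
    StoreBaseDispWide(reg_obj, data.field_offset, reg_src);

    // After: the regular entry points take an OpSize argument; passing
    // k64 (or kDouble for floating-point values) selects the wide access.
    LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, k64, INVALID_SREG);
    StoreBaseDisp(reg_obj, data.field_offset, reg_src, k64);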

Change-Id: I000619c3b78d3a71db42edc747c8a0ba1ee229be
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c9e1950..9915ff6 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -59,7 +59,7 @@
       RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
       reg_arg_low = new_regs.GetLow();
       reg_arg_high = new_regs.GetHigh();
-      LoadBaseDispWide(TargetReg(kSp), offset, new_regs, INVALID_SREG);
+      LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, INVALID_SREG);
     } else {
       reg_arg_high = AllocTemp();
       int offset_high = offset + sizeof(uint32_t);
@@ -112,7 +112,7 @@
       OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
       Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
     } else {
-      LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
+      LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, INVALID_SREG);
     }
   }
 }
@@ -126,6 +126,9 @@
   }
 
   bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
+  bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
+  OpSize size = LoadStoreOpSize(wide, ref);
+
   // The inliner doesn't distinguish kDouble or kFloat, use shorty.
   bool double_or_float = cu_->shorty[0] == 'F' || cu_->shorty[0] == 'D';
 
@@ -134,11 +137,7 @@
   LockArg(data.object_arg);
   RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
   RegStorage reg_obj = LoadArg(data.object_arg);
-  if (wide) {
-    LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
-  } else {
-    Load32Disp(reg_obj, data.field_offset, rl_dest.reg);
-  }
+  LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size, INVALID_SREG);
   if (data.is_volatile) {
     // Without context sensitive analysis, we must issue the most conservative barriers.
     // In this case, either a load or store may follow so we issue both barriers.
@@ -161,6 +160,8 @@
   }
 
   bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
+  bool ref = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT));
+  OpSize size = LoadStoreOpSize(wide, ref);
 
   // Point of no return - no aborts after this
   GenPrintLabel(mir);
@@ -172,16 +173,12 @@
     // There might have been a store before this volatile one so insert StoreStore barrier.
     GenMemBarrier(kStoreStore);
   }
-  if (wide) {
-    StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
-  } else {
-    Store32Disp(reg_obj, data.field_offset, reg_src);
-  }
+  StoreBaseDisp(reg_obj, data.field_offset, reg_src, size);
   if (data.is_volatile) {
     // A load might follow the volatile store so insert a StoreLoad barrier.
     GenMemBarrier(kStoreLoad);
   }
-  if (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT)) {
+  if (ref) {
     MarkGCCard(reg_src, reg_obj);
   }
   return true;
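
Note: the LoadStoreOpSize() helper used in the hunks above is not shown
in this diff. A minimal sketch consistent with its call sites (whether
references get a distinct size such as kReference, or simply k32, is an
assumption not visible here):

    // Sketch: map the wide/ref flags onto the OpSize expected by
    // LoadBaseDisp/StoreBaseDisp. Wide accesses use k64; kReference is
    // an assumed value, references may simply map to k32 instead.
    static OpSize LoadStoreOpSize(bool wide, bool ref) {
      return wide ? k64 : (ref ? kReference : k32);
    }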