Quick compiler: allocate doubles as doubles

Significant refactoring of register handling to unify usage across
all targets and both the 32-bit and 64-bit backends.

Reworked RegStorage encoding to allow expanded use of
x86 xmm registers; removed vector registers as a separate
register type.  Reworked RegisterInfo to describe aliased
physical registers.  Eliminated quite a bit of target-specific code
and generalized common code.
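
As a rough illustration of the aliasing being modeled (a hypothetical
sketch, not the actual RegisterInfo API): on ARM VFP, s(2k) and s(2k+1)
occupy the same physical storage as d(k), and the x86 xmm registers
similarly back both single- and double-precision views.

  // Hypothetical sketch only -- not the real RegisterInfo bookkeeping.
  // It just encodes the ARM VFP overlap described above.
  #include <cassert>

  static int LowSingleOfDouble(int d)  { return 2 * d; }
  static int HighSingleOfDouble(int d) { return 2 * d + 1; }
  static bool SingleAliasesDouble(int s, int d) { return (s >> 1) == d; }

  int main() {
    assert(SingleAliasesDouble(LowSingleOfDouble(7), 7));   // s14 overlaps d7
    assert(SingleAliasesDouble(HighSingleOfDouble(7), 7));  // s15 overlaps d7
    assert(!SingleAliasesDouble(16, 7));                    // s16 is part of d8
    return 0;
  }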

Use of RegStorage instead of int for registers is now propagated down
to the NewLIRx() level.  In future CLs, the NewLIRx() routines will
be replaced with versions that are explicit about what kind of
operand they expect (RegStorage, displacement, etc.).  The goal
is to eventually use RegStorage all the way to the assembly phase.
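
For illustration, a typical ARM call site changes along these lines
(simplified from the utility_arm.cc hunks below; GetReg() still
unwraps the raw register number at the NewLIRx() boundary for now):

  // Before: raw ints plus target-specific macros to classify them.
  if (ARM_FPREG(r_dest.GetReg())) {
    return LoadFPConstantValue(r_dest.GetReg(), value);
  }
  if (ARM_LOWREG(r_dest.GetReg()) && (value >= 0) && (value <= 255)) {
    return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
  }

  // After: RegStorage answers the shape questions itself; only the
  // NewLIR2() call still sees the raw register number.
  if (r_dest.IsFloat()) {
    return LoadFPConstantValue(r_dest.GetReg(), value);
  }
  if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
    return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
  }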

TBD: MIPS needs verification.
TBD: Re-enable liveness tracking.

Change-Id: I388c006d5fa9b3ea72db4e37a19ce257f2a15964
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 2e64f74..08acef7 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -69,7 +69,7 @@
 }
 
 LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
-  DCHECK(ARM_SINGLEREG(r_dest));
+  DCHECK(RegStorage::IsSingle(r_dest));
   if (value == 0) {
     // TODO: we need better info about the target CPU.  a vector exclusive or
     //       would probably be better here if we could rely on its existance.
@@ -88,7 +88,7 @@
     data_target = AddWordData(&literal_list_, value);
   }
   LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
-                          r_dest, r15pc, 0, 0, 0, data_target);
+                          r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
   SetMemRefType(load_pc_rel, true, kLiteral);
   AppendLIR(load_pc_rel);
   return load_pc_rel;
@@ -173,12 +173,12 @@
   LIR* res;
   int mod_imm;
 
-  if (ARM_FPREG(r_dest.GetReg())) {
+  if (r_dest.IsFloat()) {
     return LoadFPConstantValue(r_dest.GetReg(), value);
   }
 
   /* See if the value can be constructed cheaply */
-  if (ARM_LOWREG(r_dest.GetReg()) && (value >= 0) && (value <= 255)) {
+  if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
     return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
   }
   /* Check Modified immediate special cases */
@@ -204,7 +204,7 @@
 }
 
 LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
-  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched  during assembly*/);
+  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched  during assembly */);
   res->target = target;
   return res;
 }
@@ -237,7 +237,7 @@
 LIR* ArmMir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
                                int shift) {
   bool thumb_form =
-      ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()));
+      ((shift == 0) && r_dest_src1.Low8() && r_src2.Low8());
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpAdc:
@@ -256,9 +256,9 @@
     case kOpCmp:
       if (thumb_form)
         opcode = kThumbCmpRR;
-      else if ((shift == 0) && !ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
+      else if ((shift == 0) && !r_dest_src1.Low8() && !r_src2.Low8())
         opcode = kThumbCmpHH;
-      else if ((shift == 0) && ARM_LOWREG(r_dest_src1.GetReg()))
+      else if ((shift == 0) && r_dest_src1.Low8())
         opcode = kThumbCmpLH;
       else if (shift == 0)
         opcode = kThumbCmpHL;
@@ -270,11 +270,11 @@
       break;
     case kOpMov:
       DCHECK_EQ(shift, 0);
-      if (ARM_LOWREG(r_dest_src1.GetReg()) && ARM_LOWREG(r_src2.GetReg()))
+      if (r_dest_src1.Low8() && r_src2.Low8())
         opcode = kThumbMovRR;
-      else if (!ARM_LOWREG(r_dest_src1.GetReg()) && !ARM_LOWREG(r_src2.GetReg()))
+      else if (!r_dest_src1.Low8() && !r_src2.Low8())
         opcode = kThumbMovRR_H2H;
-      else if (ARM_LOWREG(r_dest_src1.GetReg()))
+      else if (r_dest_src1.Low8())
         opcode = kThumbMovRR_H2L;
       else
         opcode = kThumbMovRR_L2H;
@@ -389,8 +389,7 @@
 LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                   RegStorage r_src2, int shift) {
   ArmOpcode opcode = kThumbBkpt;
-  bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()) &&
-      ARM_LOWREG(r_src2.GetReg());
+  bool thumb_form = (shift == 0) && r_dest.Low8() && r_src1.Low8() && r_src2.Low8();
   switch (op) {
     case kOpAdd:
       opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
@@ -466,7 +465,7 @@
   int32_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kThumbBkpt;
   ArmOpcode alt_opcode = kThumbBkpt;
-  bool all_low_regs = (ARM_LOWREG(r_dest.GetReg()) && ARM_LOWREG(r_src1.GetReg()));
+  bool all_low_regs = r_dest.Low8() && r_src1.Low8();
   int32_t mod_imm = ModifiedImmediate(value);
 
   switch (op) {
@@ -488,10 +487,9 @@
     case kOpRor:
       return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
     case kOpAdd:
-      if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r13sp) &&
-        (value <= 1020) && ((value & 0x3) == 0)) {
+      if (r_dest.Low8() && (r_src1 == rs_r13sp) && (value <= 1020) && ((value & 0x3) == 0)) {
         return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
-      } else if (ARM_LOWREG(r_dest.GetReg()) && (r_src1 == rs_r15pc) &&
+      } else if (r_dest.Low8() && (r_src1 == rs_r15pc) &&
           (value <= 1020) && ((value & 0x3) == 0)) {
         return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
       }
@@ -601,7 +599,7 @@
 LIR* ArmMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
   bool neg = (value < 0);
   int32_t abs_value = (neg) ? -value : value;
-  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1.GetReg()));
+  bool short_form = (((abs_value & 0xff) == abs_value) && r_dest_src1.Low8());
   ArmOpcode opcode = kThumbBkpt;
   switch (op) {
     case kOpAdd:
@@ -643,22 +641,24 @@
   LIR* res = NULL;
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
-  int target_reg = S2d(r_dest.GetLowReg(), r_dest.GetHighReg());
-  if (ARM_FPREG(r_dest.GetLowReg())) {
+  if (r_dest.IsFloat()) {
+    DCHECK(!r_dest.IsPair());
     if ((val_lo == 0) && (val_hi == 0)) {
       // TODO: we need better info about the target CPU.  a vector exclusive or
       //       would probably be better here if we could rely on its existance.
       // Load an immediate +2.0 (which encodes to 0)
-      NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
+      NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), 0);
       // +0.0 = +2.0 - +2.0
-      res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
+      res = NewLIR3(kThumb2Vsubd, r_dest.GetReg(), r_dest.GetReg(), r_dest.GetReg());
     } else {
       int encoded_imm = EncodeImmDouble(value);
       if (encoded_imm >= 0) {
-        res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
+        res = NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), encoded_imm);
       }
     }
   } else {
+    // NOTE: Arm32 assumption here.
+    DCHECK(r_dest.IsPair());
     if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
       res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
       LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
@@ -670,13 +670,13 @@
     if (data_target == NULL) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
-    if (ARM_FPREG(r_dest.GetLowReg())) {
+    if (r_dest.IsFloat()) {
       res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
-                   target_reg, r15pc, 0, 0, 0, data_target);
+                   r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
     } else {
       DCHECK(r_dest.IsPair());
       res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
-                   r_dest.GetLowReg(), r_dest.GetHighReg(), r15pc, 0, 0, data_target);
+                   r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
     }
     SetMemRefType(res, true, kLiteral);
     AppendLIR(res);
@@ -690,22 +690,20 @@
 
 LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                  int scale, OpSize size) {
-  bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
-      ARM_LOWREG(r_dest.GetReg());
+  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_dest.Low8();
   LIR* load;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
 
-  if (ARM_FPREG(r_dest.GetReg())) {
-    if (ARM_SINGLEREG(r_dest.GetReg())) {
+  if (r_dest.IsFloat()) {
+    if (r_dest.IsSingle()) {
       DCHECK((size == k32) || (size == kSingle) || (size == kReference));
       opcode = kThumb2Vldrs;
       size = kSingle;
     } else {
-      DCHECK(ARM_DOUBLEREG(r_dest.GetReg()));
+      DCHECK(r_dest.IsDouble());
       DCHECK((size == k64) || (size == kDouble));
-      DCHECK_EQ((r_dest.GetReg() & 0x1), 0);
       opcode = kThumb2Vldrd;
       size = kDouble;
     }
@@ -758,20 +756,19 @@
 
 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
-  bool all_low_regs = ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_index.GetReg()) &&
-      ARM_LOWREG(r_src.GetReg());
+  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
   LIR* store = NULL;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
 
-  if (ARM_FPREG(r_src.GetReg())) {
-    if (ARM_SINGLEREG(r_src.GetReg())) {
+  if (r_src.IsFloat()) {
+    if (r_src.IsSingle()) {
       DCHECK((size == k32) || (size == kSingle) || (size == kReference));
       opcode = kThumb2Vstrs;
       size = kSingle;
     } else {
-      DCHECK(ARM_DOUBLEREG(r_src.GetReg()));
+      DCHECK(r_src.IsDouble());
       DCHECK((size == k64) || (size == kDouble));
       DCHECK_EQ((r_src.GetReg() & 0x1), 0);
       opcode = kThumb2Vstrd;
@@ -833,21 +830,16 @@
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
   bool thumb2Form = (displacement < 4092 && displacement >= 0);
-  bool all_low = r_dest.Is32Bit() && ARM_LOWREG(r_base.GetReg() && ARM_LOWREG(r_dest.GetReg()));
+  bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
   int encoded_disp = displacement;
   bool already_generated = false;
-  int dest_low_reg = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
   bool null_pointer_safepoint = false;
   switch (size) {
     case kDouble:
     // Intentional fall-though.
     case k64:
-      if (ARM_FPREG(dest_low_reg)) {
-        // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
-        if (r_dest.IsPair()) {
-          DCHECK(ARM_FPREG(r_dest.GetHighReg()));
-          r_dest = RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg()));
-        }
+      if (r_dest.IsFloat()) {
+        DCHECK(!r_dest.IsPair());
         opcode = kThumb2Vldrd;
         if (displacement <= 1020) {
           short_form = true;
@@ -870,7 +862,7 @@
     case k32:
     // Intentional fall-though.
     case kReference:
-      if (ARM_FPREG(r_dest.GetReg())) {
+      if (r_dest.IsFloat()) {
         opcode = kThumb2Vldrs;
         if (displacement <= 1020) {
           short_form = true;
@@ -878,13 +870,13 @@
         }
         break;
       }
-      if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r15pc) &&
-          (displacement <= 1020) && (displacement >= 0)) {
+      if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
+          (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbLdrPcRel;
-      } else if (ARM_LOWREG(r_dest.GetReg()) && (r_base.GetReg() == r13sp) &&
-          (displacement <= 1020) && (displacement >= 0)) {
+      } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
+                 (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbLdrSpRel;
@@ -940,7 +932,7 @@
     } else {
       RegStorage reg_offset = AllocTemp();
       LoadConstant(reg_offset, encoded_disp);
-      if (ARM_FPREG(dest_low_reg)) {
+      if (r_dest.IsFloat()) {
         // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
         OpRegReg(kOpAdd, reg_offset, r_base);
         load = LoadBaseDispBody(reg_offset, 0, r_dest, size, s_reg);
@@ -985,15 +977,14 @@
   ArmOpcode opcode = kThumbBkpt;
   bool short_form = false;
   bool thumb2Form = (displacement < 4092 && displacement >= 0);
-  bool all_low = r_src.Is32Bit() && (ARM_LOWREG(r_base.GetReg()) && ARM_LOWREG(r_src.GetReg()));
+  bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
   int encoded_disp = displacement;
   bool already_generated = false;
-  int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
   bool null_pointer_safepoint = false;
   switch (size) {
     case k64:
     case kDouble:
-      if (!ARM_FPREG(src_low_reg)) {
+      if (!r_src.IsFloat()) {
         if (displacement <= 1020) {
           store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(),
                           displacement >> 2);
@@ -1004,11 +995,7 @@
         }
         already_generated = true;
       } else {
-        // Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
-        if (r_src.IsPair()) {
-          DCHECK(ARM_FPREG(r_src.GetHighReg()));
-          r_src = RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg()));
-        }
+        DCHECK(!r_src.IsPair());
         opcode = kThumb2Vstrd;
         if (displacement <= 1020) {
           short_form = true;
@@ -1017,10 +1004,12 @@
       }
       break;
     case kSingle:
+    // Intentional fall-through.
     case k32:
+    // Intentional fall-through.
     case kReference:
-      if (ARM_FPREG(r_src.GetReg())) {
-        DCHECK(ARM_SINGLEREG(r_src.GetReg()));
+      if (r_src.IsFloat()) {
+        DCHECK(r_src.IsSingle());
         opcode = kThumb2Vstrs;
         if (displacement <= 1020) {
           short_form = true;
@@ -1028,8 +1017,7 @@
         }
         break;
       }
-      if (ARM_LOWREG(r_src.GetReg()) && (r_base == rs_r13sp) &&
-          (displacement <= 1020) && (displacement >= 0)) {
+      if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
         short_form = true;
         encoded_disp >>= 2;
         opcode = kThumbStrSpRel;
@@ -1074,7 +1062,7 @@
     } else {
       RegStorage r_scratch = AllocTemp();
       LoadConstant(r_scratch, encoded_disp);
-      if (ARM_FPREG(src_low_reg)) {
+      if (r_src.IsFloat()) {
         // No index ops - must use a long sequence.  Turn the offset into a direct pointer.
         OpRegReg(kOpAdd, r_scratch, r_base);
         store = StoreBaseDispBody(r_scratch, 0, r_src, size);
@@ -1113,14 +1101,14 @@
 
 LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
   int opcode;
-  DCHECK_EQ(ARM_DOUBLEREG(r_dest.GetReg()), ARM_DOUBLEREG(r_src.GetReg()));
-  if (ARM_DOUBLEREG(r_dest.GetReg())) {
+  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+  if (r_dest.IsDouble()) {
     opcode = kThumb2Vmovd;
   } else {
-    if (ARM_SINGLEREG(r_dest.GetReg())) {
-      opcode = ARM_SINGLEREG(r_src.GetReg()) ? kThumb2Vmovs : kThumb2Fmsr;
+    if (r_dest.IsSingle()) {
+      opcode = r_src.IsSingle() ? kThumb2Vmovs : kThumb2Fmsr;
     } else {
-      DCHECK(ARM_SINGLEREG(r_src.GetReg()));
+      DCHECK(r_src.IsSingle());
       opcode = kThumb2Fmrs;
     }
   }
@@ -1142,8 +1130,7 @@
 }
 
 LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, RegStorage r_src_hi,
-                                      OpSize size, int s_reg) {
+                                      int displacement, RegStorage r_src, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
   return NULL;
 }
@@ -1154,8 +1141,7 @@
 }
 
 LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                     int displacement, RegStorage r_dest, RegStorage r_dest_hi,
-                                     OpSize size, int s_reg) {
+                                     int displacement, RegStorage r_dest, OpSize size, int s_reg) {
   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
   return NULL;
 }
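
A minimal standalone sketch of the idea behind the query methods used
throughout this diff -- a toy encoding, not ART's actual RegStorage bit
layout: one value carries both the register number and enough shape
information to answer IsFloat()/IsSingle()/IsDouble()/Low8() without
target-specific macros.

  #include <cassert>
  #include <cstdint>

  // Toy encoding (hypothetical): low byte = register number,
  // upper bits = shape flags.
  class ToyRegStorage {
   public:
    enum Kind : uint16_t {
      k32BitSolo   = 0x100,
      kFloatSingle = 0x400,
      kFloatDouble = 0x800,
    };
    ToyRegStorage(Kind kind, int reg_num)
        : reg_(static_cast<uint16_t>(kind | reg_num)) {}

    int GetRegNum() const { return reg_ & 0xff; }
    bool IsFloat() const { return (reg_ & (kFloatSingle | kFloatDouble)) != 0; }
    bool IsSingle() const { return (reg_ & kFloatSingle) != 0; }
    bool IsDouble() const { return (reg_ & kFloatDouble) != 0; }
    // Thumb "low" core registers are r0-r7.
    bool Low8() const { return !IsFloat() && GetRegNum() < 8; }

   private:
    uint16_t reg_;
  };

  int main() {
    ToyRegStorage r5(ToyRegStorage::k32BitSolo, 5);    // core r5
    ToyRegStorage r9(ToyRegStorage::k32BitSolo, 9);    // core r9
    ToyRegStorage s3(ToyRegStorage::kFloatSingle, 3);  // VFP single s3
    ToyRegStorage d7(ToyRegStorage::kFloatDouble, 7);  // VFP double d7
    assert(r5.Low8() && !r9.Low8());
    assert(s3.IsFloat() && s3.IsSingle() && !s3.IsDouble());
    assert(d7.IsFloat() && d7.IsDouble() && !d7.IsSingle());
    return 0;
  }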