[SystemZ] Fold more spills

Add a mapping from register-based <INSN>R instructions to the corresponding
memory-based <INSN> forms, and use it to fold reloads of spilled values into
those instructions, cutting down on the number of spill loads.
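
A rough sketch of the idea in C++ (the function and the exact set of
entries here are illustrative; the real mapping is driven by the
instruction definitions rather than an open-coded switch):

    // Illustrative only: map a register-register opcode to its
    // register-memory counterpart, or return 0 if there is none.
    // CDBR compares two FPRs, while CDB compares an FPR with 8 bytes
    // of memory, so a reload of a spilled double can be folded into
    // the compare itself.
    static unsigned getMemOpcode(unsigned RegOpcode) {
      switch (RegOpcode) {
      case SystemZ::CDBR: return SystemZ::CDB; // FP compare (double)
      case SystemZ::ADBR: return SystemZ::ADB; // FP add (double)
      case SystemZ::AGR:  return SystemZ::AG;  // 64-bit integer add
      default:            return 0;            // no memory form
      }
    }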

Some instructions sign- or zero-extend their memory operands from narrower
fields, so this required a new TSFlags field recording how wide the
unextended operand is.
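
A minimal sketch of what the new field provides, assuming a byte-count
encoding (the field name and bit position here are assumptions, not the
committed layout):

    #include <cstdint>

    // Assumed encoding: a TSFlags field holding the size in bytes of
    // the unextended memory operand (0 when no extension is involved).
    // AGF, for example, reads a 4-byte field and sign-extends it to
    // 64 bits, so folding a reload into AGF is only valid when the
    // spill slot is itself 4 bytes wide.
    namespace SystemZII {
      const unsigned AccessSizeShift = 8;                 // assumed
      const uint64_t AccessSizeMask = 0xffULL << AccessSizeShift;
      inline unsigned getAccessSize(uint64_t TSFlags) {
        return (TSFlags & AccessSizeMask) >> AccessSizeShift;
      }
    }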

This optimisation doesn't trigger for C(G)R and CL(G)R because in practice
we always combine those comparisons with a branch.  Adding a test for every
other case may seem excessive, but it did catch a missed optimisation for
DSGF (fixed in r185435).

llvm-svn: 185529
diff --git a/llvm/test/CodeGen/SystemZ/fp-cmp-02.ll b/llvm/test/CodeGen/SystemZ/fp-cmp-02.ll
index 2987d50..c5bdd56 100644
--- a/llvm/test/CodeGen/SystemZ/fp-cmp-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-cmp-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check comparison with registers.
 define i64 @f1(i64 %a, i64 %b, double %f1, double %f2) {
 ; CHECK: f1:
@@ -87,3 +89,61 @@
   %res = select i1 %cond, i64 %a, i64 %b
   ret i64 %res
 }
+
+; Check that comparisons of spilled values can use CDB rather than CDBR.
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: cdb {{%f[0-9]+}}, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %cmp0 = fcmp olt double %ret, %val0
+  %cmp1 = fcmp olt double %ret, %val1
+  %cmp2 = fcmp olt double %ret, %val2
+  %cmp3 = fcmp olt double %ret, %val3
+  %cmp4 = fcmp olt double %ret, %val4
+  %cmp5 = fcmp olt double %ret, %val5
+  %cmp6 = fcmp olt double %ret, %val6
+  %cmp7 = fcmp olt double %ret, %val7
+  %cmp8 = fcmp olt double %ret, %val8
+  %cmp9 = fcmp olt double %ret, %val9
+  %cmp10 = fcmp olt double %ret, %val10
+
+  %sel0 = select i1 %cmp0, double %ret, double 0.0
+  %sel1 = select i1 %cmp1, double %sel0, double 1.0
+  %sel2 = select i1 %cmp2, double %sel1, double 2.0
+  %sel3 = select i1 %cmp3, double %sel2, double 3.0
+  %sel4 = select i1 %cmp4, double %sel3, double 4.0
+  %sel5 = select i1 %cmp5, double %sel4, double 5.0
+  %sel6 = select i1 %cmp6, double %sel5, double 6.0
+  %sel7 = select i1 %cmp7, double %sel6, double 7.0
+  %sel8 = select i1 %cmp8, double %sel7, double 8.0
+  %sel9 = select i1 %cmp9, double %sel8, double 9.0
+  %sel10 = select i1 %cmp10, double %sel9, double 10.0
+
+  ret double %sel10
+}