[SystemZ] Fold more spills

Add a mapping from register-based <INSN>R instructions to the corresponding
memory-based <INSN>.  Use it to cut down on the number of spill loads.

Some instructions extend their operands from smaller fields, so this
required a new TSFlags field to say how big the unextended operand is.

This optimisation doesn't trigger for C(G)R and CL(G)R because in practice
we always combine those instructions with a branch.  Adding a test for every
other case might seem excessive, but it did catch a missed optimisation
for DSGF (fixed in r185435).

llvm-svn: 185529
diff --git a/llvm/test/CodeGen/SystemZ/fp-add-02.ll b/llvm/test/CodeGen/SystemZ/fp-add-02.ll
index 08eb90e..58afc13 100644
--- a/llvm/test/CodeGen/SystemZ/fp-add-02.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-add-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check register addition.
 define double @f1(double %f1, double %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@
   %res = fadd double %f1, %f2
   ret double %res
 }
+
+; Check that additions of spilled values can use ADB rather than ADBR.
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: adb %f0, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %add0 = fadd double %ret, %val0
+  %add1 = fadd double %add0, %val1
+  %add2 = fadd double %add1, %val2
+  %add3 = fadd double %add2, %val3
+  %add4 = fadd double %add3, %val4
+  %add5 = fadd double %add4, %val5
+  %add6 = fadd double %add5, %val6
+  %add7 = fadd double %add6, %val7
+  %add8 = fadd double %add7, %val8
+  %add9 = fadd double %add8, %val9
+  %add10 = fadd double %add9, %val10
+
+  ret double %add10
+}