Convert -enable-sched-cycles and -enable-sched-hazard to -disable
flags. They are still not enabled in this revision.
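
For context, the conversion presumably amounts to cl::opt declarations
along these lines; the flag names follow the commit, but cl::Hidden, the
descriptions, and the defaults are assumptions, not copied from the source:

    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    // Sketch only: renamed from -enable-* to -disable-*. Defaulting to
    // true would match "still not enabled in this revision", but that
    // default is an assumption.
    static cl::opt<bool> DisableSchedCycles(
        "disable-sched-cycles", cl::Hidden, cl::init(true),
        cl::desc("Disable cycle-level precision during preRA scheduling"));
    static cl::opt<bool> DisableSchedHazards(
        "disable-sched-hazard", cl::Hidden, cl::init(true),
        cl::desc("Disable hazard detection during preRA scheduling"));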

Added TargetInstrInfo::isZeroCost() to fix a fundamental problem with
the scheduler's model of operand latency in the selection DAG.
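
The hook itself is likely a small predicate on the opcode; a minimal
sketch, assuming the target-independent pseudo-opcodes up to COPY are
exactly the ones that expand to no real machine instruction (that cutoff
is an assumption):

    #include "llvm/Target/TargetOpcodes.h"

    /// Sketch: return true for pseudo instructions (COPY, IMPLICIT_DEF,
    /// and friends) that consume no machine resources, so the scheduler
    /// can model their operand latency as zero instead of charging them
    /// a bogus default latency.
    bool TargetInstrInfo::isZeroCost(unsigned Opcode) const {
      return Opcode <= TargetOpcode::COPY;
    }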

Generalized unit tests to work with sched-cycles.
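
Concretely, the tests below stop hard-coding the exact registers the old
scheduler happened to pick and instead accept any assignment via FileCheck
patterns, e.g.:

    ; before: passes only for one particular schedule
    ; VFP2: vneg.f32 s1, s0
    ; after: passes for any register assignment
    ; VFP2: vneg.f32 s{{.*}}, s{{.*}}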


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123969 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/ARM/fnegs.ll b/test/CodeGen/ARM/fnegs.ll
index c15005e..418b598 100644
--- a/test/CodeGen/ARM/fnegs.ll
+++ b/test/CodeGen/ARM/fnegs.ll
@@ -13,19 +13,19 @@
 	ret float %retval
 }
 ; VFP2: test1:
-; VFP2: 	vneg.f32	s1, s0
+; VFP2: 	vneg.f32	s{{.*}}, s{{.*}}
 
 ; NFP1: test1:
-; NFP1: 	vneg.f32	d1, d0
+; NFP1: 	vneg.f32	d{{.*}}, d{{.*}}
 
 ; NFP0: test1:
-; NFP0: 	vneg.f32	s1, s0
+; NFP0: 	vneg.f32	s{{.*}}, s{{.*}}
 
 ; CORTEXA8: test1:
-; CORTEXA8: 	vneg.f32	d1, d0
+; CORTEXA8: 	vneg.f32	d{{.*}}, d{{.*}}
 
 ; CORTEXA9: test1:
-; CORTEXA9: 	vneg.f32	s1, s0
+; CORTEXA9: 	vneg.f32	s{{.*}}, s{{.*}}
 
 define float @test2(float* %a) {
 entry:
@@ -37,17 +37,17 @@
 	ret float %retval
 }
 ; VFP2: test2:
-; VFP2: 	vneg.f32	s1, s0
+; VFP2: 	vneg.f32	s{{.*}}, s{{.*}}
 
 ; NFP1: test2:
-; NFP1: 	vneg.f32	d1, d0
+; NFP1: 	vneg.f32	d{{.*}}, d{{.*}}
 
 ; NFP0: test2:
-; NFP0: 	vneg.f32	s1, s0
+; NFP0: 	vneg.f32	s{{.*}}, s{{.*}}
 
 ; CORTEXA8: test2:
-; CORTEXA8: 	vneg.f32	d1, d0
+; CORTEXA8: 	vneg.f32	d{{.*}}, d{{.*}}
 
 ; CORTEXA9: test2:
-; CORTEXA9: 	vneg.f32	s1, s0
+; CORTEXA9: 	vneg.f32	s{{.*}}, s{{.*}}
 
diff --git a/test/CodeGen/ARM/fnmscs.ll b/test/CodeGen/ARM/fnmscs.ll
index 5d83253..76c8067 100644
--- a/test/CodeGen/ARM/fnmscs.ll
+++ b/test/CodeGen/ARM/fnmscs.ll
@@ -11,7 +11,7 @@
 ; NEON: vnmla.f32
 
 ; A8: t1:
-; A8: vnmul.f32 s0, s1, s0
+; A8: vnmul.f32 s0, s{{[01]}}, s{{[01]}}
 ; A8: vsub.f32 d0, d0, d1
 	%0 = fmul float %a, %b
 	%1 = fsub float -0.0, %0
@@ -28,7 +28,7 @@
 ; NEON: vnmla.f32
 
 ; A8: t2:
-; A8: vnmul.f32 s0, s1, s0
+; A8: vnmul.f32 s0, s{{[01]}}, s{{[01]}}
 ; A8: vsub.f32 d0, d0, d1
 	%0 = fmul float %a, %b
 	%1 = fmul float -1.0, %0
@@ -45,7 +45,7 @@
 ; NEON: vnmla.f64
 
 ; A8: t3:
-; A8: vnmul.f64 d16, d16, d17
+; A8: vnmul.f64 d16, d1{{[67]}}, d1{{[67]}}
 ; A8: vsub.f64 d16, d16, d17
 	%0 = fmul double %a, %b
 	%1 = fsub double -0.0, %0
@@ -62,7 +62,7 @@
 ; NEON: vnmla.f64
 
 ; A8: t4:
-; A8: vnmul.f64 d16, d16, d17
+; A8: vnmul.f64 d16, d1{{[67]}}, d1{{[67]}}
 ; A8: vsub.f64 d16, d16, d17
 	%0 = fmul double %a, %b
 	%1 = fmul double -1.0, %0
diff --git a/test/CodeGen/ARM/fpconsts.ll b/test/CodeGen/ARM/fpconsts.ll
index 9e7a8ae..638dde9 100644
--- a/test/CodeGen/ARM/fpconsts.ll
+++ b/test/CodeGen/ARM/fpconsts.ll
@@ -3,7 +3,7 @@
 define float @t1(float %x) nounwind readnone optsize {
 entry:
 ; CHECK: t1:
-; CHECK: vmov.f32 s1, #4.000000e+00
+; CHECK: vmov.f32 s{{.*}}, #4.000000e+00
   %0 = fadd float %x, 4.000000e+00
   ret float %0
 }
@@ -27,7 +27,7 @@
 define float @t4(float %x) nounwind readnone optsize {
 entry:
 ; CHECK: t4:
-; CHECK: vmov.f32 s1, #-2.400000e+01
+; CHECK: vmov.f32 s{{.*}}, #-2.400000e+01
   %0 = fmul float %x, -2.400000e+01
   ret float %0
 }
diff --git a/test/CodeGen/ARM/unaligned_load_store.ll b/test/CodeGen/ARM/unaligned_load_store.ll
index 354895e..b42e11f 100644
--- a/test/CodeGen/ARM/unaligned_load_store.ll
+++ b/test/CodeGen/ARM/unaligned_load_store.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm | FileCheck %s -check-prefix=GENERIC
+; RUN: llc < %s -march=arm -pre-RA-sched=source | FileCheck %s -check-prefix=GENERIC
 ; RUN: llc < %s -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=DARWIN_V6
 ; RUN: llc < %s -mtriple=armv6-apple-darwin -arm-strict-align | FileCheck %s -check-prefix=GENERIC
 ; RUN: llc < %s -mtriple=armv6-linux | FileCheck %s -check-prefix=GENERIC