Revert "CodeGen: ensure that libcalls are always AAPCS CC"

This reverts SVN r280683.  Reverting until I figure out why it is breaking the
lli tests.
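
For reference, the reverted change had marked these FP intrinsic declarations
arm_aapcscc and switched the hard-float (HARD/VFP4) checks from tail calls
("b") to "bl"; this revert restores the default calling convention and the
tail-call expectations. A minimal sketch of the restored pattern, mirroring
the '+' lines in the diff below (not new test content):

  declare float @llvm.sin.f32(float)

  define float @sin_f(float %a) {
  ; SOFT: bl sinf
  ; HARD: b sinf
    %1 = call float @llvm.sin.f32(float %a)
    ret float %1
  }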

llvm-svn: 280778
diff --git a/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll b/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
index 99d662f..847aeac 100644
--- a/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
+++ b/llvm/test/CodeGen/Thumb2/float-intrinsics-float.ll
@@ -14,106 +14,106 @@
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.powi.f32(float %Val, i32 %power)
+declare float     @llvm.powi.f32(float %Val, i32 %power)
 define float @powi_f(float %a, i32 %b) {
 ; CHECK-LABEL: powi_f:
 ; SOFT: bl __powisf2
-; HARD: bl __powisf2
-  %1 = call arm_aapcscc float @llvm.powi.f32(float %a, i32 %b)
+; HARD: b __powisf2
+  %1 = call float @llvm.powi.f32(float %a, i32 %b)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.sin.f32(float %Val)
+declare float     @llvm.sin.f32(float %Val)
 define float @sin_f(float %a) {
 ; CHECK-LABEL: sin_f:
 ; SOFT: bl sinf
-; HARD: bl sinf
-  %1 = call arm_aapcscc float @llvm.sin.f32(float %a)
+; HARD: b sinf
+  %1 = call float @llvm.sin.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.cos.f32(float %Val)
+declare float     @llvm.cos.f32(float %Val)
 define float @cos_f(float %a) {
 ; CHECK-LABEL: cos_f:
 ; SOFT: bl cosf
-; HARD: bl cosf
-  %1 = call arm_aapcscc float @llvm.cos.f32(float %a)
+; HARD: b cosf
+  %1 = call float @llvm.cos.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.pow.f32(float %Val, float %power)
+declare float     @llvm.pow.f32(float %Val, float %power)
 define float @pow_f(float %a, float %b) {
 ; CHECK-LABEL: pow_f:
 ; SOFT: bl powf
-; HARD: bl powf
-  %1 = call arm_aapcscc float @llvm.pow.f32(float %a, float %b)
+; HARD: b powf
+  %1 = call float @llvm.pow.f32(float %a, float %b)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.exp.f32(float %Val)
+declare float     @llvm.exp.f32(float %Val)
 define float @exp_f(float %a) {
 ; CHECK-LABEL: exp_f:
 ; SOFT: bl expf
-; HARD: bl expf
-  %1 = call arm_aapcscc float @llvm.exp.f32(float %a)
+; HARD: b expf
+  %1 = call float @llvm.exp.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.exp2.f32(float %Val)
+declare float     @llvm.exp2.f32(float %Val)
 define float @exp2_f(float %a) {
 ; CHECK-LABEL: exp2_f:
 ; SOFT: bl exp2f
-; HARD: bl exp2f
-  %1 = call arm_aapcscc float @llvm.exp2.f32(float %a)
+; HARD: b exp2f
+  %1 = call float @llvm.exp2.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.log.f32(float %Val)
+declare float     @llvm.log.f32(float %Val)
 define float @log_f(float %a) {
 ; CHECK-LABEL: log_f:
 ; SOFT: bl logf
-; HARD: bl logf
-  %1 = call arm_aapcscc float @llvm.log.f32(float %a)
+; HARD: b logf
+  %1 = call float @llvm.log.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.log10.f32(float %Val)
+declare float     @llvm.log10.f32(float %Val)
 define float @log10_f(float %a) {
 ; CHECK-LABEL: log10_f:
 ; SOFT: bl log10f
-; HARD: bl log10f
-  %1 = call arm_aapcscc float @llvm.log10.f32(float %a)
+; HARD: b log10f
+  %1 = call float @llvm.log10.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.log2.f32(float %Val)
+declare float     @llvm.log2.f32(float %Val)
 define float @log2_f(float %a) {
 ; CHECK-LABEL: log2_f:
 ; SOFT: bl log2f
-; HARD: bl log2f
-  %1 = call arm_aapcscc float @llvm.log2.f32(float %a)
+; HARD: b log2f
+  %1 = call float @llvm.log2.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.fma.f32(float %a, float %b, float %c)
+declare float     @llvm.fma.f32(float %a, float %b, float %c)
 define float @fma_f(float %a, float %b, float %c) {
 ; CHECK-LABEL: fma_f:
 ; SOFT: bl fmaf
 ; HARD: vfma.f32
-  %1 = call arm_aapcscc float @llvm.fma.f32(float %a, float %b, float %c)
+  %1 = call float @llvm.fma.f32(float %a, float %b, float %c)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.fabs.f32(float %Val)
+declare float     @llvm.fabs.f32(float %Val)
 define float @abs_f(float %a) {
 ; CHECK-LABEL: abs_f:
 ; SOFT: bic r0, r0, #-2147483648
 ; HARD: vabs.f32
-  %1 = call arm_aapcscc float @llvm.fabs.f32(float %a)
+  %1 = call float @llvm.fabs.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.copysign.f32(float  %Mag, float  %Sgn)
+declare float     @llvm.copysign.f32(float  %Mag, float  %Sgn)
 define float @copysign_f(float %a, float %b) {
 ; CHECK-LABEL: copysign_f:
 ; NONE: lsrs [[REG:r[0-9]+]], r{{[0-9]+}}, #31
@@ -124,73 +124,73 @@
 ; VFP: bfi r{{[0-9]+}}, [[REG]], #31, #1
 ; NEON: vmov.i32 [[REG:d[0-9]+]], #0x80000000
 ; NEON: vbsl [[REG]], d
-  %1 = call arm_aapcscc float @llvm.copysign.f32(float %a, float %b)
+  %1 = call float @llvm.copysign.f32(float %a, float %b)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.floor.f32(float %Val)
+declare float     @llvm.floor.f32(float %Val)
 define float @floor_f(float %a) {
 ; CHECK-LABEL: floor_f:
 ; SOFT: bl floorf
-; VFP4: bl floorf
+; VFP4: b floorf
 ; FP-ARMv8: vrintm.f32
-  %1 = call arm_aapcscc float @llvm.floor.f32(float %a)
+  %1 = call float @llvm.floor.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.ceil.f32(float %Val)
+declare float     @llvm.ceil.f32(float %Val)
 define float @ceil_f(float %a) {
 ; CHECK-LABEL: ceil_f:
 ; SOFT: bl ceilf
-; VFP4: bl ceilf
+; VFP4: b ceilf
 ; FP-ARMv8: vrintp.f32
-  %1 = call arm_aapcscc float @llvm.ceil.f32(float %a)
+  %1 = call float @llvm.ceil.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.trunc.f32(float %Val)
+declare float     @llvm.trunc.f32(float %Val)
 define float @trunc_f(float %a) {
 ; CHECK-LABEL: trunc_f:
 ; SOFT: bl truncf
-; VFP4: bl truncf
+; VFP4: b truncf
 ; FP-ARMv8: vrintz.f32
-  %1 = call arm_aapcscc float @llvm.trunc.f32(float %a)
+  %1 = call float @llvm.trunc.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.rint.f32(float %Val)
+declare float     @llvm.rint.f32(float %Val)
 define float @rint_f(float %a) {
 ; CHECK-LABEL: rint_f:
 ; SOFT: bl rintf
-; VFP4: bl rintf
+; VFP4: b rintf
 ; FP-ARMv8: vrintx.f32
-  %1 = call arm_aapcscc float @llvm.rint.f32(float %a)
+  %1 = call float @llvm.rint.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.nearbyint.f32(float %Val)
+declare float     @llvm.nearbyint.f32(float %Val)
 define float @nearbyint_f(float %a) {
 ; CHECK-LABEL: nearbyint_f:
 ; SOFT: bl nearbyintf
-; VFP4: bl nearbyintf
+; VFP4: b nearbyintf
 ; FP-ARMv8: vrintr.f32
-  %1 = call arm_aapcscc float @llvm.nearbyint.f32(float %a)
+  %1 = call float @llvm.nearbyint.f32(float %a)
   ret float %1
 }
 
-declare arm_aapcscc float @llvm.round.f32(float %Val)
+declare float     @llvm.round.f32(float %Val)
 define float @round_f(float %a) {
 ; CHECK-LABEL: round_f:
 ; SOFT: bl roundf
-; VFP4: bl roundf
+; VFP4: b roundf
 ; FP-ARMv8: vrinta.f32
-  %1 = call arm_aapcscc float @llvm.round.f32(float %a)
+  %1 = call float @llvm.round.f32(float %a)
   ret float %1
 }
 
 ; FIXME: why does cortex-m4 use vmla, while cortex-a7 uses vmul+vadd?
 ; (these should be equivalent, even the rounding is the same)
-declare arm_aapcscc float @llvm.fmuladd.f32(float %a, float %b, float %c)
+declare float     @llvm.fmuladd.f32(float %a, float %b, float %c)
 define float @fmuladd_f(float %a, float %b, float %c) {
 ; CHECK-LABEL: fmuladd_f:
 ; SOFT: bl __aeabi_fmul
@@ -198,25 +198,24 @@
 ; VMLA: vmla.f32
 ; NO-VMLA: vmul.f32
 ; NO-VMLA: vadd.f32
-  %1 = call arm_aapcscc float @llvm.fmuladd.f32(float %a, float %b, float %c)
+  %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
   ret float %1
 }
 
-declare arm_aapcscc i16 @llvm.convert.to.fp16.f32(float %a)
+declare i16 @llvm.convert.to.fp16.f32(float %a)
 define i16 @f_to_h(float %a) {
 ; CHECK-LABEL: f_to_h:
 ; SOFT: bl __aeabi_f2h
 ; HARD: vcvt{{[bt]}}.f16.f32
-  %1 = call arm_aapcscc i16 @llvm.convert.to.fp16.f32(float %a)
+  %1 = call i16 @llvm.convert.to.fp16.f32(float %a)
   ret i16 %1
 }
 
-declare arm_aapcscc float @llvm.convert.from.fp16.f32(i16 %a)
+declare float @llvm.convert.from.fp16.f32(i16 %a)
 define float @h_to_f(i16 %a) {
 ; CHECK-LABEL: h_to_f:
 ; SOFT: bl __aeabi_h2f
 ; HARD: vcvt{{[bt]}}.f32.f16
-  %1 = call arm_aapcscc float @llvm.convert.from.fp16.f32(i16 %a)
+  %1 = call float @llvm.convert.from.fp16.f32(i16 %a)
   ret float %1
 }
-