[llvm][mlir] Promote the experimental reduction intrinsics to be first-class intrinsics.
This change renames the intrinsics so that "experimental" no longer appears in their names.
The auto-upgrader handles the legacy intrinsic names.
Relevant mailing-list thread: http://lists.llvm.org/pipermail/llvm-dev/2020-April/140729.html
Differential Revision: https://reviews.llvm.org/D88787
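
As an illustrative sketch of the rename (intrinsic names are taken from the tests updated below; the value names %v and %w are made up for the example), an integer reduction simply loses the "experimental" prefix, and the versioned floating-point reductions additionally drop the "v2" tag. IR or bitcode still using the old spellings is rewritten to the new names by the auto-upgrader when it is loaded:

  ; before this change
  %r = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %v)
  %f = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %w)
  ; after this change
  %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
  %f = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %w)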
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir
index 77e35de..5bc82d4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/add_reduce.mir
@@ -44,7 +44,7 @@
%add7 = add <4 x i32> %mul, %splat.output
%max = tail call <4 x i32> @llvm.arm.mve.max.predicated.v4i32.v4i1(<4 x i32> %add7, <4 x i32> %.splat.i42, i32 1, <4 x i1> %pred, <4 x i32> undef)
%min = tail call <4 x i32> @llvm.arm.mve.min.predicated.v4i32.v4i1(<4 x i32> %max, <4 x i32> %.splat.i, i32 1, <4 x i1> %pred, <4 x i32> undef)
- %reduce = tail call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %min)
+ %reduce = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %min)
store i32 %reduce, i32* %scevgep2
%add.ptr = getelementptr inbounds i8, i8* %input_1_vect.addr.052, i32 4
%add.ptr14 = getelementptr inbounds i8, i8* %input_2_vect.addr.051, i32 4
@@ -62,7 +62,7 @@
declare <4 x i32> @llvm.arm.mve.min.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, i32, <4 x i1>, <4 x i32>) #1
declare i1 @llvm.test.set.loop.iterations.i32(i32) #4
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #4
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #5
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #5
...
---
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
index 522cce4..29ecf00c 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll
@@ -85,7 +85,7 @@
middle.block: ; preds = %vector.body
%tmp8 = select <4 x i1> %tmp1, <4 x i32> %add, <4 x i32> %vec.phi
- %tmp9 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp8)
+ %tmp9 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp8)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -188,7 +188,7 @@
middle.block: ; preds = %vector.body
%acc = select <4 x i1> %tmp1, <4 x i32> %add, <4 x i32> %vec.phi
- %reduce = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %acc)
+ %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -287,7 +287,7 @@
middle.block: ; preds = %vector.body
%acc = select <4 x i1> %tmp1, <4 x i32> %add, <4 x i32> %vec.phi
- %reduce = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %acc)
+ %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -386,7 +386,7 @@
middle.block: ; preds = %vector.body
%acc = select <4 x i1> %tmp1, <4 x i32> %add, <4 x i32> %vec.phi
- %reduce = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %acc)
+ %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %acc)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -528,6 +528,6 @@
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
; Function Attrs: nounwind readnone willreturn
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir
index f27a98c..ab3c866 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-1.mir
@@ -56,7 +56,7 @@
br i1 %tmp16, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %tmp17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp14)
+ %tmp17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp14)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -64,7 +64,7 @@
ret i32 %res.0.lcssa
}
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir
index 5db355a..b796712 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/inloop-vpsel-2.mir
@@ -58,7 +58,7 @@
br i1 %tmp16, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %tmp17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp14)
+ %tmp17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp14)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -66,7 +66,7 @@
ret i32 %res.0.lcssa
}
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir
index ab2ffb5..2b35400 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/invariant-qreg.mir
@@ -68,7 +68,7 @@
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
%tmp12 = mul nsw <4 x i32> %pass, %tmp10
- %tmp13 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp12)
+ %tmp13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp12)
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
%tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
%tmp16 = icmp ne i32 %tmp15, 0
@@ -105,7 +105,7 @@
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
%tmp12 = add nsw <4 x i32> %pass, %tmp10
- %tmp13 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp12)
+ %tmp13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp12)
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
%tmp15 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
%tmp16 = icmp ne i32 %tmp15, 0
@@ -117,7 +117,7 @@
ret i32 %res
}
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
declare void @llvm.set.loop.iterations.i32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
index e5131fd..cdf53b8 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/lstp-insertion-position.mir
@@ -40,7 +40,7 @@
br i1 %15, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %16 = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %13)
+ %16 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %13)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -88,7 +88,7 @@
br i1 %15, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %16 = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %13)
+ %16 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %13)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -98,7 +98,7 @@
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32 immarg, <4 x i1>, <4 x float>)
- declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float, <4 x float>)
+ declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
index 886fbe7..f9d1abb 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/matrix.mir
@@ -91,7 +91,7 @@
%22 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %10)
%23 = bitcast i16* %lsr.iv7 to i1*
%24 = select <4 x i1> %22, <4 x i32> %.lcssa, <4 x i32> %vec.phi.lcssa
- %25 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %24)
+ %25 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %24)
%sunkaddr = mul i32 %i.064.us, 4
%26 = bitcast i32* %e to i8*
%sunkaddr17 = getelementptr inbounds i8, i8* %26, i32 %sunkaddr
@@ -141,7 +141,7 @@
}
declare dso_local arm_aapcs_vfpcc signext i16 @crc16(...) local_unnamed_addr #0
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
index 2b90065..5c3af35 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mve-tail-data-types.ll
@@ -69,7 +69,7 @@
middle.block: ; preds = %vector.body
%7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
- %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+ %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -145,7 +145,7 @@
middle.block: ; preds = %vector.body
%7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
- %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+ %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -221,7 +221,7 @@
middle.block: ; preds = %vector.body
%7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
- %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+ %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -297,7 +297,7 @@
middle.block: ; preds = %vector.body
%7 = select <4 x i1> %1, <4 x i32> %5, <4 x i32> %vec.phi
- %8 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %7)
+ %8 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %7)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -371,7 +371,7 @@
middle.block: ; preds = %vector.body
%6 = select <4 x i1> %1, <4 x i32> %4, <4 x i32> %vec.phi
- %7 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %6)
+ %7 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %6)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -1273,6 +1273,6 @@
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
index e5bcf2e..c797e04 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/nested.ll
@@ -51,7 +51,7 @@
; CHECK-NEXT: br i1 [[TMP16]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP17:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP14]], <4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP17]])
+; CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP17]])
; CHECK-NEXT: store i32 [[TMP18]], i32* [[ARRAYIDX8_US]], align 4
; CHECK-NEXT: [[INC10_US]] = add nuw i32 [[I_025_US]], 1
; CHECK-NEXT: [[EXITCOND27:%.*]] = icmp eq i32 [[INC10_US]], [[N]]
@@ -112,7 +112,7 @@
middle.block: ; preds = %vector.body
%tmp17 = select <4 x i1> %tmp7, <4 x i32> %tmp14, <4 x i32> %vec.phi
- %tmp18 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp17)
+ %tmp18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp17)
store i32 %tmp18, i32* %arrayidx8.us, align 4
%inc10.us = add nuw i32 %i.025.us, 1
%exitcond27 = icmp eq i32 %inc10.us, %N
@@ -170,7 +170,7 @@
; CHECK-NEXT: br i1 [[TMP14]], label [[VECTOR_BODY]], label [[MIDDLE_BLOCK]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP15:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP12]], <4 x i32> [[VEC_PHI]]
-; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[TMP15]])
+; CHECK-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]])
; CHECK-NEXT: store i32 [[TMP16]], i32* [[ARRAYIDX7_US]], align 4
; CHECK-NEXT: [[INC9_US]] = add nuw i32 [[I_024_US]], 1
; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[INC9_US]], [[N]]
@@ -229,7 +229,7 @@
middle.block: ; preds = %vector.body
%tmp15 = select <4 x i1> %tmp7, <4 x i32> %tmp12, <4 x i32> %vec.phi
- %tmp16 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp15)
+ %tmp16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp15)
store i32 %tmp16, i32* %arrayidx7.us, align 4
%inc9.us = add nuw i32 %i.024.us, 1
%exitcond26 = icmp eq i32 %inc9.us, %N
@@ -247,7 +247,7 @@
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #0
; Function Attrs: nounwind readnone willreturn
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #1
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #1
; Function Attrs: noduplicate nounwind
declare void @llvm.set.loop.iterations.i32(i32) #2
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
index 9eb95d7..4a5f483 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/no-vpsel-liveout.mir
@@ -40,7 +40,7 @@
br i1 %tmp15, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %tmp16 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp13)
+ %tmp16 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp13)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -48,7 +48,7 @@
ret i32 %res.0.lcssa
}
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
index 65f9cc3..c27a6c3 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
@@ -44,7 +44,7 @@
%.lcssa = phi <16 x i8> [ %13, %vector.body ]
%16 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %7)
%17 = select <16 x i1> %16, <16 x i8> %.lcssa, <16 x i8> %vec.phi.lcssa
- %18 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %17)
+ %18 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %17)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -53,7 +53,7 @@
}
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>) #1
- declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>) #2
+ declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <16 x i1> @llvm.arm.mve.vctp8(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
index 966bdc9..3a098f2 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/predicated-liveout.mir
@@ -36,7 +36,7 @@
br i1 %cmp, label %for.body, label %middle.block
middle.block: ; preds = %for.body
- %reduce = tail call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %acc.next)
+ %reduce = tail call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %acc.next)
ret i16 %reduce
for.cond.cleanup: ; preds = %entry
@@ -47,7 +47,7 @@
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>) #2
declare i1 @llvm.test.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
- declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>) #4
+ declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) #4
declare <8 x i16> @llvm.arm.mve.add.predicated.v8i16.v8i1(<8 x i16>, <8 x i16>, <8 x i1>, <8 x i16>) #1
...
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir
index f013cb2..c3655ba 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions-vpt-liveout.mir
@@ -41,7 +41,7 @@
br i1 %16, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %14)
+ %17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %14)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -88,7 +88,7 @@
br i1 %16, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %14)
+ %17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %14)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -135,7 +135,7 @@
br i1 %16, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %14)
+ %17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %14)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -182,7 +182,7 @@
br i1 %16, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %14)
+ %17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %14)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -228,7 +228,7 @@
br i1 %14, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %15 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %12)
+ %15 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %12)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -274,7 +274,7 @@
br i1 %14, label %vector.body, label %middle.block
middle.block: ; preds = %vector.body
- %15 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %12)
+ %15 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %12)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -285,7 +285,7 @@
declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll
index a0cdb82..f911663 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/reductions.ll
@@ -45,7 +45,7 @@
%wide.masked.load16 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %i3, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
%i4 = add <16 x i8> %wide.masked.load, %wide.masked.load16
%i5 = select <16 x i1> %active.lane.mask, <16 x i8> %i4, <16 x i8> %vec.phi
- %i6 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %i5)
+ %i6 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %i5)
%index.next = add i32 %index, 16
%i7 = icmp eq i32 %index.next, %n.vec
br i1 %i7, label %middle.block, label %vector.body
@@ -123,7 +123,7 @@
middle.block: ; preds = %vector.body
%i9 = select <8 x i1> %active.lane.mask, <8 x i16> %i7, <8 x i16> %vec.phi
- %i10 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %i9)
+ %i10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i9)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -193,7 +193,7 @@
middle.block: ; preds = %vector.body
%i7 = select <16 x i1> %active.lane.mask, <16 x i8> %i5, <16 x i8> %vec.phi
- %i8 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %i7)
+ %i8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %i7)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -265,7 +265,7 @@
middle.block: ; preds = %vector.body
%i9 = select <8 x i1> %active.lane.mask, <8 x i16> %i7, <8 x i16> %vec.phi
- %i10 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %i9)
+ %i10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i9)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -335,7 +335,7 @@
middle.block: ; preds = %vector.body
%i7 = select <16 x i1> %active.lane.mask, <16 x i8> %i5, <16 x i8> %vec.phi
- %i8 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %i7)
+ %i8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %i7)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -407,7 +407,7 @@
middle.block: ; preds = %vector.body
%i9 = select <8 x i1> %active.lane.mask, <8 x i16> %i7, <8 x i16> %vec.phi
- %i10 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %i9)
+ %i10 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i9)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -504,7 +504,7 @@
middle.block: ; preds = %vector.body
%i9 = select <4 x i1> %active.lane.mask, <4 x i32> %i7, <4 x i32> %vec.phi
- %i10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %i9)
+ %i10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %i9)
br i1 %cmp35, label %for.cond.cleanup7, label %vector.ph47
vector.ph47: ; preds = %middle.block
@@ -534,7 +534,7 @@
middle.block44: ; preds = %vector.body46
%i21 = select <4 x i1> %active.lane.mask61, <4 x i32> %i19, <4 x i32> %vec.phi60
- %i22 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %i21)
+ %i22 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %i21)
br label %for.cond.cleanup7
for.cond.cleanup7: ; preds = %middle.block44, %middle.block, %entry
@@ -620,9 +620,9 @@
middle.block: ; preds = %vector.body
%i11 = select <8 x i1> %active.lane.mask, <8 x i16> %i8, <8 x i16> %vec.phi
- %i12 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %i11)
+ %i12 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i11)
%i13 = select <8 x i1> %active.lane.mask, <8 x i16> %i9, <8 x i16> %vec.phi.1
- %i14 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %i13)
+ %i14 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %i13)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -747,7 +747,7 @@
middle.block: ; preds = %vector.body
%10 = select <4 x i1> %active.lane.mask, <4 x i32> %8, <4 x i32> %vec.phi
- %11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %10)
+ %11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %10)
br label %for.end
for.end: ; preds = %middle.block, %lor.end
@@ -758,10 +758,10 @@
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
-declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir
index 17acf67a..497f041 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/skip-debug.mir
@@ -46,7 +46,7 @@
%.lcssa = phi <4 x i32> [ %15, %vector.body ], !dbg !38
%18 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %9), !dbg !34
%19 = select <4 x i1> %18, <4 x i32> %.lcssa, <4 x i32> %vec.phi.lcssa, !dbg !38
- %20 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %19), !dbg !32
+ %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %19), !dbg !32
br label %for.cond.cleanup, !dbg !42
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -58,7 +58,7 @@
declare void @llvm.dbg.value(metadata, metadata, metadata)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll
index 338c980..d786209 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-reduce.ll
@@ -258,7 +258,7 @@
middle.block: ; preds = %vector.body
%19 = select <4 x i1> %active.lane.mask, <4 x i32> %16, <4 x i32> %vec.phi
- %20 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %19)
+ %20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %19)
br label %for.end
for.end: ; preds = %middle.block, %for.body
@@ -282,6 +282,6 @@
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tp-multiple-vpst.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tp-multiple-vpst.ll
index 26be532..6d14058 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tp-multiple-vpst.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/tp-multiple-vpst.ll
@@ -74,14 +74,14 @@
br i1 %8, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %9 = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %7)
- %10 = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %5)
+ %9 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %7)
+ %10 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %5)
store i32 %10, i32* %minp, align 4
ret i32 %9
}
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32) #1
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #2
-declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>) #3
-declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>) #3
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>) #3
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>) #3
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir
index 1f212c9..dec5400 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unpredicated-max.mir
@@ -26,7 +26,7 @@
%tmp8 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %tmp7)
%tmp9 = sub i32 %tmp7, 8
%wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %lsr.iv17, i32 2, <8 x i1> %tmp8, <8 x i16> undef)
- %min = tail call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %wide.masked.load)
+ %min = tail call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %wide.masked.load)
store i16 %min, i16* %lsr.iv.2
%scevgep = getelementptr i16, i16* %lsr.iv, i32 8
%scevgep.2 = getelementptr i16, i16* %lsr.iv.2, i32 1
@@ -43,7 +43,7 @@
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <8 x i1> @llvm.arm.mve.vctp16(i32)
- declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
+ declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
...
---
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir
index cd8310c..1d9f7d7 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vaddv.mir
@@ -26,7 +26,7 @@
%tmp9 = sub i32 %tmp7, 4
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp10)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp10)
store i32 %tmp11, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
@@ -64,7 +64,7 @@
%tmp9 = sub i32 %tmp7, 8
%wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %lsr.iv17, i32 2, <8 x i1> %tmp8, <8 x i16> undef)
%sext = sext <8 x i16> %wide.masked.load to <8 x i32>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %sext)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %sext)
store i32 %tmp11, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
%scevgep = getelementptr i16, i16* %lsr.iv, i32 8
@@ -102,7 +102,7 @@
%tmp9 = sub i32 %tmp7, 16
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv17, i32 1, <16 x i1> %tmp8, <16 x i8> undef)
%sext = sext <16 x i8> %wide.masked.load to <16 x i32>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %sext)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %sext)
store i32 %tmp11, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
%scevgep = getelementptr i8, i8* %lsr.iv, i32 16
@@ -140,7 +140,7 @@
%tmp9 = sub i32 %tmp7, 4
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp10)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp10)
%acc.next = add i32 %tmp11, %acc
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
%tmp12 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
@@ -179,7 +179,7 @@
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
%not = xor <4 x i32> %tmp10, <i32 -1, i32 -1, i32 -1, i32 -1>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %not)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %not)
store i32 %tmp11, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
@@ -218,7 +218,7 @@
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = sext <4 x i16> %wide.masked.load to <4 x i32>
%not = xor <4 x i32> %tmp10, <i32 -1, i32 -1, i32 -1, i32 -1>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %not)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %not)
%acc.next = add i32 %tmp11, %acc
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
%tmp12 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
@@ -257,7 +257,7 @@
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = zext <4 x i16> %wide.masked.load to <4 x i32>
%not = xor <4 x i32> %tmp10, <i32 -1, i32 -1, i32 -1, i32 -1>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %not)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %not)
store i32 %tmp11, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
@@ -296,7 +296,7 @@
%wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %lsr.iv17, i32 2, <4 x i1> %tmp8, <4 x i16> undef)
%tmp10 = zext <4 x i16> %wide.masked.load to <4 x i32>
%not = xor <4 x i32> %tmp10, <i32 -1, i32 -1, i32 -1, i32 -1>
- %tmp11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %not)
+ %tmp11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %not)
%acc.next = add i32 %tmp11, %acc
%scevgep = getelementptr i16, i16* %lsr.iv, i32 4
%tmp12 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %lsr.iv1, i32 1)
@@ -335,7 +335,7 @@
%wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv17, i32 1, <8 x i1> %tmp8, <8 x i8> undef)
%sext.wide = sext <8 x i8> %wide.masked.load to <8 x i16>
%sub = sub <8 x i16> %sext.wide, %pass
- %reduce = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %sub)
+ %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %sub)
%sext.reduce = sext i16 %reduce to i32
store i32 %sext.reduce, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
@@ -375,7 +375,7 @@
%wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %lsr.iv17, i32 1, <8 x i1> %tmp8, <8 x i8> undef)
%sext.wide = sext <8 x i8> %wide.masked.load to <8 x i16>
%sub = sub <8 x i16> %sext.wide, %pass
- %reduce = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %sub)
+ %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %sub)
%sext.reduce = sext i16 %reduce to i32
%acc.next = add i32 %sext.reduce, %acc
%scevgep = getelementptr i8, i8* %lsr.iv, i32 8
@@ -414,7 +414,7 @@
%tmp9 = sub i32 %tmp7, 8
%wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %lsr.iv17, i32 2, <8 x i1> %tmp8, <8 x i16> undef)
%sub = sub <8 x i16> %wide.masked.load, %pass
- %reduce = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %sub)
+ %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %sub)
%zext.reduce = zext i16 %reduce to i32
store i32 %zext.reduce, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
@@ -453,7 +453,7 @@
%tmp9 = sub i32 %tmp7, 8
%wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %lsr.iv17, i32 2, <8 x i1> %tmp8, <8 x i16> undef)
%sub = sub <8 x i16> %wide.masked.load, %pass
- %reduce = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %sub)
+ %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %sub)
%zext.reduce = zext i16 %reduce to i32
%acc.next = add i32 %zext.reduce, %acc
%scevgep = getelementptr i16, i16* %lsr.iv, i32 8
@@ -492,7 +492,7 @@
%tmp9 = sub i32 %tmp7, 16
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv17, i32 1, <16 x i1> %tmp8, <16 x i8> undef)
%xor = xor <16 x i8> %wide.masked.load, %pass
- %reduce = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %xor)
+ %reduce = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %xor)
%sext.reduce = sext i8 %reduce to i32
store i32 %sext.reduce, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
@@ -531,7 +531,7 @@
%tmp9 = sub i32 %tmp7, 16
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv17, i32 1, <16 x i1> %tmp8, <16 x i8> undef)
%xor = xor <16 x i8> %wide.masked.load, %pass
- %reduce = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %xor)
+ %reduce = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %xor)
%sext.reduce = sext i8 %reduce to i32
%acc.next = add i32 %sext.reduce, %acc
%scevgep = getelementptr i8, i8* %lsr.iv, i32 16
@@ -570,7 +570,7 @@
%tmp9 = sub i32 %tmp7, 16
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv17, i32 1, <16 x i1> %tmp8, <16 x i8> undef)
%xor = xor <16 x i8> %wide.masked.load, %pass
- %reduce = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %xor)
+ %reduce = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %xor)
%zext.reduce = zext i8 %reduce to i32
store i32 %zext.reduce, i32* %store.addr
%store.next = getelementptr i32, i32* %store.addr, i32 1
@@ -609,7 +609,7 @@
%tmp9 = sub i32 %tmp7, 16
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv17, i32 1, <16 x i1> %tmp8, <16 x i8> undef)
%xor = xor <16 x i8> %wide.masked.load, %pass
- %reduce = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %xor)
+ %reduce = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %xor)
%zext.reduce = zext i8 %reduce to i32
%acc.next = add i32 %zext.reduce, %acc
%scevgep = getelementptr i8, i8* %lsr.iv, i32 16
@@ -652,7 +652,7 @@
%tmp4 = tail call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %tmp3, i32 2, <4 x i1> %tmp, <4 x i16> zeroinitializer)
%zext.wide.2 = zext <4 x i16> %tmp4 to <4 x i32>
%or = or <4 x i32> %zext.wide.1, %zext.wide.2
- %reduce = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %or)
+ %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %or)
%acc.next = add i32 %reduce, %acc
%add.ptr = getelementptr inbounds i16, i16* %x.addr.026, i32 4
%add.ptr4 = getelementptr inbounds i16, i16* %y.addr.025, i32 4
@@ -693,7 +693,7 @@
%tmp2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp1, i32 2, <8 x i1> %tmp, <8 x i16> zeroinitializer)
%tmp4 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp3, i32 2, <8 x i1> %tmp, <8 x i16> zeroinitializer)
%or = or <8 x i16> %tmp2, %tmp4
- %reduce = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %or)
+ %reduce = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %or)
%zext.reduce = zext i16 %reduce to i32
%acc.next = add i32 %zext.reduce, %acc
%add.ptr = getelementptr inbounds i16, i16* %x.addr.026, i32 8
@@ -737,7 +737,7 @@
%tmp5 = tail call <4 x i32> @llvm.arm.mve.vmull.v4i32.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp4, i32 0, i32 1)
%tmp6 = tail call <4 x i32> @llvm.arm.mve.vmull.v4i32.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp4, i32 0, i32 0)
%mul = add <4 x i32> %tmp5, %tmp6
- %reduce = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %mul)
+ %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
%acc.next = add i32 %reduce, %acc
%add.ptr = getelementptr inbounds i16, i16* %x.addr.026, i32 8
%add.ptr4 = getelementptr inbounds i16, i16* %y.addr.025, i32 8
@@ -778,7 +778,7 @@
%tmp2 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp1, i32 2, <8 x i1> %tmp, <8 x i16> zeroinitializer)
%tmp4 = tail call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %tmp3, i32 2, <8 x i1> %tmp, <8 x i16> zeroinitializer)
%mul = tail call <4 x i32> @llvm.arm.mve.vmull.v4i32.v8i16(<8 x i16> %tmp2, <8 x i16> %tmp4, i32 0, i32 1)
- %reduce = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %mul)
+ %reduce = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)
%acc.next = add i32 %reduce, %acc
%add.ptr = getelementptr inbounds i16, i16* %x.addr.026, i32 8
%add.ptr4 = getelementptr inbounds i16, i16* %y.addr.025, i32 8
@@ -798,11 +798,11 @@
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
- declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
- declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
- declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
- declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+ declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+ declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+ declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+ declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i32> @llvm.arm.mve.vmull.v4i32.v8i16(<8 x i16>, <8 x i16>, i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
index 15aed3b..7aa772c 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
@@ -214,7 +214,7 @@
middle.block: ; preds = %vector.body
%i19 = select <4 x i1> %active.lane.mask, <4 x i32> %i16, <4 x i32> %vec.phi
- %i20 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %i19)
+ %i20 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %i19)
br label %for.end
for.end: ; preds = %middle.block, %for.body
@@ -235,6 +235,6 @@
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
declare void @llvm.set.loop.iterations.i32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
index 4f80869..4308c7e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-add-operand-liveout.mir
@@ -47,7 +47,7 @@
%15 = add i32 %8, 4
%16 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %15)
%17 = select <4 x i1> %16, <4 x i32> %12, <4 x i32> %vec.phi
- %18 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %17)
+ %18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %17)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -55,7 +55,7 @@
ret i32 %res.0.lcssa
}
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.arm.mve.vctp32(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
index a42c33e..9799ceb 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp16-reduce.mir
@@ -46,7 +46,7 @@
%.lcssa = phi <8 x i16> [ %15, %vector.body ]
%18 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %7)
%19 = select <8 x i1> %18, <8 x i16> %.lcssa, <8 x i16> %vec.phi.lcssa
- %20 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %19)
+ %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %19)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -54,7 +54,7 @@
ret i16 %a.0.lcssa
}
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
- declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+ declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <8 x i1> @llvm.arm.mve.vctp16(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
index 6628df2..422fc3c 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-arith-codegen.ll
@@ -70,7 +70,7 @@
middle.block: ; preds = %vector.body
%8 = select <4 x i1> %1, <4 x i32> %6, <4 x i32> %vec.phi
- %9 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %8)
+ %9 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %8)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -141,7 +141,7 @@
middle.block: ; preds = %vector.body
%5 = select <4 x i1> %1, <4 x i32> %3, <4 x i32> %vec.phi
- %6 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %5)
+ %6 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %5)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -212,7 +212,7 @@
middle.block: ; preds = %vector.body
%5 = select <4 x i1> %1, <4 x i32> %3, <4 x i32> %vec.phi
- %6 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %5)
+ %6 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %5)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -459,7 +459,7 @@
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
index 64e7552..c05ed7d 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
@@ -16,7 +16,7 @@
; CHECK: middle.block:
; CHECK: [[VPSEL:%[^ ]+]] = select <4 x i1> [[VCTP]],
-; CHECK: call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[VPSEL]])
+; CHECK: call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VPSEL]])
define i32 @vec_mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %N) {
entry:
@@ -64,7 +64,7 @@
middle.block: ; preds = %vector.body
%12 = select <4 x i1> %7, <4 x i32> %9, <4 x i32> %vec.phi
- %13 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %12)
+ %13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %12)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -73,7 +73,7 @@
}
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir
index 23cdf73..07c136e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wlstp.mir
@@ -118,7 +118,7 @@
middle.block: ; preds = %vector.body
%8 = call <4 x i1> @llvm.arm.vctp32(i32 %5)
%tmp8 = select <4 x i1> %8, <4 x i32> %tmp6, <4 x i32> %vec.phi
- %tmp9 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp8)
+ %tmp9 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp8)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -134,7 +134,7 @@
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <16 x i1> @llvm.arm.vctp8(i32)
declare void @llvm.stackprotector(i8*, i8**)
declare <8 x i1> @llvm.arm.vctp16(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir
index fc0aa20..fa7304e 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-liveout-lsr-shift.mir
@@ -46,7 +46,7 @@
%.lcssa = phi <8 x i16> [ %15, %vector.body ]
%18 = call <8 x i1> @llvm.arm.mve.vctp16(i32 %7)
%19 = select <8 x i1> %18, <8 x i16> %.lcssa, <8 x i16> %vec.phi.lcssa
- %20 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %19)
+ %20 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %19)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -54,7 +54,7 @@
ret i16 %a.0.lcssa
}
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
- declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
+ declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
declare void @llvm.set.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
declare <8 x i1> @llvm.arm.mve.vctp16(i32)
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir
index d91556e3..7ef303a 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-opcode-liveout.mir
@@ -52,7 +52,7 @@
%n.splat = shufflevector <4 x i32> %insert.n, <4 x i32> undef, <4 x i32> zeroinitializer
%tmp16 = icmp ult <4 x i32> %idx.splat, %n.splat
%tmp17 = select <4 x i1> %tmp16, <4 x i32> %tmp13, <4 x i32> %vec.phi
- %tmp18 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp17)
+ %tmp18 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp17)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -60,7 +60,7 @@
ret i32 %res.0.lcssa
}
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir
index 3378161..00abf16 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/wrong-vctp-operand-liveout.mir
@@ -45,7 +45,7 @@
middle.block: ; preds = %vector.body
%15 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %8)
%16 = select <4 x i1> %15, <4 x i32> %12, <4 x i32> %vec.phi
- %17 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %16)
+ %17 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %16)
br label %for.cond.cleanup
for.cond.cleanup: ; preds = %middle.block, %entry
@@ -53,7 +53,7 @@
ret i32 %res.0.lcssa
}
declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>) #1
- declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>) #2
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>) #2
declare void @llvm.set.loop.iterations.i32(i32) #3
declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index d158c85..e06ec42 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -572,7 +572,7 @@
br i1 %10, label %middle.block, label %vector.body, !llvm.loop !7
middle.block: ; preds = %vector.body
- %11 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
+ %11 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
;for.cond8.for.cond.cleanup10_crit_edge.us.us: ; preds = %for.body11.us.us, %middle.block
%add19.us.us = add i32 %j.051.us.us, %mul18.us
%arrayidx20.us.us = getelementptr inbounds i32, i32* %C, i32 %add19.us.us
@@ -803,7 +803,7 @@
br i1 %12, label %middle.block, label %vector.body, !llvm.loop !7
middle.block: ; preds = %vector.body
- %13 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %11)
+ %13 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %11)
br i1 %cmp.n, label %for.cond5.for.cond.cleanup7_crit_edge.us.us, label %for.body8.us.us.preheader
for.cond5.for.cond.cleanup7_crit_edge.us.us: ; preds = %for.body8.us.us, %middle.block
@@ -1065,7 +1065,7 @@
%wide.masked.gather75 = call <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*> %tmp85, i32 1, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i8> undef)
%tmp86 = sext <4 x i8> %wide.masked.gather75 to <4 x i32>
%tmp87 = mul nsw <4 x i32> %tmp84, %tmp86
- %tmp88 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %tmp87)
+ %tmp88 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %tmp87)
%tmp89 = add i32 %tmp88, %vec.phi
%index.next = add i32 %index, 4
%vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
@@ -1091,7 +1091,7 @@
declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32 immarg, <4 x i1>, <4 x i8>) #3
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare void @llvm.memset.p0i8.i32(i8* align 2, i8, i32, i1)
declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
index a4a6751..5c32f37 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-tailpred.ll
@@ -62,7 +62,7 @@
br i1 %8, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
- %10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
+ %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
store i32 %10, i32* %arrayidx.us.us, align 4
%inc21.us.us = add nuw i32 4, 1
%exitcond81.not = icmp eq i32 %inc21.us.us, %n
@@ -139,7 +139,7 @@
br i1 %8, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
- %10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
+ %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
store i32 %10, i32* %arrayidx.us.us, align 4
%inc21.us.us = add nuw i32 4, 1
%exitcond81.not = icmp eq i32 %inc21.us.us, %n
@@ -210,7 +210,7 @@
br i1 %8, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
%9 = select <4 x i1> %active.lane.mask, <4 x i32> %7, <4 x i32> %vec.phi
- %10 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %9)
+ %10 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %9)
store i32 %10, i32* %arrayidx.us.us, align 4
%inc21.us.us = add nuw i32 4, 1
%exitcond81.not = icmp eq i32 %inc21.us.us, %n
@@ -440,7 +440,7 @@
ret void
}
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
diff --git a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
index d67ccd9..b710912 100644
--- a/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-postinc-lsr.ll
@@ -1390,7 +1390,7 @@
declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32 immarg, <4 x i1>)
-declare i32 @llvm.experimental.vector.reduce.add.v16i8(<16 x i32> %ext4)
+declare i32 @llvm.vector.reduce.add.v16i8(<16 x i32> %ext4)
declare i32 @llvm.arm.mve.vmldava.v8i16(i32, i32, i32, i32, <8 x i16>, <8 x i16>)
declare i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32, i32, i32, i32, <16 x i8>, <16 x i8>, <16 x i1>)
declare i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32, i32, i32, i32, <8 x i16>, <8 x i16>, <8 x i1>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vaddv.ll b/llvm/test/CodeGen/Thumb2/mve-vaddv.ll
index e3f236b..d4a0456 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vaddv.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vaddv.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
-declare i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8>)
+declare i64 @llvm.vector.reduce.add.i64.v2i64(<2 x i64>)
+declare i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32>)
+declare i16 @llvm.vector.reduce.add.i16.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16>)
+declare i8 @llvm.vector.reduce.add.i8.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.add.i8.v32i8(<32 x i8>)
define arm_aapcs_vfpcc i64 @vaddv_v2i64_i64(<2 x i64> %s1) {
; CHECK-LABEL: vaddv_v2i64_i64:
@@ -20,7 +20,7 @@
; CHECK-NEXT: adcs r1, r2
; CHECK-NEXT: bx lr
entry:
- %r = call i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64> %s1)
+ %r = call i64 @llvm.vector.reduce.add.i64.v2i64(<2 x i64> %s1)
ret i64 %r
}
@@ -30,7 +30,7 @@
; CHECK-NEXT: vaddv.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
- %r = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %s1)
ret i32 %r
}
@@ -41,7 +41,7 @@
; CHECK-NEXT: vaddv.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
- %r = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32> %s1)
ret i32 %r
}
@@ -51,7 +51,7 @@
; CHECK-NEXT: vaddv.u16 r0, q0
; CHECK-NEXT: bx lr
entry:
- %r = call i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.add.i16.v8i16(<8 x i16> %s1)
ret i16 %r
}
@@ -62,7 +62,7 @@
; CHECK-NEXT: vaddv.u16 r0, q0
; CHECK-NEXT: bx lr
entry:
- %r = call i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %s1)
ret i16 %r
}
@@ -72,7 +72,7 @@
; CHECK-NEXT: vaddv.u8 r0, q0
; CHECK-NEXT: bx lr
entry:
- %r = call i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.add.i8.v16i8(<16 x i8> %s1)
ret i8 %r
}
@@ -83,7 +83,7 @@
; CHECK-NEXT: vaddv.u8 r0, q0
; CHECK-NEXT: bx lr
entry:
- %r = call i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.add.i8.v32i8(<32 x i8> %s1)
ret i8 %r
}
@@ -102,7 +102,7 @@
; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: pop {r7, pc}
entry:
- %t = call i64 @llvm.experimental.vector.reduce.add.i64.v2i64(<2 x i64> %s1)
+ %t = call i64 @llvm.vector.reduce.add.i64.v2i64(<2 x i64> %s1)
%r = add i64 %t, %x
ret i64 %r
}
@@ -113,7 +113,7 @@
; CHECK-NEXT: vaddva.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
- %t = call i32 @llvm.experimental.vector.reduce.add.i32.v4i32(<4 x i32> %s1)
+ %t = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %s1)
%r = add i32 %t, %x
ret i32 %r
}
@@ -125,7 +125,7 @@
; CHECK-NEXT: vaddva.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
- %t = call i32 @llvm.experimental.vector.reduce.add.i32.v8i32(<8 x i32> %s1)
+ %t = call i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32> %s1)
%r = add i32 %t, %x
ret i32 %r
}
@@ -136,7 +136,7 @@
; CHECK-NEXT: vaddva.u16 r0, q0
; CHECK-NEXT: bx lr
entry:
- %t = call i16 @llvm.experimental.vector.reduce.add.i16.v8i16(<8 x i16> %s1)
+ %t = call i16 @llvm.vector.reduce.add.i16.v8i16(<8 x i16> %s1)
%r = add i16 %t, %x
ret i16 %r
}
@@ -148,7 +148,7 @@
; CHECK-NEXT: vaddva.u16 r0, q0
; CHECK-NEXT: bx lr
entry:
- %t = call i16 @llvm.experimental.vector.reduce.add.i16.v16i16(<16 x i16> %s1)
+ %t = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %s1)
%r = add i16 %t, %x
ret i16 %r
}
@@ -159,7 +159,7 @@
; CHECK-NEXT: vaddva.u8 r0, q0
; CHECK-NEXT: bx lr
entry:
- %t = call i8 @llvm.experimental.vector.reduce.add.i8.v16i8(<16 x i8> %s1)
+ %t = call i8 @llvm.vector.reduce.add.i8.v16i8(<16 x i8> %s1)
%r = add i8 %t, %x
ret i8 %r
}
@@ -171,7 +171,7 @@
; CHECK-NEXT: vaddva.u8 r0, q0
; CHECK-NEXT: bx lr
entry:
- %t = call i8 @llvm.experimental.vector.reduce.add.i8.v32i8(<32 x i8> %s1)
+ %t = call i8 @llvm.vector.reduce.add.i8.v32i8(<32 x i8> %s1)
%r = add i8 %t, %x
ret i8 %r
}
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
index 35eecab..df2cb43 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll
@@ -7,7 +7,7 @@
; CHECK-NEXT: vaddv.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %x)
ret i32 %z
}
@@ -18,7 +18,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <4 x i32> %x to <4 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %xx)
ret i64 %z
}
@@ -29,7 +29,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <4 x i32> %x to <4 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %xx)
ret i64 %z
}
@@ -47,7 +47,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <2 x i32> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
ret i64 %z
}
@@ -65,7 +65,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i32> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
ret i64 %z
}
@@ -76,7 +76,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <8 x i16> %x to <8 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %xx)
ret i32 %z
}
@@ -87,7 +87,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <8 x i16> %x to <8 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %xx)
ret i32 %z
}
@@ -99,7 +99,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <4 x i16> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
ret i32 %z
}
@@ -111,7 +111,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <4 x i16> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
ret i32 %z
}
@@ -122,7 +122,7 @@
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %x)
ret i16 %z
}
@@ -175,7 +175,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <8 x i16> %x to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %xx)
ret i64 %z
}
@@ -242,7 +242,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <8 x i16> %x to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %xx)
ret i64 %z
}
@@ -258,7 +258,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <2 x i16> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
ret i64 %z
}
@@ -278,7 +278,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i16> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
ret i64 %z
}
@@ -289,7 +289,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <16 x i8> %x to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %xx)
ret i32 %z
}
@@ -300,7 +300,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <16 x i8> %x to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %xx)
ret i32 %z
}
@@ -313,7 +313,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <4 x i8> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
ret i32 %z
}
@@ -326,7 +326,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <4 x i8> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
ret i32 %z
}
@@ -338,7 +338,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <16 x i8> %x to <16 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %xx)
ret i16 %z
}
@@ -350,7 +350,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <16 x i8> %x to <16 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %xx)
ret i16 %z
}
@@ -363,7 +363,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <8 x i8> %x to <8 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %xx)
ret i16 %z
}
@@ -376,7 +376,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <8 x i8> %x to <8 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %xx)
ret i16 %z
}
@@ -387,7 +387,7 @@
; CHECK-NEXT: uxtb r0, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %x)
ret i8 %z
}
@@ -492,7 +492,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <16 x i8> %x to <16 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %xx)
ret i64 %z
}
@@ -627,7 +627,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <16 x i8> %x to <16 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %xx)
ret i64 %z
}
@@ -643,7 +643,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <2 x i8> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
ret i64 %z
}
@@ -663,7 +663,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i8> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
ret i64 %z
}
@@ -678,7 +678,7 @@
; CHECK-NEXT: adcs r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %x)
ret i64 %z
}
@@ -688,7 +688,7 @@
; CHECK-NEXT: vaddva.u32 r0, q0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %x)
%r = add i32 %z, %a
ret i32 %r
}
@@ -700,7 +700,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <4 x i32> %x to <4 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -712,7 +712,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <4 x i32> %x to <4 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -735,7 +735,7 @@
; CHECK-NEXT: pop {r7, pc}
entry:
%xx = zext <2 x i32> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -756,7 +756,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i32> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -768,7 +768,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <8 x i16> %x to <8 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -780,7 +780,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <8 x i16> %x to <8 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -793,7 +793,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <4 x i16> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -806,7 +806,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <4 x i16> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -818,7 +818,7 @@
; CHECK-NEXT: uxth r0, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %x)
%r = add i16 %z, %a
ret i16 %r
}
@@ -876,7 +876,7 @@
; CHECK-NEXT: pop {r4, pc}
entry:
%xx = zext <8 x i16> %x to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -948,7 +948,7 @@
; CHECK-NEXT: pop {r4, pc}
entry:
%xx = sext <8 x i16> %x to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -967,7 +967,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <2 x i16> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -990,7 +990,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i16> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1002,7 +1002,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <16 x i8> %x to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1014,7 +1014,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <16 x i8> %x to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1028,7 +1028,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <4 x i8> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1042,7 +1042,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <4 x i8> %x to <4 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %xx)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %xx)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1055,7 +1055,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <16 x i8> %x to <16 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %xx)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1068,7 +1068,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <16 x i8> %x to <16 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %xx)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1082,7 +1082,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <8 x i8> %x to <8 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %xx)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1096,7 +1096,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <8 x i8> %x to <8 x i16>
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %xx)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %xx)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1108,7 +1108,7 @@
; CHECK-NEXT: uxtb r0, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %x)
%r = add i8 %z, %a
ret i8 %r
}
@@ -1218,7 +1218,7 @@
; CHECK-NEXT: pop {r4, pc}
entry:
%xx = zext <16 x i8> %x to <16 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1358,7 +1358,7 @@
; CHECK-NEXT: pop {r4, pc}
entry:
%xx = sext <16 x i8> %x to <16 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1377,7 +1377,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = zext <2 x i8> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1400,7 +1400,7 @@
; CHECK-NEXT: bx lr
entry:
%xx = sext <2 x i8> %x to <2 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %xx)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %xx)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1420,18 +1420,18 @@
; CHECK-NEXT: adcs r1, r3
; CHECK-NEXT: pop {r7, pc}
entry:
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %x)
%r = add i64 %z, %a
ret i64 %r
}
-declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
-declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
-declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
index 0f3aacf..e59fb0b 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll
@@ -10,7 +10,7 @@
entry:
%c = icmp eq <4 x i32> %b, zeroinitializer
%s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -24,7 +24,7 @@
%c = icmp eq <4 x i32> %b, zeroinitializer
%xx = zext <4 x i32> %x to <4 x i64>
%s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
ret i64 %z
}
@@ -38,7 +38,7 @@
%c = icmp eq <4 x i32> %b, zeroinitializer
%xx = sext <4 x i32> %x to <4 x i64>
%s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
ret i64 %z
}
@@ -73,7 +73,7 @@
%c = icmp eq <2 x i32> %b, zeroinitializer
%xx = zext <2 x i32> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -114,7 +114,7 @@
%c = icmp eq <2 x i32> %b, zeroinitializer
%xx = sext <2 x i32> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -128,7 +128,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = zext <8 x i16> %x to <8 x i32>
%s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
ret i32 %z
}
@@ -142,7 +142,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = sext <8 x i16> %x to <8 x i32>
%s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
ret i32 %z
}
@@ -158,7 +158,7 @@
%c = icmp eq <4 x i16> %b, zeroinitializer
%xx = zext <4 x i16> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -174,7 +174,7 @@
%c = icmp eq <4 x i16> %b, zeroinitializer
%xx = sext <4 x i16> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -188,7 +188,7 @@
entry:
%c = icmp eq <8 x i16> %b, zeroinitializer
%s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
ret i16 %z
}
@@ -314,7 +314,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = zext <8 x i16> %x to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -456,7 +456,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = sext <8 x i16> %x to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -492,7 +492,7 @@
%c = icmp eq <2 x i16> %b, zeroinitializer
%xx = zext <2 x i16> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -537,7 +537,7 @@
%c = icmp eq <2 x i16> %b, zeroinitializer
%xx = sext <2 x i16> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -551,7 +551,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = zext <16 x i8> %x to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -565,7 +565,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = sext <16 x i8> %x to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -582,7 +582,7 @@
%c = icmp eq <4 x i8> %b, zeroinitializer
%xx = zext <4 x i8> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -600,7 +600,7 @@
%c = icmp eq <4 x i8> %b, zeroinitializer
%xx = sext <4 x i8> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -615,7 +615,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = zext <16 x i8> %x to <16 x i16>
%s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
ret i16 %z
}
@@ -630,7 +630,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = sext <16 x i8> %x to <16 x i16>
%s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
ret i16 %z
}
@@ -647,7 +647,7 @@
%c = icmp eq <8 x i8> %b, zeroinitializer
%xx = zext <8 x i8> %x to <8 x i16>
%s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
ret i16 %z
}
@@ -664,7 +664,7 @@
%c = icmp eq <8 x i8> %b, zeroinitializer
%xx = sext <8 x i8> %x to <8 x i16>
%s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
ret i16 %z
}
@@ -678,7 +678,7 @@
entry:
%c = icmp eq <16 x i8> %b, zeroinitializer
%s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %s)
ret i8 %z
}
@@ -948,7 +948,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = zext <16 x i8> %x to <16 x i64>
%s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
ret i64 %z
}
@@ -1257,7 +1257,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = sext <16 x i8> %x to <16 x i64>
%s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
ret i64 %z
}
@@ -1293,7 +1293,7 @@
%c = icmp eq <2 x i8> %b, zeroinitializer
%xx = zext <2 x i8> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -1338,7 +1338,7 @@
%c = icmp eq <2 x i8> %b, zeroinitializer
%xx = sext <2 x i8> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -1372,7 +1372,7 @@
entry:
%c = icmp eq <2 x i64> %b, zeroinitializer
%s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -1385,7 +1385,7 @@
entry:
%c = icmp eq <4 x i32> %b, zeroinitializer
%s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1400,7 +1400,7 @@
%c = icmp eq <4 x i32> %b, zeroinitializer
%xx = zext <4 x i32> %x to <4 x i64>
%s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1415,7 +1415,7 @@
%c = icmp eq <4 x i32> %b, zeroinitializer
%xx = sext <4 x i32> %x to <4 x i64>
%s = select <4 x i1> %c, <4 x i64> %xx, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1455,7 +1455,7 @@
%c = icmp eq <2 x i32> %b, zeroinitializer
%xx = zext <2 x i32> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1501,7 +1501,7 @@
%c = icmp eq <2 x i32> %b, zeroinitializer
%xx = sext <2 x i32> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1516,7 +1516,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = zext <8 x i16> %x to <8 x i32>
%s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1531,7 +1531,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = sext <8 x i16> %x to <8 x i32>
%s = select <8 x i1> %c, <8 x i32> %xx, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1548,7 +1548,7 @@
%c = icmp eq <4 x i16> %b, zeroinitializer
%xx = zext <4 x i16> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1565,7 +1565,7 @@
%c = icmp eq <4 x i16> %b, zeroinitializer
%xx = sext <4 x i16> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1580,7 +1580,7 @@
entry:
%c = icmp eq <8 x i16> %b, zeroinitializer
%s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1711,7 +1711,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = zext <8 x i16> %x to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1858,7 +1858,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%xx = sext <8 x i16> %x to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %xx, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1897,7 +1897,7 @@
%c = icmp eq <2 x i16> %b, zeroinitializer
%xx = zext <2 x i16> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1947,7 +1947,7 @@
%c = icmp eq <2 x i16> %b, zeroinitializer
%xx = sext <2 x i16> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1962,7 +1962,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = zext <16 x i8> %x to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1977,7 +1977,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = sext <16 x i8> %x to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %xx, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1995,7 +1995,7 @@
%c = icmp eq <4 x i8> %b, zeroinitializer
%xx = zext <4 x i8> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -2014,7 +2014,7 @@
%c = icmp eq <4 x i8> %b, zeroinitializer
%xx = sext <4 x i8> %x to <4 x i32>
%s = select <4 x i1> %c, <4 x i32> %xx, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -2030,7 +2030,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = zext <16 x i8> %x to <16 x i16>
%s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2046,7 +2046,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = sext <16 x i8> %x to <16 x i16>
%s = select <16 x i1> %c, <16 x i16> %xx, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2064,7 +2064,7 @@
%c = icmp eq <8 x i8> %b, zeroinitializer
%xx = zext <8 x i8> %x to <8 x i16>
%s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2082,7 +2082,7 @@
%c = icmp eq <8 x i8> %b, zeroinitializer
%xx = sext <8 x i8> %x to <8 x i16>
%s = select <8 x i1> %c, <8 x i16> %xx, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2097,7 +2097,7 @@
entry:
%c = icmp eq <16 x i8> %b, zeroinitializer
%s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> zeroinitializer
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %s)
%r = add i8 %z, %a
ret i8 %r
}
@@ -2372,7 +2372,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = zext <16 x i8> %x to <16 x i64>
%s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -2686,7 +2686,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%xx = sext <16 x i8> %x to <16 x i64>
%s = select <16 x i1> %c, <16 x i64> %xx, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -2725,7 +2725,7 @@
%c = icmp eq <2 x i8> %b, zeroinitializer
%xx = zext <2 x i8> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -2775,7 +2775,7 @@
%c = icmp eq <2 x i8> %b, zeroinitializer
%xx = sext <2 x i8> %x to <2 x i64>
%s = select <2 x i1> %c, <2 x i64> %xx, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -2814,18 +2814,18 @@
entry:
%c = icmp eq <2 x i64> %b, zeroinitializer
%s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
-declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
-declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
-declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll
index fc06181..cf9c2b6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-bit.ll
@@ -9,7 +9,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %x)
ret i32 %z
}
@@ -25,7 +25,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %x)
ret i32 %z
}
@@ -42,7 +42,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %x)
ret i32 %z
}
@@ -58,7 +58,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %x)
ret i16 %z
}
@@ -76,7 +76,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %x)
ret i16 %z
}
@@ -95,7 +95,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %x)
ret i16 %z
}
@@ -113,7 +113,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %x)
ret i8 %z
}
@@ -133,7 +133,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %x)
ret i8 %z
}
@@ -154,7 +154,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %x)
ret i8 %z
}
@@ -163,7 +163,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.and.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.and.v1i64(<1 x i64> %x)
ret i64 %z
}
@@ -178,7 +178,7 @@
; CHECK-NEXT: ands r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %x)
ret i64 %z
}
@@ -194,7 +194,7 @@
; CHECK-NEXT: ands r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %x)
ret i64 %z
}
@@ -207,7 +207,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %x)
%r = and i32 %y, %z
ret i32 %r
}
@@ -225,7 +225,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %x)
%r = and i32 %y, %z
ret i32 %r
}
@@ -244,7 +244,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %x)
%r = and i32 %y, %z
ret i32 %r
}
@@ -262,7 +262,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %x)
%r = and i16 %y, %z
ret i16 %r
}
@@ -282,7 +282,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %x)
%r = and i16 %y, %z
ret i16 %r
}
@@ -303,7 +303,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %x)
%r = and i16 %y, %z
ret i16 %r
}
@@ -323,7 +323,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %x)
%r = and i8 %y, %z
ret i8 %r
}
@@ -345,7 +345,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %x)
%r = and i8 %y, %z
ret i8 %r
}
@@ -368,7 +368,7 @@
; CHECK-NEXT: ands r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %x)
%r = and i8 %y, %z
ret i8 %r
}
@@ -380,7 +380,7 @@
; CHECK-NEXT: ands r1, r3
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.and.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.and.v1i64(<1 x i64> %x)
%r = and i64 %y, %z
ret i64 %r
}
@@ -398,7 +398,7 @@
; CHECK-NEXT: ands r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %x)
%r = and i64 %y, %z
ret i64 %r
}
@@ -417,7 +417,7 @@
; CHECK-NEXT: ands r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %x)
%r = and i64 %y, %z
ret i64 %r
}
@@ -430,7 +430,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %x)
ret i32 %z
}
@@ -446,7 +446,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %x)
ret i32 %z
}
@@ -463,7 +463,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %x)
ret i32 %z
}
@@ -479,7 +479,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %x)
ret i16 %z
}
@@ -497,7 +497,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %x)
ret i16 %z
}
@@ -516,7 +516,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %x)
ret i16 %z
}
@@ -534,7 +534,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %x)
ret i8 %z
}
@@ -554,7 +554,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %x)
ret i8 %z
}
@@ -575,7 +575,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %x)
ret i8 %z
}
@@ -584,7 +584,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.or.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.or.v1i64(<1 x i64> %x)
ret i64 %z
}
@@ -599,7 +599,7 @@
; CHECK-NEXT: orrs r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %x)
ret i64 %z
}
@@ -615,7 +615,7 @@
; CHECK-NEXT: orrs r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %x)
ret i64 %z
}
@@ -628,7 +628,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.or.v2i32(<2 x i32> %x)
%r = or i32 %y, %z
ret i32 %r
}
@@ -646,7 +646,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %x)
%r = or i32 %y, %z
ret i32 %r
}
@@ -665,7 +665,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> %x)
%r = or i32 %y, %z
ret i32 %r
}
@@ -683,7 +683,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.or.v4i16(<4 x i16> %x)
%r = or i16 %y, %z
ret i16 %r
}
@@ -703,7 +703,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %x)
%r = or i16 %y, %z
ret i16 %r
}
@@ -724,7 +724,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %x)
%r = or i16 %y, %z
ret i16 %r
}
@@ -744,7 +744,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.or.v8i8(<8 x i8> %x)
%r = or i8 %y, %z
ret i8 %r
}
@@ -766,7 +766,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %x)
%r = or i8 %y, %z
ret i8 %r
}
@@ -789,7 +789,7 @@
; CHECK-NEXT: orrs r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %x)
%r = or i8 %y, %z
ret i8 %r
}
@@ -801,7 +801,7 @@
; CHECK-NEXT: orrs r1, r3
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.or.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.or.v1i64(<1 x i64> %x)
%r = or i64 %y, %z
ret i64 %r
}
@@ -819,7 +819,7 @@
; CHECK-NEXT: orrs r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %x)
%r = or i64 %y, %z
ret i64 %r
}
@@ -838,7 +838,7 @@
; CHECK-NEXT: orrs r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.or.v4i64(<4 x i64> %x)
%r = or i64 %y, %z
ret i64 %r
}
@@ -851,7 +851,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %x)
ret i32 %z
}
@@ -867,7 +867,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %x)
ret i32 %z
}
@@ -884,7 +884,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %x)
ret i32 %z
}
@@ -900,7 +900,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %x)
ret i16 %z
}
@@ -918,7 +918,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %x)
ret i16 %z
}
@@ -937,7 +937,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %x)
ret i16 %z
}
@@ -955,7 +955,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %x)
ret i8 %z
}
@@ -975,7 +975,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %x)
ret i8 %z
}
@@ -996,7 +996,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %x)
ret i8 %z
}
@@ -1005,7 +1005,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.xor.v1i64(<1 x i64> %x)
ret i64 %z
}
@@ -1020,7 +1020,7 @@
; CHECK-NEXT: eors r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %x)
ret i64 %z
}
@@ -1036,7 +1036,7 @@
; CHECK-NEXT: eors r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %x)
ret i64 %z
}
@@ -1049,7 +1049,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.xor.v2i32(<2 x i32> %x)
%r = xor i32 %y, %z
ret i32 %r
}
@@ -1067,7 +1067,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %x)
%r = xor i32 %y, %z
ret i32 %r
}
@@ -1086,7 +1086,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.xor.v8i32(<8 x i32> %x)
%r = xor i32 %y, %z
ret i32 %r
}
@@ -1104,7 +1104,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.xor.v4i16(<4 x i16> %x)
%r = xor i16 %y, %z
ret i16 %r
}
@@ -1124,7 +1124,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.xor.v8i16(<8 x i16> %x)
%r = xor i16 %y, %z
ret i16 %r
}
@@ -1145,7 +1145,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %x)
%r = xor i16 %y, %z
ret i16 %r
}
@@ -1165,7 +1165,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> %x)
%r = xor i8 %y, %z
ret i8 %r
}
@@ -1187,7 +1187,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.xor.v16i8(<16 x i8> %x)
%r = xor i8 %y, %z
ret i8 %r
}
@@ -1210,7 +1210,7 @@
; CHECK-NEXT: eors r0, r1
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.xor.v32i8(<32 x i8> %x)
%r = xor i8 %y, %z
ret i8 %r
}
@@ -1222,7 +1222,7 @@
; CHECK-NEXT: eors r1, r3
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.xor.v1i64(<1 x i64> %x)
%r = xor i64 %y, %z
ret i64 %r
}
@@ -1240,7 +1240,7 @@
; CHECK-NEXT: eors r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.xor.v2i64(<2 x i64> %x)
%r = xor i64 %y, %z
ret i64 %r
}
@@ -1259,44 +1259,44 @@
; CHECK-NEXT: eors r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.xor.v4i64(<4 x i64> %x)
%r = xor i64 %y, %z
ret i64 %r
}
-declare i16 @llvm.experimental.vector.reduce.and.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.and.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.or.v8i16(<8 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.xor.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.and.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v8i32(<8 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v8i32(<8 x i32>)
-declare i64 @llvm.experimental.vector.reduce.and.v1i64(<1 x i64>)
-declare i64 @llvm.experimental.vector.reduce.and.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.and.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.or.v1i64(<1 x i64>)
-declare i64 @llvm.experimental.vector.reduce.or.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.or.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.xor.v1i64(<1 x i64>)
-declare i64 @llvm.experimental.vector.reduce.xor.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.xor.v4i64(<4 x i64>)
-declare i8 @llvm.experimental.vector.reduce.and.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.and.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.or.v8i8(<8 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.xor.v8i8(<8 x i8>)
+declare i16 @llvm.vector.reduce.and.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.and.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.and.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.or.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.or.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.or.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.xor.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.xor.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.and.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.and.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.or.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.or.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.xor.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.xor.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.and.v1i64(<1 x i64>)
+declare i64 @llvm.vector.reduce.and.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.and.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.or.v1i64(<1 x i64>)
+declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.or.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.xor.v1i64(<1 x i64>)
+declare i64 @llvm.vector.reduce.xor.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.xor.v4i64(<4 x i64>)
+declare i8 @llvm.vector.reduce.and.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)
+declare i8 @llvm.vector.reduce.and.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.or.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.or.v32i8(<32 x i8>)
+declare i8 @llvm.vector.reduce.or.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.xor.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.xor.v32i8(<32 x i8>)
+declare i8 @llvm.vector.reduce.xor.v8i8(<8 x i8>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fadd.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fadd.ll
index 77f0c77..8ead4f5 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fadd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fadd.ll
@@ -9,7 +9,7 @@
; CHECK-NEXT: vadd.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float %y, <2 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float %y, <2 x float> %x)
ret float %z
}
@@ -30,7 +30,7 @@
; CHECK-NOFP-NEXT: vadd.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float %y, <4 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float %y, <4 x float> %x)
ret float %z
}
@@ -56,7 +56,7 @@
; CHECK-NOFP-NEXT: vadd.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float %y, <8 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float %y, <8 x float> %x)
ret float %z
}
@@ -71,7 +71,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fadd.f16.v2f16(half %y, <2 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fadd.f16.v2f16(half %y, <2 x half> %x)
store half %z, half* %yy
ret void
}
@@ -102,7 +102,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fadd.f16.v4f16(half %y, <4 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fadd.f16.v4f16(half %y, <4 x half> %x)
store half %z, half* %yy
ret void
}
@@ -139,7 +139,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fadd.f16.v8f16(half %y, <8 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fadd.f16.v8f16(half %y, <8 x half> %x)
store half %z, half* %yy
ret void
}
@@ -189,7 +189,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fadd.f16.v16f16(half %y, <16 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fadd.f16.v16f16(half %y, <16 x half> %x)
store half %z, half* %yy
ret void
}
@@ -200,7 +200,7 @@
; CHECK-NEXT: vadd.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double %y, <1 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fadd.f64.v1f64(double %y, <1 x double> %x)
ret double %z
}
@@ -211,7 +211,7 @@
; CHECK-NEXT: vadd.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double %y, <2 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double %y, <2 x double> %x)
ret double %z
}
@@ -224,7 +224,7 @@
; CHECK-NEXT: vadd.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double %y, <4 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double %y, <4 x double> %x)
ret double %z
}
@@ -235,7 +235,7 @@
; CHECK-NEXT: vadd.f32 s0, s4, s1
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float %y, <2 x float> %x)
+ %z = call float @llvm.vector.reduce.fadd.f32.v2f32(float %y, <2 x float> %x)
ret float %z
}
@@ -248,7 +248,7 @@
; CHECK-NEXT: vadd.f32 s0, s4, s3
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float %y, <4 x float> %x)
+ %z = call float @llvm.vector.reduce.fadd.f32.v4f32(float %y, <4 x float> %x)
ret float %z
}
@@ -265,7 +265,7 @@
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float %y, <8 x float> %x)
+ %z = call float @llvm.vector.reduce.fadd.f32.v8f32(float %y, <8 x float> %x)
ret float %z
}
@@ -283,7 +283,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v4f16(half %y, <4 x half> %x)
+ %z = call half @llvm.vector.reduce.fadd.f16.v4f16(half %y, <4 x half> %x)
store half %z, half* %yy
ret void
}
@@ -308,7 +308,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v8f16(half %y, <8 x half> %x)
+ %z = call half @llvm.vector.reduce.fadd.f16.v8f16(half %y, <8 x half> %x)
store half %z, half* %yy
ret void
}
@@ -345,7 +345,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fadd.f16.v16f16(half %y, <16 x half> %x)
+ %z = call half @llvm.vector.reduce.fadd.f16.v16f16(half %y, <16 x half> %x)
store half %z, half* %yy
ret void
}
@@ -356,7 +356,7 @@
; CHECK-NEXT: vadd.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double %y, <1 x double> %x)
+ %z = call double @llvm.vector.reduce.fadd.f64.v1f64(double %y, <1 x double> %x)
ret double %z
}
@@ -367,7 +367,7 @@
; CHECK-NEXT: vadd.f64 d0, d2, d1
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double %y, <2 x double> %x)
+ %z = call double @llvm.vector.reduce.fadd.f64.v2f64(double %y, <2 x double> %x)
ret double %z
}
@@ -380,17 +380,17 @@
; CHECK-NEXT: vadd.f64 d0, d0, d3
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double %y, <4 x double> %x)
+ %z = call double @llvm.vector.reduce.fadd.f64.v4f64(double %y, <4 x double> %x)
ret double %z
}
-declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v1f64(double, <1 x double>)
-declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v2f64(double, <2 x double>)
-declare double @llvm.experimental.vector.reduce.v2.fadd.f64.v4f64(double, <4 x double>)
-declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v2f32(float, <2 x float>)
-declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float, <4 x float>)
-declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v8f32(float, <8 x float>)
-declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v16f16(half, <16 x half>)
-declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v2f16(half, <2 x half>)
-declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v4f16(half, <4 x half>)
-declare half @llvm.experimental.vector.reduce.v2.fadd.f16.v8f16(half, <8 x half>)
+declare double @llvm.vector.reduce.fadd.f64.v1f64(double, <1 x double>)
+declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
+declare double @llvm.vector.reduce.fadd.f64.v4f64(double, <4 x double>)
+declare float @llvm.vector.reduce.fadd.f32.v2f32(float, <2 x float>)
+declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
+declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
+declare half @llvm.vector.reduce.fadd.f16.v16f16(half, <16 x half>)
+declare half @llvm.vector.reduce.fadd.f16.v2f16(half, <2 x half>)
+declare half @llvm.vector.reduce.fadd.f16.v4f16(half, <4 x half>)
+declare half @llvm.vector.reduce.fadd.f16.v8f16(half, <8 x half>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
index a83fa68..45c6972 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fminmax.ll
@@ -8,7 +8,7 @@
; CHECK-NEXT: vminnm.f32 s0, s0, s1
; CHECK-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmin.v2f32(<2 x float> %x)
ret float %z
}
@@ -27,7 +27,7 @@
; CHECK-NOFP-NEXT: vminnm.f32 s0, s4, s3
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> %x)
ret float %z
}
@@ -60,7 +60,7 @@
; CHECK-NOFP-NEXT: vminnm.f32 s0, s2, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmin.v8f32(<8 x float> %x)
ret float %z
}
@@ -83,7 +83,7 @@
; CHECK-NOFP-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v4f16(<4 x half> %x)
ret half %z
}
@@ -112,7 +112,7 @@
; CHECK-NOFP-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v8f16(<8 x half> %x)
ret half %z
}
@@ -170,7 +170,7 @@
; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v16f16(<16 x half> %x)
ret half %z
}
@@ -179,7 +179,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmin.v1f64(<1 x double> %x)
ret double %z
}
@@ -189,7 +189,7 @@
; CHECK-NEXT: vminnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmin.v2f64(<2 x double> %x)
ret double %z
}
@@ -205,7 +205,7 @@
; CHECK-NEXT: vminnm.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmin.v4f64(<4 x double> %x)
ret double %z
}
@@ -215,7 +215,7 @@
; CHECK-NEXT: vminnm.f32 s0, s0, s1
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ %z = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %x)
ret float %z
}
@@ -234,7 +234,7 @@
; CHECK-NOFP-NEXT: vminnm.f32 s0, s4, s3
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ %z = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %x)
ret float %z
}
@@ -258,7 +258,7 @@
; CHECK-NOFP-NEXT: vminnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ %z = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %x)
ret float %z
}
@@ -281,7 +281,7 @@
; CHECK-NOFP-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ %z = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %x)
ret half %z
}
@@ -310,7 +310,7 @@
; CHECK-NOFP-NEXT: vminnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ %z = call half @llvm.vector.reduce.fmin.v8f16(<8 x half> %x)
ret half %z
}
@@ -352,7 +352,7 @@
; CHECK-NOFP-NEXT: vminnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ %z = call half @llvm.vector.reduce.fmin.v16f16(<16 x half> %x)
ret half %z
}
@@ -361,7 +361,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ %z = call double @llvm.vector.reduce.fmin.v1f64(<1 x double> %x)
ret double %z
}
@@ -371,7 +371,7 @@
; CHECK-NEXT: vminnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ %z = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %x)
ret double %z
}
@@ -383,7 +383,7 @@
; CHECK-NEXT: vminnm.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ %z = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %x)
ret double %z
}
@@ -394,7 +394,7 @@
; CHECK-NEXT: vminnm.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmin.v2f32(<2 x float> %x)
%c = fcmp fast olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -417,7 +417,7 @@
; CHECK-NOFP-NEXT: vminnm.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> %x)
%c = fcmp fast olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -453,7 +453,7 @@
; CHECK-NOFP-NEXT: vminnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmin.v8f32(<8 x float> %x)
%c = fcmp fast olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -485,7 +485,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v4f16(<4 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -503,7 +503,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v2f16(<2 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v2f16(<2 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -542,7 +542,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v8f16(<8 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -610,7 +610,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmin.v16f16(<16 x half> %x)
%c = fcmp fast olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -623,7 +623,7 @@
; CHECK-NEXT: vminnm.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmin.v1f64(<1 x double> %x)
%c = fcmp fast olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -636,7 +636,7 @@
; CHECK-NEXT: vminnm.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmin.v2f64(<2 x double> %x)
%c = fcmp fast olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -655,7 +655,7 @@
; CHECK-NEXT: vminnm.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmin.v4f64(<4 x double> %x)
%c = fcmp fast olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -670,7 +670,7 @@
; CHECK-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float> %x)
+ %z = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %x)
%c = fcmp olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -697,7 +697,7 @@
; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %x)
+ %z = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %x)
%c = fcmp olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -729,7 +729,7 @@
; CHECK-NOFP-NEXT: vselgt.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float> %x)
+ %z = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %x)
%c = fcmp olt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -765,7 +765,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half> %x)
+ %z = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %x)
%c = fcmp olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -808,7 +808,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half> %x)
+ %z = call half @llvm.vector.reduce.fmin.v8f16(<8 x half> %x)
%c = fcmp olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -864,7 +864,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half> %x)
+ %z = call half @llvm.vector.reduce.fmin.v16f16(<16 x half> %x)
%c = fcmp olt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -879,7 +879,7 @@
; CHECK-NEXT: vselgt.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double> %x)
+ %z = call double @llvm.vector.reduce.fmin.v1f64(<1 x double> %x)
%c = fcmp olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -894,7 +894,7 @@
; CHECK-NEXT: vselgt.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double> %x)
+ %z = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %x)
%c = fcmp olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -911,7 +911,7 @@
; CHECK-NEXT: vselgt.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double> %x)
+ %z = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %x)
%c = fcmp olt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -923,7 +923,7 @@
; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
; CHECK-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmax.v2f32(<2 x float> %x)
ret float %z
}
@@ -942,7 +942,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s4, s3
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> %x)
ret float %z
}
@@ -974,7 +974,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s2, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmax.v8f32(<8 x float> %x)
ret float %z
}
@@ -997,7 +997,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v4f16(<4 x half> %x)
ret half %z
}
@@ -1026,7 +1026,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v8f16(<8 x half> %x)
ret half %z
}
@@ -1084,7 +1084,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v16f16(<16 x half> %x)
ret half %z
}
@@ -1093,7 +1093,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmax.v1f64(<1 x double> %x)
ret double %z
}
@@ -1103,7 +1103,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmax.v2f64(<2 x double> %x)
ret double %z
}
@@ -1119,7 +1119,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmax.v4f64(<4 x double> %x)
ret double %z
}
@@ -1129,7 +1129,7 @@
; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ %z = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %x)
ret float %z
}
@@ -1148,7 +1148,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s4, s3
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ %z = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %x)
ret float %z
}
@@ -1172,7 +1172,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ %z = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %x)
ret float %z
}
@@ -1195,7 +1195,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ %z = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %x)
ret half %z
}
@@ -1224,7 +1224,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ %z = call half @llvm.vector.reduce.fmax.v8f16(<8 x half> %x)
ret half %z
}
@@ -1266,7 +1266,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f16 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ %z = call half @llvm.vector.reduce.fmax.v16f16(<16 x half> %x)
ret half %z
}
@@ -1275,7 +1275,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ %z = call double @llvm.vector.reduce.fmax.v1f64(<1 x double> %x)
ret double %z
}
@@ -1285,7 +1285,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d0, d1
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ %z = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %x)
ret double %z
}
@@ -1297,7 +1297,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d0, d4
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ %z = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %x)
ret double %z
}
@@ -1308,7 +1308,7 @@
; CHECK-NEXT: vmaxnm.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmax.v2f32(<2 x float> %x)
%c = fcmp fast ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -1331,7 +1331,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> %x)
%c = fcmp fast ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -1367,7 +1367,7 @@
; CHECK-NOFP-NEXT: vmaxnm.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmax.v8f32(<8 x float> %x)
%c = fcmp fast ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -1384,7 +1384,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v2f16(<2 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v2f16(<2 x half> %x)
%c = fcmp fast ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1417,7 +1417,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v4f16(<4 x half> %x)
%c = fcmp fast ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1456,7 +1456,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v8f16(<8 x half> %x)
%c = fcmp fast ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1524,7 +1524,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmax.v16f16(<16 x half> %x)
%c = fcmp fast ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1537,7 +1537,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmax.v1f64(<1 x double> %x)
%c = fcmp fast ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -1550,7 +1550,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmax.v2f64(<2 x double> %x)
%c = fcmp fast ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -1569,7 +1569,7 @@
; CHECK-NEXT: vmaxnm.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmax.v4f64(<4 x double> %x)
%c = fcmp fast ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -1584,7 +1584,7 @@
; CHECK-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float> %x)
+ %z = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %x)
%c = fcmp ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -1611,7 +1611,7 @@
; CHECK-NOFP-NEXT: vselgt.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %x)
+ %z = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %x)
%c = fcmp ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -1643,7 +1643,7 @@
; CHECK-NOFP-NEXT: vselgt.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float> %x)
+ %z = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %x)
%c = fcmp ogt float %y, %z
%r = select i1 %c, float %y, float %z
ret float %r
@@ -1679,7 +1679,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half> %x)
+ %z = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %x)
%c = fcmp ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1722,7 +1722,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half> %x)
+ %z = call half @llvm.vector.reduce.fmax.v8f16(<8 x half> %x)
%c = fcmp ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1778,7 +1778,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half> %x)
+ %z = call half @llvm.vector.reduce.fmax.v16f16(<16 x half> %x)
%c = fcmp ogt half %y, %z
%r = select i1 %c, half %y, half %z
store half %r, half* %yy
@@ -1793,7 +1793,7 @@
; CHECK-NEXT: vselgt.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double> %x)
+ %z = call double @llvm.vector.reduce.fmax.v1f64(<1 x double> %x)
%c = fcmp ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -1808,7 +1808,7 @@
; CHECK-NEXT: vselgt.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double> %x)
+ %z = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %x)
%c = fcmp ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
@@ -1825,29 +1825,29 @@
; CHECK-NEXT: vselgt.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double> %x)
+ %z = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %x)
%c = fcmp ogt double %y, %z
%r = select i1 %c, double %y, double %z
ret double %r
}
-declare double @llvm.experimental.vector.reduce.fmax.v1f64(<1 x double>)
-declare double @llvm.experimental.vector.reduce.fmax.v2f64(<2 x double>)
-declare double @llvm.experimental.vector.reduce.fmax.v4f64(<4 x double>)
-declare double @llvm.experimental.vector.reduce.fmin.v1f64(<1 x double>)
-declare double @llvm.experimental.vector.reduce.fmin.v2f64(<2 x double>)
-declare double @llvm.experimental.vector.reduce.fmin.v4f64(<4 x double>)
-declare float @llvm.experimental.vector.reduce.fmax.v2f32(<2 x float>)
-declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
-declare float @llvm.experimental.vector.reduce.fmax.v8f32(<8 x float>)
-declare float @llvm.experimental.vector.reduce.fmin.v2f32(<2 x float>)
-declare float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float>)
-declare float @llvm.experimental.vector.reduce.fmin.v8f32(<8 x float>)
-declare half @llvm.experimental.vector.reduce.fmax.v16f16(<16 x half>)
-declare half @llvm.experimental.vector.reduce.fmax.v2f16(<2 x half>)
-declare half @llvm.experimental.vector.reduce.fmax.v4f16(<4 x half>)
-declare half @llvm.experimental.vector.reduce.fmax.v8f16(<8 x half>)
-declare half @llvm.experimental.vector.reduce.fmin.v16f16(<16 x half>)
-declare half @llvm.experimental.vector.reduce.fmin.v2f16(<2 x half>)
-declare half @llvm.experimental.vector.reduce.fmin.v4f16(<4 x half>)
-declare half @llvm.experimental.vector.reduce.fmin.v8f16(<8 x half>)
+declare double @llvm.vector.reduce.fmax.v1f64(<1 x double>)
+declare double @llvm.vector.reduce.fmax.v2f64(<2 x double>)
+declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>)
+declare double @llvm.vector.reduce.fmin.v1f64(<1 x double>)
+declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>)
+declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>)
+declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>)
+declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
+declare float @llvm.vector.reduce.fmax.v8f32(<8 x float>)
+declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>)
+declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
+declare float @llvm.vector.reduce.fmin.v8f32(<8 x float>)
+declare half @llvm.vector.reduce.fmax.v16f16(<16 x half>)
+declare half @llvm.vector.reduce.fmax.v2f16(<2 x half>)
+declare half @llvm.vector.reduce.fmax.v4f16(<4 x half>)
+declare half @llvm.vector.reduce.fmax.v8f16(<8 x half>)
+declare half @llvm.vector.reduce.fmin.v16f16(<16 x half>)
+declare half @llvm.vector.reduce.fmin.v2f16(<2 x half>)
+declare half @llvm.vector.reduce.fmin.v4f16(<4 x half>)
+declare half @llvm.vector.reduce.fmin.v8f16(<8 x half>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fmul.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fmul.ll
index 89d1546..940e2e2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-fmul.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-fmul.ll
@@ -9,7 +9,7 @@
; CHECK-NEXT: vmul.f32 s0, s4, s0
; CHECK-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v2f32(float %y, <2 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmul.f32.v2f32(float %y, <2 x float> %x)
ret float %z
}
@@ -30,7 +30,7 @@
; CHECK-NOFP-NEXT: vmul.f32 s0, s4, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float %y, <4 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float %y, <4 x float> %x)
ret float %z
}
@@ -56,7 +56,7 @@
; CHECK-NOFP-NEXT: vmul.f32 s0, s8, s0
; CHECK-NOFP-NEXT: bx lr
entry:
- %z = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v8f32(float %y, <8 x float> %x)
+ %z = call fast float @llvm.vector.reduce.fmul.f32.v8f32(float %y, <8 x float> %x)
ret float %z
}
@@ -71,7 +71,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fmul.f16.v2f16(half %y, <2 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmul.f16.v2f16(half %y, <2 x half> %x)
store half %z, half* %yy
ret void
}
@@ -102,7 +102,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fmul.f16.v4f16(half %y, <4 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmul.f16.v4f16(half %y, <4 x half> %x)
store half %z, half* %yy
ret void
}
@@ -139,7 +139,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fmul.f16.v8f16(half %y, <8 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmul.f16.v8f16(half %y, <8 x half> %x)
store half %z, half* %yy
ret void
}
@@ -189,7 +189,7 @@
; CHECK-NOFP-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call fast half @llvm.experimental.vector.reduce.v2.fmul.f16.v16f16(half %y, <16 x half> %x)
+ %z = call fast half @llvm.vector.reduce.fmul.f16.v16f16(half %y, <16 x half> %x)
store half %z, half* %yy
ret void
}
@@ -200,7 +200,7 @@
; CHECK-NEXT: vmul.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double %y, <1 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmul.f64.v1f64(double %y, <1 x double> %x)
ret double %z
}
@@ -211,7 +211,7 @@
; CHECK-NEXT: vmul.f64 d0, d2, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.v2.fmul.f64.v2f64(double %y, <2 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmul.f64.v2f64(double %y, <2 x double> %x)
ret double %z
}
@@ -224,7 +224,7 @@
; CHECK-NEXT: vmul.f64 d0, d4, d0
; CHECK-NEXT: bx lr
entry:
- %z = call fast double @llvm.experimental.vector.reduce.v2.fmul.f64.v4f64(double %y, <4 x double> %x)
+ %z = call fast double @llvm.vector.reduce.fmul.f64.v4f64(double %y, <4 x double> %x)
ret double %z
}
@@ -235,7 +235,7 @@
; CHECK-NEXT: vmul.f32 s0, s4, s1
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v2f32(float %y, <2 x float> %x)
+ %z = call float @llvm.vector.reduce.fmul.f32.v2f32(float %y, <2 x float> %x)
ret float %z
}
@@ -248,7 +248,7 @@
; CHECK-NEXT: vmul.f32 s0, s4, s3
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float %y, <4 x float> %x)
+ %z = call float @llvm.vector.reduce.fmul.f32.v4f32(float %y, <4 x float> %x)
ret float %z
}
@@ -265,7 +265,7 @@
; CHECK-NEXT: vmul.f32 s0, s0, s7
; CHECK-NEXT: bx lr
entry:
- %z = call float @llvm.experimental.vector.reduce.v2.fmul.f32.v8f32(float %y, <8 x float> %x)
+ %z = call float @llvm.vector.reduce.fmul.f32.v8f32(float %y, <8 x float> %x)
ret float %z
}
@@ -280,7 +280,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v2f16(half %y, <2 x half> %x)
+ %z = call half @llvm.vector.reduce.fmul.f16.v2f16(half %y, <2 x half> %x)
store half %z, half* %yy
ret void
}
@@ -299,7 +299,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v4f16(half %y, <4 x half> %x)
+ %z = call half @llvm.vector.reduce.fmul.f16.v4f16(half %y, <4 x half> %x)
store half %z, half* %yy
ret void
}
@@ -324,7 +324,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v8f16(half %y, <8 x half> %x)
+ %z = call half @llvm.vector.reduce.fmul.f16.v8f16(half %y, <8 x half> %x)
store half %z, half* %yy
ret void
}
@@ -361,7 +361,7 @@
; CHECK-NEXT: bx lr
entry:
%y = load half, half* %yy
- %z = call half @llvm.experimental.vector.reduce.v2.fmul.f16.v16f16(half %y, <16 x half> %x)
+ %z = call half @llvm.vector.reduce.fmul.f16.v16f16(half %y, <16 x half> %x)
store half %z, half* %yy
ret void
}
@@ -372,7 +372,7 @@
; CHECK-NEXT: vmul.f64 d0, d1, d0
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double %y, <1 x double> %x)
+ %z = call double @llvm.vector.reduce.fmul.f64.v1f64(double %y, <1 x double> %x)
ret double %z
}
@@ -383,7 +383,7 @@
; CHECK-NEXT: vmul.f64 d0, d2, d1
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v2f64(double %y, <2 x double> %x)
+ %z = call double @llvm.vector.reduce.fmul.f64.v2f64(double %y, <2 x double> %x)
ret double %z
}
@@ -396,17 +396,17 @@
; CHECK-NEXT: vmul.f64 d0, d0, d3
; CHECK-NEXT: bx lr
entry:
- %z = call double @llvm.experimental.vector.reduce.v2.fmul.f64.v4f64(double %y, <4 x double> %x)
+ %z = call double @llvm.vector.reduce.fmul.f64.v4f64(double %y, <4 x double> %x)
ret double %z
}
-declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v1f64(double, <1 x double>)
-declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v2f64(double, <2 x double>)
-declare double @llvm.experimental.vector.reduce.v2.fmul.f64.v4f64(double, <4 x double>)
-declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v2f32(float, <2 x float>)
-declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float, <4 x float>)
-declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v8f32(float, <8 x float>)
-declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v16f16(half, <16 x half>)
-declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v2f16(half, <2 x half>)
-declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v4f16(half, <4 x half>)
-declare half @llvm.experimental.vector.reduce.v2.fmul.f16.v8f16(half, <8 x half>)
+declare double @llvm.vector.reduce.fmul.f64.v1f64(double, <1 x double>)
+declare double @llvm.vector.reduce.fmul.f64.v2f64(double, <2 x double>)
+declare double @llvm.vector.reduce.fmul.f64.v4f64(double, <4 x double>)
+declare float @llvm.vector.reduce.fmul.f32.v2f32(float, <2 x float>)
+declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
+declare float @llvm.vector.reduce.fmul.f32.v8f32(float, <8 x float>)
+declare half @llvm.vector.reduce.fmul.f16.v16f16(half, <16 x half>)
+declare half @llvm.vector.reduce.fmul.f16.v2f16(half, <2 x half>)
+declare half @llvm.vector.reduce.fmul.f16.v4f16(half, <4 x half>)
+declare half @llvm.vector.reduce.fmul.f16.v8f16(half, <8 x half>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
index 2862779..2544474 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll
@@ -65,7 +65,7 @@
%0 = getelementptr inbounds i32, i32* %x, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %1, align 4
- %2 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %wide.load)
+ %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load)
%3 = add i32 %2, %vec.phi
%index.next = add i32 %index, 4
%4 = icmp eq i32 %index.next, %n.vec
@@ -167,7 +167,7 @@
br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %4 = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %2)
+ %4 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %2)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -267,7 +267,7 @@
br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %4 = call i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32> %2)
+ %4 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %2)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -367,7 +367,7 @@
br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %4 = call i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32> %2)
+ %4 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %2)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -467,7 +467,7 @@
br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %4 = call i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32> %2)
+ %4 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %2)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -568,7 +568,7 @@
br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %4 = call fast float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %2)
+ %4 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.000000e+00, <4 x float> %2)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -665,7 +665,7 @@
br i1 %3, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %4 = call fast float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float 1.000000e+00, <4 x float> %2)
+ %4 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.000000e+00, <4 x float> %2)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -762,7 +762,7 @@
br i1 %4, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %5 = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %3)
+ %5 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %3)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -852,7 +852,7 @@
%0 = getelementptr inbounds i32, i32* %x, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %1, align 4
- %l5 = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %wide.load)
+ %l5 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %wide.load)
%2 = icmp slt i32 %vec.phi, %l5
%3 = select i1 %2, i32 %vec.phi, i32 %l5
%index.next = add i32 %index, 4
@@ -958,7 +958,7 @@
br i1 %4, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %5 = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %3)
+ %5 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %3)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -1048,7 +1048,7 @@
%0 = getelementptr inbounds i32, i32* %x, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %1, align 4
- %l5 = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %wide.load)
+ %l5 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %wide.load)
%2 = icmp sgt i32 %vec.phi, %l5
%3 = select i1 %2, i32 %vec.phi, i32 %l5
%index.next = add i32 %index, 4
@@ -1154,7 +1154,7 @@
br i1 %4, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %5 = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %3)
+ %5 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %3)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -1244,7 +1244,7 @@
%0 = getelementptr inbounds i32, i32* %x, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %1, align 4
- %l5 = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %wide.load)
+ %l5 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %wide.load)
%2 = icmp ult i32 %vec.phi, %l5
%3 = select i1 %2, i32 %vec.phi, i32 %l5
%index.next = add i32 %index, 4
@@ -1350,7 +1350,7 @@
br i1 %4, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %5 = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %3)
+ %5 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %3)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -1440,7 +1440,7 @@
%0 = getelementptr inbounds i32, i32* %x, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load = load <4 x i32>, <4 x i32>* %1, align 4
- %l5 = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %wide.load)
+ %l5 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %wide.load)
%2 = icmp ugt i32 %vec.phi, %l5
%3 = select i1 %2, i32 %vec.phi, i32 %l5
%index.next = add i32 %index, 4
@@ -1553,7 +1553,7 @@
br i1 %4, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %5 = call float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float> %3)
+ %5 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %3)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -1658,7 +1658,7 @@
br i1 %4, label %middle.block, label %vector.body
middle.block: ; preds = %vector.body
- %5 = call float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float> %3)
+ %5 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %3)
%cmp.n = icmp eq i32 %n.vec, %n
br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader1
@@ -1722,7 +1722,7 @@
%1 = bitcast i32* %0 to <4 x i32>*
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
%2 = select <4 x i1> %active.lane.mask, <4 x i32> %wide.masked.load, <4 x i32> zeroinitializer
- %3 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %2)
+ %3 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %2)
%4 = add i32 %3, %vec.phi
%index.next = add i32 %index, 4
%5 = icmp eq i32 %index.next, %n.vec
@@ -1777,7 +1777,7 @@
%wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
%4 = mul nsw <4 x i32> %wide.masked.load13, %wide.masked.load
%5 = select <4 x i1> %active.lane.mask, <4 x i32> %4, <4 x i32> zeroinitializer
- %6 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %5)
+ %6 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %5)
%7 = add i32 %6, %vec.phi
%index.next = add i32 %index, 4
%8 = icmp eq i32 %index.next, %n.vec
@@ -1828,7 +1828,7 @@
%wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
%2 = sext <8 x i16> %wide.masked.load to <8 x i32>
%3 = select <8 x i1> %active.lane.mask, <8 x i32> %2, <8 x i32> zeroinitializer
- %4 = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %3)
+ %4 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %3)
%5 = add i32 %4, %vec.phi
%index.next = add i32 %index, 8
%6 = icmp eq i32 %index.next, %n.vec
@@ -1885,7 +1885,7 @@
%5 = sext <8 x i16> %wide.masked.load14 to <8 x i32>
%6 = mul nsw <8 x i32> %5, %2
%7 = select <8 x i1> %active.lane.mask, <8 x i32> %6, <8 x i32> zeroinitializer
- %8 = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %7)
+ %8 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %7)
%9 = add i32 %8, %vec.phi
%index.next = add i32 %index, 8
%10 = icmp eq i32 %index.next, %n.vec
@@ -1936,7 +1936,7 @@
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
%2 = zext <16 x i8> %wide.masked.load to <16 x i32>
%3 = select <16 x i1> %active.lane.mask, <16 x i32> %2, <16 x i32> zeroinitializer
- %4 = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %3)
+ %4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
%5 = add i32 %4, %vec.phi
%index.next = add i32 %index, 16
%6 = icmp eq i32 %index.next, %n.vec
@@ -1993,7 +1993,7 @@
%5 = zext <16 x i8> %wide.masked.load14 to <16 x i32>
%6 = mul nuw nsw <16 x i32> %5, %2
%7 = select <16 x i1> %active.lane.mask, <16 x i32> %6, <16 x i32> zeroinitializer
- %8 = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %7)
+ %8 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %7)
%9 = add i32 %8, %vec.phi
%index.next = add i32 %index, 16
%10 = icmp eq i32 %index.next, %n.vec
@@ -2043,7 +2043,7 @@
%1 = bitcast i16* %0 to <8 x i16>*
%wide.masked.load = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %1, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
%2 = select <8 x i1> %active.lane.mask, <8 x i16> %wide.masked.load, <8 x i16> zeroinitializer
- %3 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %2)
+ %3 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %2)
%4 = add i16 %3, %vec.phi
%index.next = add i32 %index, 8
%5 = icmp eq i32 %index.next, %n.vec
@@ -2098,7 +2098,7 @@
%wide.masked.load16 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %3, i32 2, <8 x i1> %active.lane.mask, <8 x i16> undef)
%4 = mul <8 x i16> %wide.masked.load16, %wide.masked.load
%5 = select <8 x i1> %active.lane.mask, <8 x i16> %4, <8 x i16> zeroinitializer
- %6 = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %5)
+ %6 = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %5)
%7 = add i16 %6, %vec.phi
%index.next = add i32 %index, 8
%8 = icmp eq i32 %index.next, %n.vec
@@ -2149,7 +2149,7 @@
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
%2 = zext <16 x i8> %wide.masked.load to <16 x i16>
%3 = select <16 x i1> %active.lane.mask, <16 x i16> %2, <16 x i16> zeroinitializer
- %4 = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %3)
+ %4 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %3)
%5 = add i16 %4, %vec.phi
%index.next = add i32 %index, 16
%6 = icmp eq i32 %index.next, %n.vec
@@ -2206,7 +2206,7 @@
%5 = zext <16 x i8> %wide.masked.load18 to <16 x i16>
%6 = mul nuw <16 x i16> %5, %2
%7 = select <16 x i1> %active.lane.mask, <16 x i16> %6, <16 x i16> zeroinitializer
- %8 = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %7)
+ %8 = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %7)
%9 = add i16 %8, %vec.phi
%index.next = add i32 %index, 16
%10 = icmp eq i32 %index.next, %n.vec
@@ -2256,7 +2256,7 @@
%1 = bitcast i8* %0 to <16 x i8>*
%wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %1, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
%2 = select <16 x i1> %active.lane.mask, <16 x i8> %wide.masked.load, <16 x i8> zeroinitializer
- %3 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %2)
+ %3 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %2)
%4 = add i8 %3, %vec.phi
%index.next = add i32 %index, 16
%5 = icmp eq i32 %index.next, %n.vec
@@ -2311,7 +2311,7 @@
%wide.masked.load15 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %3, i32 1, <16 x i1> %active.lane.mask, <16 x i8> undef)
%4 = mul <16 x i8> %wide.masked.load15, %wide.masked.load
%5 = select <16 x i1> %active.lane.mask, <16 x i8> %4, <16 x i8> zeroinitializer
- %6 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %5)
+ %6 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %5)
%7 = add i8 %6, %vec.phi
%index.next = add i32 %index, 16
%8 = icmp eq i32 %index.next, %n.vec
@@ -2364,7 +2364,7 @@
%wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef)
%2 = sext <4 x i32> %wide.masked.load to <4 x i64>
%3 = select <4 x i1> %active.lane.mask, <4 x i64> %2, <4 x i64> zeroinitializer
- %4 = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %3)
+ %4 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %3)
%5 = add i64 %4, %vec.phi
%index.next = add i32 %index, 4
%6 = icmp eq i32 %index.next, %n.vec
@@ -2423,7 +2423,7 @@
%5 = sext <4 x i32> %wide.masked.load14 to <4 x i64>
%6 = mul nsw <4 x i64> %5, %2
%7 = select <4 x i1> %active.lane.mask, <4 x i64> %6, <4 x i64> zeroinitializer
- %8 = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %7)
+ %8 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %7)
%9 = add i64 %8, %vec.phi
%index.next = add i32 %index, 4
%10 = icmp eq i32 %index.next, %n.vec
@@ -2482,7 +2482,7 @@
%5 = sext <8 x i16> %wide.masked.load14 to <8 x i64>
%6 = mul nsw <8 x i64> %5, %2
%7 = select <8 x i1> %active.lane.mask, <8 x i64> %6, <8 x i64> zeroinitializer
- %8 = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %7)
+ %8 = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %7)
%9 = add i64 %8, %vec.phi
%index.next = add i32 %index, 8
%10 = icmp eq i32 %index.next, %n.vec
@@ -2497,26 +2497,26 @@
declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #2
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32) #1
declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>) #2
-declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>) #3
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>) #3
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32) #1
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>) #2
-declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>) #3
-declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>) #3
-declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>) #3
-declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>) #3
-declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>) #3
-declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>) #3
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>) #3
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>) #3
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>) #3
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>) #3
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>) #3
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>) #3
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.and.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.or.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.xor.v4i32(<4 x i32>)
-declare float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(float, <4 x float>)
-declare float @llvm.experimental.vector.reduce.v2.fmul.f32.v4f32(float, <4 x float>)
-declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
-declare float @llvm.experimental.vector.reduce.fmin.v4f32(<4 x float>)
-declare float @llvm.experimental.vector.reduce.fmax.v4f32(<4 x float>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.or.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.xor.v4i32(<4 x i32>)
+declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
+declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
+declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
index b83b51b..ee15f82 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll
@@ -8,7 +8,7 @@
; CHECK-NEXT: bx lr
entry:
%m = mul <4 x i32> %x, %y
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
ret i32 %z
}
@@ -21,7 +21,7 @@
%xx = zext <4 x i32> %x to <4 x i64>
%yy = zext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %m)
ret i64 %z
}
@@ -34,7 +34,7 @@
%xx = sext <4 x i32> %x to <4 x i64>
%yy = sext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %m)
ret i64 %z
}
@@ -53,7 +53,7 @@
%xx = zext <2 x i32> %x to <2 x i64>
%yy = zext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -72,7 +72,7 @@
%xx = sext <2 x i32> %x to <2 x i64>
%yy = sext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -85,7 +85,7 @@
%xx = zext <8 x i16> %x to <8 x i32>
%yy = zext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m)
ret i32 %z
}
@@ -98,7 +98,7 @@
%xx = sext <8 x i16> %x to <8 x i32>
%yy = sext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m)
ret i32 %z
}
@@ -113,7 +113,7 @@
%xx = zext <4 x i16> %x to <4 x i32>
%yy = zext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
ret i32 %z
}
@@ -128,7 +128,7 @@
%xx = sext <4 x i16> %x to <4 x i32>
%yy = sext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
ret i32 %z
}
@@ -140,7 +140,7 @@
; CHECK-NEXT: bx lr
entry:
%m = mul <8 x i16> %x, %y
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %m)
ret i16 %z
}
@@ -153,7 +153,7 @@
%xx = zext <8 x i16> %x to <8 x i64>
%yy = zext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %m)
ret i64 %z
}
@@ -166,7 +166,7 @@
%xx = sext <8 x i16> %x to <8 x i64>
%yy = sext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %m)
ret i64 %z
}
@@ -180,7 +180,7 @@
%yy = zext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%ma = zext <8 x i32> %m to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %ma)
ret i64 %z
}
@@ -194,7 +194,7 @@
%yy = sext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%ma = sext <8 x i32> %m to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %ma)
ret i64 %z
}
@@ -207,7 +207,7 @@
%xx = sext <8 x i16> %x to <8 x i32>
%m = mul <8 x i32> %xx, %xx
%ma = zext <8 x i32> %m to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %ma)
ret i64 %z
}
@@ -228,7 +228,7 @@
%xx = zext <2 x i16> %x to <2 x i64>
%yy = zext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -250,7 +250,7 @@
%xx = sext <2 x i16> %x to <2 x i64>
%yy = sext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -263,7 +263,7 @@
%xx = zext <16 x i8> %x to <16 x i32>
%yy = zext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m)
ret i32 %z
}
@@ -276,7 +276,7 @@
%xx = sext <16 x i8> %x to <16 x i32>
%yy = sext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m)
ret i32 %z
}
@@ -290,7 +290,7 @@
%yy = zext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%ma = zext <16 x i16> %m to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ma)
ret i32 %z
}
@@ -304,7 +304,7 @@
%yy = sext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%ma = sext <16 x i16> %m to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ma)
ret i32 %z
}
@@ -317,7 +317,7 @@
%xx = sext <16 x i8> %x to <16 x i16>
%m = mul <16 x i16> %xx, %xx
%ma = zext <16 x i16> %m to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ma)
ret i32 %z
}
@@ -333,7 +333,7 @@
%xx = zext <4 x i8> %x to <4 x i32>
%yy = zext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
ret i32 %z
}
@@ -350,7 +350,7 @@
%xx = sext <4 x i8> %x to <4 x i32>
%yy = sext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
ret i32 %z
}
@@ -364,7 +364,7 @@
%xx = zext <16 x i8> %x to <16 x i16>
%yy = zext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %m)
ret i16 %z
}
@@ -378,7 +378,7 @@
%xx = sext <16 x i8> %x to <16 x i16>
%yy = sext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %m)
ret i16 %z
}
@@ -394,7 +394,7 @@
%xx = zext <8 x i8> %x to <8 x i16>
%yy = zext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %m)
ret i16 %z
}
@@ -410,7 +410,7 @@
%xx = sext <8 x i8> %x to <8 x i16>
%yy = sext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %m)
ret i16 %z
}
@@ -422,7 +422,7 @@
; CHECK-NEXT: bx lr
entry:
%m = mul <16 x i8> %x, %y
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %m)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %m)
ret i8 %z
}
@@ -636,7 +636,7 @@
%xx = zext <16 x i8> %x to <16 x i64>
%yy = zext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %m)
ret i64 %z
}
@@ -803,7 +803,7 @@
%xx = sext <16 x i8> %x to <16 x i64>
%yy = sext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %m)
ret i64 %z
}
@@ -826,7 +826,7 @@
%xx = zext <2 x i8> %x to <2 x i64>
%yy = zext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -848,7 +848,7 @@
%xx = sext <2 x i8> %x to <2 x i64>
%yy = sext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -879,7 +879,7 @@
; CHECK-NEXT: pop {r4, pc}
entry:
%m = mul <2 x i64> %x, %y
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
ret i64 %z
}
@@ -890,7 +890,7 @@
; CHECK-NEXT: bx lr
entry:
%m = mul <4 x i32> %x, %y
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -904,7 +904,7 @@
%xx = zext <4 x i32> %x to <4 x i64>
%yy = zext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -918,7 +918,7 @@
%xx = sext <4 x i32> %x to <4 x i64>
%yy = sext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -942,7 +942,7 @@
%xx = zext <2 x i32> %x to <2 x i64>
%yy = zext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -966,7 +966,7 @@
%xx = sext <2 x i32> %x to <2 x i64>
%yy = sext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -980,7 +980,7 @@
%xx = zext <8 x i16> %x to <8 x i32>
%yy = zext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -994,7 +994,7 @@
%xx = sext <8 x i16> %x to <8 x i32>
%yy = sext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1010,7 +1010,7 @@
%xx = zext <4 x i16> %x to <4 x i32>
%yy = zext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1026,7 +1026,7 @@
%xx = sext <4 x i16> %x to <4 x i32>
%yy = sext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1039,7 +1039,7 @@
; CHECK-NEXT: bx lr
entry:
%m = mul <8 x i16> %x, %y
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %m)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1053,7 +1053,7 @@
%xx = zext <8 x i16> %x to <8 x i64>
%yy = zext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1067,7 +1067,7 @@
%xx = sext <8 x i16> %x to <8 x i64>
%yy = sext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1082,7 +1082,7 @@
%yy = zext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%ma = zext <8 x i32> %m to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %ma)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1097,7 +1097,7 @@
%yy = sext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%ma = sext <8 x i32> %m to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %ma)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1111,7 +1111,7 @@
%xx = sext <8 x i16> %x to <8 x i32>
%m = mul <8 x i32> %xx, %xx
%ma = zext <8 x i32> %m to <8 x i64>
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %ma)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %ma)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1137,7 +1137,7 @@
%xx = zext <2 x i16> %x to <2 x i64>
%yy = zext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1164,7 +1164,7 @@
%xx = sext <2 x i16> %x to <2 x i64>
%yy = sext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1178,7 +1178,7 @@
%xx = zext <16 x i8> %x to <16 x i32>
%yy = zext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1192,7 +1192,7 @@
%xx = sext <16 x i8> %x to <16 x i32>
%yy = sext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1207,7 +1207,7 @@
%yy = zext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%ma = zext <16 x i16> %m to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ma)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1222,7 +1222,7 @@
%yy = sext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%ma = sext <16 x i16> %m to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ma)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1236,7 +1236,7 @@
%xx = sext <16 x i8> %x to <16 x i16>
%m = mul <16 x i16> %xx, %xx
%ma = zext <16 x i16> %m to <16 x i32>
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %ma)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %ma)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1253,7 +1253,7 @@
%xx = zext <4 x i8> %x to <4 x i32>
%yy = zext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1271,7 +1271,7 @@
%xx = sext <4 x i8> %x to <4 x i32>
%yy = sext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %m)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %m)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1286,7 +1286,7 @@
%xx = zext <16 x i8> %x to <16 x i16>
%yy = zext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %m)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1301,7 +1301,7 @@
%xx = sext <16 x i8> %x to <16 x i16>
%yy = sext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %m)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1318,7 +1318,7 @@
%xx = zext <8 x i8> %x to <8 x i16>
%yy = zext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %m)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1335,7 +1335,7 @@
%xx = sext <8 x i8> %x to <8 x i16>
%yy = sext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %m)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %m)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1348,7 +1348,7 @@
; CHECK-NEXT: bx lr
entry:
%m = mul <16 x i8> %x, %y
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %m)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %m)
%r = add i8 %z, %a
ret i8 %r
}
@@ -1565,7 +1565,7 @@
%xx = zext <16 x i8> %x to <16 x i64>
%yy = zext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1737,7 +1737,7 @@
%xx = sext <16 x i8> %x to <16 x i64>
%yy = sext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1765,7 +1765,7 @@
%xx = zext <2 x i8> %x to <2 x i64>
%yy = zext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1792,7 +1792,7 @@
%xx = sext <2 x i8> %x to <2 x i64>
%yy = sext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1826,18 +1826,18 @@
; CHECK-NEXT: pop {r4, r5, r6, pc}
entry:
%m = mul <2 x i64> %x, %y
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %m)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %m)
%r = add i64 %z, %a
ret i64 %r
}
-declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
-declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
-declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
index 02d1248..72462bb 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll
@@ -11,7 +11,7 @@
%c = icmp eq <4 x i32> %b, zeroinitializer
%m = mul <4 x i32> %x, %y
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -27,7 +27,7 @@
%yy = zext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
%s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
ret i64 %z
}
@@ -43,7 +43,7 @@
%yy = sext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
%s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
ret i64 %z
}
@@ -79,7 +79,7 @@
%yy = zext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -115,7 +115,7 @@
%yy = sext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -131,7 +131,7 @@
%yy = zext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
ret i32 %z
}
@@ -147,7 +147,7 @@
%yy = sext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
ret i32 %z
}
@@ -166,7 +166,7 @@
%yy = zext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -185,7 +185,7 @@
%yy = sext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -200,7 +200,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%m = mul <8 x i16> %x, %y
%s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
ret i16 %z
}
@@ -216,7 +216,7 @@
%yy = zext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
%s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -232,7 +232,7 @@
%yy = sext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
%s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -249,7 +249,7 @@
%m = mul <8 x i32> %xx, %yy
%ma = zext <8 x i32> %m to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -266,7 +266,7 @@
%m = mul <8 x i32> %xx, %yy
%ma = sext <8 x i32> %m to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -282,7 +282,7 @@
%m = mul <8 x i32> %xx, %xx
%ma = zext <8 x i32> %m to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
ret i64 %z
}
@@ -334,7 +334,7 @@
%yy = zext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -385,7 +385,7 @@
%yy = sext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -401,7 +401,7 @@
%yy = zext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
%s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -417,7 +417,7 @@
%yy = sext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
%s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -434,7 +434,7 @@
%m = mul <16 x i16> %xx, %yy
%ma = zext <16 x i16> %m to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -451,7 +451,7 @@
%m = mul <16 x i16> %xx, %yy
%ma = sext <16 x i16> %m to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -467,7 +467,7 @@
%m = mul <16 x i16> %xx, %xx
%ma = zext <16 x i16> %m to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
ret i32 %z
}
@@ -487,7 +487,7 @@
%yy = zext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -509,7 +509,7 @@
%yy = sext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
ret i32 %z
}
@@ -526,7 +526,7 @@
%yy = zext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
ret i16 %z
}
@@ -543,7 +543,7 @@
%yy = sext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
ret i16 %z
}
@@ -563,7 +563,7 @@
%yy = zext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
%s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
ret i16 %z
}
@@ -583,7 +583,7 @@
%yy = sext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
%s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
ret i16 %z
}
@@ -598,7 +598,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%m = mul <16 x i8> %x, %y
%s = select <16 x i1> %c, <16 x i8> %m, <16 x i8> zeroinitializer
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %s)
ret i8 %z
}
@@ -1010,7 +1010,7 @@
%yy = zext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
%s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
ret i64 %z
}
@@ -1353,7 +1353,7 @@
%yy = sext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
%s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
ret i64 %z
}
@@ -1405,7 +1405,7 @@
%yy = zext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -1456,7 +1456,7 @@
%yy = sext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -1509,7 +1509,7 @@
%c = icmp eq <2 x i64> %b, zeroinitializer
%m = mul <2 x i64> %x, %y
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
ret i64 %z
}
@@ -1523,7 +1523,7 @@
%c = icmp eq <4 x i32> %b, zeroinitializer
%m = mul <4 x i32> %x, %y
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1540,7 +1540,7 @@
%yy = zext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
%s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1557,7 +1557,7 @@
%yy = sext <4 x i32> %y to <4 x i64>
%m = mul <4 x i64> %xx, %yy
%s = select <4 x i1> %c, <4 x i64> %m, <4 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1598,7 +1598,7 @@
%yy = zext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1639,7 +1639,7 @@
%yy = sext <2 x i32> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1656,7 +1656,7 @@
%yy = zext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1673,7 +1673,7 @@
%yy = sext <8 x i16> %y to <8 x i32>
%m = mul <8 x i32> %xx, %yy
%s = select <8 x i1> %c, <8 x i32> %m, <8 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1693,7 +1693,7 @@
%yy = zext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1713,7 +1713,7 @@
%yy = sext <4 x i16> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1729,7 +1729,7 @@
%c = icmp eq <8 x i16> %b, zeroinitializer
%m = mul <8 x i16> %x, %y
%s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -1746,7 +1746,7 @@
%yy = zext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
%s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1763,7 +1763,7 @@
%yy = sext <8 x i16> %y to <8 x i64>
%m = mul <8 x i64> %xx, %yy
%s = select <8 x i1> %c, <8 x i64> %m, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1781,7 +1781,7 @@
%m = mul <8 x i32> %xx, %yy
%ma = zext <8 x i32> %m to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1799,7 +1799,7 @@
%m = mul <8 x i32> %xx, %yy
%ma = sext <8 x i32> %m to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1816,7 +1816,7 @@
%m = mul <8 x i32> %xx, %xx
%ma = zext <8 x i32> %m to <8 x i64>
%s = select <8 x i1> %c, <8 x i64> %ma, <8 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1873,7 +1873,7 @@
%yy = zext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1929,7 +1929,7 @@
%yy = sext <2 x i16> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -1946,7 +1946,7 @@
%yy = zext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
%s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1963,7 +1963,7 @@
%yy = sext <16 x i8> %y to <16 x i32>
%m = mul <16 x i32> %xx, %yy
%s = select <16 x i1> %c, <16 x i32> %m, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1981,7 +1981,7 @@
%m = mul <16 x i16> %xx, %yy
%ma = zext <16 x i16> %m to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -1999,7 +1999,7 @@
%m = mul <16 x i16> %xx, %yy
%ma = sext <16 x i16> %m to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -2016,7 +2016,7 @@
%m = mul <16 x i16> %xx, %xx
%ma = zext <16 x i16> %m to <16 x i32>
%s = select <16 x i1> %c, <16 x i32> %ma, <16 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -2037,7 +2037,7 @@
%yy = zext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -2060,7 +2060,7 @@
%yy = sext <4 x i8> %y to <4 x i32>
%m = mul <4 x i32> %xx, %yy
%s = select <4 x i1> %c, <4 x i32> %m, <4 x i32> zeroinitializer
- %z = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %s)
+ %z = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %s)
%r = add i32 %z, %a
ret i32 %r
}
@@ -2078,7 +2078,7 @@
%yy = zext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2096,7 +2096,7 @@
%yy = sext <16 x i8> %y to <16 x i16>
%m = mul <16 x i16> %xx, %yy
%s = select <16 x i1> %c, <16 x i16> %m, <16 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2117,7 +2117,7 @@
%yy = zext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
%s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2138,7 +2138,7 @@
%yy = sext <8 x i8> %y to <8 x i16>
%m = mul <8 x i16> %xx, %yy
%s = select <8 x i1> %c, <8 x i16> %m, <8 x i16> zeroinitializer
- %z = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %s)
+ %z = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %s)
%r = add i16 %z, %a
ret i16 %r
}
@@ -2154,7 +2154,7 @@
%c = icmp eq <16 x i8> %b, zeroinitializer
%m = mul <16 x i8> %x, %y
%s = select <16 x i1> %c, <16 x i8> %m, <16 x i8> zeroinitializer
- %z = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %s)
+ %z = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %s)
%r = add i8 %z, %a
ret i8 %r
}
@@ -2569,7 +2569,7 @@
%yy = zext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
%s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -2917,7 +2917,7 @@
%yy = sext <16 x i8> %y to <16 x i64>
%m = mul <16 x i64> %xx, %yy
%s = select <16 x i1> %c, <16 x i64> %m, <16 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -2974,7 +2974,7 @@
%yy = zext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -3030,7 +3030,7 @@
%yy = sext <2 x i8> %y to <2 x i64>
%m = mul <2 x i64> %xx, %yy
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
@@ -3088,18 +3088,18 @@
%c = icmp eq <2 x i64> %b, zeroinitializer
%m = mul <2 x i64> %x, %y
%s = select <2 x i1> %c, <2 x i64> %m, <2 x i64> zeroinitializer
- %z = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %s)
+ %z = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %s)
%r = add i64 %z, %a
ret i64 %r
}
-declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
-declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
-declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
-declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll
index 7510169..4ff6821 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mul.ll
@@ -9,7 +9,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %x)
ret i32 %z
}
@@ -25,7 +25,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %x)
ret i32 %z
}
@@ -42,7 +42,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %x)
ret i32 %z
}
@@ -58,7 +58,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %x)
ret i16 %z
}
@@ -76,7 +76,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %x)
ret i16 %z
}
@@ -95,7 +95,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %x)
ret i16 %z
}
@@ -113,7 +113,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %x)
ret i8 %z
}
@@ -133,7 +133,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %x)
ret i8 %z
}
@@ -154,7 +154,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %x)
ret i8 %z
}
@@ -163,7 +163,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.mul.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> %x)
ret i64 %z
}
@@ -179,7 +179,7 @@
; CHECK-NEXT: mla r1, r3, r1, r2
; CHECK-NEXT: bx lr
entry:
- %z = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %x)
ret i64 %z
}
@@ -207,7 +207,7 @@
; CHECK-NEXT: mla r1, r1, r6, r4
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
entry:
- %z = call i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %x)
ret i64 %z
}
@@ -220,7 +220,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.mul.v2i32(<2 x i32> %x)
%r = mul i32 %y, %z
ret i32 %r
}
@@ -238,7 +238,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %x)
%r = mul i32 %y, %z
ret i32 %r
}
@@ -257,7 +257,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32> %x)
+ %z = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %x)
%r = mul i32 %y, %z
ret i32 %r
}
@@ -275,7 +275,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.mul.v4i16(<4 x i16> %x)
%r = mul i16 %y, %z
ret i16 %r
}
@@ -295,7 +295,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %x)
%r = mul i16 %y, %z
ret i16 %r
}
@@ -316,7 +316,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16> %x)
+ %z = call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %x)
%r = mul i16 %y, %z
ret i16 %r
}
@@ -336,7 +336,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> %x)
%r = mul i8 %y, %z
ret i8 %r
}
@@ -358,7 +358,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %x)
%r = mul i8 %y, %z
ret i8 %r
}
@@ -381,7 +381,7 @@
; CHECK-NEXT: muls r0, r1, r0
; CHECK-NEXT: bx lr
entry:
- %z = call i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8> %x)
+ %z = call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %x)
%r = mul i8 %y, %z
ret i8 %r
}
@@ -397,7 +397,7 @@
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: pop {r7, pc}
entry:
- %z = call i64 @llvm.experimental.vector.reduce.mul.v1i64(<1 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> %x)
%r = mul i64 %y, %z
ret i64 %r
}
@@ -420,7 +420,7 @@
; CHECK-NEXT: mov r0, r2
; CHECK-NEXT: pop {r4, pc}
entry:
- %z = call i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %x)
%r = mul i64 %y, %z
ret i64 %r
}
@@ -453,20 +453,20 @@
; CHECK-NEXT: mov r0, r2
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc}
entry:
- %z = call i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64> %x)
+ %z = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %x)
%r = mul i64 %y, %z
ret i64 %r
}
-declare i16 @llvm.experimental.vector.reduce.mul.v16i16(<16 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v4i16(<4 x i16>)
-declare i16 @llvm.experimental.vector.reduce.mul.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.mul.v2i32(<2 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v4i32(<4 x i32>)
-declare i32 @llvm.experimental.vector.reduce.mul.v8i32(<8 x i32>)
-declare i64 @llvm.experimental.vector.reduce.mul.v1i64(<1 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.v2i64(<2 x i64>)
-declare i64 @llvm.experimental.vector.reduce.mul.v4i64(<4 x i64>)
-declare i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v32i8(<32 x i8>)
-declare i8 @llvm.experimental.vector.reduce.mul.v8i8(<8 x i8>)
+declare i16 @llvm.vector.reduce.mul.v16i16(<16 x i16>)
+declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.mul.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.mul.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.mul.v8i32(<8 x i32>)
+declare i64 @llvm.vector.reduce.mul.v1i64(<1 x i64>)
+declare i64 @llvm.vector.reduce.mul.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.mul.v4i64(<4 x i64>)
+declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>)
+declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmaxv.ll b/llvm/test/CodeGen/Thumb2/mve-vmaxv.ll
index 80c8ae6..9502716 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmaxv.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmaxv.ll
@@ -1,18 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
-declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
-declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
-declare i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8>)
-declare i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
-declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
-declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
-declare i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8>)
-declare i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16>)
-declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
+declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
+declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
+declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)
+declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)
+declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
define arm_aapcs_vfpcc i8 @vmaxv_s_v16i8(<16 x i8> %s1) {
; CHECK-LABEL: vmaxv_s_v16i8:
@@ -20,7 +20,7 @@
; CHECK-NEXT: mvn r0, #127
; CHECK-NEXT: vmaxv.s8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %s1)
ret i8 %r
}
@@ -31,7 +31,7 @@
; CHECK-NEXT: movt r0, #65535
; CHECK-NEXT: vmaxv.s16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %s1)
ret i16 %r
}
@@ -41,7 +41,7 @@
; CHECK-NEXT: mov.w r0, #-2147483648
; CHECK-NEXT: vmaxv.s32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %s1)
ret i32 %r
}
@@ -51,7 +51,7 @@
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: vmaxv.u8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %s1)
ret i8 %r
}
@@ -61,7 +61,7 @@
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: vmaxv.u16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %s1)
ret i16 %r
}
@@ -71,7 +71,7 @@
; CHECK-NEXT: movs r0, #0
; CHECK-NEXT: vmaxv.u32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %s1)
ret i32 %r
}
@@ -81,7 +81,7 @@
; CHECK-NEXT: movs r0, #127
; CHECK-NEXT: vminv.s8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %s1)
ret i8 %r
}
@@ -91,7 +91,7 @@
; CHECK-NEXT: movw r0, #32767
; CHECK-NEXT: vminv.s16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %s1)
ret i16 %r
}
@@ -101,7 +101,7 @@
; CHECK-NEXT: mvn r0, #-2147483648
; CHECK-NEXT: vminv.s32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %s1)
ret i32 %r
}
@@ -111,7 +111,7 @@
; CHECK-NEXT: movs r0, #255
; CHECK-NEXT: vminv.u8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %s1)
ret i8 %r
}
@@ -121,7 +121,7 @@
; CHECK-NEXT: movw r0, #65535
; CHECK-NEXT: vminv.u16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %s1)
ret i16 %r
}
@@ -131,7 +131,7 @@
; CHECK-NEXT: mov.w r0, #-1
; CHECK-NEXT: vminv.u32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %s1)
ret i32 %r
}
@@ -142,7 +142,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vmaxv.s8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %s1)
%c = icmp sgt i8 %r, %s2
%s = select i1 %c, i8 %r, i8 %s2
ret i8 %s
@@ -157,7 +157,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, gt
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %s1)
%rs = sext i8 %r to i32
%c = icmp sgt i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -169,7 +169,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vmaxv.s16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %s1)
%c = icmp sgt i16 %r, %s2
%s = select i1 %c, i16 %r, i16 %s2
ret i16 %s
@@ -185,7 +185,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, gt
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %s1)
%rs = sext i16 %r to i32
%c = icmp sgt i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -197,7 +197,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vmaxv.s32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %s1)
%c = icmp sgt i32 %r, %s2
%s = select i1 %c, i32 %r, i32 %s2
ret i32 %s
@@ -208,7 +208,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vmaxv.u8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %s1)
%c = icmp ugt i8 %r, %s2
%s = select i1 %c, i8 %r, i8 %s2
ret i8 %s
@@ -223,7 +223,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, hi
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %s1)
%rs = zext i8 %r to i32
%c = icmp ugt i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -235,7 +235,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vmaxv.u16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %s1)
%c = icmp ugt i16 %r, %s2
%s = select i1 %c, i16 %r, i16 %s2
ret i16 %s
@@ -250,7 +250,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, hi
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %s1)
%rs = zext i16 %r to i32
%c = icmp ugt i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -262,7 +262,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vmaxv.u32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %s1)
%c = icmp ugt i32 %r, %s2
%s = select i1 %c, i32 %r, i32 %s2
ret i32 %s
@@ -273,7 +273,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vminv.s8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %s1)
%c = icmp slt i8 %r, %s2
%s = select i1 %c, i8 %r, i8 %s2
ret i8 %s
@@ -288,7 +288,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, lt
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %s1)
%rs = sext i8 %r to i32
%c = icmp slt i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -300,7 +300,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vminv.s16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %s1)
%c = icmp slt i16 %r, %s2
%s = select i1 %c, i16 %r, i16 %s2
ret i16 %s
@@ -315,7 +315,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, lt
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %s1)
%rs = sext i16 %r to i32
%c = icmp slt i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -327,7 +327,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vminv.s32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %s1)
%c = icmp slt i32 %r, %s2
%s = select i1 %c, i32 %r, i32 %s2
ret i32 %s
@@ -338,7 +338,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vminv.u8 r0, q0
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %s1)
%c = icmp ult i8 %r, %s2
%s = select i1 %c, i8 %r, i8 %s2
ret i8 %s
@@ -353,7 +353,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, lo
; CHECK-NEXT: bx lr
- %r = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %s1)
+ %r = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %s1)
%rs = zext i8 %r to i32
%c = icmp ult i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -365,7 +365,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vminv.u16 r0, q0
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %s1)
%c = icmp ult i16 %r, %s2
%s = select i1 %c, i16 %r, i16 %s2
ret i16 %s
@@ -380,7 +380,7 @@
; CHECK-NEXT: cmp r1, r0
; CHECK-NEXT: csel r0, r1, r0, lo
; CHECK-NEXT: bx lr
- %r = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %s1)
+ %r = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %s1)
%rs = zext i16 %r to i32
%c = icmp ult i32 %rs, %s2
%s = select i1 %c, i32 %rs, i32 %s2
@@ -392,7 +392,7 @@
; CHECK: @ %bb.0:
; CHECK-NEXT: vminv.u32 r0, q0
; CHECK-NEXT: bx lr
- %r = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %s1)
+ %r = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %s1)
%c = icmp ult i32 %r, %s2
%s = select i1 %c, i32 %r, i32 %s2
ret i32 %s