[Target][ARM] Add PerformVSELECTCombine for MVE Integer Ops
This patch adds an implementation of PerformVSELECTCombine in the
ARM DAG Combiner that transforms vselect(not(cond), lhs, rhs) into
vselect(cond, rhs, lhs).
Normally, this should be done by the target-independent DAG Combiner,
but it doesn't handle the kind of constants we generate for the boolean
vector conditions, so we have to reimplement the fold here.
Differential Revision: https://reviews.llvm.org/D77712
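For reference, below is a minimal sketch of what such a combine can look
like in ARMISelLowering.cpp. It is not the exact code landed by this
patch; it assumes the negated condition reaches the combiner as an xor
with a splat-of-ones constant (the shape the generic combiner fails to
recognise) and simply swaps the select operands:

```cpp
// Sketch only: fold vselect(xor(cond, splat(1)), lhs, rhs)
//                -> vselect(cond, rhs, lhs) for MVE.
static SDValue PerformVSELECTCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Only fire for MVE; the boolean constants in question come from MVE's
  // i1 vector compares.
  if (!Subtarget->hasMVEIntegerOps())
    return SDValue();

  // Match a condition of the form xor(cond, splat(1)), i.e. not(cond) on
  // an i1 vector. AllowTruncation lets splats built from wider constants
  // still match.
  SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::XOR)
    return SDValue();
  ConstantSDNode *C = isConstOrConstSplat(Cond.getOperand(1),
                                          /*AllowUndefs=*/false,
                                          /*AllowTruncation=*/true);
  if (!C || !C->isOne())
    return SDValue();

  // Drop the negation and swap the true/false operands.
  return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
                         Cond.getOperand(0), N->getOperand(2),
                         N->getOperand(1));
}
```

Hooked into ARMTargetLowering::PerformDAGCombine under a case for
ISD::VSELECT, this is what removes the vpnot and swaps the vpsel operands
in the mve-pred-or.ll diff below; the constant matching in the landed
patch may differ from the splat-of-ones check shown here.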
diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-or.ll b/llvm/test/CodeGen/Thumb2/mve-pred-or.ll
index 4e9e074..f93cc66 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-or.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-or.ll
@@ -6,8 +6,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.i32 ne, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -22,8 +21,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.i32 eq, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -38,8 +36,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 ge, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -54,8 +51,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 le, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -70,8 +66,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 gt, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -86,8 +81,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 lt, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -116,8 +110,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.i32 eq, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -165,8 +158,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.i32 ne, q1, q2
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -181,8 +173,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.i32 eq, q1, q2
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -197,8 +188,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 le, q2, q1
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -213,8 +203,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 le, q1, q2
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -229,8 +218,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 lt, q2, q1
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -245,8 +233,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i32 ne, q0, zr
; CHECK-NEXT: vcmpt.s32 lt, q1, q2
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <4 x i32> %a, zeroinitializer
@@ -340,8 +327,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i16 ne, q0, zr
; CHECK-NEXT: vcmpt.i16 ne, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <8 x i16> %a, zeroinitializer
@@ -356,8 +342,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i16 ne, q0, zr
; CHECK-NEXT: vcmpt.i16 ne, q1, q2
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <8 x i16> %a, zeroinitializer
@@ -373,8 +358,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i8 ne, q0, zr
; CHECK-NEXT: vcmpt.i8 ne, q1, zr
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <16 x i8> %a, zeroinitializer
@@ -389,8 +373,7 @@
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: vpt.i8 ne, q0, zr
; CHECK-NEXT: vcmpt.i8 ne, q1, q2
-; CHECK-NEXT: vpnot
-; CHECK-NEXT: vpsel q0, q0, q1
+; CHECK-NEXT: vpsel q0, q1, q0
; CHECK-NEXT: bx lr
entry:
%c1 = icmp eq <16 x i8> %a, zeroinitializer