AMDGPU/SI: Use new SimplifyDemandedBits helper for multi-use operations
Summary:
We are using this helper for our 24-bit arithmetic combines, so we are now able to eliminate multi-use operations that mask the high bits of 24-bit inputs (e.g. and x, 0xffffff).
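
As a minimal sketch of the pattern this enables (hypothetical IR, not taken verbatim from the tests below): the mask has multiple uses, but every use only demands the low 24 bits, so the combine can now look through it and the and becomes dead:

  %x24 = and i32 %x, 16777215    ; 0xffffff mask with two uses
  %y24 = and i32 %y, 16777215
  %lo  = mul i32 %x24, %y24      ; selectable as a 24-bit multiply
  %sq  = mul i32 %x24, %x24      ; second use of %x24, also 24-bit

Both multiplies can be formed from %x and %y directly, leaving the masks with no remaining users.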
Reviewers: arsenm, nhaehnle
Subscribers: tony-tye, arsenm, kzhuravl, wdng, nhaehnle, llvm-commits, yaxunl
Differential Revision: https://reviews.llvm.org/D24672
llvm-svn: 284267
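
As an illustration of the expected codegen change for test_umul24_i64_square (sketched from the check lines below; register numbers are only illustrative), the source is no longer masked before the 24-bit multiplies:

  before:
    s_and_b32             s1, s0, 0xffffff
    v_mul_u32_u24_e64     v0, s1, s1
    v_mul_hi_u32_u24_e64  v1, s1, s1

  after:
    v_mul_u32_u24_e64     v0, s0, s0
    v_mul_hi_u32_u24_e64  v1, s0, s0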
diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index 72c6b2b..f107775 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -74,3 +74,63 @@
store i32 %4, i32 addrspace(1)* %out
ret void
}
+
+; FUNC-LABEL: {{^}}extra_and:
+; SI-NOT: v_and
+; SI: v_mad_u32_u24
+; SI: v_mad_u32_u24
+define amdgpu_kernel void @extra_and(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
+bb:
+ br label %bb4
+
+bb4: ; preds = %bb4, %bb
+ %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
+ %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
+ %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
+ %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
+ %tmp8 = and i32 %tmp7, 16777215
+ %tmp9 = and i32 %tmp6, 16777215
+ %tmp10 = and i32 %tmp5, 16777215
+ %tmp11 = and i32 %tmp, 16777215
+ %tmp12 = mul i32 %tmp8, %tmp11
+ %tmp13 = add i32 %arg2, %tmp12
+ %tmp14 = mul i32 %tmp9, %tmp11
+ %tmp15 = add i32 %arg3, %tmp14
+ %tmp16 = add nuw nsw i32 %tmp13, %tmp15
+ %tmp17 = icmp eq i32 %tmp16, 8
+ br i1 %tmp17, label %bb18, label %bb4
+
+bb18: ; preds = %bb4
+ store i32 %tmp16, i32 addrspace(1)* %arg
+ ret void
+}
+
+; FUNC-LABEL: {{^}}dont_remove_shift:
+; SI: v_lshr
+; SI: v_mad_u32_u24
+; SI: v_mad_u32_u24
+define amdgpu_kernel void @dont_remove_shift(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
+bb:
+ br label %bb4
+
+bb4: ; preds = %bb4, %bb
+ %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
+ %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
+ %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
+ %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
+ %tmp8 = lshr i32 %tmp7, 8
+ %tmp9 = lshr i32 %tmp6, 8
+ %tmp10 = lshr i32 %tmp5, 8
+ %tmp11 = lshr i32 %tmp, 8
+ %tmp12 = mul i32 %tmp8, %tmp11
+ %tmp13 = add i32 %arg2, %tmp12
+ %tmp14 = mul i32 %tmp9, %tmp11
+ %tmp15 = add i32 %arg3, %tmp14
+ %tmp16 = add nuw nsw i32 %tmp13, %tmp15
+ %tmp17 = icmp eq i32 %tmp16, 8
+ br i1 %tmp17, label %bb18, label %bb4
+
+bb18: ; preds = %bb4
+ store i32 %tmp16, i32 addrspace(1)* %arg
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index c8f8ba2..4503e86 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -82,12 +82,10 @@
ret void
}

-; FIXME: Should be able to eliminate bfe
; FUNC-LABEL: {{^}}test_smul24_i64_square:
; GCN: s_load_dword [[A:s[0-9]+]]
-; GCN: s_bfe_i32 [[SEXT:s[0-9]+]], [[A]], 0x180000{{$}}
-; GCN-DAG: v_mul_hi_i32_i24_e64 v{{[0-9]+}}, [[SEXT]], [[SEXT]]
-; GCN-DAG: v_mul_i32_i24_e64 v{{[0-9]+}}, [[SEXT]], [[SEXT]]
+; GCN-DAG: v_mul_hi_i32_i24_e64 v{{[0-9]+}}, [[A]], [[A]]
+; GCN-DAG: v_mul_i32_i24_e64 v{{[0-9]+}}, [[A]], [[A]]
; GCN: buffer_store_dwordx2
define void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
%shl.i = shl i32 %a, 8
diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index 44b17b6..9e5f31a 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -145,12 +145,11 @@
ret void
}

-; FIXME: Should be able to eliminate the and.
; FUNC-LABEL: {{^}}test_umul24_i64_square:
; GCN: s_load_dword [[A:s[0-9]+]]
-; GCN: s_and_b32 [[TRUNC:s[0-9]+]], [[A]], 0xffffff{{$}}
-; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[TRUNC]], [[TRUNC]]
-; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[TRUNC]], [[TRUNC]]
+; GCN-NOT: s_and_b32
+; GCN-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
+; GCN-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[A]], [[A]]
define void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
entry:
%tmp0 = shl i64 %a, 40