AMDGPU: Reduce 64-bit SRAs
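
For a constant shift amount c with 32 <= c <= 63, a 64-bit arithmetic
shift right needs only the high 32 bits of its source:

  lo(ashr(x, c)) = ashr(hi(x), c - 32)
  hi(ashr(x, c)) = ashr(hi(x), 31)

so the i64 shift reduces to 32-bit shifts of the high half, which is
what the updated CHECK lines below expect. As a minimal standalone
sanity check of that identity (not part of the patch; reducedAshr is a
hypothetical helper name, and two's-complement arithmetic behavior of
signed >> is assumed):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Rebuild ashr(x, c), 32 <= c <= 63, from 32-bit shifts of hi(x).
  static int64_t reducedAshr(int64_t x, unsigned c) {
    int32_t hi = static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);
    uint32_t lo = static_cast<uint32_t>(hi >> (c - 32)); // low half
    uint32_t sign = static_cast<uint32_t>(hi >> 31);     // sign fill
    return static_cast<int64_t>((static_cast<uint64_t>(sign) << 32) | lo);
  }

  int main() {
    const int64_t vals[] = {0, 1, -1, INT64_MIN, INT64_MAX,
                            0x123456789abcdef0};
    for (int64_t v : vals)
      for (unsigned c = 32; c <= 63; ++c)
        assert((v >> c) == reducedAshr(v, c)); // >> is arithmetic here
    std::puts("identity holds for the sampled values");
  }
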
llvm-svn: 258096
diff --git a/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll b/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
index 2fab003..b47e68a 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -105,10 +105,18 @@
; ashr (i64 x), c: c >= 32 => (ashr hi(x), c - 32), (ashr hi(x), 31)
-; GCN-LABEL: {{^}}ashr_i64_const_gt_32:
-define void @ashr_i64_const_gt_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+; GCN-LABEL: {{^}}ashr_i64_const_32:
+define void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%val = load i64, i64 addrspace(1)* %in
- %shl = ashr i64 %val, 35
+ %shl = ashr i64 %val, 32
+ store i64 %shl, i64 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}ashr_i64_const_63:
+define void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
+ %val = load i64, i64 addrspace(1)* %in
+ %shl = ashr i64 %val, 63
store i64 %shl, i64 addrspace(1)* %out
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
index 138b93b..1d75c49a 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
@@ -13,8 +13,7 @@
; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f32:
; GCN: {{buffer|flat}}_load_dwordx2
-; SI: v_ashr_i64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, 63
-; VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\]}}, 63, {{v\[[0-9]+:[0-9]+\]}}
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
; GCN: v_xor_b32
; GCN: v_ffbh_u32
diff --git a/llvm/test/CodeGen/AMDGPU/sra.ll b/llvm/test/CodeGen/AMDGPU/sra.ll
index b3ec6bd..bf1de02 100644
--- a/llvm/test/CodeGen/AMDGPU/sra.ll
+++ b/llvm/test/CodeGen/AMDGPU/sra.ll
@@ -201,10 +201,10 @@
}
; GCN-LABEL: {{^}}s_ashr_32_i64:
-; GCN: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
-; GCN: s_ashr_i64 [[SHIFT:s\[[0-9]+:[0-9]+\]]], [[VAL]], 32
-; GCN: s_add_u32
-; GCN: s_addc_u32
+; GCN: s_load_dword s[[HI:[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
+; GCN: s_add_u32 s{{[0-9]+}}, s[[HI]], s{{[0-9]+}}
+; GCN: s_addc_u32 s{{[0-9]+}}, s[[SHIFT]], s{{[0-9]+}}
define void @s_ashr_32_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = ashr i64 %a, 32
%add = add i64 %result, %b
@@ -213,10 +213,10 @@
}
; GCN-LABEL: {{^}}v_ashr_32_i64:
-; GCN: {{buffer|flat}}_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; SI: v_ashr_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], [[VAL]], 32
-; VI: v_ashrrev_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], 32, [[VAL]]
-; GCN: {{buffer|flat}}_store_dwordx2 [[SHIFT]]
+; SI: buffer_load_dword v[[HI:[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; VI: flat_load_dword v[[HI:[0-9]+]]
+; GCN: v_ashrrev_i32_e32 v[[SHIFT:[0-9]+]], 31, v[[HI]]
+; GCN: {{buffer|flat}}_store_dwordx2 v{{\[}}[[HI]]:[[SHIFT]]{{\]}}
define void @v_ashr_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
@@ -228,9 +228,11 @@
}
; GCN-LABEL: {{^}}s_ashr_63_i64:
-; GCN-DAG: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
-; GCN: s_ashr_i64 [[SHIFT:s\[[0-9]+:[0-9]+\]]], [[VAL]], 63
-; GCN: s_add_u32
+; GCN-DAG: s_load_dword s[[HI:[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_ashr_i32 s[[SHIFT:[0-9]+]], s[[HI]], 31
+; GCN: s_mov_b32 s[[COPYSHIFT:[0-9]+]], s[[SHIFT]]
+; GCN: s_add_u32 {{s[0-9]+}}, s[[HI]], {{s[0-9]+}}
+; GCN: s_addc_u32 {{s[0-9]+}}, s[[COPYSHIFT]], {{s[0-9]+}}
define void @s_ashr_63_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
%result = ashr i64 %a, 63
%add = add i64 %result, %b
@@ -239,10 +241,11 @@
}
; GCN-LABEL: {{^}}v_ashr_63_i64:
-; GCN-DAG: {{buffer|flat}}_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; SI: v_ashr_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], [[VAL]], 63
-; VI: v_ashrrev_i64 [[SHIFT:v\[[0-9]+:[0-9]+\]]], 63, [[VAL]]
-; GCN: {{buffer|flat}}_store_dwordx2 [[SHIFT]]
+; SI: buffer_load_dword v[[HI:[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
+; VI: flat_load_dword v[[HI:[0-9]+]]
+; GCN: v_ashrrev_i32_e32 v[[SHIFT:[0-9]+]], 31, v[[HI]]
+; GCN: v_mov_b32_e32 v[[COPY:[0-9]+]], v[[SHIFT]]
+; GCN: {{buffer|flat}}_store_dwordx2 v{{\[}}[[SHIFT]]:[[COPY]]{{\]}}
define void @v_ashr_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() #0
%gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid