; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=FAST64 -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=SLOW64 -check-prefix=GCN %s


; lshr (i64 x), c: c > 32 => reg_sequence lshr (i32 hi_32(x)), (c - 32), 0
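; For example, with c = 35 the low half of the result is hi_32(x) >> 3 and the
; high half is the constant 0, so only a single 32-bit shift of the loaded high
; dword is needed (illustrative arithmetic; see lshr_i64_35 below).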
; GCN-LABEL: {{^}}lshr_i64_35:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 3, [[VAL]]
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @lshr_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 35
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lshr_i64_63:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 31, [[VAL]]
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @lshr_i64_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 63
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lshr_i64_33:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 1, [[VAL]]
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @lshr_i64_33(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 33
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lshr_i64_32:
; GCN: buffer_load_dword v[[LO:[0-9]+]]
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @lshr_i64_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 32
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; Make sure the 'and' with the constant doesn't prevent a bfe from forming
; after the 64-bit shift is split.
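; The mask 0x7fffffffffffffff only clears bit 63, so (val & mask) >> 40 equals
; (hi_32(val) >> 8) & 0x7fffff, i.e. a bfe of the high dword at offset 8,
; width 23, as checked below (illustrative arithmetic).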

; GCN-LABEL: {{^}}lshr_and_i64_35:
; GCN: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN: v_bfe_u32 v[[BFE:[0-9]+]], v[[HI]], 8, 23
; GCN: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
define void @lshr_and_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %and = and i64 %val, 9223372036854775807 ; 0x7fffffffffffffff
  %shl = lshr i64 %and, 40
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; lshl (i64 x), c: c > 32 => reg_sequence lshl 0, (i32 lo_32(x)), (c - 32)
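; For example, with c = 35 the high half of the result is lo_32(x) << 3 and the
; low half is the constant 0 (illustrative arithmetic; see shl_i64_const_35 below).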

; GCN-LABEL: {{^}}shl_i64_const_35:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 v[[HI:[0-9]+]], 3, [[VAL]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @shl_i64_const_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 35
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i64_const_32:
; GCN: buffer_load_dword v[[HI:[0-9]+]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @shl_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 32
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i64_const_63:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 v[[HI:[0-9]+]], 31, [[VAL]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @shl_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 63
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; ashr (i64 x), 63 => reg_sequence (ashr (i32 hi_32(x)), 31), (ashr (i32 hi_32(x)), 31)
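; Shifting right arithmetically by 63 leaves only copies of the sign bit, so both
; 32-bit halves of the result are hi_32(x) >> 31 (arithmetic). The two tests below
; currently only check that the expected labels are emitted.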

; GCN-LABEL: {{^}}ashr_i64_const_32:
define void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = ashr i64 %val, 32
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}ashr_i64_const_63:
define void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = ashr i64 %val, 63
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}