; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
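; Tests for selecting trunc(lshr(x, amt)) and trunc(shl(x, amt)) on i64, with
; the shift amount masked to [0, 31], into v_alignbit_b32. The instruction
; computes the low 32 bits of the 64-bit value {src0, src1} shifted right by
; src2[4:0], so a 64-bit shift whose amount is provably below 32 can be folded
; into a single instruction. Both uniform (SGPR) and divergent (VGPR) shift
; amounts are covered, plus negative tests for masks other than 31.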

; GCN-LABEL: {{^}}alignbit_shr_pat:
; GCN-DAG: s_load_dword s[[SHR:[0-9]+]]
; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]]

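; Uniform case: the amount is a kernel argument, loaded into an SGPR. Masking
; with 31 proves amt < 32, so trunc(lshr(x, amt)) is exactly bits
; [amt, amt+31] of {hi, lo}, i.e. alignbit(hi, lo, amt).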
define amdgpu_kernel void @alignbit_shr_pat(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 31
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = lshr i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}alignbit_shl_pat:
; GCN-DAG: s_load_dword s[[SHL:[0-9]+]]
; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN-DAG: s_sub_i32 s[[SHR:[0-9]+]], 32, s[[SHL]]
; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]]

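; For shl the expected lowering rewrites the amount: the combine emits an
; alignbit whose shift operand is 32 - amt, hence the s_sub_i32 checked above.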
define amdgpu_kernel void @alignbit_shl_pat(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 31
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}alignbit_shr_pat_v:
; GCN-DAG: load_dword v[[SHR:[0-9]+]],
; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]]

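; Divergent case: the amount is loaded per work-item, so it lives in a VGPR
; and alignbit takes its shift operand from a VGPR.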
define amdgpu_kernel void @alignbit_shr_pat_v(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
bb:
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep1 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
  %tmp = load i64, i64 addrspace(1)* %gep1, align 8
  %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tid
  %amt = load i32, i32 addrspace(1)* %gep2, align 4
  %tmp3 = and i32 %amt, 31
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = lshr i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %gep2, align 4
  ret void
}

; GCN-LABEL: {{^}}alignbit_shl_pat_v:
; GCN-DAG: load_dword v[[SHL:[0-9]+]],
; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN-DAG: v_sub_i32_e32 v[[SHR:[0-9]+]], {{[^,]+}}, 32, v[[SHL]]
; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]]

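; Divergent shl case: the 32 - amt rewrite must use v_sub_i32 here because the
; amount is per-work-item rather than uniform.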
define amdgpu_kernel void @alignbit_shl_pat_v(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
bb:
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep1 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
  %tmp = load i64, i64 addrspace(1)* %gep1, align 8
  %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tid
  %amt = load i32, i32 addrspace(1)* %gep2, align 4
  %tmp3 = and i32 %amt, 31
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %gep2, align 4
  ret void
}

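; The remaining tests are negative: the mask must be exactly 31 for the
; combine to fire. A mask of 30 still bounds the amount below 32 but is not
; matched; a mask of 63 admits amounts >= 32, which alignbit's 5-bit shift
; field cannot represent, so folding there would be incorrect.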
; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and30:
; Negative test: mask is 30, not 31
; GCN: v_lshr_b64
; GCN-NOT: v_alignbit_b32

define amdgpu_kernel void @alignbit_shr_pat_wrong_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 30
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = lshr i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}alignbit_shl_pat_wrong_and30:
; Negative test: mask is 30, not 31
; GCN: v_lshl_b64
; GCN-NOT: v_alignbit_b32

define amdgpu_kernel void @alignbit_shl_pat_wrong_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 30
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and63:
; Negative test: mask is 63, so the amount may be >= 32
; GCN: v_lshr_b64
; GCN-NOT: v_alignbit_b32

define amdgpu_kernel void @alignbit_shr_pat_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 63
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = lshr i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}alignbit_shl_pat_wrong_and63:
; Negative test: mask is 63, so the amount may be >= 32
; GCN: v_lshl_b64
; GCN-NOT: v_alignbit_b32

define amdgpu_kernel void @alignbit_shl_pat_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 63
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #0

attributes #0 = { nounwind readnone speculatable }