; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
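
; Tests selection of 'and' for i1, i32, i64, and vector types with SGPR,
; VGPR, 32-bit constant, and inline immediate operands.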

declare i32 @llvm.r600.read.tidig.x() #0

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}

define amdgpu_kernel void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}

define amdgpu_kernel void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define amdgpu_kernel void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define amdgpu_kernel void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FIXME: We should really duplicate the constant so that the SALU use
; can fold into the s_and_b32 and the VALU one is materialized
; directly without copying from the SGPR.

; Second use is a VGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_0:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
; SI: buffer_store_dword [[VK]]
define amdgpu_kernel void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567

  ; Just to stop future replacement of copy to vgpr + store with VALU op.
  %foo = add i32 %and, %b
  store volatile i32 %foo, i32 addrspace(1)* %out
  store volatile i32 1234567, i32 addrspace(1)* %out
  ret void
}

; Second use is another SGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_1:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: s_add_i32
; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: buffer_store_dword [[VK]]
define amdgpu_kernel void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567
  %foo = add i32 %and, 1234567
  %bar = add i32 %foo, %b
  store volatile i32 %bar, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_sgpr_vgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define amdgpu_kernel void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_sgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define amdgpu_kernel void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define amdgpu_kernel void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep, align 4
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define amdgpu_kernel void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep, align 4
  %and = and i32 %a, 64
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define amdgpu_kernel void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep, align 4
  %and = and i32 %a, -16
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64
; SI: s_and_b64
define amdgpu_kernel void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should use SGPRs
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
define amdgpu_kernel void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, i1 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000{{$}}
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80{{$}}
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 549756338176
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_multi_use_constant_i64:
; XSI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x80000{{$}}
; XSI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0x80{{$}}
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}
define amdgpu_kernel void @s_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and0 = and i64 %a, 549756338176
  %and1 = and i64 %b, 549756338176
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_32_bit_constant_i64:
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687{{$}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_load_dword [[B:s[0-9]+]]
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_lshl_b32 [[A]], [[A]], 1
; SI: s_lshl_b32 [[B]], [[B]], 1
; SI: s_and_b32 s{{[0-9]+}}, [[A]], 62
; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
  %shl.a = shl i64 %a, 1
  %shl.b = shl i64 %b, 1
  %and0 = and i64 %shl.a, 62
  %and1 = and i64 %shl.b, 62
  %add0 = add i64 %and0, %c
  %add1 = add i64 %and1, %c
  store volatile i64 %add0, i64 addrspace(1)* %out
  store volatile i64 %add1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define amdgpu_kernel void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.a, align 8
  %gep.b = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
  %b = load i64, i64 addrspace(1)* %gep.b, align 8
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, {{v[0-9]+}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, {{v[0-9]+}}
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.a, align 8
  %and = and i64 %a, 1231231234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_constant_i64:
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[LO0:[0-9]+]]:[[HI0:[0-9]+]]{{\]}}
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[LO1:[0-9]+]]:[[HI1:[0-9]+]]{{\]}}
; SI-DAG: s_movk_i32 [[KHI:s[0-9]+]], 0x11e{{$}}
; SI-DAG: s_mov_b32 [[KLO:s[0-9]+]], 0xab19b207{{$}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], v[[LO0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], v[[LO1]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI1]]
; SI: buffer_store_dwordx2
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load volatile i64, i64 addrspace(1)* %aptr
  %b = load volatile i64, i64 addrspace(1)* %aptr
  %and0 = and i64 %a, 1231231234567
  %and1 = and i64 %b, 1231231234567
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_inline_imm_i64:
; SI: buffer_load_dwordx2 v{{\[}}[[LO0:[0-9]+]]:[[HI0:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: buffer_load_dwordx2 v{{\[}}[[LO1:[0-9]+]]:[[HI1:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: v_and_b32_e32 v[[RESLO0:[0-9]+]], 63, v[[LO0]]
; SI: v_and_b32_e32 v[[RESLO1:[0-9]+]], 63, v[[LO1]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO0]]
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO1]]
define amdgpu_kernel void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load volatile i64, i64 addrspace(1)* %aptr
  %b = load volatile i64, i64 addrspace(1)* %aptr
  %and0 = and i64 %a, 63
  %and1 = and i64 %b, 63
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64_32_bit_constant:
; SI: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]]
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 0x12d687, [[VAL]]
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.a, align 8
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: {{buffer|flat}}_load_dword v{{[0-9]+}}
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.a, align 8
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should be able to reduce load width
; FUNC-LABEL: {{^}}v_and_inline_neg_imm_i64:
; SI: {{buffer|flat}}_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: v_and_b32_e32 v[[VAL_LO]], -8, v[[VAL_LO]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
define amdgpu_kernel void @v_and_inline_neg_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.a, align 8
  %and = and i64 %a, -8
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64
; SI: s_load_dword
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 64
; SI-NOT: and
; SI: buffer_store_dword
define amdgpu_kernel void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_lshl_b32 [[A]], [[A]], 1{{$}}
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, [[A]], 64
; SI-NOT: and
; SI: s_add_u32
; SI-NEXT: s_addc_u32
define amdgpu_kernel void @s_and_inline_imm_64_i64_noshrink(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a, i64 %b) {
  %shl = shl i64 %a, 1
  %and = and i64 %shl, 64
  %add = add i64 %and, %b
  store i64 %add, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3ff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4607182418800017408
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13830554455654793216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3fe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4602678819172646912
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbfe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13826050856027422720
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4611686018427387904
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, -2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13835058055282163712
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x40100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4616189618054758400
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xc0100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13839561654909534208
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Test with the 64-bit integer bitpattern for a 32-bit float in the
; low 32-bits, which is not a valid 64-bit inline immediate.

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dword s
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1082130432
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, -1065353216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Shift into upper 32-bits
; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4647714815446351872
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define amdgpu_kernel void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13871086852301127680
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}
attributes #0 = { nounwind readnone }