; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
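; Tests selection of bitwise AND for i32, i64, and small vector operands,
; covering scalar (s_and_*) and vector (v_and_*) forms, inline immediates,
; and 32/64-bit literal constants. The workitem id intrinsic declared below
; is used to form divergent addresses so that operands land in VGPRs.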

declare i32 @llvm.r600.read.tidig.x() #0

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FIXME: We should really duplicate the constant so that the SALU use
; can fold into the s_and_b32 and the VALU one is materialized
; directly without copying from the SGPR.

; Second use is a VGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_0:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
; SI: buffer_store_dword [[VK]]
define void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567

  ; Just to stop future replacement of copy to vgpr + store with VALU op.
  %foo = add i32 %and, %b
  store volatile i32 %foo, i32 addrspace(1)* %out
  store volatile i32 1234567, i32 addrspace(1)* %out
  ret void
}

; Second use is another SGPR use of the constant.
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_1:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: s_add_i32
; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, [[K]]
; SI: buffer_store_dword v{{[0-9]+}}
define void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, 1234567
  %foo = add i32 %and, 1234567
  %bar = add i32 %foo, %b
  store volatile i32 %bar, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_sgpr_vgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.b
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32_vgpr_sgpr:
; SI-DAG: s_load_dword [[SA:s[0-9]+]]
; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
define void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load i32, i32 addrspace(1)* %gep.a
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 64
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32, i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, -16
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64:
; SI: s_and_b64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should use SGPRs
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, i1 addrspace(1)* %out
  ret void
}

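; Neither 32-bit half of this constant is an inline immediate, so the and
; is split into two 32-bit ands of literal constants.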
; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000{{$}}
; SI-DAG: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80{{$}}
; SI: buffer_store_dwordx2
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 549756338176
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

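; XSI lines record desired codegen that is not selected yet; XSI is not a
; check prefix in any RUN line, so these checks are currently inert.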
; FUNC-LABEL: {{^}}s_and_multi_use_constant_i64:
; XSI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x80000{{$}}
; XSI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0x80{{$}}
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}
define void @s_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and0 = and i64 %a, 549756338176
  %and1 = and i64 %b, 549756338176
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_32_bit_constant_i64:
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687{{$}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

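; 62 is a valid inline immediate, so it never needs to be materialized into
; a register, even with multiple uses.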
; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_load_dword [[B:s[0-9]+]]
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_lshl_b32 [[A]], [[A]], 1
; SI: s_lshl_b32 [[B]], [[B]], 1
; SI: s_and_b32 s{{[0-9]+}}, [[A]], 62
; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
  %shl.a = shl i64 %a, 1
  %shl.b = shl i64 %b, 1
  %and0 = and i64 %shl.a, 62
  %and1 = and i64 %shl.b, 62
  %add0 = add i64 %and0, %c
  %add1 = add i64 %and1, %c
  store volatile i64 %add0, i64 addrspace(1)* %out
  store volatile i64 %add1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0xab19b207, {{v[0-9]+}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, 0x11e, {{v[0-9]+}}
; SI: buffer_store_dwordx2
define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1231231234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

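; With two uses, both 32-bit halves of the constant are materialized once
; into SGPRs and reused by all four VALU ands.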
; FUNC-LABEL: {{^}}v_and_multi_use_constant_i64:
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[LO0:[0-9]+]]:[[HI0:[0-9]+]]{{\]}}
; SI-DAG: buffer_load_dwordx2 v{{\[}}[[LO1:[0-9]+]]:[[HI1:[0-9]+]]{{\]}}
; SI-DAG: s_movk_i32 [[KHI:s[0-9]+]], 0x11e{{$}}
; SI-DAG: s_mov_b32 [[KLO:s[0-9]+]], 0xab19b207{{$}}
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], v[[LO0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI0]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KLO]], v[[LO1]]
; SI-DAG: v_and_b32_e32 {{v[0-9]+}}, [[KHI]], v[[HI1]]
; SI: buffer_store_dwordx2
; SI: buffer_store_dwordx2
define void @v_and_multi_use_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load volatile i64, i64 addrspace(1)* %aptr
  %b = load volatile i64, i64 addrspace(1)* %aptr
  %and0 = and i64 %a, 1231231234567
  %and1 = and i64 %b, 1231231234567
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_and_multi_use_inline_imm_i64:
; SI: buffer_load_dwordx2 v{{\[}}[[LO0:[0-9]+]]:[[HI0:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: buffer_load_dwordx2 v{{\[}}[[LO1:[0-9]+]]:[[HI1:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: v_and_b32_e32 v[[RESLO0:[0-9]+]], 63, v[[LO0]]
; SI: v_and_b32_e32 v[[RESLO1:[0-9]+]], 63, v[[LO1]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO0]]
; SI: buffer_store_dwordx2 v{{\[}}[[RESLO1]]
define void @v_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load volatile i64, i64 addrspace(1)* %aptr
  %b = load volatile i64, i64 addrspace(1)* %aptr
  %and0 = and i64 %a, 63
  %and1 = and i64 %b, 63
  store volatile i64 %and0, i64 addrspace(1)* %out
  store volatile i64 %and1, i64 addrspace(1)* %out
  ret void
}

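; When the constant's high 32 bits are zero, only the low dword needs to be
; loaded and ANDed; the high half of the result is a known zero.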
; FUNC-LABEL: {{^}}v_and_i64_32_bit_constant:
; SI: buffer_load_dword [[VAL:v[0-9]+]]
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 0x12d687, [[VAL]]
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @v_and_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: buffer_load_dword v{{[0-9]+}}
; SI-NOT: and
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should be able to reduce load width
; FUNC-LABEL: {{^}}v_and_inline_neg_imm_i64:
; SI: buffer_load_dwordx2 v{{\[}}[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]{{\]}}
; SI-NOT: and
; SI: v_and_b32_e32 v[[VAL_LO]], -8, v[[VAL_LO]]
; SI-NOT: and
; SI: buffer_store_dwordx2 v{{\[}}[[VAL_LO]]:[[VAL_HI]]{{\]}}
define void @v_and_inline_neg_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, -8
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64:
; SI: s_load_dword
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 64
; SI-NOT: and
; SI: buffer_store_dword
define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
; SI: s_load_dword [[A:s[0-9]+]]
; SI: s_lshl_b32 [[A]], [[A]], 1{{$}}
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, [[A]], 64
; SI-NOT: and
; SI: s_add_u32
; SI-NEXT: s_addc_u32
define void @s_and_inline_imm_64_i64_noshrink(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a, i64 %b) {
  %shl = shl i64 %a, 1
  %and = and i64 %shl, 64
  %add = add i64 %and, %b
  store i64 %add, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64:
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 1
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3ff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4607182418800017408
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbff00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13830554455654793216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x3fe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4602678819172646912
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xbfe00000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13826050856027422720
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4611686018427387904
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, -2.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13835058055282163712
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0x40100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4616189618054758400
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64:
; XSI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0

; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 {{s[0-9]+}}, {{s[0-9]+}}, 0xc0100000
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13839561654909534208
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Test with the 64-bit integer bitpattern for a 32-bit float in the
; low 32 bits, which is not a valid 64-bit inline immediate.

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dword s
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1082130432
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, -1065353216
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Shift the f32 bitpattern into the upper 32 bits.
; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, 4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4647714815446351872
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64:
; SI: s_load_dwordx2
; SI: s_load_dwordx2
; SI-NOT: and
; SI: s_and_b32 s[[K_HI:[0-9]+]], s{{[0-9]+}}, -4.0
; SI-NOT: and
; SI: buffer_store_dwordx2
define void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13871086852301127680
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

attributes #0 = { nounwind readnone }