; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
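
; Bitwise AND codegen tests: scalar selection (s_and_b32/s_and_b64), vector
; selection (v_and_b32, AND_INT on R600), and inline immediate handling for
; i1, i32, i64, and vector operands.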

; FUNC-LABEL: {{^}}test2:
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32> addrspace(1)* %in
  %b = load <2 x i32> addrspace(1)* %b_ptr
  %result = and <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test4:
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: AND_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32> addrspace(1)* %in
  %b = load <4 x i32> addrspace(1)* %b_ptr
  %result = and <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_i32:
; SI: s_and_b32
define void @s_and_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i32:
; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x12d687
define void @s_and_constant_i32(i32 addrspace(1)* %out, i32 %a) {
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_i32:
; SI: v_and_b32
define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
  %a = load i32 addrspace(1)* %aptr, align 4
  %b = load i32 addrspace(1)* %bptr, align 4
  %and = and i32 %a, %b
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, v{{[0-9]+}}
define void @v_and_constant_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 1234567
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_64_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, 64, v{{[0-9]+}}
define void @v_and_inline_imm_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, 64
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_and_inline_imm_neg_16_i32:
; SI: v_and_b32_e32 v{{[0-9]+}}, -16, v{{[0-9]+}}
define void @v_and_inline_imm_neg_16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr) {
  %a = load i32 addrspace(1)* %aptr, align 4
  %and = and i32 %a, -16
  store i32 %and, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_and_i64:
; SI: s_and_b64
define void @s_and_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Should use SGPRs
; FUNC-LABEL: {{^}}s_and_i1:
; SI: v_and_b32
define void @s_and_i1(i1 addrspace(1)* %out, i1 %a, i1 %b) {
  %and = and i1 %a, %b
  store i1 %and, i1 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_and_constant_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
define void @s_and_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %and = and i64 %a, 281474976710655 ; 0xffffffffffff, not an inline immediate
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %b = load i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_i64_br:
; SI: v_and_b32
; SI: v_and_b32
define void @v_and_i64_br(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i32 %cond) {
entry:
  %tmp0 = icmp eq i32 %cond, 0
  br i1 %tmp0, label %if, label %endif

if:
  %a = load i64 addrspace(1)* %aptr, align 8
  %b = load i64 addrspace(1)* %bptr, align 8
  %and = and i64 %a, %b
  br label %endif

endif:
  %tmp1 = phi i64 [%and, %if], [0, %entry]
  store i64 %tmp1, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_and_constant_i64:
; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_and_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
define void @v_and_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 1234567
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Replace and 0 with mov 0
; FUNC-LABEL: {{^}}v_and_inline_imm_i64:
; SI: v_and_b32_e32 {{v[0-9]+}}, 64, {{v[0-9]+}}
; SI: v_and_b32_e32 {{v[0-9]+}}, 0, {{v[0-9]+}}
define void @v_and_inline_imm_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64 addrspace(1)* %aptr, align 8
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 64
define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1
define void @s_and_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_1.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1.0
define void @s_and_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4607182418800017408 ; f64 1.0 (0x3ff0000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_1.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1.0
define void @s_and_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13830554455654793216 ; f64 -1.0 (0xbff0000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_0.5_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0.5
define void @s_and_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4602678819172646912 ; f64 0.5 (0x3fe0000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_0.5_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -0.5
define void @s_and_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13826050856027422720 ; f64 -0.5 (0xbfe0000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_2.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 2.0
define void @s_and_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4611686018427387904 ; f64 2.0 (0x4000000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_2.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -2.0
define void @s_and_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13835058055282163712 ; f64 -2.0 (0xc000000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_4.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 4.0
define void @s_and_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4616189618054758400 ; f64 4.0 (0x4010000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_imm_neg_4.0_i64:
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -4.0
define void @s_and_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13839561654909534208 ; f64 -4.0 (0xc010000000000000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Test with the 64-bit integer bitpattern for a 32-bit float in the
; low 32 bits, which is not a valid 64-bit inline immediate.

; FUNC-LABEL: {{^}}s_and_inline_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
define void @s_and_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 1082130432 ; f32 4.0 (0x40800000)
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Copy of -1 register
; FUNC-LABEL: {{^}}s_and_inline_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}
define void @s_and_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, -1065353216 ; f32 -4.0 (0xc0800000) sign-extended to i64
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; Test with the f32 bitpattern shifted into the upper 32 bits.
; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
define void @s_and_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 4647714815446351872 ; f32 4.0 (0x40800000) in the upper 32 bits
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_and_inline_high_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}
define void @s_and_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %and = and i64 %a, 13871086852301127680 ; f32 -4.0 (0xc0800000) in the upper 32 bits
  store i64 %and, i64 addrspace(1)* %out, align 8
  ret void
}