blob: 805a88b59c7216a61ad3a4ea07e348f4fe0cbcc4 [file] [log] [blame]
; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

declare i1 @llvm.AMDGPU.class.f32(float, i32) #1
declare i1 @llvm.AMDGPU.class.f64(double, i32) #1
declare i32 @llvm.r600.read.tidig.x() #1
declare float @llvm.fabs.f32(float) #1
declare double @llvm.fabs.f64(double) #1

; SI-LABEL: {{^}}test_class_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fabs_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %a.fabs = call float @llvm.fabs.f32(float %a) #1
  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %a.fneg = fsub float -0.0, %a
  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_fabs_f32:
; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_fabs_f32(i32 addrspace(1)* %out, float %a, i32 %b) #0 {
  %a.fabs = call float @llvm.fabs.f32(float %a) #1
  %a.fneg.fabs = fsub float -0.0, %a.fabs
  %result = call i1 @llvm.AMDGPU.class.f32(float %a.fneg.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_1_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 1{{$}}
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_1_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_64_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_cmp_class_f32_e64 [[COND:s\[[0-9]+:[0-9]+\]]], [[SA]], 64{{$}}
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[COND]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_64_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; Set all 10 bits of mask
; SI-LABEL: {{^}}test_class_full_mask_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_full_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1023) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_9bit_mask_f32:
; SI: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_9bit_mask_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}v_test_class_full_mask_f32:
; SI-DAG: buffer_load_dword [[VA:v[0-9]+]]
; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, [[VA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f32:
; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
; SI: v_cmp_class_f32_e32 vcc, 1.0, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; FIXME: Why isn't this using a literal constant operand?
; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f32:
; SI-DAG: buffer_load_dword [[VB:v[0-9]+]]
; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
; SI: v_cmp_class_f32_e32 vcc, [[VK]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fabs_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %a.fabs = call double @llvm.fabs.f64(double %a) #1
  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -[[SA]], [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %a.fneg = fsub double -0.0, %a
  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_fneg_fabs_f64:
; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd
; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]]
; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]]
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_fneg_fabs_f64(i32 addrspace(1)* %out, double %a, i32 %b) #0 {
  %a.fabs = call double @llvm.fabs.f64(double %a) #1
  %a.fneg.fabs = fsub double -0.0, %a.fabs
  %result = call i1 @llvm.AMDGPU.class.f64(double %a.fneg.fabs, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_1_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 1{{$}}
; SI: s_endpgm
define void @test_class_1_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 1) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_64_f64:
; SI: v_cmp_class_f64_e64 {{s\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 64{{$}}
; SI: s_endpgm
define void @test_class_64_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 64) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; Set all 9 bits of mask
; SI-LABEL: {{^}}test_class_full_mask_f64:
; SI: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f64_e32 vcc, [[SA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI-NEXT: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_full_mask_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}v_test_class_full_mask_f64:
; SI-DAG: buffer_load_dwordx2 [[VA:v\[[0-9]+:[0-9]+\]]]
; SI-DAG: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x1ff{{$}}
; SI: v_cmp_class_f64_e32 vcc, [[VA]], [[MASK]]
; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, vcc
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  ; Load through the per-thread GEP. Previously this loaded %in directly,
  ; leaving %gep.in dead and diverging from every sibling v_test_* function
  ; (e.g. v_test_class_full_mask_f32), which all load %gep.in.
  %a = load double, double addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_inline_imm_constant_dynamic_mask_f64:
; XSI: v_cmp_class_f64_e32 vcc, 1.0,
; SI: v_cmp_class_f64_e32 vcc,
; SI: s_endpgm
define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_lit_constant_dynamic_mask_f64:
; SI: v_cmp_class_f64_e32 vcc, s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
; SI: s_endpgm
define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %b = load i32, i32 addrspace(1)* %gep.in

  %result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %gep.out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_class_f32_0:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 3{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
define void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 3) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or3_class_f32_0:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 s{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
define void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
  %class2 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %or.0 = or i1 %class0, %class1
  %or.1 = or i1 %or.0, %class2

  %sext = sext i1 %or.1 to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_all_tests_class_f32_0:
; SI-NOT: v_cmp_class
; SI: v_mov_b32_e32 [[MASK:v[0-9]+]], 0x3ff{{$}}
; SI: v_cmp_class_f32_e32 vcc, v{{[0-9]+}}, [[MASK]]{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
define void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 2) #1
  %class2 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %class3 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
  %class4 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 16) #1
  %class5 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 32) #1
  %class6 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 64) #1
  %class7 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 128) #1
  %class8 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 256) #1
  %class9 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 512) #1
  %or.0 = or i1 %class0, %class1
  %or.1 = or i1 %or.0, %class2
  %or.2 = or i1 %or.1, %class3
  %or.3 = or i1 %or.2, %class4
  %or.4 = or i1 %or.3, %class5
  %or.5 = or i1 %or.4, %class6
  %or.6 = or i1 %or.5, %class7
  %or.7 = or i1 %or.6, %class8
  %or.8 = or i1 %or.7, %class9
  %sext = sext i1 %or.8 to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_class_f32_1:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 12{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
define void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 8) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_fold_or_class_f32_2:
; SI-NOT: v_cmp_class
; SI: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 7{{$}}
; SI-NOT: v_cmp_class
; SI: s_endpgm
define void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_no_fold_or_class_f32_0:
; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, 4{{$}}
; SI-DAG: v_cmp_class_f32_e64 {{s\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, 8{{$}}
; SI: s_or_b64
; SI: s_endpgm
define void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in, float %b) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %a = load float, float addrspace(1)* %gep.in

  %class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
  %class1 = call i1 @llvm.AMDGPU.class.f32(float %b, i32 8) #1
  %or = or i1 %class0, %class1

  %sext = sext i1 %or to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_0_f32:
; SI-NOT: v_cmp_class
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_0_f32(i32 addrspace(1)* %out, float %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 0) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}test_class_0_f64:
; SI-NOT: v_cmp_class
; SI: v_mov_b32_e32 [[RESULT:v[0-9]+]], 0{{$}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define void @test_class_0_f64(i32 addrspace(1)* %out, double %a) #0 {
  %result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 0) #1
  %sext = sext i1 %result to i32
  store i32 %sext, i32 addrspace(1)* %out, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }