; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -enable-no-nans-fp-math -enable-unsafe-fp-math -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI-NONAN -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

; FIXME: Should replace unsafe-fp-math with no signed zeros.

declare i32 @llvm.r600.read.tidig.x() #1

; The two inputs to the instruction are different SGPRs from the same
; super register, so we can't fold both SGPR operands even though they
; are both the same register.

; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32:
; EG: MIN *
; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(<4 x float> addrspace(1)* %out, <4 x float> inreg %reg0) #0 {
  %r0 = extractelement <4 x float> %reg0, i32 0
  %r1 = extractelement <4 x float> %reg0, i32 1
  %r2 = fcmp uge float %r0, %r1
  %r3 = select i1 %r2, float %r1, float %r0
  %vec = insertelement <4 x float> undef, float %r3, i32 0
  store <4 x float> %vec, <4 x float> addrspace(1)* %out, align 16
  ret void
}

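; Both operands are SGPRs loaded from the kernel arguments, so one of them is
; expected to be copied into a VGPR (v_mov_b32) before the min, as the checks
; below require.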
; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32:
; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc

; SI-SAFE-DAG: v_mov_b32_e32 [[VA:v[0-9]+]], [[A]]
; SI-NONAN-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]

; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[VA]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[VB]]

define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(float addrspace(1)* %out, float %a, float %b) #0 {
  %cmp = fcmp ule float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ule_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ule float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ole_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ole float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_olt_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp olt float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: @test_fmin_legacy_ult_f32
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ult float %a, %b
  %val = select i1 %cmp, float %a, float %b
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32:
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(<1 x float> addrspace(1)* %out, <1 x float> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr <1 x float>, <1 x float> addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr <1 x float>, <1 x float> addrspace(1)* %gep.0, i32 1

  %a = load <1 x float>, <1 x float> addrspace(1)* %gep.0
  %b = load <1 x float>, <1 x float> addrspace(1)* %gep.1

  %cmp = fcmp ult <1 x float> %a, %b
  %val = select <1 x i1> %cmp, <1 x float> %a, <1 x float> %b
  store <1 x float> %val, <1 x float> addrspace(1)* %out
  ret void
}

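; The vector variants below are expected to scalarize into one min per
; element: two mins for v2f32 and three for v3f32.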
; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32:
; SI: buffer_load_dwordx2
; SI: buffer_load_dwordx2
; SI-SAFE: v_min_legacy_f32_e32
; SI-SAFE: v_min_legacy_f32_e32

; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr <2 x float>, <2 x float> addrspace(1)* %gep.0, i32 1

  %a = load <2 x float>, <2 x float> addrspace(1)* %gep.0
  %b = load <2 x float>, <2 x float> addrspace(1)* %gep.1

  %cmp = fcmp ult <2 x float> %a, %b
  %val = select <2 x i1> %cmp, <2 x float> %a, <2 x float> %b
  store <2 x float> %val, <2 x float> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32:
; SI-SAFE: v_min_legacy_f32_e32
; SI-SAFE: v_min_legacy_f32_e32
; SI-SAFE: v_min_legacy_f32_e32

; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
; SI-NONAN: v_min_f32_e32
define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(<3 x float> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr <3 x float>, <3 x float> addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr <3 x float>, <3 x float> addrspace(1)* %gep.0, i32 1

  %a = load <3 x float>, <3 x float> addrspace(1)* %gep.0
  %b = load <3 x float>, <3 x float> addrspace(1)* %gep.1

  %cmp = fcmp ult <3 x float> %a, %b
  %val = select <3 x i1> %cmp, <3 x float> %a, <3 x float> %b
  store <3 x float> %val, <3 x float> addrspace(1)* %out
  ret void
}

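; The compare result has a second use (it is stored to %out1), so the
; fcmp/select pair cannot be folded into a min; a v_cmp/v_cndmask sequence is
; expected instead.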
; FUNC-LABEL: @test_fmin_legacy_ole_f32_multi_use
; SI: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-NOT: v_min
; SI: v_cmp_le_f32
; SI-NEXT: v_cndmask_b32
; SI-NOT: v_min
; SI: s_endpgm
define amdgpu_kernel void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x() #1
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %cmp = fcmp ole float %a, %b
  %val0 = select i1 %cmp, float %a, float %b
  store float %val0, float addrspace(1)* %out0, align 4
  store i1 %cmp, i1 addrspace(1)* %out1
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }