; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s

; f32 subtract with both operands loaded into VGPRs selects v_sub_f32.
; FUNC-LABEL: {{^}}v_fsub_f32:
; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %a = load float, float addrspace(1)* %in, align 4
  %b = load float, float addrspace(1)* %b_ptr, align 4
  %result = fsub float %a, %b
  store float %result, float addrspace(1)* %out, align 4
  ret void
}

; f32 subtract of scalar kernel arguments; R600 folds the negate into
; the ADD's source modifier.
; FUNC-LABEL: {{^}}s_fsub_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W

; SI: v_sub_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
  %sub = fsub float %a, %b
  store float %sub, float addrspace(1)* %out, align 4
  ret void
}

; <2 x float> subtract of scalar kernel arguments.
; FUNC-LABEL: {{^}}fsub_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y

; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
  %sub = fsub <2 x float> %a, %b
  store <2 x float> %sub, <2 x float> addrspace(1)* %out, align 8
  ret void
}

; <4 x float> subtract with both operands loaded from memory; each lane
; gets its own subtract instruction.
; FUNC-LABEL: {{^}}v_fsub_v4f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}

; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_sub_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
  %a = load <4 x float>, <4 x float> addrspace(1)* %in, align 16
  %b = load <4 x float>, <4 x float> addrspace(1)* %b_ptr, align 16
  %result = fsub <4 x float> %a, %b
  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; <4 x float> subtract of scalar kernel arguments; SGPR operands are
; subtracted from VGPR copies via v_subrev.
; FUNC-LABEL: {{^}}s_fsub_v4f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; SI: s_endpgm
define amdgpu_kernel void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
  %result = fsub <4 x float> %a, %b
  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; Without nsz, negating the subtract result must keep the fneg,
; materialized as a sign-bit xor.
; FUNC-LABEL: {{^}}v_fneg_fsub_f32:
; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
define amdgpu_kernel void @v_fneg_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %a = load float, float addrspace(1)* %in, align 4
  %b = load float, float addrspace(1)* %b_ptr, align 4
  %result = fsub float %a, %b
  %neg.result = fsub float -0.0, %result
  store float %neg.result, float addrspace(1)* %out, align 4
  ret void
}

; With the nsz flag on the instruction, the fneg of the subtract may be
; folded away, so no sign-bit xor should appear.
; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_f32:
; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI-NOT: xor
define amdgpu_kernel void @v_fneg_fsub_nsz_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %a = load float, float addrspace(1)* %in, align 4
  %b = load float, float addrspace(1)* %b_ptr, align 4
  %result = fsub nsz float %a, %b
  %neg.result = fsub float -0.0, %result
  store float %neg.result, float addrspace(1)* %out, align 4
  ret void
}

; Same fold, but driven by the function-level no-signed-zeros attribute
; (#0) instead of the instruction flag.
; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_attribute_f32:
; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI-NOT: xor
define amdgpu_kernel void @v_fneg_fsub_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %a = load float, float addrspace(1)* %in, align 4
  %b = load float, float addrspace(1)* %b_ptr, align 4
  %result = fsub float %a, %b
  %neg.result = fsub float -0.0, %result
  store float %neg.result, float addrspace(1)* %out, align 4
  ret void
}

; For some reason the attribute has a string "true" or "false", so
; make sure it is disabled and the fneg is not folded if it is not
; "true".
; FUNC-LABEL: {{^}}v_fneg_fsub_nsz_false_attribute_f32:
; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[SUB]]
define amdgpu_kernel void @v_fneg_fsub_nsz_false_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #1 {
  %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
  %a = load float, float addrspace(1)* %in, align 4
  %b = load float, float addrspace(1)* %b_ptr, align 4
  %result = fsub float %a, %b
  %neg.result = fsub float -0.0, %result
  store float %neg.result, float addrspace(1)* %out, align 4
  ret void
}

; x - 0 folds to x under the no-signed-zeros attribute, so no subtract
; instruction should be emitted at all.
; FUNC-LABEL: {{^}}v_fsub_0_nsz_attribute_f32:
; SI-NOT: v_sub
define amdgpu_kernel void @v_fsub_0_nsz_attribute_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %a = load float, float addrspace(1)* %in, align 4
  %result = fsub float %a, 0.0
  store float %result, float addrspace(1)* %out, align 4
  ret void
}

; Attribute groups referenced by the kernels above. #0 enables the
; no-signed-zeros fast-math assumption; #1 explicitly disables it
; (the attribute value is the string "true"/"false", not a boolean).
attributes #0 = { nounwind "no-signed-zeros-fp-math"="true" }
attributes #1 = { nounwind "no-signed-zeros-fp-math"="false" }