; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s


; FUNC-LABEL: {{^}}v_fsub_f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
  %b_ptr = getelementptr float addrspace(1)* %in, i32 1
  %a = load float addrspace(1)* %in, align 4
  %b = load float addrspace(1)* %b_ptr, align 4
  %result = fsub float %a, %b
  store float %result, float addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}s_fsub_f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W

; SI: v_sub_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
define void @s_fsub_f32(float addrspace(1)* %out, float %a, float %b) {
  %sub = fsub float %a, %b
  store float %sub, float addrspace(1)* %out, align 4
  ret void
}

declare float @llvm.R600.load.input(i32) readnone

declare void @llvm.AMDGPU.store.output(float, i32)

; FUNC-LABEL: {{^}}fsub_v2f32:
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
; R600-DAG: ADD {{\** *}}T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y

; FIXME: Should be using SGPR directly for first operand
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
  %sub = fsub <2 x float> %a, %b
  store <2 x float> %sub, <2 x float> addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_fsub_v4f32:
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
; R600: ADD {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}

; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
  %a = load <4 x float> addrspace(1)* %in, align 16
  %b = load <4 x float> addrspace(1)* %b_ptr, align 16
  %result = fsub <4 x float> %a, %b
  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; FIXME: Should be using SGPR directly for first operand

; FUNC-LABEL: {{^}}s_fsub_v4f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: s_endpgm
define void @s_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, <4 x float> %b) {
  %result = fsub <4 x float> %a, %b
  store <4 x float> %result, <4 x float> addrspace(1)* %out, align 16
  ret void
}