; blob: b1a51a415321da1983d28be5552b9296883cf051 (scrape artifact, kept as comment)
; RUN: llc -march=amdgcn -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=bonaire -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -enable-misched < %s | FileCheck -check-prefix=CI -check-prefix=GCN -check-prefix=FUNC %s
Matt Arsenault16e31332014-09-10 21:44:27 +00004
Tom Stellard79243d92014-10-01 17:15:17 +00005; FUNC-LABEL: {{^}}frem_f32:
Marek Olsakfa6607d2015-02-11 14:26:46 +00006; GCN-DAG: buffer_load_dword [[X:v[0-9]+]], {{.*$}}
7; GCN-DAG: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
8; GCN-DAG: v_cmp
9; GCN-DAG: v_mul_f32
10; GCN: v_rcp_f32_e32
11; GCN: v_mul_f32_e32
12; GCN: v_mul_f32_e32
13; GCN: v_trunc_f32_e32
14; GCN: v_mad_f32
15; GCN: s_endpgm
Matt Arsenault16e31332014-09-10 21:44:27 +000016define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
17 float addrspace(1)* %in2) #0 {
18 %gep2 = getelementptr float addrspace(1)* %in2, i32 4
19 %r0 = load float addrspace(1)* %in1, align 4
20 %r1 = load float addrspace(1)* %gep2, align 4
21 %r2 = frem float %r0, %r1
22 store float %r2, float addrspace(1)* %out, align 4
23 ret void
24}
25
Tom Stellard79243d92014-10-01 17:15:17 +000026; FUNC-LABEL: {{^}}unsafe_frem_f32:
Marek Olsakfa6607d2015-02-11 14:26:46 +000027; GCN: buffer_load_dword [[Y:v[0-9]+]], {{.*}} offset:16
28; GCN: buffer_load_dword [[X:v[0-9]+]], {{.*}}
29; GCN: v_rcp_f32_e32 [[INVY:v[0-9]+]], [[Y]]
30; GCN: v_mul_f32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
31; GCN: v_trunc_f32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
32; GCN: v_mad_f32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
33; GCN: buffer_store_dword [[RESULT]]
34; GCN: s_endpgm
Matt Arsenault16e31332014-09-10 21:44:27 +000035define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
36 float addrspace(1)* %in2) #1 {
37 %gep2 = getelementptr float addrspace(1)* %in2, i32 4
38 %r0 = load float addrspace(1)* %in1, align 4
39 %r1 = load float addrspace(1)* %gep2, align 4
40 %r2 = frem float %r0, %r1
41 store float %r2, float addrspace(1)* %out, align 4
42 ret void
43}
44
Matt Arsenault16e31332014-09-10 21:44:27 +000045
Tom Stellard79243d92014-10-01 17:15:17 +000046; FUNC-LABEL: {{^}}frem_f64:
Marek Olsakfa6607d2015-02-11 14:26:46 +000047; GCN: buffer_load_dwordx2 [[Y:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
48; GCN: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]], {{.*}}, 0
49; TODO: Check SI.
50; CI: v_rcp_f64_e32 [[INVY:v\[[0-9]+:[0-9]+\]]], [[Y]]
51; CI: v_mul_f64 [[DIV:v\[[0-9]+:[0-9]+\]]], [[X]], [[INVY]]
52; CI: v_trunc_f64_e32 [[TRUNC:v\[[0-9]+:[0-9]+\]]], [[DIV]]
53; CI: v_mul_f64 [[RESULTM:v\[[0-9]+:[0-9]+\]]], [[TRUNC]], [[Y]]
54; SI: v_mul_f64 [[RESULTM:v\[[0-9]+:[0-9]+\]]], {{v\[[0-9]+:[0-9]+\]}}, [[Y]]
55; GCN: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[X]], -[[RESULTM]]
56; GCN: buffer_store_dwordx2 [[RESULT]], {{.*}}, 0
57; GCN: s_endpgm
Matt Arsenault16e31332014-09-10 21:44:27 +000058define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
59 double addrspace(1)* %in2) #0 {
60 %r0 = load double addrspace(1)* %in1, align 8
61 %r1 = load double addrspace(1)* %in2, align 8
62 %r2 = frem double %r0, %r1
63 store double %r2, double addrspace(1)* %out, align 8
64 ret void
65}
66
Tom Stellard79243d92014-10-01 17:15:17 +000067; FUNC-LABEL: {{^}}unsafe_frem_f64:
Marek Olsakfa6607d2015-02-11 14:26:46 +000068; GCN: v_rcp_f64_e32
69; GCN: v_mul_f64
Tom Stellard326d6ec2014-11-05 14:50:53 +000070; SI: v_bfe_u32
Marek Olsakfa6607d2015-02-11 14:26:46 +000071; CI: v_trunc_f64_e32
72; GCN: v_fma_f64
73; GCN: s_endpgm
Matt Arsenault16e31332014-09-10 21:44:27 +000074define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
75 double addrspace(1)* %in2) #1 {
76 %r0 = load double addrspace(1)* %in1, align 8
77 %r1 = load double addrspace(1)* %in2, align 8
78 %r2 = frem double %r0, %r1
79 store double %r2, double addrspace(1)* %out, align 8
80 ret void
81}
82
83define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
84 <2 x float> addrspace(1)* %in2) #0 {
85 %gep2 = getelementptr <2 x float> addrspace(1)* %in2, i32 4
86 %r0 = load <2 x float> addrspace(1)* %in1, align 8
87 %r1 = load <2 x float> addrspace(1)* %gep2, align 8
88 %r2 = frem <2 x float> %r0, %r1
89 store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
90 ret void
91}
92
93define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
94 <4 x float> addrspace(1)* %in2) #0 {
95 %gep2 = getelementptr <4 x float> addrspace(1)* %in2, i32 4
96 %r0 = load <4 x float> addrspace(1)* %in1, align 16
97 %r1 = load <4 x float> addrspace(1)* %gep2, align 16
98 %r2 = frem <4 x float> %r0, %r1
99 store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
100 ret void
101}
102
103define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
104 <2 x double> addrspace(1)* %in2) #0 {
105 %gep2 = getelementptr <2 x double> addrspace(1)* %in2, i32 4
106 %r0 = load <2 x double> addrspace(1)* %in1, align 16
107 %r1 = load <2 x double> addrspace(1)* %gep2, align 16
108 %r2 = frem <2 x double> %r0, %r1
109 store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
110 ret void
111}
112
113attributes #0 = { nounwind "unsafe-fp-math"="false" }
114attributes #1 = { nounwind "unsafe-fp-math"="true" }