; RUN: llc -march=r600 -mcpu=SI -enable-misched < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

; frem has no hardware instruction on SI; with default (safe) FP math it is
; expanded via a full-precision fdiv (rcp + refinement, hence the V_CMP and
; extra multiplies) followed by trunc and mad: x - trunc(x/y) * y.
; FUNC-LABEL: {{^}}frem_f32:
; SI-DAG: BUFFER_LOAD_DWORD [[X:v[0-9]+]], {{.*$}}
; SI-DAG: BUFFER_LOAD_DWORD [[Y:v[0-9]+]], {{.*}} offset:0x10
; SI-DAG: V_CMP
; SI-DAG: V_MUL_F32
; SI: V_RCP_F32_e32
; SI: V_MUL_F32_e32
; SI: V_MUL_F32_e32
; SI: V_TRUNC_F32_e32
; SI: V_MAD_F32
; SI: S_ENDPGM
define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                      float addrspace(1)* %in2) #0 {
  %gep2 = getelementptr float addrspace(1)* %in2, i32 4
  %r0 = load float addrspace(1)* %in1, align 4
  %r1 = load float addrspace(1)* %gep2, align 4
  %r2 = frem float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

; With "unsafe-fp-math"="true" (attribute group #1) the divide is a bare
; rcp + mul, so the full expansion is exactly: trunc(x * (1/y)) folded into
; a single mad computing x - trunc * y (note the negated source modifier).
; FUNC-LABEL: {{^}}unsafe_frem_f32:
; SI: BUFFER_LOAD_DWORD [[Y:v[0-9]+]], {{.*}} offset:0x10
; SI: BUFFER_LOAD_DWORD [[X:v[0-9]+]], {{.*}}
; SI: V_RCP_F32_e32 [[INVY:v[0-9]+]], [[Y]]
; SI: V_MUL_F32_e32 [[DIV:v[0-9]+]], [[INVY]], [[X]]
; SI: V_TRUNC_F32_e32 [[TRUNC:v[0-9]+]], [[DIV]]
; SI: V_MAD_F32 [[RESULT:v[0-9]+]], -[[TRUNC]], [[Y]], [[X]]
; SI: BUFFER_STORE_DWORD [[RESULT]]
; SI: S_ENDPGM
define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
                             float addrspace(1)* %in2) #1 {
  %gep2 = getelementptr float addrspace(1)* %in2, i32 4
  %r0 = load float addrspace(1)* %in1, align 4
  %r1 = load float addrspace(1)* %gep2, align 4
  %r2 = frem float %r0, %r1
  store float %r2, float addrspace(1)* %out, align 4
  ret void
}

; TODO: This should check something when f64 fdiv is implemented
; correctly

; FUNC-LABEL: {{^}}frem_f64:
; SI: S_ENDPGM
define void @frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                      double addrspace(1)* %in2) #0 {
  %r0 = load double addrspace(1)* %in1, align 8
  %r1 = load double addrspace(1)* %in2, align 8
  %r2 = frem double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}

; f64 frem under unsafe-fp-math: rcp/mul divide, then the trunc is done with
; integer ops (V_BFE_I32 exponent extraction) and recombined with V_FMA_F64.
; FUNC-LABEL: {{^}}unsafe_frem_f64:
; SI: V_RCP_F64_e32
; SI: V_MUL_F64
; SI: V_BFE_I32
; SI: V_FMA_F64
; SI: S_ENDPGM
define void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
                             double addrspace(1)* %in2) #1 {
  %r0 = load double addrspace(1)* %in1, align 8
  %r1 = load double addrspace(1)* %in2, align 8
  %r2 = frem double %r0, %r1
  store double %r2, double addrspace(1)* %out, align 8
  ret void
}

; No CHECK lines: the vector cases below only verify that legalization of
; vector frem compiles without crashing (S_ENDPGM is implied by the RUN line
; succeeding, not separately checked).
define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
                        <2 x float> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x float> addrspace(1)* %in2, i32 4
  %r0 = load <2 x float> addrspace(1)* %in1, align 8
  %r1 = load <2 x float> addrspace(1)* %gep2, align 8
  %r2 = frem <2 x float> %r0, %r1
  store <2 x float> %r2, <2 x float> addrspace(1)* %out, align 8
  ret void
}

; Compile-only coverage for <4 x float> frem (no CHECK lines).
define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
                        <4 x float> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <4 x float> addrspace(1)* %in2, i32 4
  %r0 = load <4 x float> addrspace(1)* %in1, align 16
  %r1 = load <4 x float> addrspace(1)* %gep2, align 16
  %r2 = frem <4 x float> %r0, %r1
  store <4 x float> %r2, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; Compile-only coverage for <2 x double> frem (no CHECK lines).
define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
                        <2 x double> addrspace(1)* %in2) #0 {
  %gep2 = getelementptr <2 x double> addrspace(1)* %in2, i32 4
  %r0 = load <2 x double> addrspace(1)* %in1, align 16
  %r1 = load <2 x double> addrspace(1)* %gep2, align 16
  %r2 = frem <2 x double> %r0, %r1
  store <2 x double> %r2, <2 x double> addrspace(1)* %out, align 16
  ret void
}

; #0 = default (IEEE-safe) FP math; #1 = unsafe-fp-math, which permits the
; rcp-based fast divide checked in the unsafe_* functions above.
attributes #0 = { nounwind "unsafe-fp-math"="false" }
attributes #1 = { nounwind "unsafe-fp-math"="true" }