; Tests that 1/sqrt(x) patterns are folded to rsq instructions on AMDGPU.
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-fp32-denormals -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=SI-UNSAFE -check-prefix=SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-fp32-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=SI-SAFE -check-prefix=SI %s

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare float @llvm.sqrt.f32(float) nounwind readnone
declare double @llvm.sqrt.f64(double) nounwind readnone
; 1.0 / sqrt(x) should form v_rsq_f32 for f32 regardless of unsafe-math.
; SI-LABEL: {{^}}rsq_f32:
; SI: v_rsq_f32_e32
; SI: s_endpgm
define amdgpu_kernel void @rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
  %val = load float, float addrspace(1)* %in, align 4
  %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
  %div = fdiv float 1.0, %sqrt
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; f64 rsq is only fast enough to use when unsafe math is allowed; the safe
; path must keep the full-precision sqrt expansion.
; SI-LABEL: {{^}}rsq_f64:
; SI-UNSAFE: v_rsq_f64_e32
; SI-SAFE: v_sqrt_f64_e32
; SI: s_endpgm
define amdgpu_kernel void @rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
  %val = load double, double addrspace(1)* %in, align 4
  %sqrt = call double @llvm.sqrt.f64(double %val) nounwind readnone
  %div = fdiv double 1.0, %sqrt
  store double %div, double addrspace(1)* %out, align 4
  ret void
}

; Same fold as rsq_f32, but with the operand arriving in an SGPR (kernel
; argument) rather than a VGPR load.
; SI-LABEL: {{^}}rsq_f32_sgpr:
; SI: v_rsq_f32_e32 {{v[0-9]+}}, {{s[0-9]+}}
; SI: s_endpgm
define amdgpu_kernel void @rsq_f32_sgpr(float addrspace(1)* noalias %out, float %val) nounwind {
  %sqrt = call float @llvm.sqrt.f32(float %val) nounwind readnone
  %div = fdiv float 1.0, %sqrt
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; Recognize that this is rsqrt(a) * rcp(b) * c,
; not 1 / ( 1 / sqrt(a)) * rcp(b) * c.

; SI-LABEL: @rsqrt_fmul
; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4
; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8

; SI-UNSAFE-DAG: v_rsq_f32_e32 [[RSQA:v[0-9]+]], [[A]]
; SI-UNSAFE-DAG: v_rcp_f32_e32 [[RCPB:v[0-9]+]], [[B]]
; SI-UNSAFE-DAG: v_mul_f32_e32 [[TMP:v[0-9]+]], [[RSQA]], [[RCPB]]
; SI-UNSAFE: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[C]], [[TMP]]
; SI-UNSAFE: buffer_store_dword [[RESULT]]

; SI-SAFE-NOT: v_rsq_f32

; SI: s_endpgm
define amdgpu_kernel void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
  %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2

  ; Volatile loads keep the three inputs as distinct buffer_load_dwords.
  %a = load volatile float, float addrspace(1)* %gep.0
  %b = load volatile float, float addrspace(1)* %gep.1
  %c = load volatile float, float addrspace(1)* %gep.2

  ; z = c / (sqrt(a) * b)
  %x = call float @llvm.sqrt.f32(float %a)
  %y = fmul float %x, %b
  %z = fdiv float %c, %y
  store float %z, float addrspace(1)* %out.gep
  ret void
}

; -1.0 / sqrt(x): safe path negates via rcp's source modifier; unsafe path
; forms rsq and flips the sign bit with an xor.
; SI-LABEL: {{^}}neg_rsq_f32:
; SI-SAFE: v_sqrt_f32_e32 [[SQRT:v[0-9]+]], v{{[0-9]+}}
; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
; SI-SAFE: buffer_store_dword [[RSQ]]

; SI-UNSAFE: v_rsq_f32_e32 [[RSQ:v[0-9]+]], v{{[0-9]+}}
; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
define amdgpu_kernel void @neg_rsq_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
  %val = load float, float addrspace(1)* %in, align 4
  %sqrt = call float @llvm.sqrt.f32(float %val)
  %div = fdiv float -1.0, %sqrt
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; -1.0 / sqrt(x) for f64: safe path uses the full division expansion
; (v_div_scale); unsafe path folds the negate into rcp's source modifier.
; SI-LABEL: {{^}}neg_rsq_f64:
; SI-SAFE: v_sqrt_f64_e32
; SI-SAFE: v_div_scale_f64

; SI-UNSAFE: v_sqrt_f64_e32 [[SQRT:v\[[0-9]+:[0-9]+\]]], v{{\[[0-9]+:[0-9]+\]}}
; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
define amdgpu_kernel void @neg_rsq_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
  %val = load double, double addrspace(1)* %in, align 4
  %sqrt = call double @llvm.sqrt.f64(double %val)
  %div = fdiv double -1.0, %sqrt
  store double %div, double addrspace(1)* %out, align 4
  ret void
}

; -1.0 / sqrt(-x): the fneg on the input folds into sqrt/rsq's source
; modifier on both paths.
; SI-LABEL: {{^}}neg_rsq_neg_f32:
; SI-SAFE: v_sqrt_f32_e64 [[SQRT:v[0-9]+]], -v{{[0-9]+}}
; SI-SAFE: v_rcp_f32_e64 [[RSQ:v[0-9]+]], -[[SQRT]]
; SI-SAFE: buffer_store_dword [[RSQ]]

; SI-UNSAFE: v_rsq_f32_e64 [[RSQ:v[0-9]+]], -v{{[0-9]+}}
; SI-UNSAFE: v_xor_b32_e32 [[NEG_RSQ:v[0-9]+]], 0x80000000, [[RSQ]]
; SI-UNSAFE: buffer_store_dword [[NEG_RSQ]]
define amdgpu_kernel void @neg_rsq_neg_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
  %val = load float, float addrspace(1)* %in, align 4
  %val.fneg = fsub float -0.0, %val
  %sqrt = call float @llvm.sqrt.f32(float %val.fneg)
  %div = fdiv float -1.0, %sqrt
  store float %div, float addrspace(1)* %out, align 4
  ret void
}

; f64 variant of neg_rsq_neg: input fneg folds into sqrt's source modifier;
; safe path still requires the v_div_scale division expansion.
; SI-LABEL: {{^}}neg_rsq_neg_f64:
; SI-SAFE: v_sqrt_f64_e64 v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
; SI-SAFE: v_div_scale_f64

; SI-UNSAFE: v_sqrt_f64_e64 [[SQRT:v\[[0-9]+:[0-9]+\]]], -v{{\[[0-9]+:[0-9]+\]}}
; SI-UNSAFE: v_rcp_f64_e64 [[RCP:v\[[0-9]+:[0-9]+\]]], -[[SQRT]]
; SI-UNSAFE: buffer_store_dwordx2 [[RCP]]
define amdgpu_kernel void @neg_rsq_neg_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) nounwind {
  %val = load double, double addrspace(1)* %in, align 4
  %val.fneg = fsub double -0.0, %val
  %sqrt = call double @llvm.sqrt.f64(double %val.fneg)
  %div = fdiv double -1.0, %sqrt
  store double %div, double addrspace(1)* %out, align 4
  ret void
}