; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 -disable-strictnode-mutation | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 -disable-strictnode-mutation | FileCheck %s

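; Declarations for the constrained FP intrinsics exercised by the tests below.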
declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata)
declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata)
declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata)
declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata)
declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata)
declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(<8 x float>, metadata)
declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata)
declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata)
declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata)

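; Strict (constrained) vector fadd: f1 on <8 x double>, f2 on <16 x float>.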
define <8 x double> @f1(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %a, <8 x double> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

define <16 x float> @f2(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %a, <16 x float> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}

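; Strict vector fsub: f3 on <8 x double>, f4 on <16 x float>.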
define <8 x double> @f3(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %a, <8 x double> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

define <16 x float> @f4(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsubps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %a, <16 x float> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}

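; Strict vector fmul: f5 on <8 x double>, f6 on <16 x float>.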
define <8 x double> @f5(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %a, <8 x double> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

define <16 x float> @f6(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmulps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %a, <16 x float> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}

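; Strict vector fdiv: f7 on <8 x double>, f8 on <16 x float>.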
define <8 x double> @f7(<8 x double> %a, <8 x double> %b) #0 {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %a, <8 x double> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

define <16 x float> @f8(<16 x float> %a, <16 x float> %b) #0 {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %a, <16 x float> %b,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}

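; Strict vector sqrt: f9 on <8 x double>, f10 on <16 x float>.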
define <8 x double> @f9(<8 x double> %a) #0 {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtpd %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(
                            <8 x double> %a,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

define <16 x float> @f10(<16 x float> %a) #0 {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsqrtps %zmm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(
                            <16 x float> %a,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <16 x float> %ret
}

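; Strict fpext from <8 x float> to <8 x double>.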
define <8 x double> @f11(<8 x float> %a) #0 {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtps2pd %ymm0, %zmm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(
                            <8 x float> %a,
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %ret
}

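; Strict fptrunc from <8 x double> to <8 x float>.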
define <8 x float> @f12(<8 x double> %a) #0 {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vcvtpd2ps %zmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %ret = call <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(
                            <8 x double> %a,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x float> %ret
}

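; Strict vector fma: f13 on <16 x float>, f14 on <8 x double>.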
define <16 x float> @f13(<16 x float> %a, <16 x float> %b, <16 x float> %c) #0 {
; CHECK-LABEL: f13:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <16 x float> %res
}

define <8 x double> @f14(<8 x double> %a, <8 x double> %b, <8 x double> %c) #0 {
; CHECK-LABEL: f14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; CHECK-NEXT:    ret{{[l|q]}}
  %res = call <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c,
                            metadata !"round.dynamic",
                            metadata !"fpexcept.strict") #0
  ret <8 x double> %res
}

attributes #0 = { strictfp }