; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; Module-level vector globals used as inputs (*a, *b) and results (*r)
; by the test functions below.
@vda = common global <2 x double> zeroinitializer, align 16
@vdb = common global <2 x double> zeroinitializer, align 16
@vdr = common global <2 x double> zeroinitializer, align 16
@vfa = common global <4 x float> zeroinitializer, align 16
@vfb = common global <4 x float> zeroinitializer, align 16
@vfr = common global <4 x float> zeroinitializer, align 16
@vbllr = common global <2 x i64> zeroinitializer, align 16
@vbir = common global <4 x i32> zeroinitializer, align 16
@vblla = common global <2 x i64> zeroinitializer, align 16
@vbllb = common global <2 x i64> zeroinitializer, align 16
@vbia = common global <4 x i32> zeroinitializer, align 16
@vbib = common global <4 x i32> zeroinitializer, align 16
; Check that the xvdivdp intrinsic selects the v2f64 divide instruction.
; Function Attrs: nounwind
define void @test1() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test1
; CHECK: xvdivdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
28
; Check that the xvdivsp intrinsic selects the v4f32 divide instruction.
; Function Attrs: nounwind
define void @test2() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test2
; CHECK: xvdivsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
40
; Check that llvm.ceil on v2f64 selects xvrdpip (round to +inf).
; The original had a dead duplicate load of @vda; it has been removed.
; Function Attrs: nounwind
define void @test3() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %0)
  store <2 x double> %1, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test3
; CHECK: xvrdpip {{[0-9]+}}, {{[0-9]+}}
}
52
; Check that llvm.ceil on v4f32 selects xvrspip (round to +inf).
; The original had a dead duplicate load of @vfa; it has been removed.
; Function Attrs: nounwind
define void @test4() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %0)
  store <4 x float> %1, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test4
; CHECK: xvrspip {{[0-9]+}}, {{[0-9]+}}
}
64
; Check that the xvcmpeqdp intrinsic selects the v2f64 compare-equal instruction.
; Function Attrs: nounwind
define void @test5() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test5
; CHECK: xvcmpeqdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
76
; Check that the xvcmpeqsp intrinsic selects the v4f32 compare-equal instruction.
; Function Attrs: nounwind
define void @test6() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test6
; CHECK: xvcmpeqsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
88
; Check that the xvcmpgedp intrinsic selects the v2f64 compare-ge instruction.
; Function Attrs: nounwind
define void @test7() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test7
; CHECK: xvcmpgedp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
100
; Check that the xvcmpgesp intrinsic selects the v4f32 compare-ge instruction.
; Function Attrs: nounwind
define void @test8() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test8
; CHECK: xvcmpgesp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
112
; Check that the xvcmpgtdp intrinsic selects the v2f64 compare-gt instruction.
; Function Attrs: nounwind
define void @test9() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test9
; CHECK: xvcmpgtdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
124
; Check that the xvcmpgtsp intrinsic selects the v4f32 compare-gt instruction.
; Function Attrs: nounwind
define void @test10() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test10
; CHECK: xvcmpgtsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
136
; Check that the xvresp intrinsic (v4f32 reciprocal estimate) selects xvresp.
; Function Attrs: nounwind
define <4 x float> @emit_xvresp(<4 x float> %a) {
entry:
  %a.addr = alloca <4 x float>, align 16
  store <4 x float> %a, <4 x float>* %a.addr, align 16
  %0 = load <4 x float>, <4 x float>* %a.addr, align 16
  %1 = call <4 x float> @llvm.ppc.vsx.xvresp(<4 x float> %0)
  ret <4 x float> %1
; CHECK-LABEL: @emit_xvresp
; CHECK: xvresp {{[0-9]+}}, {{[0-9]+}}
}
148
; Check that the xvredp intrinsic (v2f64 reciprocal estimate) selects xvredp.
; Function Attrs: nounwind
define <2 x double> @emit_xvredp(<2 x double> %a) {
entry:
  %a.addr = alloca <2 x double>, align 16
  store <2 x double> %a, <2 x double>* %a.addr, align 16
  %0 = load <2 x double>, <2 x double>* %a.addr, align 16
  %1 = call <2 x double> @llvm.ppc.vsx.xvredp(<2 x double> %0)
  ret <2 x double> %1
; CHECK-LABEL: @emit_xvredp
; CHECK: xvredp {{[0-9]+}}, {{[0-9]+}}
}
160
; Declarations for the PowerPC VSX intrinsics and generic vector
; intrinsics exercised by the tests above.

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvresp(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvredp(<2 x double>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ceil.v4f32(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float>, <4 x float>)