; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s

; Operand and result slots for the VSX intrinsic tests below.
; align 16 is the natural alignment of the 128-bit vector types.
@vda = common global <2 x double> zeroinitializer, align 16
@vdb = common global <2 x double> zeroinitializer, align 16
@vdr = common global <2 x double> zeroinitializer, align 16
@vfa = common global <4 x float> zeroinitializer, align 16
@vfb = common global <4 x float> zeroinitializer, align 16
@vfr = common global <4 x float> zeroinitializer, align 16
@vbllr = common global <2 x i64> zeroinitializer, align 16
@vbir = common global <4 x i32> zeroinitializer, align 16
@vblla = common global <2 x i64> zeroinitializer, align 16
@vbllb = common global <2 x i64> zeroinitializer, align 16
@vbia = common global <4 x i32> zeroinitializer, align 16
@vbib = common global <4 x i32> zeroinitializer, align 16
; Function Attrs: nounwind
; llvm.ppc.vsx.xvdivdp should select the VSX vector double-precision divide.
define void @test1() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double> %0, <2 x double> %1)
  store <2 x double> %2, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test1
; CHECK: xvdivdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
28
; Function Attrs: nounwind
; llvm.ppc.vsx.xvdivsp should select the VSX vector single-precision divide.
define void @test2() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float> %0, <4 x float> %1)
  store <4 x float> %2, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test2
; CHECK: xvdivsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
40
; Function Attrs: nounwind
; llvm.ceil on <2 x double> should lower to xvrdpip (round toward +infinity).
; The original had a second, unused load of @vda; the dead load is removed
; since the lowering under test does not depend on it.
define void @test3() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %0)
  store <2 x double> %1, <2 x double>* @vdr, align 16
  ret void
; CHECK-LABEL: @test3
; CHECK: xvrdpip {{[0-9]+}}, {{[0-9]+}}
}
52
; Function Attrs: nounwind
; llvm.ceil on <4 x float> should lower to xvrspip (round toward +infinity).
; The original had a second, unused load of @vfa; the dead load is removed
; since the lowering under test does not depend on it.
define void @test4() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %0)
  store <4 x float> %1, <4 x float>* @vfr, align 16
  ret void
; CHECK-LABEL: @test4
; CHECK: xvrspip {{[0-9]+}}, {{[0-9]+}}
}
64
; Function Attrs: nounwind
; llvm.ppc.vsx.xvcmpeqdp should select the VSX double-precision compare-equal.
define void @test5() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test5
; CHECK: xvcmpeqdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
76
; Function Attrs: nounwind
; llvm.ppc.vsx.xvcmpeqsp should select the VSX single-precision compare-equal.
define void @test6() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test6
; CHECK: xvcmpeqsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
88
; Function Attrs: nounwind
; llvm.ppc.vsx.xvcmpgedp should select the VSX double-precision
; compare-greater-or-equal.
define void @test7() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test7
; CHECK: xvcmpgedp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
100
; Function Attrs: nounwind
; llvm.ppc.vsx.xvcmpgesp should select the VSX single-precision
; compare-greater-or-equal.
define void @test8() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test8
; CHECK: xvcmpgesp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
112
; Function Attrs: nounwind
; llvm.ppc.vsx.xvcmpgtdp should select the VSX double-precision
; compare-greater-than.
define void @test9() {
entry:
  %0 = load <2 x double>, <2 x double>* @vda, align 16
  %1 = load <2 x double>, <2 x double>* @vdb, align 16
  %2 = call <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double> %0, <2 x double> %1)
  store <2 x i64> %2, <2 x i64>* @vbllr, align 16
  ret void
; CHECK-LABEL: @test9
; CHECK: xvcmpgtdp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
124
; Function Attrs: nounwind
; llvm.ppc.vsx.xvcmpgtsp should select the VSX single-precision
; compare-greater-than.
define void @test10() {
entry:
  %0 = load <4 x float>, <4 x float>* @vfa, align 16
  %1 = load <4 x float>, <4 x float>* @vfb, align 16
  %2 = call <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float> %0, <4 x float> %1)
  store <4 x i32> %2, <4 x i32>* @vbir, align 16
  ret void
; CHECK-LABEL: @test10
; CHECK: xvcmpgtsp {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
}
136
; Intrinsic declarations. llvm.ceil.* are target-independent intrinsics;
; llvm.ppc.vsx.* map one-to-one onto VSX instructions.

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ceil.v4f32(<4 x float>)

; Function Attrs: nounwind readnone
declare <2 x double> @llvm.ppc.vsx.xvdivdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x float> @llvm.ppc.vsx.xvdivsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpeqsp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgedp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgesp(<4 x float>, <4 x float>)

; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvcmpgtdp(<2 x double>, <2 x double>)

; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float>, <4 x float>)