; RUN: llc < %s -march x86-64 -mtriple x86_64-unknown-linux-gnu -mattr +avx | FileCheck %s
; RUN: llc < %s -march x86-64 -mtriple x86_64-unknown-linux-gnu -mattr +avx512f | FileCheck %s

4define <4 x float> @testXMM_1(<4 x float> %_xmm0, i64 %_l) {
5; CHECK: vmovhlps %xmm1, %xmm0, %xmm0
6entry:
7 %0 = tail call <4 x float> asm "vmovhlps $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
8 ret <4 x float> %0
9}
10
11define <4 x float> @testXMM_2(<4 x float> %_xmm0, i64 %_l) {
12; CHECK: movapd %xmm0, %xmm0
13entry:
14 %0 = tail call <4 x float> asm "movapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
15 ret <4 x float> %0
16}
17
18define <4 x float> @testXMM_3(<4 x float> %_xmm0, i64 %_l) {
19; CHECK: vmovapd %xmm0, %xmm0
20entry:
21 %0 = tail call <4 x float> asm "vmovapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
22 ret <4 x float> %0
23}
24
25define <4 x float> @testXMM_4(<4 x float> %_xmm0, i64 %_l) {
26; CHECK: vmpsadbw $0, %xmm1, %xmm0, %xmm0
27entry:
28 %0 = tail call <4 x float> asm "vmpsadbw $$0, $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
29 ret <4 x float> %0
30}
31
32define <4 x float> @testXMM_5(<4 x float> %_xmm0, i64 %_l) {
33; CHECK: vminpd %xmm0, %xmm0, %xmm0
34entry:
35 %0 = tail call <4 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, i64 %_l)
36 ret <4 x float> %0
37}
38
39define i64 @testXMM_6(i64 returned %_l) {
40; CHECK: vmovd %xmm0, %eax
41entry:
42 tail call void asm sideeffect "vmovd $0, %eax", "v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
43 ret i64 %_l
44}
45
46define <4 x float> @testXMM_7(<4 x float> returned %_xmm0) {
47; CHECK: vmovmskps %xmm0, %eax
48entry:
49 tail call void asm sideeffect "vmovmskps $0, %rax", "v,~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0)
50 ret <4 x float> %_xmm0
51}
52
53define i64 @testXMM_8(<4 x float> %_xmm0, i64 %_l) {
54; CHECK: vmulsd %xmm1, %xmm0, %xmm0
55entry:
56 %0 = tail call i64 asm "vmulsd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
57 ret i64 %0
58}
59
60define <4 x float> @testXMM_9(<4 x float> %_xmm0, i64 %_l) {
61; CHECK: vorpd %xmm1, %xmm0, %xmm0
62entry:
63 %0 = tail call <4 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
64 ret <4 x float> %0
65}
66
67define <4 x float> @testXMM_10(<4 x float> %_xmm0, i64 %_l) {
68; CHECK: pabsb %xmm0, %xmm0
69entry:
70 %0 = tail call <4 x float> asm "pabsb $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
71 ret <4 x float> %0
72}
73
74define <4 x float> @testXMM_11(<4 x float> %_xmm0, i64 %_l) {
75; CHECK: vpabsd %xmm0, %xmm0
76entry:
77 %0 = tail call <4 x float> asm "vpabsd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
78 ret <4 x float> %0
79}
80
81define <8 x float> @testYMM_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
82; CHECK: vmovsldup %ymm0, %ymm0
83entry:
84 %0 = tail call <8 x float> asm "vmovsldup $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm0)
85 ret <8 x float> %0
86}
87
88define <8 x float> @testYMM_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
89; CHECK: vmovapd %ymm1, %ymm0
90entry:
91 %0 = tail call <8 x float> asm "vmovapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
92 ret <8 x float> %0
93}
94
95define <8 x float> @testYMM_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
96; CHECK: vminpd %ymm1, %ymm0, %ymm0
97entry:
98 %0 = tail call <8 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
99 ret <8 x float> %0
100}
101
102define <8 x float> @testYMM_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
103; CHECK: vorpd %ymm1, %ymm0, %ymm0
104entry:
105 %0 = tail call <8 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
106 ret <8 x float> %0
107}
108
109define <8 x float> @testYMM(<8 x float> %_ymm0, <8 x float> %_ymm1) {
110; CHECK: vmulps %ymm1, %ymm0, %ymm0
111entry:
112 %0 = tail call <8 x float> asm "vmulps $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
113 ret <8 x float> %0
114}
115
116define <8 x float> @testYMM_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
117; CHECK: vmulpd %ymm1, %ymm0, %ymm0
118entry:
119 %0 = tail call <8 x float> asm "vmulpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
120 ret <8 x float> %0
121}
122
123define <8 x float> @testYMM_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
124; CHECK: vmovups %ymm1, %ymm0
125entry:
126 %0 = tail call <8 x float> asm "vmovups $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
127 ret <8 x float> %0
128}
129
130define <8 x float> @testYMM_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
131; CHECK: vmovupd %ymm1, %ymm0
132entry:
133 %0 = tail call <8 x float> asm "vmovupd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
134 ret <8 x float> %0
135}
136