; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s

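; Basic 256-bit integer logic on <4 x i64>. The add in each of the next four
; tests forces the integer execution domain, so the logic ops are expected to
; select the integer forms (vpandn/vpand/vpor/vpxor) rather than the
; FP-domain vandnps/vandps/vorps/vxorps.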
; CHECK: vpandn
; CHECK: vpandn %ymm
; CHECK: ret
define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
  ; Force the execution domain with an add.
  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
  %y = xor <4 x i64> %a2, <i64 -1, i64 -1, i64 -1, i64 -1>
  %x = and <4 x i64> %a, %y
  ret <4 x i64> %x
}

; CHECK: vpand
; CHECK: vpand %ymm
; CHECK: ret
define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
  ; Force the execution domain with an add.
  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
  %x = and <4 x i64> %a2, %b
  ret <4 x i64> %x
}

; CHECK: vpor
; CHECK: vpor %ymm
; CHECK: ret
define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
  ; Force the execution domain with an add.
  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
  %x = or <4 x i64> %a2, %b
  ret <4 x i64> %x
}

; CHECK: vpxor
; CHECK: vpxor %ymm
; CHECK: ret
define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
entry:
  ; Force the execution domain with an add.
  %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
  %x = xor <4 x i64> %a2, %b
  ret <4 x i64> %x
}

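; A byte-wise unsigned-min select (icmp ult + select) on <32 x i8> is expected
; to lower to a single 256-bit vpblendvb rather than being split into two
; 128-bit halves.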
; CHECK: vpblendvb
; CHECK: vpblendvb %ymm
; CHECK: ret
define <32 x i8> @vpblendvb(<32 x i8> %x, <32 x i8> %y) {
  %min_is_x = icmp ult <32 x i8> %x, %y
  %min = select <32 x i1> %min_is_x, <32 x i8> %x, <32 x i8> %y
  ret <32 x i8> %min
}

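; Sign-select idiom: mask = b >> 31 (arithmetic), result = (a & ~mask) | (-a & mask),
; i.e. select(b < 0, -a, a). This should combine to a single psignd; the
; CHECK-NOT verifies that no separate subtract survives.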
define <8 x i32> @signd(<8 x i32> %a, <8 x i32> %b) nounwind {
entry:
; CHECK: signd:
; CHECK: psignd
; CHECK-NOT: sub
; CHECK: ret
  %b.lobit = ashr <8 x i32> %b, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %sub = sub nsw <8 x i32> zeroinitializer, %a
  %0 = xor <8 x i32> %b.lobit, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %1 = and <8 x i32> %a, %0
  %2 = and <8 x i32> %b.lobit, %sub
  %cond = or <8 x i32> %1, %2
  ret <8 x i32> %cond
}

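; The same mask idiom, but selecting between two unrelated operands (%c and
; %a), so psign does not apply; it should instead become a variable blend
; (pblendvb).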
define <8 x i32> @blendvb(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) nounwind {
entry:
; CHECK: blendvb:
; CHECK: pblendvb
; CHECK: ret
  %b.lobit = ashr <8 x i32> %b, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
  %sub = sub nsw <8 x i32> zeroinitializer, %a
  %0 = xor <8 x i32> %b.lobit, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  %1 = and <8 x i32> %c, %0
  %2 = and <8 x i32> %a, %b.lobit
  %cond = or <8 x i32> %1, %2
  ret <8 x i32> %cond
}

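; An all-ones <8 x i32> constant should be materialized with a single
; vpcmpeqd on a ymm register, not assembled from 128-bit halves via vinsert.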
define <8 x i32> @allOnes() nounwind {
; CHECK: vpcmpeqd
; CHECK-NOT: vinsert
  ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
}

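; Likewise for an all-ones <16 x i16> constant.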
define <16 x i16> @allOnes2() nounwind {
; CHECK: vpcmpeqd
; CHECK-NOT: vinsert
  ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
}