; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s

; FIXME: add (sext i1 X), 1 -> zext (not i1 X)
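; Why that fold is valid (an illustrative sketch only, not checked by
; FileCheck): for an i1 value, sext produces 0 or -1, so adding 1 yields 1 or
; 0. Inverting the bit first and zero-extending gives the same two results:
;   %not = xor i1 %x, true
;   %inc = zext i1 %not to i32   ; %x = false --> 1, %x = true --> 0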

define i32 @sext_inc(i1 zeroext %x) nounwind {
; CHECK-LABEL: sext_inc:
; CHECK:       # BB#0:
; CHECK-NEXT:    movzbl %dil, %ecx
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    subl %ecx, %eax
; CHECK-NEXT:    retq
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}

; FIXME: add (sext i1 X), 1 -> zext (not i1 X)
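; The same identity holds lane-wise for vectors; the folded form would be
; (sketch only):
;   %not = xor <4 x i1> %x, <i1 true, i1 true, i1 true, i1 true>
;   %inc = zext <4 x i1> %not to <4 x i32>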

define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
; CHECK-LABEL: sext_inc_vec:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpslld $31, %xmm0, %xmm0
; CHECK-NEXT:    vpsrad $31, %xmm0, %xmm0
; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %ext = sext <4 x i1> %x to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %cmp = icmp sgt <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

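; AVX2 has no not-equal vector compare, so 'icmp ne' is expected to lower to
; an equality compare plus a NOT (the vpcmpeqd of a register with itself
; builds the all-ones mask for the vpxor).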
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq
  %cmp = icmp ne <4 x i32> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i32>
  %add = add <4 x i32> %ext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}

define <4 x i64> @cmpgt_sext_inc_vec256(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: cmpgt_sext_inc_vec256:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1
; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
; CHECK-NEXT:    retq
  %cmp = icmp sgt <4 x i64> %x, %y
  %ext = sext <4 x i1> %cmp to <4 x i64>
  %add = add <4 x i64> %ext, <i64 1, i64 1, i64 1, i64 1>
  ret <4 x i64> %add
}

define i32 @bool_logic_and_math(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; CHECK-LABEL: bool_logic_and_math:
; CHECK:       # BB#0:
; CHECK-NEXT:    cmpl %esi, %edi
; CHECK-NEXT:    setne %al
; CHECK-NEXT:    cmpl %ecx, %edx
; CHECK-NEXT:    setne %cl
; CHECK-NEXT:    andb %al, %cl
; CHECK-NEXT:    movzbl %cl, %eax
; CHECK-NEXT:    incl %eax
; CHECK-NEXT:    retq
  %cmp1 = icmp ne i32 %a, %b
  %cmp2 = icmp ne i32 %c, %d
  %and = and i1 %cmp1, %cmp2
  %zext = zext i1 %and to i32
  %add = add i32 %zext, 1
  ret i32 %add
}

define <4 x i32> @bool_logic_and_math_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) nounwind {
; CHECK-LABEL: bool_logic_and_math_vec:
; CHECK:       # BB#0:
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; CHECK-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
; CHECK-NEXT:    vpxor %xmm1, %xmm2, %xmm1
; CHECK-NEXT:    vpandn %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; CHECK-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
; CHECK-NEXT:    retq
  %cmp1 = icmp ne <4 x i32> %a, %b
  %cmp2 = icmp ne <4 x i32> %c, %d
  %and = and <4 x i1> %cmp1, %cmp2
  %zext = zext <4 x i1> %and to <4 x i32>
  %add = add <4 x i32> %zext, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %add
}