; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
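; For i8, signmask is 128 (0x80). Viewed as a signed number, x ^ 128 equals
; (unsigned value of x) - 128, a strictly increasing map from the unsigned
; range [0, 255] onto the signed range [-128, 127]. So comparing the xor'd
; values signed is the same as comparing the original values unsigned (and
; vice versa): the xors disappear and only the predicate's signedness flips.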

define i1 @slt_to_ult(i8 %x, i8 %y) {
; CHECK-LABEL: @slt_to_ult(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 %x, %y
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 128
  %b = xor i8 %y, 128
  %cmp = icmp slt i8 %a, %b
  ret i1 %cmp
}

; PR33138 - https://bugs.llvm.org/show_bug.cgi?id=33138

define <2 x i1> @slt_to_ult_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @slt_to_ult_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i8> %x, %y
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 128, i8 128>
  %b = xor <2 x i8> %y, <i8 128, i8 128>
  %cmp = icmp slt <2 x i8> %a, %b
  ret <2 x i1> %cmp
}

; Make sure that unsigned -> signed works too.

define i1 @ult_to_slt(i8 %x, i8 %y) {
; CHECK-LABEL: @ult_to_slt(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 %x, %y
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 128
  %b = xor i8 %y, 128
  %cmp = icmp ult i8 %a, %b
  ret i1 %cmp
}

define <2 x i1> @ult_to_slt_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @ult_to_slt_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> %x, %y
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 128, i8 128>
  %b = xor <2 x i8> %y, <i8 128, i8 128>
  %cmp = icmp ult <2 x i8> %a, %b
  ret <2 x i1> %cmp
}

; icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
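; For i8, maxsignval is 127 (0x7f). Viewed as a signed number, x ^ 127 equals
; 127 - (unsigned value of x), a strictly decreasing map over the whole
; unsigned range. So a signed compare of the xor'd values is an unsigned
; compare of the originals with the predicate reversed (slt becomes ugt);
; that is what the s/u' notation above means.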

define i1 @slt_to_ugt(i8 %x, i8 %y) {
; CHECK-LABEL: @slt_to_ugt(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 %x, %y
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 127
  %b = xor i8 %y, 127
  %cmp = icmp slt i8 %a, %b
  ret i1 %cmp
}

define <2 x i1> @slt_to_ugt_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @slt_to_ugt_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i8> %x, %y
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 127, i8 127>
  %b = xor <2 x i8> %y, <i8 127, i8 127>
  %cmp = icmp slt <2 x i8> %a, %b
  ret <2 x i1> %cmp
}

; Make sure that unsigned -> signed works too.

define i1 @ult_to_sgt(i8 %x, i8 %y) {
; CHECK-LABEL: @ult_to_sgt(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 %x, %y
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 127
  %b = xor i8 %y, 127
  %cmp = icmp ult i8 %a, %b
  ret i1 %cmp
}

define <2 x i1> @ult_to_sgt_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @ult_to_sgt_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i8> %x, %y
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 127, i8 127>
  %b = xor <2 x i8> %y, <i8 127, i8 127>
  %cmp = icmp ult <2 x i8> %a, %b
  ret <2 x i1> %cmp
}

; icmp u/s (a ^ signmask), C --> icmp s/u a, C'
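; When one side is a constant, the xor is folded into the constant instead.
; For example, in @sge_to_ugt below, (x ^ 128) s>= 15 means (unsigned x) - 128 >= 15,
; i.e. x u>= 143 (0x8f, which is 15 ^ 128). Printed as a signed i8 and
; canonicalized from uge to ugt, that is icmp ugt i8 %x, -114.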

define i1 @sge_to_ugt(i8 %x) {
; CHECK-LABEL: @sge_to_ugt(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 %x, -114
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 128
  %cmp = icmp sge i8 %a, 15
  ret i1 %cmp
}

define <2 x i1> @sge_to_ugt_splat(<2 x i8> %x) {
; CHECK-LABEL: @sge_to_ugt_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i8> %x, <i8 -114, i8 -114>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 128, i8 128>
  %cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
  ret <2 x i1> %cmp
}

; Make sure that unsigned -> signed works too.

define i1 @uge_to_sgt(i8 %x) {
; CHECK-LABEL: @uge_to_sgt(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 %x, -114
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 128
  %cmp = icmp uge i8 %a, 15
  ret i1 %cmp
}

define <2 x i1> @uge_to_sgt_splat(<2 x i8> %x) {
; CHECK-LABEL: @uge_to_sgt_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i8> %x, <i8 -114, i8 -114>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 128, i8 128>
  %cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
  ret <2 x i1> %cmp
}

; icmp u/s (a ^ maxsignval), C --> icmp s/u' a, C'
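; Same idea with maxsignval: in @sge_to_ult below, (x ^ 127) s>= 15 means
; 127 - (unsigned x) >= 15, i.e. x u<= 112, which is printed as
; icmp ult i8 %x, 113.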

define i1 @sge_to_ult(i8 %x) {
; CHECK-LABEL: @sge_to_ult(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 %x, 113
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 127
  %cmp = icmp sge i8 %a, 15
  ret i1 %cmp
}

define <2 x i1> @sge_to_ult_splat(<2 x i8> %x) {
; CHECK-LABEL: @sge_to_ult_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i8> %x, <i8 113, i8 113>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 127, i8 127>
  %cmp = icmp sge <2 x i8> %a, <i8 15, i8 15>
  ret <2 x i1> %cmp
}

; Make sure that unsigned -> signed works too.

define i1 @uge_to_slt(i8 %x) {
; CHECK-LABEL: @uge_to_slt(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 %x, 113
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %a = xor i8 %x, 127
  %cmp = icmp uge i8 %a, 15
  ret i1 %cmp
}

define <2 x i1> @uge_to_slt_splat(<2 x i8> %x) {
; CHECK-LABEL: @uge_to_slt_splat(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> %x, <i8 113, i8 113>
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %a = xor <2 x i8> %x, <i8 127, i8 127>
  %cmp = icmp uge <2 x i8> %a, <i8 15, i8 15>
  ret <2 x i1> %cmp
}

; PR33138, part 2: https://bugs.llvm.org/show_bug.cgi?id=33138
; TODO: We could look through vector bitcasts for icmp folds,
; or we could canonicalize bitcast ahead of logic ops with constants.
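; In the test below, 0x80808080 (-2139062144) is the i8 signmask repeated in
; every byte, so after the bitcasts the <8 x i8> compare is exactly the
; signmask pattern from the top of this file; looking through the bitcasts
; would let the xors be removed and the sgt become a ugt on the bitcast values.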

define <8 x i1> @sgt_to_ugt_bitcasted_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @sgt_to_ugt_bitcasted_splat(
; CHECK-NEXT:    [[A:%.*]] = xor <2 x i32> %x, <i32 -2139062144, i32 -2139062144>
; CHECK-NEXT:    [[B:%.*]] = xor <2 x i32> %y, <i32 -2139062144, i32 -2139062144>
; CHECK-NEXT:    [[C:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
; CHECK-NEXT:    [[D:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
; CHECK-NEXT:    [[E:%.*]] = icmp sgt <8 x i8> [[C]], [[D]]
; CHECK-NEXT:    ret <8 x i1> [[E]]
;
  %a = xor <2 x i32> %x, <i32 2155905152, i32 2155905152> ; 0x80808080
  %b = xor <2 x i32> %y, <i32 2155905152, i32 2155905152>
  %c = bitcast <2 x i32> %a to <8 x i8>
  %d = bitcast <2 x i32> %b to <8 x i8>
  %e = icmp sgt <8 x i8> %c, %d
  ret <8 x i1> %e
}

; TODO: This compare is always false (little-endian). How should that be
; recognized? I.e., should InstSimplify know this directly, should InstCombine
; canonicalize this so InstSimplify can know it, or is that not something that
; we want either pass to recognize?
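; Why it is always false: on a little-endian target, bytes 1 and 3 of the
; <4 x i8> are the high bytes of the two i16 elements, so or'ing them with
; 0x80 sets the sign bit of every i16 lane, and the sgt-zero compare can
; never be true.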

define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
; CHECK-LABEL: @negative_simplify_splat(
; CHECK-NEXT:    [[A:%.*]] = or <4 x i8> %x, <i8 0, i8 -128, i8 0, i8 -128>
; CHECK-NEXT:    [[B:%.*]] = bitcast <4 x i8> [[A]] to <2 x i16>
; CHECK-NEXT:    [[C:%.*]] = icmp sgt <2 x i16> [[B]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %a = or <4 x i8> %x, <i8 0, i8 128, i8 0, i8 128>
  %b = bitcast <4 x i8> %a to <2 x i16>
  %c = icmp sgt <2 x i16> %b, zeroinitializer
  ret <2 x i1> %c
}
