; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s

@var32 = global i32 0
@var64 = global i64 0

; Check that an shl feeding add/sub is folded into the LSL shifted-operand
; form of the AArch64 instruction (and that non-foldable cases are not).
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:

  %rhs1 = load volatile i32* @var32
  %shift1 = shl i32 %rhs1, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18

  %rhs2 = load volatile i32* @var32
  %shift2 = shl i32 %rhs2, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31

  %rhs3 = load volatile i32* @var32
  %shift3 = shl i32 %rhs3, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs4 = load volatile i32* @var32
  %shift4 = shl i32 %rhs4, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19

  %lhs4a = load volatile i32* @var32
  %shift4a = shl i32 %lhs4a, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15

  %rhs5 = load volatile i64* @var64
  %shift5 = shl i64 %rhs5, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18

  %rhs6 = load volatile i64* @var64
  %shift6 = shl i64 %rhs6, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31

  %rhs7 = load volatile i64* @var64
  %shift7 = shl i64 %rhs7, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs8 = load volatile i64* @var64
  %shift8 = shl i64 %rhs8, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19

  %lhs8a = load volatile i64* @var64
  %shift8a = shl i64 %lhs8a, 60
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60

  ret void
; CHECK: ret
}
75
; Check that an lshr feeding add/sub is folded into the LSR shifted-operand
; form of the AArch64 instruction (and that non-foldable cases are not).
define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsr_arith:

  %shift1 = lshr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18

  %shift2 = lshr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31

  %shift3 = lshr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift4 = lshr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19

  %shift4a = lshr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15

  %shift5 = lshr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18

  %shift6 = lshr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31

  %shift7 = lshr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift8 = lshr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19

  %shift8a = lshr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45

  ret void
; CHECK: ret
}
134
; Check that an ashr feeding add/sub is folded into the ASR shifted-operand
; form of the AArch64 instruction (and that non-foldable cases are not).
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_asr_arith:

  %shift1 = ashr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18

  %shift2 = ashr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31

  %shift3 = ashr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift4 = ashr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19

  %shift4a = ashr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15

  %shift5 = ashr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18

  %shift6 = ashr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31

  %shift7 = ashr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift8 = ashr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19

  %shift8a = ashr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45

  ret void
; CHECK: ret
}
193
; Check that a shifted operand is folded into cmp for all three shift kinds,
; at both 32-bit and 64-bit widths.
define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmp:

  %shift1 = shl i32 %rhs32, 13
  %tst1 = icmp uge i32 %lhs32, %shift1
  br i1 %tst1, label %t2, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13

t2:
  %shift2 = lshr i32 %rhs32, 20
  %tst2 = icmp ne i32 %lhs32, %shift2
  br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %tst3 = icmp ne i32 %lhs32, %shift3
  br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %tst4 = icmp uge i64 %lhs64, %shift4
  br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43

t5:
  %shift5 = lshr i64 %rhs64, 20
  %tst5 = icmp ne i64 %lhs64, %shift5
  br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %tst6 = icmp ne i64 %lhs64, %shift6
  br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  ret i32 1
end:

  ret i32 0
; CHECK: ret
}
239
; Check cmn (compare-negative) formation from "icmp x, (sub 0, shifted)".
; For lsl the fold must NOT happen: a zero rhs would give different flags.
define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmn:

  %shift1 = shl i32 %rhs32, 13
  %val1 = sub i32 0, %shift1
  %tst1 = icmp uge i32 %lhs32, %val1
  br i1 %tst1, label %t2, label %end
  ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
  ; 0 then the results will differ.
; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]

t2:
  %shift2 = lshr i32 %rhs32, 20
  %val2 = sub i32 0, %shift2
  %tst2 = icmp ne i32 %lhs32, %val2
  br i1 %tst2, label %t3, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %val3 = sub i32 0, %shift3
  %tst3 = icmp eq i32 %lhs32, %val3
  br i1 %tst3, label %t4, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %val4 = sub i64 0, %shift4
  %tst4 = icmp slt i64 %lhs64, %val4
  br i1 %tst4, label %t5, label %end
  ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]

t5:
  %shift5 = lshr i64 %rhs64, 20
  %val5 = sub i64 0, %shift5
  %tst5 = icmp ne i64 %lhs64, %val5
  br i1 %tst5, label %t6, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %val6 = sub i64 0, %shift6
  %tst6 = icmp ne i64 %lhs64, %val6
  br i1 %tst6, label %t7, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  ret i32 1
end:

  ret i32 0
; CHECK: ret
}
296