; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s

@var32 = global i32 0
@var64 = global i64 0

define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:

  %rhs1 = load volatile i32* @var32
  %shift1 = shl i32 %rhs1, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18

  %rhs2 = load volatile i32* @var32
  %shift2 = shl i32 %rhs2, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31

  %rhs3 = load volatile i32* @var32
  %shift3 = shl i32 %rhs3, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs4 = load volatile i32* @var32
  %shift4 = shl i32 %rhs4, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19

  %lhs4a = load volatile i32* @var32
  %shift4a = shl i32 %lhs4a, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
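; (sub from wzr is the underlying form of the "neg" alias; the point being
; tested is that the shift still folds even when the minuend is zero.)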

  %rhs5 = load volatile i64* @var64
  %shift5 = shl i64 %rhs5, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18

  %rhs6 = load volatile i64* @var64
  %shift6 = shl i64 %rhs6, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31

  %rhs7 = load volatile i64* @var64
  %shift7 = shl i64 %rhs7, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5

; Subtraction is not commutative!
  %rhs8 = load volatile i64* @var64
  %shift8 = shl i64 %rhs8, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19

  %lhs8a = load volatile i64* @var64
  %shift8a = shl i64 %lhs8a, 60
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60

  ret void
; CHECK: ret
}

define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsr_arith:

  %shift1 = lshr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18

  %shift2 = lshr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31

  %shift3 = lshr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift4 = lshr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19

  %shift4a = lshr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15

  %shift5 = lshr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18

  %shift6 = lshr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31

  %shift7 = lshr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5

; Subtraction is not commutative!
  %shift8 = lshr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19

  %shift8a = lshr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45

  ret void
; CHECK: ret
}

define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_asr_arith:

  %shift1 = ashr i32 %rhs32, 18
  %val1 = add i32 %lhs32, %shift1
  store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18

  %shift2 = ashr i32 %rhs32, 31
  %val2 = add i32 %shift2, %lhs32
  store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31

  %shift3 = ashr i32 %rhs32, 5
  %val3 = sub i32 %lhs32, %shift3
  store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift4 = ashr i32 %rhs32, 19
  %val4 = sub i32 %shift4, %lhs32
  store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19

  %shift4a = ashr i32 %lhs32, 15
  %val4a = sub i32 0, %shift4a
  store volatile i32 %val4a, i32* @var32
; CHECK: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, asr #15

  %shift5 = ashr i64 %rhs64, 18
  %val5 = add i64 %lhs64, %shift5
  store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18

  %shift6 = ashr i64 %rhs64, 31
  %val6 = add i64 %shift6, %lhs64
  store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31

  %shift7 = ashr i64 %rhs64, 5
  %val7 = sub i64 %lhs64, %shift7
  store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5

; Subtraction is not commutative!
  %shift8 = ashr i64 %rhs64, 19
  %val8 = sub i64 %shift8, %lhs64
  store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19

  %shift8a = ashr i64 %lhs64, 45
  %val8a = sub i64 0, %shift8a
  store volatile i64 %val8a, i64* @var64
; CHECK: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, asr #45

  ret void
; CHECK: ret
}

define i32 @test_cmp(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmp:

  %shift1 = shl i32 %rhs32, 13
  %tst1 = icmp uge i32 %lhs32, %shift1
  br i1 %tst1, label %t2, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsl #13

t2:
  %shift2 = lshr i32 %rhs32, 20
  %tst2 = icmp ne i32 %lhs32, %shift2
  br i1 %tst2, label %t3, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %tst3 = icmp ne i32 %lhs32, %shift3
  br i1 %tst3, label %t4, label %end
; CHECK: cmp {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %tst4 = icmp uge i64 %lhs64, %shift4
  br i1 %tst4, label %t5, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsl #43

t5:
  %shift5 = lshr i64 %rhs64, 20
  %tst5 = icmp ne i64 %lhs64, %shift5
  br i1 %tst5, label %t6, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %tst6 = icmp ne i64 %lhs64, %shift6
  br i1 %tst6, label %t7, label %end
; CHECK: cmp {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  ret i32 1
end:

  ret i32 0
; CHECK: ret
}

define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_cmn:

  %shift1 = shl i32 %rhs32, 13
  %val1 = sub i32 0, %shift1
  %tst1 = icmp uge i32 %lhs32, %val1
  br i1 %tst1, label %t2, label %end
  ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
  ; 0 then the results will differ.
; CHECK: sub [[RHS:w[0-9]+]], wzr, {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]
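  ; For illustration: with %rhs32 == 0 the shifted value is 0, so
  ; "icmp uge %lhs32, 0" must be true. The cmp against the explicitly negated
  ; value subtracts 0, produces no borrow and sets the carry, so HS holds; a
  ; cmn would instead add 0, leave the carry clear, and HS would be false.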

t2:
  %shift2 = lshr i32 %rhs32, 20
  %val2 = sub i32 0, %shift2
  %tst2 = icmp ne i32 %lhs32, %val2
  br i1 %tst2, label %t3, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, lsr #20

t3:
  %shift3 = ashr i32 %rhs32, 9
  %val3 = sub i32 0, %shift3
  %tst3 = icmp eq i32 %lhs32, %val3
  br i1 %tst3, label %t4, label %end
; CHECK: cmn {{w[0-9]+}}, {{w[0-9]+}}, asr #9

t4:
  %shift4 = shl i64 %rhs64, 43
  %val4 = sub i64 0, %shift4
  %tst4 = icmp slt i64 %lhs64, %val4
  br i1 %tst4, label %t5, label %end
  ; Again, it's important that this isn't lowered to a cmn instruction: this is
  ; a signed comparison, and negating %shift4 wraps when %shift4 == INT64_MIN,
  ; so the flags from a cmn would not match for that input.
; CHECK: sub [[RHS:x[0-9]+]], xzr, {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]
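  ; For illustration: with %lhs64 == -1 and %shift4 == INT64_MIN, %val4 wraps
  ; back to INT64_MIN and "icmp slt -1, INT64_MIN" is false. The cmp against
  ; the explicitly negated value gives N == 0 and V == 0, so LT correctly
  ; fails; a cmn would compute -1 + INT64_MIN, setting V and clearing N, so LT
  ; would wrongly hold.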

t5:
  %shift5 = lshr i64 %rhs64, 20
  %val5 = sub i64 0, %shift5
  %tst5 = icmp ne i64 %lhs64, %val5
  br i1 %tst5, label %t6, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, lsr #20

t6:
  %shift6 = ashr i64 %rhs64, 59
  %val6 = sub i64 0, %shift6
  %tst6 = icmp ne i64 %lhs64, %val6
  br i1 %tst6, label %t7, label %end
; CHECK: cmn {{x[0-9]+}}, {{x[0-9]+}}, asr #59

t7:
  ret i32 1
end:

  ret i32 0
; CHECK: ret
}