; RUN: llc -mtriple=aarch64 < %s | FileCheck %s

; Check that we optimize out AND instructions, and ADD/SUB instructions that
; are redundant modulo the shift size, by taking advantage of the implicit
; modulo performed on the shift amount by the variable shift/rotate
; instructions.

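; The AArch64 variable shifts (LSLV/LSRV/ASRV/RORV) take the shift amount
; modulo the register width (32 or 64), so "amt & (width - 1)",
; "width - amt" and "width + amt" can be folded to "amt", "-amt" and "amt"
; respectively.
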
define i32 @test1(i32 %x, i64 %y) {
; CHECK-LABEL: test1:
; CHECK-NOT: and
; CHECK: lsr
  %sh_prom = trunc i64 %y to i32
  %shr = lshr i32 %x, %sh_prom
  ret i32 %shr
}
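; A plausible lowering of @test1 (illustrative only; register choices are an
; assumption, only the checks above are verified):
;   lsr w0, w0, w1
;   ret
; No "and" is needed because the W-register variable shift already uses only
; the low 5 bits of the shift amount.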

define i64 @test2(i32 %x, i64 %y) {
; CHECK-LABEL: test2:
; CHECK-NOT: orr
; CHECK-NOT: sub
; CHECK: neg
; CHECK: asr
  %sub9 = sub nsw i32 64, %x
  %sh_prom12.i = zext i32 %sub9 to i64
  %shr.i = ashr i64 %y, %sh_prom12.i
  ret i64 %shr.i
}
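; A plausible lowering of @test2 (illustrative; the scratch register w8 is an
; assumption):
;   neg w8, w0
;   asr x0, x1, x8
;   ret
; 64 - x is congruent to -x modulo 64, so the sub becomes a neg and the zext
; needs no extra instruction.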

define i64 @test3(i64 %x, i64 %y) {
; CHECK-LABEL: test3:
; CHECK-NOT: add
; CHECK: lsl
  %add = add nsw i64 64, %x
  %shl = shl i64 %y, %add
  ret i64 %shl
}
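; A plausible lowering of @test3 (illustrative only):
;   lsl x0, x1, x0
;   ret
; 64 + x is congruent to x modulo 64, so the add is dropped entirely.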