; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s --check-prefix=A8
; RUN: llc -mtriple=thumb-eabi -mcpu=swift %s -o - | FileCheck %s --check-prefix=SWIFT

; rdar://12892707

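; A shift by an immediate amount should fold into the add as a Thumb-2
; shifted-register operand (add.w ... lsl/lsr/asr/ror #imm), as checked below.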
define i32 @t2ADDrs_lsl(i32 %X, i32 %Y) {
; A8: t2ADDrs_lsl
; A8: add.w r0, r0, r1, lsl #16
  %A = shl i32 %Y, 16
  %B = add i32 %X, %A
  ret i32 %B
}

define i32 @t2ADDrs_lsr(i32 %X, i32 %Y) {
; A8: t2ADDrs_lsr
; A8: add.w r0, r0, r1, lsr #16
  %A = lshr i32 %Y, 16
  %B = add i32 %X, %A
  ret i32 %B
}

define i32 @t2ADDrs_asr(i32 %X, i32 %Y) {
; A8: t2ADDrs_asr
; A8: add.w r0, r0, r1, asr #16
  %A = ashr i32 %Y, 16
  %B = add i32 %X, %A
  ret i32 %B
}

; i32 ror(n) = (x >> n) | (x << (32 - n))
define i32 @t2ADDrs_ror(i32 %X, i32 %Y) {
; A8: t2ADDrs_ror
; A8: add.w r0, r0, r1, ror #16
  %A = lshr i32 %Y, 16
  %B = shl i32 %Y, 16
  %C = or i32 %B, %A
  %R = add i32 %X, %C
  ret i32 %R
}

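; When the shift amount is in a register, the shift cannot be folded into
; the add; the checks below expect a separate shift instruction, with Swift
; preferring the 32-bit (.w) shift encodings over the narrow flag-setting forms.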
define i32 @t2ADDrs_noRegShift(i32 %X, i32 %Y, i8 %sh) {
; A8: t2ADDrs_noRegShift
; A8: uxtb r2, r2
; A8: lsls r1, r2
; A8: add r0, r1

; SWIFT: t2ADDrs_noRegShift
; SWIFT-NOT: lsls
; SWIFT: lsl.w
  %shift.upgrd.1 = zext i8 %sh to i32
  %A = shl i32 %Y, %shift.upgrd.1
  %B = add i32 %X, %A
  ret i32 %B
}

define i32 @t2ADDrs_noRegShift2(i32 %X, i32 %Y, i8 %sh) {
; A8: t2ADDrs_noRegShift2
; A8: uxtb r2, r2
; A8: lsrs r1, r2
; A8: add r0, r1

; SWIFT: t2ADDrs_noRegShift2
; SWIFT-NOT: lsrs
; SWIFT: lsr.w
  %shift.upgrd.1 = zext i8 %sh to i32
  %A = lshr i32 %Y, %shift.upgrd.1
  %B = add i32 %X, %A
  ret i32 %B
}

define i32 @t2ADDrs_noRegShift3(i32 %X, i32 %Y, i8 %sh) {
; A8: t2ADDrs_noRegShift3
; A8: uxtb r2, r2
; A8: asrs r1, r2
; A8: add r0, r1

; SWIFT: t2ADDrs_noRegShift3
; SWIFT-NOT: asrs
; SWIFT: asr.w
  %shift.upgrd.1 = zext i8 %sh to i32
  %A = ashr i32 %Y, %shift.upgrd.1
  %B = add i32 %X, %A
  ret i32 %B
}

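; When optimizing for size (optsize/minsize), the narrow 16-bit shift
; encodings are expected even on Swift.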
define i32 @t2ADDrs_optsize(i32 %X, i32 %Y, i8 %sh) optsize {
; SWIFT: t2ADDrs_optsize
; SWIFT-NOT: lsl.w
; SWIFT: lsls
  %shift.upgrd.1 = zext i8 %sh to i32
  %A = shl i32 %Y, %shift.upgrd.1
  %B = add i32 %X, %A
  ret i32 %B
}

define i32 @t2ADDrs_minsize(i32 %X, i32 %Y, i8 %sh) minsize {
; SWIFT: t2ADDrs_minsize
; SWIFT-NOT: lsr.w
; SWIFT: lsrs
  %shift.upgrd.1 = zext i8 %sh to i32
  %A = lshr i32 %Y, %shift.upgrd.1
  %B = add i32 %X, %A
  ret i32 %B
}