; Test 64-bit arithmetic shifts right.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check the low end of the SRAG range.
define i64 @f1(i64 %a) {
; CHECK-LABEL: f1:
; CHECK: srag %r2, %r2, 1
; CHECK: br %r14
  %shift = ashr i64 %a, 1
  ret i64 %shift
}

; Check the high end of the defined SRAG range.
define i64 @f2(i64 %a) {
; CHECK-LABEL: f2:
; CHECK: srag %r2, %r2, 63
; CHECK: br %r14
  %shift = ashr i64 %a, 63
  ret i64 %shift
}

; We don't generate shifts by out-of-range values.
define i64 @f3(i64 %a) {
; CHECK-LABEL: f3:
; CHECK-NOT: srag
; CHECK: br %r14
  %shift = ashr i64 %a, 64
  ret i64 %shift
}

; Check variable shifts.
define i64 @f4(i64 %a, i64 %amt) {
; CHECK-LABEL: f4:
; CHECK: srag %r2, %r2, 0(%r3)
; CHECK: br %r14
  %shift = ashr i64 %a, %amt
  ret i64 %shift
}

; Check shift amounts that have a constant term.
define i64 @f5(i64 %a, i64 %amt) {
; CHECK-LABEL: f5:
; CHECK: srag %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 10
  %shift = ashr i64 %a, %add
  ret i64 %shift
}

; ...and again with a sign-extended 32-bit shift amount.
define i64 @f6(i64 %a, i32 %amt) {
; CHECK-LABEL: f6:
; CHECK: srag %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i32 %amt, 10
  %addext = sext i32 %add to i64
  %shift = ashr i64 %a, %addext
  ret i64 %shift
}

; ...and now with a zero-extended 32-bit shift amount.
define i64 @f7(i64 %a, i32 %amt) {
; CHECK-LABEL: f7:
; CHECK: srag %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i32 %amt, 10
  %addext = zext i32 %add to i64
  %shift = ashr i64 %a, %addext
  ret i64 %shift
}

; Check shift amounts that have the largest in-range constant term.  We could
; mask the amount instead.
define i64 @f8(i64 %a, i64 %amt) {
; CHECK-LABEL: f8:
; CHECK: srag %r2, %r2, 524287(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 524287
  %shift = ashr i64 %a, %add
  ret i64 %shift
}

; Check the next value up, which without masking must use a separate
; addition.
define i64 @f9(i64 %a, i64 %amt) {
; CHECK-LABEL: f9:
; CHECK: a{{g?}}fi %r3, 524288
; CHECK: srag %r2, %r2, 0(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 524288
  %shift = ashr i64 %a, %add
  ret i64 %shift
}

; Check cases where 1 is subtracted from the shift amount.
define i64 @f10(i64 %a, i64 %amt) {
; CHECK-LABEL: f10:
; CHECK: srag %r2, %r2, -1(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 1
  %shift = ashr i64 %a, %sub
  ret i64 %shift
}

; Check the lowest value that can be subtracted from the shift amount.
; Again, we could mask the shift amount instead.
define i64 @f11(i64 %a, i64 %amt) {
; CHECK-LABEL: f11:
; CHECK: srag %r2, %r2, -524288(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 524288
  %shift = ashr i64 %a, %sub
  ret i64 %shift
}

; Check the next value down, which without masking must use a separate
; addition.
define i64 @f12(i64 %a, i64 %amt) {
; CHECK-LABEL: f12:
; CHECK: a{{g?}}fi %r3, -524289
; CHECK: srag %r2, %r2, 0(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 524289
  %shift = ashr i64 %a, %sub
  ret i64 %shift
}

; Check that we don't try to generate "indexed" shifts.
define i64 @f13(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: f13:
; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
; CHECK: srag %r2, %r2, 0({{%r[34]}})
; CHECK: br %r14
  %add = add i64 %b, %c
  %shift = ashr i64 %a, %add
  ret i64 %shift
}

; Check that the shift amount uses an address register.  It cannot be in %r0.
; Only the low 6 bits of the amount matter, so a 32-bit load of the low word
; (offset 4 in big-endian) is sufficient.
define i64 @f14(i64 %a, i64 *%ptr) {
; CHECK-LABEL: f14:
; CHECK: l %r1, 4(%r3)
; CHECK: srag %r2, %r2, 0(%r1)
; CHECK: br %r14
  %amt = load i64 , i64 *%ptr
  %shift = ashr i64 %a, %amt
  ret i64 %shift
}