; Test 32-bit ANDs in which the second operand is variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s

declare i32 @foo()

; Check NR.
define i32 @f1(i32 %a, i32 %b) {
; CHECK-LABEL: f1:
; CHECK: nr %r2, %r3
; CHECK: br %r14
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the low end of the N range.
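; The N displacement is a 12-bit unsigned field, so offset 0 is the lowest it can encode.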
define i32 @f2(i32 %a, i32 *%src) {
; CHECK-LABEL: f2:
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
  %b = load i32, i32 *%src
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the high end of the aligned N range.
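; 4092 is the largest word-aligned offset that still fits the 12-bit unsigned displacement.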
define i32 @f3(i32 %a, i32 *%src) {
; CHECK-LABEL: f3:
; CHECK: n %r2, 4092(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 1023
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the next word up, which should use NY instead of N.
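; 4096 no longer fits in 12 bits, so the long-displacement NY form is needed.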
define i32 @f4(i32 %a, i32 *%src) {
; CHECK-LABEL: f4:
; CHECK: ny %r2, 4096(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 1024
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the high end of the aligned NY range.
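; 524284 is the largest word-aligned offset within NY's 20-bit signed displacement range.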
define i32 @f5(i32 %a, i32 *%src) {
; CHECK-LABEL: f5:
; CHECK: ny %r2, 524284(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 131071
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
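; 524288 is out of range for both N and NY, so the base register has to be adjusted first.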
define i32 @f6(i32 %a, i32 *%src) {
; CHECK-LABEL: f6:
; CHECK: agfi %r3, 524288
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 131072
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the high end of the negative aligned NY range.
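; Negative offsets only fit NY's signed displacement, so -4 must use NY.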
define i32 @f7(i32 %a, i32 *%src) {
; CHECK-LABEL: f7:
; CHECK: ny %r2, -4(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 -1
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the low end of the NY range.
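; -524288 is the smallest value the 20-bit signed displacement can encode.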
define i32 @f8(i32 %a, i32 *%src) {
; CHECK-LABEL: f8:
; CHECK: ny %r2, -524288(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 -131072
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
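; -524292 is below the NY range, so again the base register has to be adjusted first.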
define i32 @f9(i32 %a, i32 *%src) {
; CHECK-LABEL: f9:
; CHECK: agfi %r3, -524292
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
  %ptr = getelementptr i32, i32 *%src, i64 -131073
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check that N allows an index.
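; The base, index and 4092 displacement should all be folded into a single N.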
define i32 @f10(i32 %a, i64 %src, i64 %index) {
; CHECK-LABEL: f10:
; CHECK: n %r2, 4092({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check that NY allows an index.
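; Likewise for NY once the 4096 offset forces the long-displacement form.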
define i32 @f11(i32 %a, i64 %src, i64 %index) {
; CHECK-LABEL: f11:
; CHECK: ny %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i32 *
  %b = load i32, i32 *%ptr
  %and = and i32 %a, %b
  ret i32 %and
}

; Check that ANDs of spilled values can use N rather than NR.
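; Loading ten values across the call to @foo forces some of them to be spilled;
; the ANDs of the reloaded values should use N on the stack slot instead of a
; separate reload followed by NR.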
define i32 @f12(i32 *%ptr0) {
; CHECK-LABEL: f12:
; CHECK: brasl %r14, foo@PLT
; CHECK: n %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
  %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
  %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
  %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
  %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
  %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
  %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
  %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
  %ptr9 = getelementptr i32, i32 *%ptr0, i64 18

  %val0 = load i32, i32 *%ptr0
  %val1 = load i32, i32 *%ptr1
  %val2 = load i32, i32 *%ptr2
  %val3 = load i32, i32 *%ptr3
  %val4 = load i32, i32 *%ptr4
  %val5 = load i32, i32 *%ptr5
  %val6 = load i32, i32 *%ptr6
  %val7 = load i32, i32 *%ptr7
  %val8 = load i32, i32 *%ptr8
  %val9 = load i32, i32 *%ptr9

  %ret = call i32 @foo()

  %and0 = and i32 %ret, %val0
  %and1 = and i32 %and0, %val1
  %and2 = and i32 %and1, %val2
  %and3 = and i32 %and2, %val3
  %and4 = and i32 %and3, %val4
  %and5 = and i32 %and4, %val5
  %and6 = and i32 %and5, %val6
  %and7 = and i32 %and6, %val7
  %and8 = and i32 %and7, %val8
  %and9 = and i32 %and8, %val9

  ret i32 %and9
}