; Test 64-bit unsigned division and remainder.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @foo()
; Test register division. The result is in the second of the two registers.
define void @f1(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
; CHECK-LABEL: f1:
; CHECK-NOT: %r3
; CHECK: {{llill|lghi}} %r2, 0
; CHECK-NOT: %r3
; CHECK: dlgr %r2, %r4
; CHECK: stg %r3, 0(%r5)
; CHECK: br %r14
  %div = udiv i64 %a, %b
  store i64 %div, i64 *%dest
  ret void
}
20
; Test register remainder. The result is in the first of the two registers.
define void @f2(i64 %dummy, i64 %a, i64 %b, i64 *%dest) {
; CHECK-LABEL: f2:
; CHECK-NOT: %r3
; CHECK: {{llill|lghi}} %r2, 0
; CHECK-NOT: %r3
; CHECK: dlgr %r2, %r4
; CHECK: stg %r2, 0(%r5)
; CHECK: br %r14
  %rem = urem i64 %a, %b
  store i64 %rem, i64 *%dest
  ret void
}
34
; Test that division and remainder use a single instruction.
define i64 @f3(i64 %dummy1, i64 %a, i64 %b) {
; CHECK-LABEL: f3:
; CHECK-NOT: %r3
; CHECK: {{llill|lghi}} %r2, 0
; CHECK-NOT: %r3
; CHECK: dlgr %r2, %r4
; CHECK-NOT: dlgr
; CHECK: ogr %r2, %r3
; CHECK: br %r14
  %div = udiv i64 %a, %b
  %rem = urem i64 %a, %b
  %or = or i64 %rem, %div
  ret i64 %or
}
50
; Test memory division with no displacement.
define void @f4(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r3
; CHECK: {{llill|lghi}} %r2, 0
; CHECK-NOT: %r3
; CHECK: dlg %r2, 0(%r4)
; CHECK: stg %r3, 0(%r5)
; CHECK: br %r14
  %b = load i64 , i64 *%src
  %div = udiv i64 %a, %b
  store i64 %div, i64 *%dest
  ret void
}
65
; Test memory remainder with no displacement.
define void @f5(i64 %dummy, i64 %a, i64 *%src, i64 *%dest) {
; CHECK-LABEL: f5:
; CHECK-NOT: %r3
; CHECK: {{llill|lghi}} %r2, 0
; CHECK-NOT: %r3
; CHECK: dlg %r2, 0(%r4)
; CHECK: stg %r2, 0(%r5)
; CHECK: br %r14
  %b = load i64 , i64 *%src
  %rem = urem i64 %a, %b
  store i64 %rem, i64 *%dest
  ret void
}
80
; Test both memory division and memory remainder.
define i64 @f6(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f6:
; CHECK-NOT: %r3
; CHECK: {{llill|lghi}} %r2, 0
; CHECK-NOT: %r3
; CHECK: dlg %r2, 0(%r4)
; CHECK-NOT: {{dlg|dlgr}}
; CHECK: ogr %r2, %r3
; CHECK: br %r14
  %b = load i64 , i64 *%src
  %div = udiv i64 %a, %b
  %rem = urem i64 %a, %b
  %or = or i64 %rem, %div
  ret i64 %or
}
97
; Check the high end of the DLG range.
define i64 @f7(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f7:
; CHECK: dlg %r2, 524280(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %b = load i64 , i64 *%ptr
  %rem = urem i64 %a, %b
  ret i64 %rem
}
108
; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f8(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f8:
; CHECK: agfi %r4, 524288
; CHECK: dlg %r2, 0(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %b = load i64 , i64 *%ptr
  %rem = urem i64 %a, %b
  ret i64 %rem
}
121
; Check the high end of the negative aligned DLG range.
define i64 @f9(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f9:
; CHECK: dlg %r2, -8(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %b = load i64 , i64 *%ptr
  %rem = urem i64 %a, %b
  ret i64 %rem
}
132
; Check the low end of the DLG range.
define i64 @f10(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f10:
; CHECK: dlg %r2, -524288(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %b = load i64 , i64 *%ptr
  %rem = urem i64 %a, %b
  ret i64 %rem
}
143
; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f11(i64 %dummy, i64 %a, i64 *%src) {
; CHECK-LABEL: f11:
; CHECK: agfi %r4, -524296
; CHECK: dlg %r2, 0(%r4)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %b = load i64 , i64 *%ptr
  %rem = urem i64 %a, %b
  ret i64 %rem
}
156
; Check that DLG allows an index.
define i64 @f12(i64 %dummy, i64 %a, i64 %src, i64 %index) {
; CHECK-LABEL: f12:
; CHECK: dlg %r2, 524287(%r5,%r4)
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %b = load i64 , i64 *%ptr
  %rem = urem i64 %a, %b
  ret i64 %rem
}
Richard Sandiforded1fab62013-07-03 10:10:02 +0000169
; Check that divisions of spilled values can use DLG rather than DLGR.
define i64 @f13(i64 *%ptr0) {
; CHECK-LABEL: f13:
; CHECK: brasl %r14, foo@PLT
; CHECK: dlg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
  %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
  %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
  %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
  %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
  %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
  %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
  %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
  %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
  %ptr10 = getelementptr i64, i64 *%ptr0, i64 20

  %val0 = load i64 , i64 *%ptr0
  %val1 = load i64 , i64 *%ptr1
  %val2 = load i64 , i64 *%ptr2
  %val3 = load i64 , i64 *%ptr3
  %val4 = load i64 , i64 *%ptr4
  %val5 = load i64 , i64 *%ptr5
  %val6 = load i64 , i64 *%ptr6
  %val7 = load i64 , i64 *%ptr7
  %val8 = load i64 , i64 *%ptr8
  %val9 = load i64 , i64 *%ptr9
  %val10 = load i64 , i64 *%ptr10

  %ret = call i64 @foo()

  %div0 = udiv i64 %ret, %val0
  %div1 = udiv i64 %div0, %val1
  %div2 = udiv i64 %div1, %val2
  %div3 = udiv i64 %div2, %val3
  %div4 = udiv i64 %div3, %val4
  %div5 = udiv i64 %div4, %val5
  %div6 = udiv i64 %div5, %val6
  %div7 = udiv i64 %div6, %val7
  %div8 = udiv i64 %div7, %val8
  %div9 = udiv i64 %div8, %val9
  %div10 = udiv i64 %div9, %val10

  ret i64 %div10
}