; Test 32-bit unsigned comparison in which the second operand is a variable.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

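; The second operand is compared with CLR when it is in a register, with CL
; when it is in memory within the 12-bit unsigned displacement range, and
; with CLY when a 20-bit signed displacement is needed.
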
; Check register comparison.
define double @f1(double %a, double %b, i32 %i1, i32 %i2) {
; CHECK: f1:
; CHECK: clr %r2, %r3
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the low end of the CL range.
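; CL has a 12-bit unsigned displacement field, so 0(%r3) is its lowest offset.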
define double @f2(double %a, double %b, i32 %i1, i32 *%ptr) {
; CHECK: f2:
; CHECK: cl %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the aligned CL range.
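; 4092 is the largest word-aligned offset that fits the 12-bit unsigned
; displacement field (0-4095).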
define double @f3(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f3:
; CHECK: cl %r2, 4092(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 1023
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next word up, which should use CLY instead of CL.
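; 4096 no longer fits in 12 bits, so the long-displacement form CLY, with its
; 20-bit signed displacement, must be used.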
define double @f4(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f4:
; CHECK: cly %r2, 4096(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 1024
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the aligned CLY range.
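; 524284 is the largest word-aligned offset in the signed 20-bit displacement
; range (-524288 to 524287).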
define double @f5(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f5:
; CHECK: cly %r2, 524284(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 131071
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
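; 524288 is out of displacement range, so the offset must first be folded
; into the base register, here with AGFI.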
define double @f6(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f6:
; CHECK: agfi %r3, 524288
; CHECK: cl %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 131072
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the high end of the negative aligned CLY range.
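; CL's displacement is unsigned, so a negative offset such as -4 requires CLY.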
define double @f7(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f7:
; CHECK: cly %r2, -4(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 -1
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the low end of the CLY range.
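; -524288 is the most negative value that the 20-bit signed displacement
; field can hold.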
define double @f8(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f8:
; CHECK: cly %r2, -524288(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 -131072
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check the next word down, which needs separate address logic.
; Other sequences besides this one would be OK.
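; -524292 is below the CLY range, so the offset is again folded into the
; base register first.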
define double @f9(double %a, double %b, i32 %i1, i32 *%base) {
; CHECK: f9:
; CHECK: agfi %r3, -524292
; CHECK: cl %r2, 0(%r3)
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %ptr = getelementptr i32 *%base, i64 -131073
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check that CL allows an index.
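; CL is an RX-format instruction and so can encode an index register as well
; as a base register.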
define double @f10(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
; CHECK: f10:
; CHECK: cl %r2, 4092({{%r4,%r3|%r3,%r4}})
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 4092
  %ptr = inttoptr i64 %add2 to i32 *
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}

; Check that CLY allows an index.
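; CLY is the RXY-format counterpart of CL and likewise takes an index.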
define double @f11(double %a, double %b, i32 %i1, i64 %base, i64 %index) {
; CHECK: f11:
; CHECK: cly %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i32 *
  %i2 = load i32 *%ptr
  %cond = icmp ult i32 %i1, %i2
  %res = select i1 %cond, double %a, double %b
  ret double %res
}