; Test 32-bit square root.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare float @llvm.sqrt.f32(float)
declare float @sqrtf(float)

; Check register square root.
define float @f1(float %val) {
; CHECK-LABEL: f1:
; CHECK: sqebr %f0, %f0
; CHECK: br %r14
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}

; Check the low end of the SQEB range.
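; (SQEB takes a 12-bit unsigned displacement, so displacement 0 is the
; minimum.)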
define float @f2(float *%ptr) {
; CHECK-LABEL: f2:
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
  %val = load float, float *%ptr
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}

; Check the high end of the aligned SQEB range.
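; (4092 = 1023 * 4 is the largest 4-byte-aligned offset that still fits in
; the 12-bit displacement field.)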
define float @f3(float *%base) {
; CHECK-LABEL: f3:
; CHECK: sqeb %f0, 4092(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1023
  %val = load float, float *%ptr
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}

; Check the next word up, which needs separate address logic.
; Other sequences besides this one would be OK.
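; (Offset 4096 no longer fits in the 12-bit displacement field, so the base
; register has to be adjusted first, here with AGHI.)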
define float @f4(float *%base) {
; CHECK-LABEL: f4:
; CHECK: aghi %r2, 4096
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 1024
  %val = load float, float *%ptr
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}

; Check negative displacements, which also need separate address logic.
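; (The displacement field is unsigned, so -4 cannot be encoded directly.)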
define float @f5(float *%base) {
; CHECK-LABEL: f5:
; CHECK: aghi %r2, -4
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr float, float *%base, i64 -1
  %val = load float, float *%ptr
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}

; Check that SQEB allows indices.
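; (SQEB uses base + index + displacement addressing: the index is scaled by
; the element size with SLLG and the constant offset 100 * 4 = 400 becomes
; the displacement.)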
define float @f6(float *%base, i64 %index) {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r3, 2
; CHECK: sqeb %f0, 400(%r1,%r2)
; CHECK: br %r14
  %ptr1 = getelementptr float, float *%base, i64 %index
  %ptr2 = getelementptr float, float *%ptr1, i64 100
  %val = load float, float *%ptr2
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}

; Test a case where we spill the source of at least one SQEBR. We want
; to use SQEB if possible.
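; (The 17 volatile loads keep more values live than the 16 FP registers
; %f0-%f15 can hold, so at least one value should be spilled; the square
; root of that value can then be taken directly from the spill slot on the
; stack with SQEB instead of reloading it for SQEBR.)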
define void @f7(float *%ptr) {
; CHECK-LABEL: f7:
; CHECK: sqeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
  %val0 = load volatile float, float *%ptr
  %val1 = load volatile float, float *%ptr
  %val2 = load volatile float, float *%ptr
  %val3 = load volatile float, float *%ptr
  %val4 = load volatile float, float *%ptr
  %val5 = load volatile float, float *%ptr
  %val6 = load volatile float, float *%ptr
  %val7 = load volatile float, float *%ptr
  %val8 = load volatile float, float *%ptr
  %val9 = load volatile float, float *%ptr
  %val10 = load volatile float, float *%ptr
  %val11 = load volatile float, float *%ptr
  %val12 = load volatile float, float *%ptr
  %val13 = load volatile float, float *%ptr
  %val14 = load volatile float, float *%ptr
  %val15 = load volatile float, float *%ptr
  %val16 = load volatile float, float *%ptr

  %sqrt0 = call float @llvm.sqrt.f32(float %val0)
  %sqrt1 = call float @llvm.sqrt.f32(float %val1)
  %sqrt2 = call float @llvm.sqrt.f32(float %val2)
  %sqrt3 = call float @llvm.sqrt.f32(float %val3)
  %sqrt4 = call float @llvm.sqrt.f32(float %val4)
  %sqrt5 = call float @llvm.sqrt.f32(float %val5)
  %sqrt6 = call float @llvm.sqrt.f32(float %val6)
  %sqrt7 = call float @llvm.sqrt.f32(float %val7)
  %sqrt8 = call float @llvm.sqrt.f32(float %val8)
  %sqrt9 = call float @llvm.sqrt.f32(float %val9)
  %sqrt10 = call float @llvm.sqrt.f32(float %val10)
  %sqrt11 = call float @llvm.sqrt.f32(float %val11)
  %sqrt12 = call float @llvm.sqrt.f32(float %val12)
  %sqrt13 = call float @llvm.sqrt.f32(float %val13)
  %sqrt14 = call float @llvm.sqrt.f32(float %val14)
  %sqrt15 = call float @llvm.sqrt.f32(float %val15)
  %sqrt16 = call float @llvm.sqrt.f32(float %val16)

  store volatile float %val0, float *%ptr
  store volatile float %val1, float *%ptr
  store volatile float %val2, float *%ptr
  store volatile float %val3, float *%ptr
  store volatile float %val4, float *%ptr
  store volatile float %val5, float *%ptr
  store volatile float %val6, float *%ptr
  store volatile float %val7, float *%ptr
  store volatile float %val8, float *%ptr
  store volatile float %val9, float *%ptr
  store volatile float %val10, float *%ptr
  store volatile float %val11, float *%ptr
  store volatile float %val12, float *%ptr
  store volatile float %val13, float *%ptr
  store volatile float %val14, float *%ptr
  store volatile float %val15, float *%ptr
  store volatile float %val16, float *%ptr

  store volatile float %sqrt0, float *%ptr
  store volatile float %sqrt1, float *%ptr
  store volatile float %sqrt2, float *%ptr
  store volatile float %sqrt3, float *%ptr
  store volatile float %sqrt4, float *%ptr
  store volatile float %sqrt5, float *%ptr
  store volatile float %sqrt6, float *%ptr
  store volatile float %sqrt7, float *%ptr
  store volatile float %sqrt8, float *%ptr
  store volatile float %sqrt9, float *%ptr
  store volatile float %sqrt10, float *%ptr
  store volatile float %sqrt11, float *%ptr
  store volatile float %sqrt12, float *%ptr
  store volatile float %sqrt13, float *%ptr
  store volatile float %sqrt14, float *%ptr
  store volatile float %sqrt15, float *%ptr
  store volatile float %sqrt16, float *%ptr

  ret void
}

; Check that a call to the normal sqrtf function is lowered.
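; (The call is expanded inline to SQEBR; the CEBR self-comparison tests the
; result for NaN, and only the unordered case falls through to the real
; sqrtf call, presumably so that errno is set for invalid inputs.)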
define float @f8(float %dummy, float %val) {
; CHECK-LABEL: f8:
; CHECK: sqebr %f0, %f2
; CHECK: cebr %f0, %f0
; CHECK: bnor %r14
; CHECK: ler %f0, %f2
; CHECK: jg sqrtf@PLT
  %res = tail call float @sqrtf(float %val)
  ret float %res
}