; Test 16-bit conditional stores that are presented as selects.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare void @foo(i16 *)

; Test the simple case, with the loaded value first.
define void @f1(i16 *%ptr, i16 %alt, i32 %limit) {
; CHECK-LABEL: f1:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; ...and with the loaded value second
define void @f2(i16 *%ptr, i16 %alt, i32 %limit) {
; CHECK-LABEL: f2:
; CHECK-NOT: %r2
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %alt, i16 %orig
  store i16 %res, i16 *%ptr
  ret void
}

; Test cases where the value is explicitly sign-extended to 32 bits, with the
; loaded value first.
define void @f3(i16 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f3:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = sext i16 %orig to i32
  %res = select i1 %cond, i32 %ext, i32 %alt
  %trunc = trunc i32 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; ...and with the loaded value second
define void @f4(i16 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f4:
; CHECK-NOT: %r2
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = sext i16 %orig to i32
  %res = select i1 %cond, i32 %alt, i32 %ext
  %trunc = trunc i32 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; Test cases where the value is explicitly zero-extended to 32 bits, with the
; loaded value first.
define void @f5(i16 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f5:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = zext i16 %orig to i32
  %res = select i1 %cond, i32 %ext, i32 %alt
  %trunc = trunc i32 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; ...and with the loaded value second
define void @f6(i16 *%ptr, i32 %alt, i32 %limit) {
; CHECK-LABEL: f6:
; CHECK-NOT: %r2
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = zext i16 %orig to i32
  %res = select i1 %cond, i32 %alt, i32 %ext
  %trunc = trunc i32 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; Test cases where the value is explicitly sign-extended to 64 bits, with the
; loaded value first.
define void @f7(i16 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f7:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = sext i16 %orig to i64
  %res = select i1 %cond, i64 %ext, i64 %alt
  %trunc = trunc i64 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; ...and with the loaded value second
define void @f8(i16 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f8:
; CHECK-NOT: %r2
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = sext i16 %orig to i64
  %res = select i1 %cond, i64 %alt, i64 %ext
  %trunc = trunc i64 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; Test cases where the value is explicitly zero-extended to 64 bits, with the
; loaded value first.
define void @f9(i16 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f9:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = zext i16 %orig to i64
  %res = select i1 %cond, i64 %ext, i64 %alt
  %trunc = trunc i64 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; ...and with the loaded value second
define void @f10(i16 *%ptr, i64 %alt, i32 %limit) {
; CHECK-LABEL: f10:
; CHECK-NOT: %r2
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %ext = zext i16 %orig to i64
  %res = select i1 %cond, i64 %alt, i64 %ext
  %trunc = trunc i64 %res to i16
  store i16 %trunc, i16 *%ptr
  ret void
}

; Check the high end of the aligned STH range.
define void @f11(i16 *%base, i16 %alt, i32 %limit) {
; CHECK-LABEL: f11:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sth %r3, 4094(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %ptr = getelementptr i16 *%base, i64 2047
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check the next halfword up, which should use STHY instead of STH.
define void @f12(i16 *%base, i16 %alt, i32 %limit) {
; CHECK-LABEL: f12:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sthy %r3, 4096(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %ptr = getelementptr i16 *%base, i64 2048
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check the high end of the aligned STHY range.
define void @f13(i16 *%base, i16 %alt, i32 %limit) {
; CHECK-LABEL: f13:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sthy %r3, 524286(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %ptr = getelementptr i16 *%base, i64 262143
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check the next halfword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f14(i16 *%base, i16 %alt, i32 %limit) {
; CHECK-LABEL: f14:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: agfi %r2, 524288
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %ptr = getelementptr i16 *%base, i64 262144
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check the low end of the STHY range.
define void @f15(i16 *%base, i16 %alt, i32 %limit) {
; CHECK-LABEL: f15:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sthy %r3, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %ptr = getelementptr i16 *%base, i64 -262144
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check the next halfword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define void @f16(i16 *%base, i16 %alt, i32 %limit) {
; CHECK-LABEL: f16:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: agfi %r2, -524290
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %ptr = getelementptr i16 *%base, i64 -262145
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check that STHY allows an index.
define void @f17(i64 %base, i64 %index, i16 %alt, i32 %limit) {
; CHECK-LABEL: f17:
; CHECK-NOT: %r2
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r2
; CHECK: sthy %r4, 4096(%r3,%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
  %add1 = add i64 %base, %index
  %add2 = add i64 %add1, 4096
  %ptr = inttoptr i64 %add2 to i16 *
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; Check that volatile loads are not matched.
define void @f18(i16 *%ptr, i16 %alt, i32 %limit) {
; CHECK-LABEL: f18:
; CHECK: lh {{%r[0-5]}}, 0(%r2)
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
; CHECK: sth {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load volatile i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; ...likewise stores. In this case we should have a conditional load into %r3.
define void @f19(i16 *%ptr, i16 %alt, i32 %limit) {
; CHECK-LABEL: f19:
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: lh %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: sth %r3, 0(%r2)
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store volatile i16 %res, i16 *%ptr
  ret void
}

; Check that atomic loads are not matched. The transformation is OK for
; the "unordered" case tested here, but since we don't try to handle atomic
; operations at all in this context, it seems better to assert that than
; to restrict the test to a stronger ordering.
define void @f20(i16 *%ptr, i16 %alt, i32 %limit) {
; FIXME: should use a normal load instead of CS.
; CHECK-LABEL: f20:
; CHECK: cs {{%r[0-9]+}},
; CHECK: jl
; CHECK: {{jl|jnl}} [[LABEL:[^ ]*]]
; CHECK: [[LABEL]]:
; CHECK: sth {{%r[0-9]+}},
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load atomic i16 *%ptr unordered, align 2
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  ret void
}

; ...likewise stores.
define void @f21(i16 *%ptr, i16 %alt, i32 %limit) {
; FIXME: should use a normal store instead of CS.
; CHECK-LABEL: f21:
; CHECK: jhe [[LABEL:[^ ]*]]
; CHECK: lh %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: cs {{%r[0-9]+}},
; CHECK: br %r14
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store atomic i16 %res, i16 *%ptr unordered, align 2
  ret void
}

; Try a frame index base.
define void @f22(i16 %alt, i32 %limit) {
; CHECK-LABEL: f22:
; CHECK: brasl %r14, foo@PLT
; CHECK-NOT: %r15
; CHECK: jl [[LABEL:[^ ]*]]
; CHECK-NOT: %r15
; CHECK: sth {{%r[0-9]+}}, {{[0-9]+}}(%r15)
; CHECK: [[LABEL]]:
; CHECK: brasl %r14, foo@PLT
; CHECK: br %r14
  %ptr = alloca i16
  call void @foo(i16 *%ptr)
  %cond = icmp ult i32 %limit, 42
  %orig = load i16 *%ptr
  %res = select i1 %cond, i16 %orig, i16 %alt
  store i16 %res, i16 *%ptr
  call void @foo(i16 *%ptr)
  ret void
}