; RUN: not llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt
; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -mattr=+atomics,+sign-ext | FileCheck %s

; Test that atomic loads and stores are assembled properly.
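; The first RUN line uses `not` to require that llc fail on this file when the
; +atomics feature is not enabled; the second RUN line compiles it with
; -mattr=+atomics,+sign-ext and verifies the emitted assembly with FileCheck.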

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

; Basic load.

; CHECK-LABEL: load_i32_no_offset:
; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i32 @load_i32_no_offset(i32 *%p) {
  %v = load atomic i32, i32* %p seq_cst, align 4
  ret i32 %v
}

; With an nuw add, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_offset:
; CHECK: i32.atomic.load $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}
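
; Note: a WebAssembly load/store encodes a constant byte offset that is added
; to the base address. Folding the IR add into that offset field is only safe
; when the add is known not to wrap the 32-bit address (hence the nuw flag
; above); the "unfolded" cases below keep an explicit i32.add instead.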

; With an inbounds gep, we can fold an offset.

; CHECK-LABEL: load_i32_with_folded_gep_offset:
; CHECK: i32.atomic.load $push0=, 24($0){{$}}
define i32 @load_i32_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 6
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}

; We can't fold a negative offset though, even with an inbounds gep.

; CHECK-LABEL: load_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_negative_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 -6
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}
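
; (The offset field in a wasm memory access is an unsigned immediate, so a
; negative offset cannot be encoded there; it has to remain an explicit add,
; as checked above.)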

; Without nuw, and even with nsw, we can't fold an offset.

; CHECK-LABEL: load_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}

; Without inbounds, we can't fold a gep offset.

; CHECK-LABEL: load_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.load $push2=, 0($pop1){{$}}
define i32 @load_i32_with_unfolded_gep_offset(i32* %p) {
  %s = getelementptr i32, i32* %p, i32 6
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}

; CHECK-LABEL: load_i64_no_offset:
; CHECK: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @load_i64_no_offset(i64 *%p) {
  %v = load atomic i64, i64* %p seq_cst, align 8
  ret i64 %v
}

; Same as above but with i64.

; CHECK-LABEL: load_i64_with_folded_offset:
; CHECK: i64.atomic.load $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  %t = load atomic i64, i64* %s seq_cst, align 8
  ret i64 %t
}

; Same as above but with i64.

; CHECK-LABEL: load_i64_with_folded_gep_offset:
; CHECK: i64.atomic.load $push0=, 24($0){{$}}
define i64 @load_i64_with_folded_gep_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 3
  %t = load atomic i64, i64* %s seq_cst, align 8
  ret i64 %t
}

; Same as above but with i64.

; CHECK-LABEL: load_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_negative_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 -3
  %t = load atomic i64, i64* %s seq_cst, align 8
  ret i64 %t
}

; Same as above but with i64.

; CHECK-LABEL: load_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  %t = load atomic i64, i64* %s seq_cst, align 8
  ret i64 %t
}

; Same as above but with i64.

; CHECK-LABEL: load_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.load $push2=, 0($pop1){{$}}
define i64 @load_i64_with_unfolded_gep_offset(i64* %p) {
  %s = getelementptr i64, i64* %p, i32 3
  %t = load atomic i64, i64* %s seq_cst, align 8
  ret i64 %t
}

; CHECK-LABEL: load_i32_with_folded_or_offset:
; CHECK: i32.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
; CHECK-NEXT: i32.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i32 @load_i32_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  %t1 = load atomic i8, i8* %arrayidx seq_cst, align 8
  %conv = sext i8 %t1 to i32
  ret i32 %conv
}
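
; Note: the `and %x, -4` clears the low two bits, so adding the small index
; becomes an `or` of disjoint bits (the "or_is_add" pattern), which the
; backend can still fold into the access's constant offset field.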

; Same as above but with store.

; CHECK-LABEL: store_i32_no_offset:
; CHECK-NEXT: .param i32, i32{{$}}
; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_i32_no_offset(i32 *%p, i32 %v) {
  store atomic i32 %v, i32* %p seq_cst, align 4
  ret void
}

; Same as above but with store.

; CHECK-LABEL: store_i32_with_folded_offset:
; CHECK: i32.atomic.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  store atomic i32 0, i32* %s seq_cst, align 4
  ret void
}

; Same as above but with store.

; CHECK-LABEL: store_i32_with_folded_gep_offset:
; CHECK: i32.atomic.store 24($0), $pop0{{$}}
define void @store_i32_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 6
  store atomic i32 0, i32* %s seq_cst, align 4
  ret void
}

; Same as above but with store.

; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 -6
  store atomic i32 0, i32* %s seq_cst, align 4
  ret void
}

; Same as above but with store.

; CHECK-LABEL: store_i32_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  store atomic i32 0, i32* %s seq_cst, align 4
  ret void
}

; Same as above but with store.

; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
define void @store_i32_with_unfolded_gep_offset(i32* %p) {
  %s = getelementptr i32, i32* %p, i32 6
  store atomic i32 0, i32* %s seq_cst, align 4
  ret void
}

; Same as above but with an i64 store.

; CHECK-LABEL: store_i64_no_offset:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @store_i64_no_offset(i64 *%p, i64 %v) {
  store atomic i64 %v, i64* %p seq_cst, align 8
  ret void
}

; Same as above but with an i64 store.

; CHECK-LABEL: store_i64_with_folded_offset:
; CHECK: i64.atomic.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  store atomic i64 0, i64* %s seq_cst, align 8
  ret void
}

; Same as above but with an i64 store.

; CHECK-LABEL: store_i64_with_folded_gep_offset:
; CHECK: i64.atomic.store 24($0), $pop0{{$}}
define void @store_i64_with_folded_gep_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 3
  store atomic i64 0, i64* %s seq_cst, align 8
  ret void
}

; Same as above but with an i64 store.

; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
; CHECK: i32.const $push0=, -24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
  %s = getelementptr inbounds i64, i64* %p, i32 -3
  store atomic i64 0, i64* %s seq_cst, align 8
  ret void
}

; Same as above but with an i64 store.

; CHECK-LABEL: store_i64_with_unfolded_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_offset(i64* %p) {
  %q = ptrtoint i64* %p to i32
  %r = add nsw i32 %q, 24
  %s = inttoptr i32 %r to i64*
  store atomic i64 0, i64* %s seq_cst, align 8
  ret void
}

; Same as above but with an i64 store.

; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
; CHECK: i32.const $push0=, 24{{$}}
; CHECK: i32.add $push1=, $0, $pop0{{$}}
; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
define void @store_i64_with_unfolded_gep_offset(i64* %p) {
  %s = getelementptr i64, i64* %p, i32 3
  store atomic i64 0, i64* %s seq_cst, align 8
  ret void
}

; When loading from a fixed address, materialize a zero.

; CHECK-LABEL: load_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load $push1=, 42($pop0){{$}}
define i32 @load_i32_from_numeric_address() {
  %s = inttoptr i32 42 to i32*
  %t = load atomic i32, i32* %s seq_cst, align 4
  ret i32 %t
}


; CHECK-LABEL: load_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load $push1=, gv($pop0){{$}}
@gv = global i32 0
define i32 @load_i32_from_global_address() {
  %t = load atomic i32, i32* @gv seq_cst, align 4
  ret i32 %t
}
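
; Note: both the fixed numeric address and the global's address end up encoded
; in the access's constant offset field, with an i32.const 0 materialized as
; the base operand.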

; CHECK-LABEL: store_i32_to_numeric_address:
; CHECK-NEXT: i32.const $push0=, 0{{$}}
; CHECK-NEXT: i32.const $push1=, 0{{$}}
; CHECK-NEXT: i32.atomic.store 42($pop0), $pop1{{$}}
define void @store_i32_to_numeric_address() {
  %s = inttoptr i32 42 to i32*
  store atomic i32 0, i32* %s seq_cst, align 4
  ret void
}

; CHECK-LABEL: store_i32_to_global_address:
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.const $push1=, 0{{$}}
; CHECK: i32.atomic.store gv($pop0), $pop1{{$}}
define void @store_i32_to_global_address() {
  store atomic i32 0, i32* @gv seq_cst, align 4
  ret void
}

; Fold an offset into a sign-extending load.

; CHECK-LABEL: load_i8_s_with_folded_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @load_i8_s_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = load atomic i8, i8* %s seq_cst, align 1
  %u = sext i8 %t to i32
  ret i32 %u
}

; Fold a gep offset into a sign-extending load.

; CHECK-LABEL: load_i8_s_with_folded_gep_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
; CHECK-NEXT: i32.extend8_s $push1=, $pop0
define i32 @load_i8_s_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = load atomic i8, i8* %s seq_cst, align 1
  %u = sext i8 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_i16_s_i64_with_folded_gep_offset:
; CHECK: i64.atomic.load16_u $push0=, 6($0){{$}}
define i64 @load_i16_s_i64_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 3
  %t = load atomic i16, i16* %s seq_cst, align 2
  %u = zext i16 %t to i64
  ret i64 %u
}

; CHECK-LABEL: load_i64_with_folded_or_offset:
; CHECK: i64.atomic.load8_u $push[[R1:[0-9]+]]=, 2($pop{{[0-9]+}}){{$}}
; CHECK-NEXT: i64.extend8_s $push{{[0-9]+}}=, $pop[[R1]]{{$}}
define i64 @load_i64_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %t0 = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %t0, i32 2
  %t1 = load atomic i8, i8* %arrayidx seq_cst, align 8
  %conv = sext i8 %t1 to i64
  ret i64 %conv
}


; Fold an offset into a zero-extending load.

; CHECK-LABEL: load_i16_u_with_folded_offset:
; CHECK: i32.atomic.load16_u $push0=, 24($0){{$}}
define i32 @load_i16_u_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i16*
  %t = load atomic i16, i16* %s seq_cst, align 2
  %u = zext i16 %t to i32
  ret i32 %u
}

; Fold a gep offset into a zero-extending load.

; CHECK-LABEL: load_i8_u_with_folded_gep_offset:
; CHECK: i32.atomic.load8_u $push0=, 24($0){{$}}
define i32 @load_i8_u_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = load atomic i8, i8* %s seq_cst, align 1
  %u = zext i8 %t to i32
  ret i32 %u
}


; When loading from a fixed address, materialize a zero.
; As above but with extending load.

; CHECK-LABEL: load_zext_i32_from_numeric_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load16_u $push1=, 42($pop0){{$}}
define i32 @load_zext_i32_from_numeric_address() {
  %s = inttoptr i32 42 to i16*
  %t = load atomic i16, i16* %s seq_cst, align 2
  %u = zext i16 %t to i32
  ret i32 %u
}

; CHECK-LABEL: load_sext_i32_from_global_address
; CHECK: i32.const $push0=, 0{{$}}
; CHECK: i32.atomic.load8_u $push1=, gv8($pop0){{$}}
; CHECK-NEXT: i32.extend8_s $push2=, $pop1{{$}}
@gv8 = global i8 0
define i32 @load_sext_i32_from_global_address() {
  %t = load atomic i8, i8* @gv8 seq_cst, align 1
  %u = sext i8 %t to i32
  ret i32 %u
}

; Fold an offset into a sign-extending load.
; As above but 32 extended to 64 bit.
; CHECK-LABEL: load_i32_i64_s_with_folded_offset:
; CHECK: i32.atomic.load $push0=, 24($0){{$}}
; CHECK-NEXT: i64.extend_s/i32 $push1=, $pop0{{$}}
define i64 @load_i32_i64_s_with_folded_offset(i32* %p) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = load atomic i32, i32* %s seq_cst, align 4
  %u = sext i32 %t to i64
  ret i64 %u
}

; Fold a gep offset into a zero-extending load.
; As above but 32 extended to 64 bit.
; CHECK-LABEL: load_i32_i64_u_with_folded_gep_offset:
; CHECK: i64.atomic.load32_u $push0=, 96($0){{$}}
define i64 @load_i32_i64_u_with_folded_gep_offset(i32* %p) {
  %s = getelementptr inbounds i32, i32* %p, i32 24
  %t = load atomic i32, i32* %s seq_cst, align 4
  %u = zext i32 %t to i64
  ret i64 %u
}
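
; (A gep index is scaled by the element size, so index 24 on an i32 pointer
; becomes the 96-byte immediate offset checked above.)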

; An i8 return value should test anyext loads.
; CHECK-LABEL: ldi8_a1:
; CHECK: i32.atomic.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i8 @ldi8_a1(i8 *%p) {
  %v = load atomic i8, i8* %p seq_cst, align 1
  ret i8 %v
}
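
; Because the upper bits of an anyext load are unspecified, no extend
; instruction is expected after the load8_u above. As an illustrative sketch
; only (not covered by any CHECK lines), the same pattern with an i16 return
; value would look like:
;
; define i16 @ldi16_a2(i16 *%p) {
;   %v = load atomic i16, i16* %p seq_cst, align 2
;   ret i16 %v
; }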

; Fold an offset into a truncating store.

; CHECK-LABEL: store_i8_with_folded_offset:
; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
define void @store_i8_with_folded_offset(i8* %p) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  store atomic i8 0, i8* %s seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i16_with_folded_offset:
; CHECK: i32.atomic.store16 24($0), $pop0{{$}}
define void @store_i16_with_folded_offset(i16* %p) {
  %q = ptrtoint i16* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i16*
  store atomic i16 0, i16* %s seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i8_i64_with_folded_offset:
; CHECK: i64.atomic.store8 24($0), $1{{$}}
define void @store_i8_i64_with_folded_offset(i8* %p, i64 %v) {
  %q = ptrtoint i8* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i8*
  %t = trunc i64 %v to i8
  store atomic i8 %t, i8* %s seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i16_i64_with_folded_offset:
; CHECK: i64.atomic.store16 24($0), $1{{$}}
define void @store_i16_i64_with_folded_offset(i16* %p, i64 %v) {
  %q = ptrtoint i16* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i16*
  %t = trunc i64 %v to i16
  store atomic i16 %t, i16* %s seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i32_i64_with_folded_offset:
; CHECK: i64.atomic.store32 24($0), $1{{$}}
define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
  %q = ptrtoint i32* %p to i32
  %r = add nuw i32 %q, 24
  %s = inttoptr i32 %r to i32*
  %t = trunc i64 %v to i32
  store atomic i32 %t, i32* %s seq_cst, align 4
  ret void
}

; Fold a gep offset into a truncating store.

; CHECK-LABEL: store_i8_with_folded_gep_offset:
; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
define void @store_i8_with_folded_gep_offset(i8* %p) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  store atomic i8 0, i8* %s seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i16_with_folded_gep_offset:
; CHECK: i32.atomic.store16 48($0), $pop0{{$}}
define void @store_i16_with_folded_gep_offset(i16* %p) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  store atomic i16 0, i16* %s seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i8_i64_with_folded_gep_offset:
; CHECK: i64.atomic.store8 24($0), $1{{$}}
define void @store_i8_i64_with_folded_gep_offset(i8* %p, i64 %v) {
  %s = getelementptr inbounds i8, i8* %p, i32 24
  %t = trunc i64 %v to i8
  store atomic i8 %t, i8* %s seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
; CHECK: i64.atomic.store16 48($0), $1{{$}}
define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
  %s = getelementptr inbounds i16, i16* %p, i32 24
  %t = trunc i64 %v to i16
  store atomic i16 %t, i16* %s seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i32_i64_with_folded_gep_offset:
; CHECK: i64.atomic.store32 96($0), $1{{$}}
define void @store_i32_i64_with_folded_gep_offset(i32* %p, i64 %v) {
  %s = getelementptr inbounds i32, i32* %p, i32 24
  %t = trunc i64 %v to i32
  store atomic i32 %t, i32* %s seq_cst, align 4
  ret void
}
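
; Note: wasm's atomic.store8/16/32 write only the low bits of their operand,
; so the explicit trunc in the i64 variants above folds away and the i64
; value register is stored directly.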

; Fold an or_is_add pattern based offset into a truncating store.

; CHECK-LABEL: store_i8_with_folded_or_offset:
; CHECK: i32.atomic.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
define void @store_i8_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
  store atomic i8 0, i8* %arrayidx seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i16_with_folded_or_offset:
; CHECK: i32.atomic.store16 4($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
define void @store_i16_with_folded_or_offset(i32 %x) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i16*
  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
  store atomic i16 0, i16* %arrayidx seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
; CHECK: i64.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i8*
  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
  %t = trunc i64 %v to i8
  store atomic i8 %t, i8* %arrayidx seq_cst, align 1
  ret void
}

; CHECK-LABEL: store_i16_i64_with_folded_or_offset:
; CHECK: i64.atomic.store16 4($pop{{[0-9]+}}), $1{{$}}
define void @store_i16_i64_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i16*
  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
  %t = trunc i64 %v to i16
  store atomic i16 %t, i16* %arrayidx seq_cst, align 2
  ret void
}

; CHECK-LABEL: store_i32_i64_with_folded_or_offset:
; CHECK: i64.atomic.store32 8($pop{{[0-9]+}}), $1{{$}}
define void @store_i32_i64_with_folded_or_offset(i32 %x, i64 %v) {
  %and = and i32 %x, -4
  %p = inttoptr i32 %and to i32*
  %arrayidx = getelementptr inbounds i32, i32* %p, i32 2
  %t = trunc i64 %v to i32
  store atomic i32 %t, i32* %arrayidx seq_cst, align 4
  ret void
}