; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s

;; i8 acquire loads and release store: expect ldub/stb with membar fences.
; CHECK-LABEL: test_atomic_i8
; CHECK: ldub [%o0]
; CHECK: membar
; CHECK: ldub [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: stb {{.+}}, [%o2]
define i8 @test_atomic_i8(i8* %ptr1, i8* %ptr2, i8* %ptr3) {
entry:
  %0 = load atomic i8, i8* %ptr1 acquire, align 1
  %1 = load atomic i8, i8* %ptr2 acquire, align 1
  %2 = add i8 %0, %1
  store atomic i8 %2, i8* %ptr3 release, align 1
  ret i8 %2
}

;; i16 acquire loads and release store: expect lduh/sth with membar fences.
; CHECK-LABEL: test_atomic_i16
; CHECK: lduh [%o0]
; CHECK: membar
; CHECK: lduh [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: sth {{.+}}, [%o2]
define i16 @test_atomic_i16(i16* %ptr1, i16* %ptr2, i16* %ptr3) {
entry:
  %0 = load atomic i16, i16* %ptr1 acquire, align 2
  %1 = load atomic i16, i16* %ptr2 acquire, align 2
  %2 = add i16 %0, %1
  store atomic i16 %2, i16* %ptr3 release, align 2
  ret i16 %2
}

;; i32 acquire loads and release store: expect ld/st with membar fences.
; CHECK-LABEL: test_atomic_i32
; CHECK: ld [%o0]
; CHECK: membar
; CHECK: ld [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: st {{.+}}, [%o2]
define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
entry:
  %0 = load atomic i32, i32* %ptr1 acquire, align 4
  %1 = load atomic i32, i32* %ptr2 acquire, align 4
  %2 = add i32 %0, %1
  store atomic i32 %2, i32* %ptr3 release, align 4
  ret i32 %2
}

;; i64 acquire loads and release store: expect ldx/stx with membar fences.
; CHECK-LABEL: test_atomic_i64
; CHECK: ldx [%o0]
; CHECK: membar
; CHECK: ldx [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: stx {{.+}}, [%o2]
define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
entry:
  %0 = load atomic i64, i64* %ptr1 acquire, align 8
  %1 = load atomic i64, i64* %ptr2 acquire, align 8
  %2 = add i64 %0, %1
  store atomic i64 %2, i64* %ptr3 release, align 8
  ret i64 %2
}

James Y Knight148a6462016-06-17 18:11:48 +000067;; TODO: the "move %icc" and related instructions are totally
68;; redundant here. There's something weird happening in optimization
69;; of the success value of cmpxchg.
70
;; i8 cmpxchg has no native instruction; it is expanded to a masked
;; 32-bit cas loop on the containing aligned word.
; CHECK-LABEL: test_cmpxchg_i8
; CHECK: and %o1, -4, %o2
; CHECK: mov 3, %o3
; CHECK: andn %o3, %o1, %o1
; CHECK: sll %o1, 3, %o1
; CHECK: mov 255, %o3
; CHECK: sll %o3, %o1, %o5
; CHECK: xor %o5, -1, %o3
; CHECK: mov 123, %o4
; CHECK: ld [%o2], %g2
; CHECK: sll %o4, %o1, %o4
; CHECK: and %o0, 255, %o0
; CHECK: sll %o0, %o1, %o0
; CHECK: andn %g2, %o5, %g2
; CHECK: sethi 0, %o5
; CHECK: [[LABEL1:\.L.*]]:
; CHECK: or %g2, %o4, %g3
; CHECK: or %g2, %o0, %g4
; CHECK: cas [%o2], %g4, %g3
; CHECK: cmp %g3, %g4
; CHECK: mov %o5, %g4
; CHECK: move %icc, 1, %g4
; CHECK: cmp %g4, 0
; CHECK: bne [[LABEL2:\.L.*]]
; CHECK: nop
; CHECK: and %g3, %o3, %g4
; CHECK: cmp %g2, %g4
; CHECK: bne [[LABEL1]]
; CHECK: mov %g4, %g2
; CHECK: [[LABEL2]]:
; CHECK: retl
; CHECK: srl %g3, %o1, %o0
define i8 @test_cmpxchg_i8(i8 %a, i8* %ptr) {
entry:
  %pair = cmpxchg i8* %ptr, i8 %a, i8 123 monotonic monotonic
  %b = extractvalue { i8, i1 } %pair, 0
  ret i8 %b
}

;; i16 cmpxchg likewise expands to a masked 32-bit cas loop.
; CHECK-LABEL: test_cmpxchg_i16

; CHECK: and %o1, -4, %o2
; CHECK: and %o1, 3, %o1
; CHECK: xor %o1, 2, %o1
; CHECK: sll %o1, 3, %o1
; CHECK: sethi 63, %o3
; CHECK: or %o3, 1023, %o4
; CHECK: sll %o4, %o1, %o5
; CHECK: xor %o5, -1, %o3
; CHECK: and %o0, %o4, %o4
; CHECK: ld [%o2], %g2
; CHECK: mov 123, %o0
; CHECK: sll %o0, %o1, %o0
; CHECK: sll %o4, %o1, %o4
; CHECK: andn %g2, %o5, %g2
; CHECK: sethi 0, %o5
; CHECK: [[LABEL1:\.L.*]]:
; CHECK: or %g2, %o0, %g3
; CHECK: or %g2, %o4, %g4
; CHECK: cas [%o2], %g4, %g3
; CHECK: cmp %g3, %g4
; CHECK: mov %o5, %g4
; CHECK: move %icc, 1, %g4
; CHECK: cmp %g4, 0
; CHECK: bne [[LABEL2:\.L.*]]
; CHECK: nop
; CHECK: and %g3, %o3, %g4
; CHECK: cmp %g2, %g4
; CHECK: bne [[LABEL1]]
; CHECK: mov %g4, %g2
; CHECK: [[LABEL2]]:
; CHECK: retl
; CHECK: srl %g3, %o1, %o0
define i16 @test_cmpxchg_i16(i16 %a, i16* %ptr) {
entry:
  %pair = cmpxchg i16* %ptr, i16 %a, i16 123 monotonic monotonic
  %b = extractvalue { i16, i1 } %pair, 0
  ret i16 %b
}

;; i32 cmpxchg maps directly to cas.
; CHECK-LABEL: test_cmpxchg_i32
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: cas [%o1], %o0, [[R]]

define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
entry:
  %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
  %b = extractvalue { i32, i1 } %pair, 0
  ret i32 %b
}

;; i64 cmpxchg maps directly to casx.
; CHECK-LABEL: test_cmpxchg_i64
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: casx [%o1], %o0, [[R]]

define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
entry:
  %pair = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
  %b = extractvalue { i64, i1 } %pair, 0
  ret i64 %b
}

James Y Knight148a6462016-06-17 18:11:48 +0000173; CHECK-LABEL: test_swap_i8
174; CHECK: mov 42, [[R:%[gilo][0-7]]]
175; CHECK: cas
176
177define i8 @test_swap_i8(i8 %a, i8* %ptr) {
178entry:
179 %b = atomicrmw xchg i8* %ptr, i8 42 monotonic
180 ret i8 %b
181}
182
;; i16 xchg is expanded via a cas loop (no sub-word swap instruction).
; CHECK-LABEL: test_swap_i16
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: cas

define i16 @test_swap_i16(i16 %a, i16* %ptr) {
entry:
  %b = atomicrmw xchg i16* %ptr, i16 42 monotonic
  ret i16 %b
}

;; i32 xchg uses the native swap instruction.
; CHECK-LABEL: test_swap_i32
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: swap [%o1], [[R]]

define i32 @test_swap_i32(i32 %a, i32* %ptr) {
entry:
  %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
  ret i32 %b
}

;; i64 xchg has no 64-bit swap; it is expanded with casx.
; CHECK-LABEL: test_swap_i64
; CHECK: casx [%o1],

define i64 @test_swap_i64(i64 %a, i64* %ptr) {
entry:
  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
  ret i64 %b
}

James Y Knight148a6462016-06-17 18:11:48 +0000212; CHECK-LABEL: test_load_sub_i8
213; CHECK: membar
214; CHECK: .L{{.*}}:
215; CHECK: sub
216; CHECK: cas [{{%[gilo][0-7]}}]
217; CHECK: membar
218define zeroext i8 @test_load_sub_i8(i8* %p, i8 zeroext %v) {
219entry:
220 %0 = atomicrmw sub i8* %p, i8 %v seq_cst
221 ret i8 %0
222}
223
;; seq_cst i16 atomicrmw sub: membar fences around a cas loop.
; CHECK-LABEL: test_load_sub_i16
; CHECK: membar
; CHECK: .L{{.*}}:
; CHECK: sub
; CHECK: cas [{{%[gilo][0-7]}}]
; CHECK: membar
define zeroext i16 @test_load_sub_i16(i16* %p, i16 zeroext %v) {
entry:
  %0 = atomicrmw sub i16* %p, i16 %v seq_cst
  ret i16 %0
}

;; seq_cst i32 atomicrmw add: membar fences around add + cas.
; CHECK-LABEL: test_load_add_i32
; CHECK: membar
; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[U]]
; CHECK: membar
define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw add i32* %p, i32 %v seq_cst
  ret i32 %0
}

;; seq_cst i64 atomicrmw sub: membar fences around sub + casx.
; CHECK-LABEL: test_load_sub_64
; CHECK: membar
; CHECK: sub
; CHECK: casx [%o0]
; CHECK: membar
define zeroext i64 @test_load_sub_64(i64* %p, i64 zeroext %v) {
entry:
  %0 = atomicrmw sub i64* %p, i64 %v seq_cst
  ret i64 %0
}

;; seq_cst i32 atomicrmw xor: membar fences around xor + cas.
; CHECK-LABEL: test_load_xor_32
; CHECK: membar
; CHECK: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw xor i32* %p, i32 %v seq_cst
  ret i32 %0
}

;; seq_cst i32 atomicrmw and: plain and, no xor (contrast with nand below).
; CHECK-LABEL: test_load_and_32
; CHECK: membar
; CHECK: and
; CHECK-NOT: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw and i32* %p, i32 %v seq_cst
  ret i32 %0
}

;; seq_cst i32 atomicrmw nand: and followed by xor inside the cas loop.
; CHECK-LABEL: test_load_nand_32
; CHECK: membar
; CHECK: and
; CHECK: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw nand i32* %p, i32 %v seq_cst
  ret i32 %0
}

;; seq_cst i64 atomicrmw max: signed compare + conditional move on %xcc, then casx.
; CHECK-LABEL: test_load_max_64
; CHECK: membar
; CHECK: cmp
; CHECK: movg %xcc
; CHECK: casx [%o0]
; CHECK: membar
define zeroext i64 @test_load_max_64(i64* %p, i64 zeroext %v) {
entry:
  %0 = atomicrmw max i64* %p, i64 %v seq_cst
  ret i64 %0
}

;; seq_cst i32 atomicrmw umin: unsigned compare + conditional move on %icc, then cas.
; CHECK-LABEL: test_load_umin_32
; CHECK: membar
; CHECK: cmp
; CHECK: movleu %icc
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw umin i32* %p, i32 %v seq_cst
  ret i32 %0
}