blob: 975fed2bd6c3a2e7b4d989ced9019ef3fa60e445 [file] [log] [blame]
; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s

; 64-bit LDS atomic exchange; the returned (pre-swap) value is stored to global.
; FUNC-LABEL: @lds_atomic_xchg_ret_i64:
; SI: DS_WRXCHG_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; Same, with a constant GEP offset of 4 x i64 = 32 bytes (0x20) that should
; fold into the DS instruction's offset field.
; FUNC-LABEL: @lds_atomic_xchg_ret_i64_offset:
; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic add with used result.
; FUNC-LABEL: @lds_atomic_add_ret_i64:
; SI: DS_ADD_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; Checks the full materialization sequence: scalar load of the pointer,
; 64-bit immediate 9 moved into a VGPR pair, and the 0x20 offset folded
; into DS_ADD_RTN_U64.
; FUNC-LABEL: @lds_atomic_add_ret_i64_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
; SI: DS_ADD_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0]
; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
; SI: S_ENDPGM
define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 'atomicrmw add ..., 1' is expected to select to DS_INC_RTN_U64; the -1
; materialized here is presumably the inc instruction's wrap/clamp operand —
; confirm against the SI ISA docs.
; NOTE(review): [[VPTR]] is not captured in this check group; FileCheck
; variables are file-scoped, so it relies on the capture in the
; add_ret_i64_offset checks earlier in the file.
; FUNC-LABEL: @lds_atomic_inc_ret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
; SI: DS_INC_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
; SI: S_ENDPGM
define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_inc_ret_i64_offset:
; SI: DS_INC_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic subtract with used result.
; FUNC-LABEL: @lds_atomic_sub_ret_i64:
; SI: DS_SUB_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_sub_ret_i64_offset:
; SI: DS_SUB_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 'atomicrmw sub ..., 1' is expected to select to DS_DEC_RTN_U64; the -1
; materialized here is presumably the dec instruction's wrap/clamp operand —
; confirm against the SI ISA docs. [[VPTR]] relies on the earlier file-scoped
; capture (see add_ret_i64_offset).
; FUNC-LABEL: @lds_atomic_dec_ret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
; SI: DS_DEC_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
; SI: BUFFER_STORE_DWORDX2 [[RESULT]],
; SI: S_ENDPGM
define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_dec_ret_i64_offset:
; SI: DS_DEC_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic AND with used result.
; FUNC-LABEL: @lds_atomic_and_ret_i64:
; SI: DS_AND_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_and_ret_i64_offset:
; SI: DS_AND_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic OR with used result.
; FUNC-LABEL: @lds_atomic_or_ret_i64:
; SI: DS_OR_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_or_ret_i64_offset:
; SI: DS_OR_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic XOR with used result.
; FUNC-LABEL: @lds_atomic_xor_ret_i64:
; SI: DS_XOR_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_xor_ret_i64_offset:
; SI: DS_XOR_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XFUNC-LABEL: @lds_atomic_nand_ret_i64:
; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
;   %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
;   store i64 %result, i64 addrspace(1)* %out, align 8
;   ret void
; }

; 64-bit LDS atomic signed min with used result.
; FUNC-LABEL: @lds_atomic_min_ret_i64:
; SI: DS_MIN_RTN_I64
; SI: S_ENDPGM
define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_min_ret_i64_offset:
; SI: DS_MIN_RTN_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic signed max with used result.
; FUNC-LABEL: @lds_atomic_max_ret_i64:
; SI: DS_MAX_RTN_I64
; SI: S_ENDPGM
define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_max_ret_i64_offset:
; SI: DS_MAX_RTN_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic unsigned min with used result.
; FUNC-LABEL: @lds_atomic_umin_ret_i64:
; SI: DS_MIN_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_umin_ret_i64_offset:
; SI: DS_MIN_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; 64-bit LDS atomic unsigned max with used result.
; FUNC-LABEL: @lds_atomic_umax_ret_i64:
; SI: DS_MAX_RTN_U64
; SI: S_ENDPGM
define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: @lds_atomic_umax_ret_i64_offset:
; SI: DS_MAX_RTN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
  store i64 %result, i64 addrspace(1)* %out, align 8
  ret void
}

; Unused-result variants: xchg still selects the returning form (the CHECK
; documents that no no-return WRXCHG exists / is used here).
; FUNC-LABEL: @lds_atomic_xchg_noret_i64:
; SI: DS_WRXCHG_RTN_B64
; SI: S_ENDPGM
define void @lds_atomic_xchg_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xchg i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_xchg_noret_i64_offset:
; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; Add with unused result selects the non-returning DS_ADD_U64.
; FUNC-LABEL: @lds_atomic_add_noret_i64:
; SI: DS_ADD_U64
; SI: S_ENDPGM
define void @lds_atomic_add_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; Pointer is the first kernel arg here (0x9), unlike the ret variant (0xb).
; FUNC-LABEL: @lds_atomic_add_noret_i64_offset:
; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x9
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]]
; SI: DS_ADD_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0]
; SI: S_ENDPGM
define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
  ret void
}

; 'add 1' with unused result selects DS_INC_U64. NOTE(review): [[VPTR]] is
; used but not captured in this group; it relies on FileCheck's file-scoped
; variable captured in the add_noret_i64_offset checks above.
; FUNC-LABEL: @lds_atomic_inc_noret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
; SI: DS_INC_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
; SI: S_ENDPGM
define void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_inc_noret_i64_offset:
; SI: DS_INC_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
  ret void
}

; Subtract with unused result selects the non-returning DS_SUB_U64.
; FUNC-LABEL: @lds_atomic_sub_noret_i64:
; SI: DS_SUB_U64
; SI: S_ENDPGM
define void @lds_atomic_sub_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_sub_noret_i64_offset:
; SI: DS_SUB_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; 'sub 1' with unused result selects DS_DEC_U64; [[VPTR]] relies on the
; earlier file-scoped capture (see add_noret_i64_offset).
; FUNC-LABEL: @lds_atomic_dec_noret_i64:
; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, -1
; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]]
; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]]
; SI: DS_DEC_U64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}},
; SI: S_ENDPGM
define void @lds_atomic_dec_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_dec_noret_i64_offset:
; SI: DS_DEC_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
  ret void
}

; AND with unused result selects the non-returning DS_AND_B64.
; FUNC-LABEL: @lds_atomic_and_noret_i64:
; SI: DS_AND_B64
; SI: S_ENDPGM
define void @lds_atomic_and_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_and_noret_i64_offset:
; SI: DS_AND_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; OR with unused result selects the non-returning DS_OR_B64.
; FUNC-LABEL: @lds_atomic_or_noret_i64:
; SI: DS_OR_B64
; SI: S_ENDPGM
define void @lds_atomic_or_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_or_noret_i64_offset:
; SI: DS_OR_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; XOR with unused result selects the non-returning DS_XOR_B64.
; FUNC-LABEL: @lds_atomic_xor_noret_i64:
; SI: DS_XOR_B64
; SI: S_ENDPGM
define void @lds_atomic_xor_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_xor_noret_i64_offset:
; SI: DS_XOR_B64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; FIXME: There is no atomic nand instruction, so we somehow need to expand this.
; XFUNC-LABEL: @lds_atomic_nand_noret_i64:
; define void @lds_atomic_nand_noret_i64(i64 addrspace(3)* %ptr) nounwind {
;   %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst
;   ret void
; }

; Signed min with unused result selects the non-returning DS_MIN_I64.
; FUNC-LABEL: @lds_atomic_min_noret_i64:
; SI: DS_MIN_I64
; SI: S_ENDPGM
define void @lds_atomic_min_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_min_noret_i64_offset:
; SI: DS_MIN_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; Signed max with unused result selects the non-returning DS_MAX_I64.
; FUNC-LABEL: @lds_atomic_max_noret_i64:
; SI: DS_MAX_I64
; SI: S_ENDPGM
define void @lds_atomic_max_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_max_noret_i64_offset:
; SI: DS_MAX_I64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; Unsigned min with unused result selects the non-returning DS_MIN_U64.
; FUNC-LABEL: @lds_atomic_umin_noret_i64:
; SI: DS_MIN_U64
; SI: S_ENDPGM
define void @lds_atomic_umin_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_umin_noret_i64_offset:
; SI: DS_MIN_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}

; Unsigned max with unused result selects the non-returning DS_MAX_U64.
; FUNC-LABEL: @lds_atomic_umax_noret_i64:
; SI: DS_MAX_U64
; SI: S_ENDPGM
define void @lds_atomic_umax_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst
  ret void
}

; FUNC-LABEL: @lds_atomic_umax_noret_i64_offset:
; SI: DS_MAX_U64 {{.*}} 0x20
; SI: S_ENDPGM
define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}