blob: 255f9ebba4f2c5f0793b7f2efc84ac392da5d93a [file] [log] [blame]
; RUN: llc < %s -march=r600 -mcpu=SI -verify-machineinstrs | FileCheck --check-prefix=SI --check-prefix=FUNC %s

; FUNC-LABEL: {{^}}atomic_add_i32_offset:
; SI: BUFFER_ATOMIC_ADD v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_offset:
; SI: BUFFER_ATOMIC_ADD [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_addr64_offset:
; SI: BUFFER_ATOMIC_ADD v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_ADD [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32:
; SI: BUFFER_ATOMIC_ADD v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_add_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret:
; SI: BUFFER_ATOMIC_ADD [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_add_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile add i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_addr64:
; SI: BUFFER_ATOMIC_ADD v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_i32_ret_addr64:
; SI: BUFFER_ATOMIC_ADD [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_offset:
; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_offset:
; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_addr64_offset:
; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32:
; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_and_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret:
; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_and_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile and i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_addr64:
; SI: BUFFER_ATOMIC_AND v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_and_i32_ret_addr64:
; SI: BUFFER_ATOMIC_AND [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_offset:
; SI: BUFFER_ATOMIC_SUB v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_offset:
; SI: BUFFER_ATOMIC_SUB [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_addr64_offset:
; SI: BUFFER_ATOMIC_SUB v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_SUB [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32:
; SI: BUFFER_ATOMIC_SUB v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_sub_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret:
; SI: BUFFER_ATOMIC_SUB [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_sub_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile sub i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_addr64:
; SI: BUFFER_ATOMIC_SUB v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_i32_ret_addr64:
; SI: BUFFER_ATOMIC_SUB [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_offset:
; SI: BUFFER_ATOMIC_SMAX v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_offset:
; SI: BUFFER_ATOMIC_SMAX [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_addr64_offset:
; SI: BUFFER_ATOMIC_SMAX v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_SMAX [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32:
; SI: BUFFER_ATOMIC_SMAX v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret:
; SI: BUFFER_ATOMIC_SMAX [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_addr64:
; SI: BUFFER_ATOMIC_SMAX v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_max_i32_ret_addr64:
; SI: BUFFER_ATOMIC_SMAX [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_offset:
; SI: BUFFER_ATOMIC_UMAX v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_offset:
; SI: BUFFER_ATOMIC_UMAX [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_addr64_offset:
; SI: BUFFER_ATOMIC_UMAX v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_UMAX [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32:
; SI: BUFFER_ATOMIC_UMAX v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret:
; SI: BUFFER_ATOMIC_UMAX [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_addr64:
; SI: BUFFER_ATOMIC_UMAX v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umax_i32_ret_addr64:
; SI: BUFFER_ATOMIC_UMAX [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_offset:
; SI: BUFFER_ATOMIC_SMIN v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_offset:
; SI: BUFFER_ATOMIC_SMIN [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_addr64_offset:
; SI: BUFFER_ATOMIC_SMIN v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_SMIN [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32:
; SI: BUFFER_ATOMIC_SMIN v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret:
; SI: BUFFER_ATOMIC_SMIN [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_addr64:
; SI: BUFFER_ATOMIC_SMIN v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_min_i32_ret_addr64:
; SI: BUFFER_ATOMIC_SMIN [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_offset:
; SI: BUFFER_ATOMIC_UMIN v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_offset:
; SI: BUFFER_ATOMIC_UMIN [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_addr64_offset:
; SI: BUFFER_ATOMIC_UMIN v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_UMIN [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32:
; SI: BUFFER_ATOMIC_UMIN v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret:
; SI: BUFFER_ATOMIC_UMIN [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_addr64:
; SI: BUFFER_ATOMIC_UMIN v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_umin_i32_ret_addr64:
; SI: BUFFER_ATOMIC_UMIN [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_offset:
; SI: BUFFER_ATOMIC_OR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_offset:
; SI: BUFFER_ATOMIC_OR [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_addr64_offset:
; SI: BUFFER_ATOMIC_OR v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_OR [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32:
; SI: BUFFER_ATOMIC_OR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_or_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret:
; SI: BUFFER_ATOMIC_OR [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_or_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile or i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_addr64:
; SI: BUFFER_ATOMIC_OR v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_or_i32_ret_addr64:
; SI: BUFFER_ATOMIC_OR [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}

Aaron Watry81144372014-10-17 23:33:03 +0000643; FUNC-LABEL: {{^}}atomic_xchg_i32_offset:
644; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
645define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
646entry:
647 %gep = getelementptr i32 addrspace(1)* %out, i32 4
648 %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
649 ret void
650}
651
652; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_offset:
653; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
654; SI: BUFFER_STORE_DWORD [[RET]]
655define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
656entry:
657 %gep = getelementptr i32 addrspace(1)* %out, i32 4
658 %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
659 store i32 %0, i32 addrspace(1)* %out2
660 ret void
661}
662
663; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64_offset:
664; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
665define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
666entry:
667 %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
668 %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
669 %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
670 ret void
671}
672
673; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64_offset:
674; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
675; SI: BUFFER_STORE_DWORD [[RET]]
676define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
677entry:
678 %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
679 %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
680 %0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
681 store i32 %0, i32 addrspace(1)* %out2
682 ret void
683}
684
685; FUNC-LABEL: {{^}}atomic_xchg_i32:
686; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
687define void @atomic_xchg_i32(i32 addrspace(1)* %out, i32 %in) {
688entry:
689 %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
690 ret void
691}
692
; Result-using swap with no offset: glc form of the atomic, then a store of the
; returned old value to %out2.
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret:
; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xchg_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}
702
; Swap through a runtime i64 index with no constant offset: plain addr64 form.
; FUNC-LABEL: {{^}}atomic_xchg_i32_addr64:
; SI: BUFFER_ATOMIC_SWAP v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}
711
; addr64 swap whose old value is used: glc form of the atomic, then a store of
; the result to %out2.
; FUNC-LABEL: {{^}}atomic_xchg_i32_ret_addr64:
; SI: BUFFER_ATOMIC_SWAP [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}
722
; XOR at a constant GEP offset (4 x i32 = 16 bytes): expect the offset folded
; into the immediate 'offset:0x10' of BUFFER_ATOMIC_XOR.
; FUNC-LABEL: {{^}}atomic_xor_i32_offset:
; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}
731
; XOR at a 0x10 offset whose result is used: glc form of the atomic, then a
; store of the returned old value to %out2.
; FUNC-LABEL: {{^}}atomic_xor_i32_ret_offset:
; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:0x10 glc {{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %gep = getelementptr i32 addrspace(1)* %out, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}
742
; XOR through a pointer indexed by a runtime i64: expect the addr64 addressing
; mode with the constant 16-byte GEP offset folded into 'offset:0x10'.
; FUNC-LABEL: {{^}}atomic_xor_i32_addr64_offset:
; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10{{$}}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  ret void
}
752
; Runtime-indexed (addr64) XOR at a folded 0x10 offset whose result is used:
; glc form of the atomic, then a store of the returned old value to %out2.
; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64_offset:
; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:0x10 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
  %0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}
764
; XOR directly through the base pointer: no 'offset:' immediate and no addr64
; mode expected.
; FUNC-LABEL: {{^}}atomic_xor_i32:
; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0{{$}}
define void @atomic_xor_i32(i32 addrspace(1)* %out, i32 %in) {
entry:
  %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
  ret void
}
772
; Result-using XOR with no offset: glc form of the atomic, then a store of the
; returned old value to %out2.
; FUNC-LABEL: {{^}}atomic_xor_i32_ret:
; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], s[{{[0-9]+}}:{{[0-9]+}}], 0 glc
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xor_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
  %0 = atomicrmw volatile xor i32 addrspace(1)* %out, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}
782
; XOR through a runtime i64 index with no constant offset: plain addr64 form.
; FUNC-LABEL: {{^}}atomic_xor_i32_addr64:
; SI: BUFFER_ATOMIC_XOR v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
  ret void
}
791
; addr64 XOR whose old value is used: glc form of the atomic, then a store of
; the result to %out2.
; FUNC-LABEL: {{^}}atomic_xor_i32_ret_addr64:
; SI: BUFFER_ATOMIC_XOR [[RET:v[0-9]+]], v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 glc{{$}}
; SI: BUFFER_STORE_DWORD [[RET]]
define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
  %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
  %0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
  store i32 %0, i32 addrspace(1)* %out2
  ret void
}