; RUN: llc -march=r600 -mcpu=bonaire -verify-machineinstrs -mattr=+load-store-opt -enable-misched < %s | FileCheck -check-prefix=SI %s

; Test that doing a shift of a pointer with a constant add will be
; folded into the constant offset addressing mode even if the add has
; multiple uses. This is relevant to accessing 2 separate, adjacent
; LDS globals.


declare i32 @llvm.r600.read.tidig.x() #1

@lds0 = addrspace(3) global [512 x float] zeroinitializer, align 4
@lds1 = addrspace(3) global [512 x float] zeroinitializer, align 4


; Make sure the (add tid, 2) << 2 gets folded into the ds's offset as (tid << 2) + 8

; SI-LABEL: {{^}}load_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_read_b32 {{v[0-9]+}}, [[PTR]] offset:8 [M0]
; SI: s_endpgm
define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  store float %val0, float addrspace(1)* %out
  ret void
}

; Make sure once the first use is folded into the addressing mode, the
; remaining add use goes through the normal shl + add constant fold.
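; Concretely, (%tid.x + 2) << 2 distributes to (%tid.x << 2) + 8, so the
; remaining use is covered by the v_add_i32 of 8 checked below.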

; SI-LABEL: {{^}}load_shl_base_lds_1:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_read_b32 [[RESULT:v[0-9]+]], [[PTR]] offset:8 [M0]
; SI: v_add_i32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
; SI-DAG: buffer_store_dword [[RESULT]]
; SI-DAG: buffer_store_dword [[ADDUSE]]
; SI: s_endpgm
define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4
  %shl_add_use = shl i32 %idx.0, 2
  store i32 %shl_add_use, i32 addrspace(1)* %add_use, align 4
  store float %val0, float addrspace(1)* %out
  ret void
}

@maxlds = addrspace(3) global [65536 x i8] zeroinitializer, align 4

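; The DS instruction offset field is a 16-bit unsigned immediate, so 65535 is
; the largest constant that can still be folded into the addressing mode.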
; SI-LABEL: {{^}}load_shl_base_lds_max_offset:
; SI: ds_read_u8 v{{[0-9]+}}, v{{[0-9]+}} offset:65535
; SI: s_endpgm
define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 65535
  %arrayidx0 = getelementptr inbounds [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
  %val0 = load i8 addrspace(3)* %arrayidx0
  store i32 %idx.0, i32 addrspace(1)* %add_use
  store i8 %val0, i8 addrspace(1)* %out
  ret void
}

; The two globals are placed adjacent in memory, so the same base
; pointer can be used with an offset into the second one.
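; The st64 read2 form scales its offsets by 64 dwords (256 bytes): offset0:1
; covers the +64 element index (256 bytes into @lds0), and offset1:9 reaches
; 2304 bytes, i.e. the 2048-byte @lds1 base plus the same 256.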

; SI-LABEL: {{^}}load_shl_base_lds_2:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI-NEXT: ds_read2st64_b32 {{v\[[0-9]+:[0-9]+\]}}, [[PTR]] offset0:1 offset1:9 [M0]
; SI: s_endpgm
define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 64
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = load float addrspace(3)* %arrayidx0, align 4
  %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
  %val1 = load float addrspace(3)* %arrayidx1, align 4
  %sum = fadd float %val0, %val1
  store float %sum, float addrspace(1)* %out, align 4
  ret void
}

; SI-LABEL: {{^}}store_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_write_b32 [[PTR]], {{v[0-9]+}} offset:8 [M0]
; SI: s_endpgm
define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
  store float 1.0, float addrspace(3)* %arrayidx0, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}


; --------------------------------------------------------------------------------
; Atomics.
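; Every atomic below uses the same (%tid.x + 2) index, so each DS atomic should
; carry the folded offset:8 (2 elements * 4 bytes) rather than a separate add.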

@lds2 = addrspace(3) global [512 x i32] zeroinitializer, align 4

; define void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
;   %idx.0 = add nsw i32 %tid.x, 2
;   %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
;   %val = load atomic i32 addrspace(3)* %arrayidx0 seq_cst, align 4
;   store i32 %val, i32 addrspace(1)* %out, align 4
;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
;   ret void
; }


; SI-LABEL: {{^}}atomic_cmpxchg_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_cmpst_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %pair = cmpxchg i32 addrspace(3)* %arrayidx0, i32 7, i32 %swap seq_cst monotonic
  %result = extractvalue { i32, i1 } %pair, 0
  store i32 %result, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_swap_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_wrxchg_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw xchg i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_add_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_add_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw add i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_sub_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_sub_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw sub i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_and_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_and_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw and i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_or_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_or_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw or i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_xor_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_xor_rtn_b32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw xor i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; define void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
;   %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
;   %idx.0 = add nsw i32 %tid.x, 2
;   %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
;   %val = atomicrmw nand i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
;   store i32 %val, i32 addrspace(1)* %out, align 4
;   store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
;   ret void
; }
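; (atomicrmw nand has no single DS instruction and is normally expanded to a
; cmpxchg loop, which may be why the test above is left disabled.)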

; SI-LABEL: {{^}}atomic_min_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_min_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw min i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_max_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_max_rtn_i32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw max i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_umin_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_min_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw umin i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

; SI-LABEL: {{^}}atomic_umax_shl_base_lds_0:
; SI: v_lshlrev_b32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
; SI: ds_max_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
; SI: s_endpgm
define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
  %val = atomicrmw umax i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
  store i32 %val, i32 addrspace(1)* %out, align 4
  store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }