; RUN: opt -mtriple=amdgcn-- -S -structurizecfg -si-annotate-control-flow %s | FileCheck -check-prefix=IR %s
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; After structurizing, there are 3 levels of loops. The i1 phi
; conditions mutually depend on each other, so it isn't safe to delete
; the condition that appears to have no uses until the loop is
; completely processed.
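;
; Concretely, in @reduced_nested_loop_conditions below, %tmp12 (in
; %bb10) and %tmp14 (in %bb13) are both fed by %tmp22 (in %bb20),
; which is in turn fed by %tmp14, so none of these i1 phis can be
; removed in isolation even when one of them looks dead.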


; IR-LABEL: @reduced_nested_loop_conditions(

; IR: bb5:
; IR-NEXT: %phi.broken = phi i64 [ %loop.phi, %bb10 ], [ 0, %bb ]
; IR-NEXT: %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
; IR-NEXT: %tmp7 = icmp eq i32 %tmp6, 1
; IR-NEXT: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %tmp7)
; IR-NEXT: %1 = extractvalue { i1, i64 } %0, 0
; IR-NEXT: %2 = extractvalue { i1, i64 } %0, 1
; IR-NEXT: br i1 %1, label %bb8, label %Flow

; IR: bb8:
; IR-NEXT: %3 = call i64 @llvm.amdgcn.break(i64 %phi.broken)
; IR-NEXT: br label %bb13

; IR: bb10:
; IR-NEXT: %loop.phi = phi i64 [ %6, %Flow ]
; IR-NEXT: %tmp11 = phi i32 [ %5, %Flow ]
; IR-NEXT: %4 = call i1 @llvm.amdgcn.loop(i64 %loop.phi)
; IR-NEXT: br i1 %4, label %bb23, label %bb5

; IR: Flow:
; IR-NEXT: %loop.phi1 = phi i64 [ %loop.phi2, %bb4 ], [ %phi.broken, %bb5 ]
; IR-NEXT: %5 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
; IR-NEXT: %6 = call i64 @llvm.amdgcn.else.break(i64 %2, i64 %loop.phi1)
; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %2)
; IR-NEXT: br label %bb10

; IR: bb13:
; IR-NEXT: %loop.phi3 = phi i64 [ %loop.phi4, %bb3 ], [ %3, %bb8 ]
; IR-NEXT: %tmp14 = phi i1 [ false, %bb3 ], [ true, %bb8 ]
; IR-NEXT: %tmp15 = bitcast i64 %tmp2 to <2 x i32>
; IR-NEXT: br i1 %tmp14, label %bb16, label %bb20

; IR: bb16:
; IR-NEXT: %tmp17 = extractelement <2 x i32> %tmp15, i64 1
; IR-NEXT: %tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %tmp17
; IR-NEXT: %tmp19 = load volatile i32, i32 addrspace(3)* %tmp18
; IR-NEXT: br label %bb20

; IR: bb20:
; IR-NEXT: %loop.phi4 = phi i64 [ %phi.broken, %bb16 ], [ %phi.broken, %bb13 ]
; IR-NEXT: %loop.phi2 = phi i64 [ %phi.broken, %bb16 ], [ %loop.phi3, %bb13 ]
; IR-NEXT: %tmp21 = phi i32 [ %tmp19, %bb16 ], [ 0, %bb13 ]
; IR-NEXT: br label %bb9

; IR: bb23:
; IR-NEXT: call void @llvm.amdgcn.end.cf(i64 %loop.phi)
; IR-NEXT: ret void

; GCN-LABEL: {{^}}reduced_nested_loop_conditions:

; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 1
; GCN-NEXT: s_cbranch_scc1

; FIXME: Should fold to unconditional branch?
; GCN: ; implicit-def
; GCN: s_cbranch_vccz

; GCN: ds_read_b32

; GCN: [[BB9:BB[0-9]+_[0-9]+]]: ; %bb9
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: s_branch [[BB9]]
define amdgpu_kernel void @reduced_nested_loop_conditions(i64 addrspace(3)* nocapture %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %tmp1 = getelementptr inbounds i64, i64 addrspace(3)* %arg, i32 %tmp
  %tmp2 = load volatile i64, i64 addrspace(3)* %tmp1
  br label %bb5

bb3: ; preds = %bb9
  br i1 true, label %bb4, label %bb13

bb4: ; preds = %bb3
  br label %bb10

bb5: ; preds = %bb10, %bb
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp11, %bb10 ]
  %tmp7 = icmp eq i32 %tmp6, 1
  br i1 %tmp7, label %bb8, label %bb10

bb8: ; preds = %bb5
  br label %bb13

bb9: ; preds = %bb20, %bb9
  br i1 false, label %bb3, label %bb9

bb10: ; preds = %bb5, %bb4
  %tmp11 = phi i32 [ %tmp21, %bb4 ], [ undef, %bb5 ]
  %tmp12 = phi i1 [ %tmp22, %bb4 ], [ true, %bb5 ]
  br i1 %tmp12, label %bb23, label %bb5

bb13: ; preds = %bb8, %bb3
  %tmp14 = phi i1 [ %tmp22, %bb3 ], [ true, %bb8 ]
  %tmp15 = bitcast i64 %tmp2 to <2 x i32>
  br i1 %tmp14, label %bb16, label %bb20

bb16: ; preds = %bb13
  %tmp17 = extractelement <2 x i32> %tmp15, i64 1
  %tmp18 = getelementptr inbounds i32, i32 addrspace(3)* undef, i32 %tmp17
  %tmp19 = load volatile i32, i32 addrspace(3)* %tmp18
  br label %bb20

bb20: ; preds = %bb16, %bb13
  %tmp21 = phi i32 [ %tmp19, %bb16 ], [ 0, %bb13 ]
  %tmp22 = phi i1 [ false, %bb16 ], [ %tmp14, %bb13 ]
  br label %bb9

bb23: ; preds = %bb10
  ret void
}

; Earlier version of the above, before a run of the structurizer.
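; The IR checks below cover the SIAnnotateControlFlow output directly:
; the @llvm.amdgcn.if/else/if.break/else.break/loop/end.cf calls that
; thread the wave's i64 mask values through both loop levels.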
; IR-LABEL: @nested_loop_conditions(

; IR: %tmp1235 = icmp slt i32 %tmp1134, 9
; IR: br i1 %tmp1235, label %bb14.lr.ph, label %Flow

; IR: bb14.lr.ph:
; IR: br label %bb14

; IR: Flow3:
; IR: call void @llvm.amdgcn.end.cf(i64 %18)
; IR: %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %17)
; IR: %1 = extractvalue { i1, i64 } %0, 0
; IR: %2 = extractvalue { i1, i64 } %0, 1
; IR: br i1 %1, label %bb4.bb13_crit_edge, label %Flow4

; IR: bb4.bb13_crit_edge:
; IR: br label %Flow4

; IR: Flow4:
; IR: %3 = phi i1 [ true, %bb4.bb13_crit_edge ], [ false, %Flow3 ]
; IR: call void @llvm.amdgcn.end.cf(i64 %2)
; IR: br label %Flow

; IR: bb13:
; IR: br label %bb31

; IR: Flow:
; IR: %4 = phi i1 [ %3, %Flow4 ], [ true, %bb ]
; IR: %5 = call { i1, i64 } @llvm.amdgcn.if(i1 %4)
; IR: %6 = extractvalue { i1, i64 } %5, 0
; IR: %7 = extractvalue { i1, i64 } %5, 1
; IR: br i1 %6, label %bb13, label %bb31

; IR: bb14:
; IR: %phi.broken = phi i64 [ %18, %Flow2 ], [ 0, %bb14.lr.ph ]
; IR: %tmp1037 = phi i32 [ %tmp1033, %bb14.lr.ph ], [ %16, %Flow2 ]
; IR: %tmp936 = phi <4 x i32> [ %tmp932, %bb14.lr.ph ], [ %15, %Flow2 ]
; IR: %tmp15 = icmp eq i32 %tmp1037, 1
; IR: %8 = xor i1 %tmp15, true
; IR: %9 = call { i1, i64 } @llvm.amdgcn.if(i1 %8)
; IR: %10 = extractvalue { i1, i64 } %9, 0
; IR: %11 = extractvalue { i1, i64 } %9, 1
; IR: br i1 %10, label %bb31.loopexit, label %Flow1

; IR: Flow1:
; IR: %12 = call { i1, i64 } @llvm.amdgcn.else(i64 %11)
; IR: %13 = extractvalue { i1, i64 } %12, 0
; IR: %14 = extractvalue { i1, i64 } %12, 1
; IR: br i1 %13, label %bb16, label %Flow2

; IR: bb16:
; IR: %tmp17 = bitcast i64 %tmp3 to <2 x i32>
; IR: br label %bb18

; IR: Flow2:
; IR: %loop.phi = phi i64 [ %21, %bb21 ], [ %phi.broken, %Flow1 ]
; IR: %15 = phi <4 x i32> [ %tmp9, %bb21 ], [ undef, %Flow1 ]
; IR: %16 = phi i32 [ %tmp10, %bb21 ], [ undef, %Flow1 ]
; IR: %17 = phi i1 [ %20, %bb21 ], [ false, %Flow1 ]
; IR: %18 = call i64 @llvm.amdgcn.else.break(i64 %14, i64 %loop.phi)
; IR: call void @llvm.amdgcn.end.cf(i64 %14)
; IR: %19 = call i1 @llvm.amdgcn.loop(i64 %18)
; IR: br i1 %19, label %Flow3, label %bb14

; IR: bb18:
; IR: %tmp19 = load volatile i32, i32 addrspace(1)* undef
; IR: %tmp20 = icmp slt i32 %tmp19, 9
; IR: br i1 %tmp20, label %bb21, label %bb18

; IR: bb21:
; IR: %tmp22 = extractelement <2 x i32> %tmp17, i64 1
; IR: %tmp23 = lshr i32 %tmp22, 16
; IR: %tmp24 = select i1 undef, i32 undef, i32 %tmp23
; IR: %tmp25 = uitofp i32 %tmp24 to float
; IR: %tmp26 = fmul float %tmp25, 0x3EF0001000000000
; IR: %tmp27 = fsub float %tmp26, undef
; IR: %tmp28 = fcmp olt float %tmp27, 5.000000e-01
; IR: %tmp29 = select i1 %tmp28, i64 1, i64 2
; IR: %tmp30 = extractelement <4 x i32> %tmp936, i64 %tmp29
; IR: %tmp7 = zext i32 %tmp30 to i64
; IR: %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 %tmp7
; IR: %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
; IR: %tmp10 = extractelement <4 x i32> %tmp9, i64 0
; IR: %tmp11 = load volatile i32, i32 addrspace(1)* undef
; IR: %tmp12 = icmp slt i32 %tmp11, 9
; IR: %20 = xor i1 %tmp12, true
; IR: %21 = call i64 @llvm.amdgcn.if.break(i1 %20, i64 %phi.broken)
; IR: br label %Flow2

; IR: bb31.loopexit:
; IR: br label %Flow1

; IR: bb31:
; IR: call void @llvm.amdgcn.end.cf(i64 %7)
; IR: store volatile i32 0, i32 addrspace(1)* undef
; IR: ret void


; GCN-LABEL: {{^}}nested_loop_conditions:

; GCN: v_cmp_lt_i32_e32 vcc, 8, v
; GCN: s_and_b64 vcc, exec, vcc
; GCN: s_cbranch_vccnz [[BB31:BB[0-9]+_[0-9]+]]

; GCN: [[BB14:BB[0-9]+_[0-9]+]]: ; %bb14
; GCN: v_cmp_ne_u32_e32 vcc, 1, v
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[BB31]]

; GCN: [[BB18:BB[0-9]+_[0-9]+]]: ; %bb18
; GCN: buffer_load_dword
; GCN: v_cmp_lt_i32_e32 vcc, 8, v
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[BB18]]

; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: v_cmp_gt_i32_e32 vcc, 9
; GCN-NEXT: s_and_b64 vcc, exec, vcc
; GCN-NEXT: s_cbranch_vccnz [[BB14]]

; GCN: [[BB31]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @nested_loop_conditions(i64 addrspace(1)* nocapture %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %tmp1 = zext i32 %tmp to i64
  %tmp2 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp1
  %tmp3 = load i64, i64 addrspace(1)* %tmp2, align 16
  %tmp932 = load <4 x i32>, <4 x i32> addrspace(1)* undef, align 16
  %tmp1033 = extractelement <4 x i32> %tmp932, i64 0
  %tmp1134 = load volatile i32, i32 addrspace(1)* undef
  %tmp1235 = icmp slt i32 %tmp1134, 9
  br i1 %tmp1235, label %bb14.lr.ph, label %bb13

bb14.lr.ph: ; preds = %bb
  br label %bb14

bb4.bb13_crit_edge: ; preds = %bb21
  br label %bb13

bb13: ; preds = %bb4.bb13_crit_edge, %bb
  br label %bb31

bb14: ; preds = %bb21, %bb14.lr.ph
  %tmp1037 = phi i32 [ %tmp1033, %bb14.lr.ph ], [ %tmp10, %bb21 ]
  %tmp936 = phi <4 x i32> [ %tmp932, %bb14.lr.ph ], [ %tmp9, %bb21 ]
  %tmp15 = icmp eq i32 %tmp1037, 1
  br i1 %tmp15, label %bb16, label %bb31.loopexit

bb16: ; preds = %bb14
  %tmp17 = bitcast i64 %tmp3 to <2 x i32>
  br label %bb18

bb18: ; preds = %bb18, %bb16
  %tmp19 = load volatile i32, i32 addrspace(1)* undef
  %tmp20 = icmp slt i32 %tmp19, 9
  br i1 %tmp20, label %bb21, label %bb18

bb21: ; preds = %bb18
  %tmp22 = extractelement <2 x i32> %tmp17, i64 1
  %tmp23 = lshr i32 %tmp22, 16
  %tmp24 = select i1 undef, i32 undef, i32 %tmp23
  %tmp25 = uitofp i32 %tmp24 to float
  %tmp26 = fmul float %tmp25, 0x3EF0001000000000
  %tmp27 = fsub float %tmp26, undef
  %tmp28 = fcmp olt float %tmp27, 5.000000e-01
  %tmp29 = select i1 %tmp28, i64 1, i64 2
  %tmp30 = extractelement <4 x i32> %tmp936, i64 %tmp29
  %tmp7 = zext i32 %tmp30 to i64
  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* undef, i64 %tmp7
  %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
  %tmp10 = extractelement <4 x i32> %tmp9, i64 0
  %tmp11 = load volatile i32, i32 addrspace(1)* undef
  %tmp12 = icmp slt i32 %tmp11, 9
  br i1 %tmp12, label %bb14, label %bb4.bb13_crit_edge

bb31.loopexit: ; preds = %bb14
  br label %bb31

bb31: ; preds = %bb31.loopexit, %bb13
  store volatile i32 0, i32 addrspace(1)* undef
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }