; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -structurizecfg %s | FileCheck %s

; StructurizeCFG::orderNodes used an arbitrary and nonsensical sorting
; function which broke the basic backedge identification algorithm. It
; would use RPO order, but then do a weird partial sort by the loop
; depth assuming blocks are sorted by loop. However a block can appear
; in between blocks of a loop that is not part of a loop, breaking the
; assumption of the sort.
;
; The collectInfos must be done in RPO order. The actual
; structurization order I think is less important, but unless the loop
; headers are identified in RPO order, it finds the wrong set of back
; edges.

define amdgpu_kernel void @loop_backedge_misidentified(i32 addrspace(1)* %arg0) #0 {
; CHECK-LABEL: @loop_backedge_misidentified(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
; CHECK-NEXT:    [[LOAD1:%.*]] = load volatile <2 x float>, <2 x float> addrspace(1)* undef
; CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[ARG0:%.*]], i32 [[TID]]
; CHECK-NEXT:    [[I_INITIAL:%.*]] = load volatile i32, i32 addrspace(1)* [[GEP]], align 4
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       LOOP.HEADER:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[I_INITIAL]], [[ENTRY:%.*]] ], [ [[TMP10:%.*]], [[FLOW4:%.*]] ]
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x100b
; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, <4 x i32> addrspace(1)* [[TMP13]], align 16
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[TMP14]], i64 0
; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 1
; CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[TMP17]], true
; CHECK-NEXT:    br i1 [[TMP0]], label [[BB62:%.*]], label [[FLOW:%.*]]
; CHECK:       Flow2:
; CHECK-NEXT:    br label [[FLOW]]
; CHECK:       bb18:
; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x i32> [[TMP]], i64 0
; CHECK-NEXT:    [[TMP22:%.*]] = lshr i32 [[TMP19]], 16
; CHECK-NEXT:    [[TMP24:%.*]] = urem i32 [[TMP22]], 52
; CHECK-NEXT:    [[TMP25:%.*]] = mul nuw nsw i32 [[TMP24]], 52
; CHECK-NEXT:    br label [[INNER_LOOP:%.*]]
; CHECK:       Flow3:
; CHECK-NEXT:    [[TMP1:%.*]] = phi i32 [ [[TMP59:%.*]], [[INNER_LOOP_BREAK:%.*]] ], [ [[TMP7:%.*]], [[FLOW]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ true, [[INNER_LOOP_BREAK]] ], [ [[TMP8:%.*]], [[FLOW]] ]
; CHECK-NEXT:    br i1 [[TMP2]], label [[END_ELSE_BLOCK:%.*]], label [[FLOW4]]
; CHECK:       INNER_LOOP:
; CHECK-NEXT:    [[INNER_LOOP_J:%.*]] = phi i32 [ [[INNER_LOOP_J_INC:%.*]], [[INNER_LOOP]] ], [ [[TMP25]], [[BB18:%.*]] ]
; CHECK-NEXT:    call void asm sideeffect "
; CHECK-NEXT:    [[INNER_LOOP_J_INC]] = add nsw i32 [[INNER_LOOP_J]], 1
; CHECK-NEXT:    [[INNER_LOOP_CMP:%.*]] = icmp eq i32 [[INNER_LOOP_J]], 0
; CHECK-NEXT:    br i1 [[INNER_LOOP_CMP]], label [[INNER_LOOP_BREAK]], label [[INNER_LOOP]]
; CHECK:       INNER_LOOP_BREAK:
; CHECK-NEXT:    [[TMP59]] = extractelement <4 x i32> [[TMP14]], i64 2
; CHECK-NEXT:    call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
; CHECK-NEXT:    br label [[FLOW3:%.*]]
; CHECK:       bb62:
; CHECK-NEXT:    [[LOAD13:%.*]] = icmp ult i32 [[TMP16]], 271
; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[LOAD13]], true
; CHECK-NEXT:    br i1 [[TMP3]], label [[INCREMENT_I:%.*]], label [[FLOW1:%.*]]
; CHECK:       Flow1:
; CHECK-NEXT:    [[TMP4:%.*]] = phi i32 [ [[INC_I:%.*]], [[INCREMENT_I]] ], [ undef, [[BB62]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = phi i1 [ true, [[INCREMENT_I]] ], [ false, [[BB62]] ]
; CHECK-NEXT:    [[TMP6:%.*]] = phi i1 [ false, [[INCREMENT_I]] ], [ true, [[BB62]] ]
; CHECK-NEXT:    br i1 [[TMP6]], label [[BB64:%.*]], label [[FLOW2:%.*]]
; CHECK:       bb64:
; CHECK-NEXT:    call void asm sideeffect "s_nop 42", "~{memory}"() #0
; CHECK-NEXT:    br label [[FLOW2]]
; CHECK:       Flow:
; CHECK-NEXT:    [[TMP7]] = phi i32 [ [[TMP4]], [[FLOW2]] ], [ undef, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP8]] = phi i1 [ [[TMP5]], [[FLOW2]] ], [ false, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP9:%.*]] = phi i1 [ false, [[FLOW2]] ], [ true, [[LOOP_HEADER]] ]
; CHECK-NEXT:    br i1 [[TMP9]], label [[BB18]], label [[FLOW3]]
; CHECK:       INCREMENT_I:
; CHECK-NEXT:    [[INC_I]] = add i32 [[I]], 1
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1336
; CHECK-NEXT:    br label [[FLOW1]]
; CHECK:       END_ELSE_BLOCK:
; CHECK-NEXT:    [[I_FINAL:%.*]] = phi i32 [ [[TMP1]], [[FLOW3]] ]
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1337
; CHECK-NEXT:    [[CMP_END_ELSE_BLOCK:%.*]] = icmp eq i32 [[I_FINAL]], -1
; CHECK-NEXT:    br label [[FLOW4]]
; CHECK:       Flow4:
; CHECK-NEXT:    [[TMP10]] = phi i32 [ [[I_FINAL]], [[END_ELSE_BLOCK]] ], [ undef, [[FLOW3]] ]
; CHECK-NEXT:    [[TMP11:%.*]] = phi i1 [ [[CMP_END_ELSE_BLOCK]], [[END_ELSE_BLOCK]] ], [ true, [[FLOW3]] ]
; CHECK-NEXT:    br i1 [[TMP11]], label [[RETURN:%.*]], label [[LOOP_HEADER]]
; CHECK:       RETURN:
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x99
; CHECK-NEXT:    store volatile <2 x float> [[LOAD1]], <2 x float> addrspace(1)* undef, align 8
; CHECK-NEXT:    ret void
;
entry:
  %tmp = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
  %load1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i32 %tid
  %i.initial = load volatile i32, i32 addrspace(1)* %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 %tmp12
  %tmp14 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:
  %load13 = icmp ult i32 %tmp16, 271
  br i1 %load13, label %bb64, label %INCREMENT_I

bb64:
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, <2 x float> addrspace(1)* undef, align 8
  ret void
}

; The same function, except break to return block goes directly to the
; return, which managed to hide the bug.
define amdgpu_kernel void @loop_backedge_misidentified_alt(i32 addrspace(1)* %arg0) #0 {
; CHECK-LABEL: @loop_backedge_misidentified_alt(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
; CHECK-NEXT:    [[LOAD1:%.*]] = load volatile <2 x float>, <2 x float> addrspace(1)* undef
; CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[ARG0:%.*]], i32 [[TID]]
; CHECK-NEXT:    [[I_INITIAL:%.*]] = load volatile i32, i32 addrspace(1)* [[GEP]], align 4
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       LOOP.HEADER:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[I_INITIAL]], [[ENTRY:%.*]] ], [ [[TMP9:%.*]], [[FLOW3:%.*]] ]
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x100b
; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, <4 x i32> addrspace(1)* [[TMP13]], align 16
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[TMP14]], i64 0
; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 1
; CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[TMP17]], true
; CHECK-NEXT:    br i1 [[TMP0]], label [[BB62:%.*]], label [[FLOW:%.*]]
; CHECK:       Flow1:
; CHECK-NEXT:    [[TMP1:%.*]] = phi i32 [ [[INC_I:%.*]], [[INCREMENT_I:%.*]] ], [ undef, [[BB62]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ true, [[INCREMENT_I]] ], [ false, [[BB62]] ]
; CHECK-NEXT:    br label [[FLOW]]
; CHECK:       bb18:
; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x i32> [[TMP]], i64 0
; CHECK-NEXT:    [[TMP22:%.*]] = lshr i32 [[TMP19]], 16
; CHECK-NEXT:    [[TMP24:%.*]] = urem i32 [[TMP22]], 52
; CHECK-NEXT:    [[TMP25:%.*]] = mul nuw nsw i32 [[TMP24]], 52
; CHECK-NEXT:    br label [[INNER_LOOP:%.*]]
; CHECK:       Flow2:
; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ [[TMP59:%.*]], [[INNER_LOOP_BREAK:%.*]] ], [ [[TMP6:%.*]], [[FLOW]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = phi i1 [ true, [[INNER_LOOP_BREAK]] ], [ [[TMP7:%.*]], [[FLOW]] ]
; CHECK-NEXT:    br i1 [[TMP4]], label [[END_ELSE_BLOCK:%.*]], label [[FLOW3]]
; CHECK:       INNER_LOOP:
; CHECK-NEXT:    [[INNER_LOOP_J:%.*]] = phi i32 [ [[INNER_LOOP_J_INC:%.*]], [[INNER_LOOP]] ], [ [[TMP25]], [[BB18:%.*]] ]
; CHECK-NEXT:    call void asm sideeffect "
; CHECK-NEXT:    [[INNER_LOOP_J_INC]] = add nsw i32 [[INNER_LOOP_J]], 1
; CHECK-NEXT:    [[INNER_LOOP_CMP:%.*]] = icmp eq i32 [[INNER_LOOP_J]], 0
; CHECK-NEXT:    br i1 [[INNER_LOOP_CMP]], label [[INNER_LOOP_BREAK]], label [[INNER_LOOP]]
; CHECK:       INNER_LOOP_BREAK:
; CHECK-NEXT:    [[TMP59]] = extractelement <4 x i32> [[TMP14]], i64 2
; CHECK-NEXT:    call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
; CHECK-NEXT:    br label [[FLOW2:%.*]]
; CHECK:       bb62:
; CHECK-NEXT:    [[LOAD13:%.*]] = icmp ult i32 [[TMP16]], 271
; CHECK-NEXT:    [[TMP5:%.*]] = xor i1 [[LOAD13]], true
; CHECK-NEXT:    br i1 [[TMP5]], label [[INCREMENT_I]], label [[FLOW1:%.*]]
; CHECK:       bb64:
; CHECK-NEXT:    call void asm sideeffect "s_nop 42", "~{memory}"() #0
; CHECK-NEXT:    br label [[RETURN:%.*]]
; CHECK:       Flow:
; CHECK-NEXT:    [[TMP6]] = phi i32 [ [[TMP1]], [[FLOW1]] ], [ undef, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP7]] = phi i1 [ [[TMP2]], [[FLOW1]] ], [ false, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP8:%.*]] = phi i1 [ false, [[FLOW1]] ], [ true, [[LOOP_HEADER]] ]
; CHECK-NEXT:    br i1 [[TMP8]], label [[BB18]], label [[FLOW2]]
; CHECK:       INCREMENT_I:
; CHECK-NEXT:    [[INC_I]] = add i32 [[I]], 1
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1336
; CHECK-NEXT:    br label [[FLOW1]]
; CHECK:       END_ELSE_BLOCK:
; CHECK-NEXT:    [[I_FINAL:%.*]] = phi i32 [ [[TMP3]], [[FLOW2]] ]
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1337
; CHECK-NEXT:    [[CMP_END_ELSE_BLOCK:%.*]] = icmp eq i32 [[I_FINAL]], -1
; CHECK-NEXT:    br label [[FLOW3]]
; CHECK:       Flow3:
; CHECK-NEXT:    [[TMP9]] = phi i32 [ [[I_FINAL]], [[END_ELSE_BLOCK]] ], [ undef, [[FLOW2]] ]
; CHECK-NEXT:    [[TMP10:%.*]] = phi i1 [ [[CMP_END_ELSE_BLOCK]], [[END_ELSE_BLOCK]] ], [ true, [[FLOW2]] ]
; CHECK-NEXT:    br i1 [[TMP10]], label [[RETURN]], label [[LOOP_HEADER]]
; CHECK:       RETURN:
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x99
; CHECK-NEXT:    store volatile <2 x float> [[LOAD1]], <2 x float> addrspace(1)* undef, align 8
; CHECK-NEXT:    ret void
;
entry:
  %tmp = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
  %load1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i32 %tid
  %i.initial = load volatile i32, i32 addrspace(1)* %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 %tmp12
  %tmp14 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:
  %load13 = icmp ult i32 %tmp16, 271
  ;br i1 %load13, label %bb64, label %INCREMENT_I
  ; branching directly to the return avoids the bug
  br i1 %load13, label %RETURN, label %INCREMENT_I


bb64:
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, <2 x float> addrspace(1)* undef, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { convergent nounwind }
attributes #1 = { convergent nounwind readnone }