; XFAIL: *
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -structurizecfg -verify-region-info %s

; FIXME: Merge into backedge-id-bug
; Variant which has an issue with region construction
; Reproducer for a StructurizeCFG region-construction failure: the outer loop's
; backedge (END_ELSE_BLOCK -> LOOP.HEADER) is misidentified when the loop body
; contains this particular diamond + inner-loop shape. The inline asm blobs are
; opaque side effects that keep the blocks from being simplified away; do not
; rename values or reshape the CFG — the exact shape is what triggers the bug.
define amdgpu_kernel void @loop_backedge_misidentified_alt(i32 addrspace(1)* %arg0) #0 {
entry:
  %tmp = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
  %load1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i32 %tid
  %i.initial = load volatile i32, i32 addrspace(1)* %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:                                      ; outer loop header; %i is the induction value
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 %tmp12
  %tmp14 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:                                             ; "then" side of the diamond; sets up the inner loop
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:                                       ; self-looping inner loop nested in the outer one
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:                                             ; "else" side of the diamond
  %load13 = icmp ult i32 %tmp16, 271
  ;br i1 %load13, label %bb64, label %INCREMENT_I
  ; branching directly to the return avoids the bug
  br i1 %load13, label %RETURN, label %INCREMENT_I


bb64:                                             ; unreachable — kept from the original variant (see commented branch above)
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:                                   ; merge block; carries the outer-loop backedge
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, <2 x float> addrspace(1)* undef, align 8
  ret void
}
; Workitem-id intrinsic used to form the per-thread GEP in the kernel above.
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { convergent nounwind }
attributes #1 = { convergent nounwind readnone }