; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -structurizecfg %s | FileCheck %s

; StructurizeCFG::orderNodes used an arbitrary and nonsensical sorting
; function which broke the basic backedge identification algorithm. It
; would start from RPO order, but then do a weird partial sort by loop
; depth, assuming the blocks of each loop are contiguous in that order.
; However, a block that is not part of a loop can appear in between the
; blocks of a loop, breaking the assumption of the sort.
;
; collectInfos must be done in RPO order. The actual structurization
; order is probably less important, but unless the loop headers are
; identified in RPO order, the wrong set of back edges is found.
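;
; A minimal sketch of that broken ordering (a hypothetical reduction,
; not the pass's actual code; assume RPOT is a ReversePostOrderTraversal
; of the function and LI is its LoopInfo):
;
;   // Start from RPO, then group blocks by loop depth alone.
;   std::vector<BasicBlock *> Order(RPOT.begin(), RPOT.end());
;   std::stable_sort(Order.begin(), Order.end(),
;                    [&](BasicBlock *A, BasicBlock *B) {
;                      return LI.getLoopDepth(A) < LI.getLoopDepth(B);
;                    });
;
; Grouping by depth like this is only sound if each loop's blocks are
; already contiguous in RPO. In the function below, a block that is not
; part of the loop sits between the loop's blocks, so the resulting
; order misidentifies the back edges.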

define amdgpu_kernel void @loop_backedge_misidentified(i32 addrspace(1)* %arg0) #0 {
; CHECK-LABEL: @loop_backedge_misidentified(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
; CHECK-NEXT: [[LOAD1:%.*]] = load volatile <2 x float>, <2 x float> addrspace(1)* undef
; CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[ARG0:%.*]], i32 [[TID]]
; CHECK-NEXT: [[I_INITIAL:%.*]] = load volatile i32, i32 addrspace(1)* [[GEP]], align 4
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: LOOP.HEADER:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[I_INITIAL]], [[ENTRY:%.*]] ], [ [[TMP10:%.*]], [[FLOW4:%.*]] ]
; CHECK-NEXT: call void asm sideeffect "s_nop 0x100b
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = load <4 x i32>, <4 x i32> addrspace(1)* [[TMP13]], align 16
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP14]], i64 0
; CHECK-NEXT: [[TMP16:%.*]] = and i32 [[TMP15]], 65535
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 1
; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[TMP17]], true
; CHECK-NEXT: br i1 [[TMP0]], label [[BB62:%.*]], label [[FLOW:%.*]]
; CHECK: Flow2:
; CHECK-NEXT: br label [[FLOW]]
; CHECK: bb18:
; CHECK-NEXT: [[TMP19:%.*]] = extractelement <2 x i32> [[TMP]], i64 0
; CHECK-NEXT: [[TMP22:%.*]] = lshr i32 [[TMP19]], 16
; CHECK-NEXT: [[TMP24:%.*]] = urem i32 [[TMP22]], 52
; CHECK-NEXT: [[TMP25:%.*]] = mul nuw nsw i32 [[TMP24]], 52
; CHECK-NEXT: br label [[INNER_LOOP:%.*]]
; CHECK: Flow3:
; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[TMP59:%.*]], [[INNER_LOOP_BREAK:%.*]] ], [ [[TMP7:%.*]], [[FLOW]] ]
; CHECK-NEXT: [[TMP2:%.*]] = phi i1 [ true, [[INNER_LOOP_BREAK]] ], [ [[TMP8:%.*]], [[FLOW]] ]
; CHECK-NEXT: br i1 [[TMP2]], label [[END_ELSE_BLOCK:%.*]], label [[FLOW4]]
; CHECK: INNER_LOOP:
; CHECK-NEXT: [[INNER_LOOP_J:%.*]] = phi i32 [ [[INNER_LOOP_J_INC:%.*]], [[INNER_LOOP]] ], [ [[TMP25]], [[BB18:%.*]] ]
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: [[INNER_LOOP_J_INC]] = add nsw i32 [[INNER_LOOP_J]], 1
; CHECK-NEXT: [[INNER_LOOP_CMP:%.*]] = icmp eq i32 [[INNER_LOOP_J]], 0
; CHECK-NEXT: br i1 [[INNER_LOOP_CMP]], label [[INNER_LOOP_BREAK]], label [[INNER_LOOP]]
; CHECK: INNER_LOOP_BREAK:
; CHECK-NEXT: [[TMP59]] = extractelement <4 x i32> [[TMP14]], i64 2
; CHECK-NEXT: call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
; CHECK-NEXT: br label [[FLOW3:%.*]]
; CHECK: bb62:
; CHECK-NEXT: [[LOAD13:%.*]] = icmp ult i32 [[TMP16]], 271
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[LOAD13]], true
; CHECK-NEXT: br i1 [[TMP3]], label [[INCREMENT_I:%.*]], label [[FLOW1:%.*]]
; CHECK: Flow1:
; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ [[INC_I:%.*]], [[INCREMENT_I]] ], [ undef, [[BB62]] ]
; CHECK-NEXT: [[TMP5:%.*]] = phi i1 [ true, [[INCREMENT_I]] ], [ false, [[BB62]] ]
; CHECK-NEXT: [[TMP6:%.*]] = phi i1 [ false, [[INCREMENT_I]] ], [ true, [[BB62]] ]
; CHECK-NEXT: br i1 [[TMP6]], label [[BB64:%.*]], label [[FLOW2:%.*]]
; CHECK: bb64:
; CHECK-NEXT: call void asm sideeffect "s_nop 42", "~{memory}"() #0
; CHECK-NEXT: br label [[FLOW2]]
; CHECK: Flow:
; CHECK-NEXT: [[TMP7]] = phi i32 [ [[TMP4]], [[FLOW2]] ], [ undef, [[LOOP_HEADER]] ]
; CHECK-NEXT: [[TMP8]] = phi i1 [ [[TMP5]], [[FLOW2]] ], [ false, [[LOOP_HEADER]] ]
; CHECK-NEXT: [[TMP9:%.*]] = phi i1 [ false, [[FLOW2]] ], [ true, [[LOOP_HEADER]] ]
; CHECK-NEXT: br i1 [[TMP9]], label [[BB18]], label [[FLOW3]]
; CHECK: INCREMENT_I:
; CHECK-NEXT: [[INC_I]] = add i32 [[I]], 1
; CHECK-NEXT: call void asm sideeffect "s_nop 0x1336
; CHECK-NEXT: br label [[FLOW1]]
; CHECK: END_ELSE_BLOCK:
; CHECK-NEXT: [[I_FINAL:%.*]] = phi i32 [ [[TMP1]], [[FLOW3]] ]
; CHECK-NEXT: call void asm sideeffect "s_nop 0x1337
; CHECK-NEXT: [[CMP_END_ELSE_BLOCK:%.*]] = icmp eq i32 [[I_FINAL]], -1
; CHECK-NEXT: br label [[FLOW4]]
; CHECK: Flow4:
; CHECK-NEXT: [[TMP10]] = phi i32 [ [[I_FINAL]], [[END_ELSE_BLOCK]] ], [ undef, [[FLOW3]] ]
; CHECK-NEXT: [[TMP11:%.*]] = phi i1 [ [[CMP_END_ELSE_BLOCK]], [[END_ELSE_BLOCK]] ], [ true, [[FLOW3]] ]
; CHECK-NEXT: br i1 [[TMP11]], label [[RETURN:%.*]], label [[LOOP_HEADER]]
; CHECK: RETURN:
; CHECK-NEXT: call void asm sideeffect "s_nop 0x99
; CHECK-NEXT: store volatile <2 x float> [[LOAD1]], <2 x float> addrspace(1)* undef, align 8
; CHECK-NEXT: ret void
;
entry:
  %tmp = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
  %load1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i32 %tid
  %i.initial = load volatile i32, i32 addrspace(1)* %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 %tmp12
  %tmp14 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:
  %load13 = icmp ult i32 %tmp16, 271
  br i1 %load13, label %bb64, label %INCREMENT_I

bb64:
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, <2 x float> addrspace(1)* undef, align 8
  ret void
}

; The same function, except the break to the return block goes directly
; to the return, which managed to hide the bug.
; FIXME: Merge variant from backedge-id-bug-xfail

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { convergent nounwind }
attributes #1 = { convergent nounwind readnone }