AMDGPU: Run pointer optimization passes

The predicates.ll test is updated for the new IR passes: it now passes
-spec-exec-max-speculation-cost=0 so SpeculativeExecution does not hoist
the cheap instructions out of the conditional blocks these checks rely on,
switches the per-function checks to CHECK-LABEL, and names the previously
unnamed values.

llvm-svn: 272736
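
For reference (not part of the patch): a minimal sketch of the interaction the
new RUN-line flag works around, assuming the default speculation-cost
threshold. The file and function names below are illustrative; the body
mirrors simple_if from the test.

  ; e.g.: opt -S -speculative-execution sketch.ll
  ; With the default -spec-exec-max-speculation-cost, the cheap shl in %if is
  ; hoisted into %entry, leaving %if with nothing but its terminator.  With
  ; -spec-exec-max-speculation-cost=0 nothing is hoisted and the if/endif
  ; structure the PRED_SET checks rely on is preserved.
  define i32 @speculation_sketch(i32 %in) {
  entry:
    %cmp = icmp sgt i32 %in, 0
    br i1 %cmp, label %if, label %endif

  if:                                     ; single predecessor, cheap body
    %shl = shl i32 %in, 1                 ; speculation candidate
    br label %endif

  endif:
    %r = phi i32 [ %in, %entry ], [ %shl, %if ]
    ret i32 %r
  }
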
diff --git a/llvm/test/CodeGen/AMDGPU/predicates.ll b/llvm/test/CodeGen/AMDGPU/predicates.ll
index 0ce74d9..79dee61 100644
--- a/llvm/test/CodeGen/AMDGPU/predicates.ll
+++ b/llvm/test/CodeGen/AMDGPU/predicates.ll
@@ -1,27 +1,27 @@
-; RUN: llc < %s -march=r600 -mattr=disable-irstructurizer -mcpu=redwood | FileCheck %s
+; RUN: llc -spec-exec-max-speculation-cost=0 -march=r600 -mattr=disable-irstructurizer -mcpu=redwood < %s | FileCheck %s
 
 ; These tests make sure the compiler is optimizing branches using predicates
 ; when it is legal to do so.
 
-; CHECK: {{^}}simple_if:
+; CHECK-LABEL: {{^}}simple_if:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = icmp sgt i32 %in, 0
-  br i1 %0, label %IF, label %ENDIF
+  %cmp0 = icmp sgt i32 %in, 0
+  br i1 %cmp0, label %IF, label %ENDIF
 
 IF:
-  %1 = shl i32 %in, 1
+  %tmp1 = shl i32 %in, 1
   br label %ENDIF
 
 ENDIF:
-  %2 = phi i32 [ %in, %entry ], [ %1, %IF ]
-  store i32 %2, i32 addrspace(1)* %out
+  %tmp2 = phi i32 [ %in, %entry ], [ %tmp1, %IF ]
+  store i32 %tmp2, i32 addrspace(1)* %out
   ret void
 }
 
-; CHECK: {{^}}simple_if_else:
+; CHECK-LABEL: {{^}}simple_if_else:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
@@ -44,7 +44,7 @@
   ret void
 }
 
-; CHECK: {{^}}nested_if:
+; CHECK-LABEL: {{^}}nested_if:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP
@@ -71,7 +71,7 @@
   ret void
 }
 
-; CHECK: {{^}}nested_if_else:
+; CHECK-LABEL: {{^}}nested_if_else:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP