AMDGPU: Run pointer optimization passes
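
Run the IR pointer optimization passes in the AMDGPU codegen pipeline.
Test fallout:

- local-memory-two-objects.ll: constant LDS offsets are now kept
  separate from the variable index, so CI folds them into ds_write
  offset:16 and ds_read2_b32 offset0/offset1 immediates, while SI
  materializes them with v_add/v_sub.
- min.ll: load %b from %bptr instead of reloading %aptr; the duplicate
  load would otherwise now fold to a single load.
- predicates.ll: pin -spec-exec-max-speculation-cost=0 so IR-level
  speculation does not hoist the branch bodies these checks rely on.
- setcc-opt.ll: drop -mcpu=SI from the first RUN line;
  sext_bool_icmp_{eq,ne}_1 and sext_bool_icmp_ne_k now fold to
  constants, so move them to the end of the file and just check the
  constant store.
- uniform-cfg.ll: give icmp_users_different_blocks two distinct
  conditions so the second branch is not statically foldable.

As an illustrative sketch (names hypothetical, not code from this
change), the kind of rewrite involved splits a constant offset out of
a GEP index so codegen can fold it into the DS instruction's immediate
offset field:

  @lds = internal addrspace(3) global [8 x i32] undef, align 4

  ; before: the constant offset is buried in the index arithmetic
  %sum = add i32 %idx, 4
  %p = getelementptr [8 x i32], [8 x i32] addrspace(3)* @lds, i32 0, i32 %sum
  %v = load i32, i32 addrspace(3)* %p

  ; after: the +4 elements (16 bytes) become a trailing GEP, which
  ; selects as ds_read_b32 ... offset:16
  %base = getelementptr [8 x i32], [8 x i32] addrspace(3)* @lds, i32 0, i32 %idx
  %p2 = getelementptr i32, i32 addrspace(3)* %base, i32 4
  %v2 = load i32, i32 addrspace(3)* %p2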

llvm-svn: 272736
diff --git a/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll b/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll
index 969769b..cec334f 100644
--- a/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-memory-two-objects.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=EG %s
-; RUN: llc < %s -march=amdgcn -mcpu=verde -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=SI %s
-; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
 @local_memory_two_objects.local_mem0 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
 @local_memory_two_objects.local_mem1 = internal unnamed_addr addrspace(3) global [4 x i32] undef, align 4
@@ -12,15 +12,14 @@
 ; GCN: .long 47180
 ; GCN-NEXT: .long 32900
 
-; EG: {{^}}local_memory_two_objects:
+; FUNC-LABEL: {{^}}local_memory_two_objects:
 
 ; We would like to check the lds writes are using different
 ; addresses, but due to variations in the scheduler, we can't do
 ; this consistently on evergreen GPUs.
 ; EG: LDS_WRITE
 ; EG: LDS_WRITE
-; GCN: ds_write_b32 {{v[0-9]*}}, v[[ADDRW:[0-9]*]]
-; GCN-NOT: ds_write_b32 {{v[0-9]*}}, v[[ADDRW]]
 
 ; GROUP_BARRIER must be the last instruction in a clause
 ; EG: GROUP_BARRIER
@@ -30,9 +29,29 @@
 ; constant offsets.
 ; EG: LDS_READ_RET {{[*]*}} OQAP, {{PV|T}}[[ADDRR:[0-9]*\.[XYZW]]]
 ; EG-NOT: LDS_READ_RET {{[*]*}} OQAP, T[[ADDRR]]
-; SI: v_add_i32_e32 [[SIPTR:v[0-9]+]], vcc, 16, v{{[0-9]+}}
-; SI: ds_read_b32 {{v[0-9]+}}, [[SIPTR]]
-; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:4
+
+; GCN: v_lshlrev_b32_e32 [[ADDRW:v[0-9]+]], 2, v0
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*}} offset:16
+; CI-DAG: ds_write_b32 [[ADDRW]], {{v[0-9]*$}}
+
+; SI: v_add_i32_e32 [[ADDRW_OFF:v[0-9]+]], vcc, 16, [[ADDRW]]
+
+; SI-DAG: ds_write_b32 [[ADDRW]],
+; SI-DAG: ds_write_b32 [[ADDRW_OFF]],
+
+; GCN: s_barrier
+
+; SI-DAG: v_sub_i32_e32 [[SUB0:v[0-9]+]], vcc, 28, [[ADDRW]]
+; SI-DAG: v_sub_i32_e32 [[SUB1:v[0-9]+]], vcc, 12, [[ADDRW]]
+
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB0]]
+; SI-DAG: ds_read_b32 v{{[0-9]+}}, [[SUB1]]
+
+; CI: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, 0, [[ADDRW]]
+; CI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, [[SUB]] offset0:3 offset1:7
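+; (ds_read2_b32 offsets are scaled by the 4 byte element size, so
+; offset0:3 offset1:7 are the same 12 and 28 byte offsets the SI
+; checks above compute with v_sub.)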
+
 define void @local_memory_two_objects(i32 addrspace(1)* %out) {
 entry:
   %x.i = call i32 @llvm.r600.read.tidig.x() #0
diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll
index f9355c5..5d64a15 100644
--- a/llvm/test/CodeGen/AMDGPU/min.ll
+++ b/llvm/test/CodeGen/AMDGPU/min.ll
@@ -223,7 +223,7 @@
 ; EG: MIN_UINT
 define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
   %a = load i32, i32 addrspace(1)* %aptr, align 4
-  %b = load i32, i32 addrspace(1)* %aptr, align 4
+  %b = load i32, i32 addrspace(1)* %bptr, align 4
   %cmp = icmp ult i32 %a, %b
   %val = select i1 %cmp, i32 %a, i32 %b
   store i32 %val, i32 addrspace(1)* %out, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/predicates.ll b/llvm/test/CodeGen/AMDGPU/predicates.ll
index 0ce74d9..79dee61 100644
--- a/llvm/test/CodeGen/AMDGPU/predicates.ll
+++ b/llvm/test/CodeGen/AMDGPU/predicates.ll
@@ -1,27 +1,27 @@
-; RUN: llc < %s -march=r600 -mattr=disable-irstructurizer -mcpu=redwood | FileCheck %s
+; RUN: llc -spec-exec-max-speculation-cost=0 -march=r600 -mattr=disable-irstructurizer -mcpu=redwood < %s | FileCheck %s
 
 ; These tests make sure the compiler is optimizing branches using predicates
 ; when it is legal to do so.
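+; The RUN line zeroes the speculation threshold so that the IR-level
+; speculative execution pass cannot hoist the branch bodies and defeat
+; the predication patterns these checks look for.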
 
-; CHECK: {{^}}simple_if:
+; CHECK-LABEL: {{^}}simple_if:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSHL * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 define void @simple_if(i32 addrspace(1)* %out, i32 %in) {
 entry:
-  %0 = icmp sgt i32 %in, 0
-  br i1 %0, label %IF, label %ENDIF
+  %cmp0 = icmp sgt i32 %in, 0
+  br i1 %cmp0, label %IF, label %ENDIF
 
 IF:
-  %1 = shl i32 %in, 1
+  %tmp1 = shl i32 %in, 1
   br label %ENDIF
 
 ENDIF:
-  %2 = phi i32 [ %in, %entry ], [ %1, %IF ]
-  store i32 %2, i32 addrspace(1)* %out
+  %tmp2 = phi i32 [ %in, %entry ], [ %tmp1, %IF ]
+  store i32 %tmp2, i32 addrspace(1)* %out
   ret void
 }
 
-; CHECK: {{^}}simple_if_else:
+; CHECK-LABEL: {{^}}simple_if_else:
 ; CHECK: PRED_SET{{[EGN][ET]*}}_INT * Pred,
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
 ; CHECK: LSH{{[LR] \* T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, 1, Pred_sel
@@ -44,7 +44,7 @@
   ret void
 }
 
-; CHECK: {{^}}nested_if:
+; CHECK-LABEL: {{^}}nested_if:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP
@@ -71,7 +71,7 @@
   ret void
 }
 
-; CHECK: {{^}}nested_if_else:
+; CHECK-LABEL: {{^}}nested_if_else:
 ; CHECK: ALU_PUSH_BEFORE
 ; CHECK: JUMP
 ; CHECK: POP
diff --git a/llvm/test/CodeGen/AMDGPU/setcc-opt.ll b/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
index 405640c..d2c57a8 100644
--- a/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=GCN -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN -check-prefix=FUNC %s
 ; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
@@ -36,38 +36,6 @@
   ret void
 }
 
-; This really folds away to false
-; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
-; GCN: v_cmp_eq_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp eq i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp eq i32 %ext, 1
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
-; This really folds away to true
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
-; GCN: v_cmp_ne_i32_e32 vcc,
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc
-; GCN-NEXT: v_cmp_ne_i32_e32 vcc, 1, [[TMP]]{{$}}
-; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1,
-; GCN-NEXT: buffer_store_byte [[TMP]]
-; GCN-NEXT: s_endpgm
-define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp ne i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp ne i32 %ext, 1
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
 ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_neg1:
 ; GCN-NOT: v_cmp
 ; GCN: v_cmp_eq_i32_e32 vcc,
@@ -177,24 +145,6 @@
   ret void
 }
 
-; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
-; SI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc
-; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
-; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30
-; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]]
-; GCN: v_cmp_ne_i32_e32 vcc, 2, [[VB]]{{$}}
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
-; GCN: buffer_store_byte
-; GCN: s_endpgm
-define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
-  %icmp0 = icmp ne i32 %a, %b
-  %ext = sext i1 %icmp0 to i32
-  %icmp1 = icmp ne i32 %ext, 2
-  store i1 %icmp1, i1 addrspace(1)* %out
-  ret void
-}
-
 ; FUNC-LABEL: {{^}}cmp_zext_k_i8max:
 ; SI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; VI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
@@ -294,3 +244,40 @@
   store i1 %icmp1, i1 addrspace(1)* %out
   ret void
 }
+
+; FIXME: These cases should really be able to fold to true/false in
+; DAGCombiner
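+; (sext i1 only ever produces 0 or -1, so comparing the result for
+; equality with 1 or with 2 has a statically known answer.)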
+
+; This really folds away to false
+; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp eq i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp eq i32 %ext, 1
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp ne i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp ne i32 %ext, 1
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}sext_bool_icmp_ne_k:
+; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 1{{$}}
+; GCN: buffer_store_byte [[K]]
+define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
+  %icmp0 = icmp ne i32 %a, %b
+  %ext = sext i1 %icmp0 to i32
+  %icmp1 = icmp ne i32 %ext, 2
+  store i1 %icmp1, i1 addrspace(1)* %out
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index dfc82f8..ac9e2b5 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -259,16 +259,17 @@
 ; SI: buffer_store
 ; SI: {{^}}[[EXIT]]:
 ; SI: s_endpgm
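+; Use two distinct conditions so that the second branch does not have a
+; statically known direction once the IR optimization passes run.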
-define void @icmp_users_different_blocks(i32 %cond, i32 addrspace(1)* %out) {
+define void @icmp_users_different_blocks(i32 %cond0, i32 %cond1, i32 addrspace(1)* %out) {
 bb:
   %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
-  %tmp1 = icmp sgt i32 %cond, 0
-  br i1 %tmp1, label %bb2, label %bb9
+  %cmp0 = icmp sgt i32 %cond0, 0
+  %cmp1 = icmp sgt i32 %cond1, 0
+  br i1 %cmp0, label %bb2, label %bb9
 
 bb2:                                              ; preds = %bb
-  %tmp2 = sext i1 %tmp1 to i32
+  %tmp2 = sext i1 %cmp1 to i32
   %tmp3 = add i32 %tmp2, %tmp
-  br i1 %tmp1, label %bb9, label %bb7
+  br i1 %cmp1, label %bb9, label %bb7
 
 bb7:                                              ; preds = %bb5
   store i32 %tmp3, i32 addrspace(1)* %out