AMDGPU: Don't use MUBUF vaddr if address may overflow

Effectively revert r263964. Before that change, this was not
allowed unless vaddr was known to be positive.

llvm-svn: 318240
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index b98428a..9bc4643 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -101,6 +101,9 @@
   ret void
 }
 
+; FIXME: Should be able to see that this can use vaddr, but the
+; FrameIndex is hidden behind a CopyFromReg in the second block.
+
 ; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_nonentry_block:
 ; GCN: s_sub_u32 [[SUB_OFFSET:s[0-9]+]], s5, s4
 ; GCN: v_lshr_b32_e64 [[SHIFT:v[0-9]+]], [[SUB_OFFSET]], 6
@@ -108,7 +111,7 @@
 ; GCN: s_and_saveexec_b64
 
 ; GCN: v_add_i32_e32 v0, vcc, 4, [[ADD]]
-; GCN: buffer_load_dword v1, v1, s[0:3], s4 offen offset:4
+; GCN: buffer_load_dword v1, v0, s[0:3], s4 offen{{$}}
 ; GCN: ds_write_b32
 define void @void_func_byval_struct_i8_i32_ptr_nonentry_block({ i8, i32 }* byval %arg0, i32 %arg2) #0 {
   %cmp = icmp eq i32 %arg2, 0
@@ -195,4 +198,23 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}alloca_ptr_nonentry_block:
+; GCN: s_and_saveexec_b64
+; GCN: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s5 offset:12
+define void @alloca_ptr_nonentry_block(i32 %arg0) #0 {
+  %alloca0 = alloca { i8, i32 }, align 4
+  %cmp = icmp eq i32 %arg0, 0
+  br i1 %cmp, label %bb, label %ret
+
+bb:
+  %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %alloca0, i32 0, i32 0
+  %gep1 = getelementptr inbounds { i8, i32 }, { i8, i32 }* %alloca0, i32 0, i32 1
+  %load1 = load volatile i32, i32* %gep1
+  store volatile i32* %gep1, i32* addrspace(3)* undef
+  br label %ret
+
+ret:
+  ret void
+}
+
 attributes #0 = { nounwind }