AMDGPU: Fix using incorrect private resource with no allocation

It's possible to have a use of the private resource descriptor or
scratch wave offset registers even though there are no allocated
stack objects. This would result in continuing to use the maximum
number of reserved registers. This could exceed the number of SGPRs
available on VI, or violate the SGPR limit requested by
the function attributes.
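
For example (an illustrative sketch mirroring the store_to_undef test
updated below): a function with no allocas can still access private
(address space 0) memory, so the resource registers must still be
initialized, but should no longer pin the maximum reserved range:

  define void @store_to_undef() {
    ; Private access with no allocated stack objects.
    store volatile i32 0, i32* undef
    ret void
  }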

llvm-svn: 285435
diff --git a/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
index 404b125..aba0b63 100644
--- a/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll
@@ -1,9 +1,16 @@
 ; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck %s
 
-; CHECK-LABEL: {{^}}max_18_sgprs:
+; CHECK-LABEL: {{^}}max_14_sgprs:
+
+; FIXME: Should be able to skip this copying of the private segment
+; buffer because all the SGPR spills are to VGPRs.
+
+; CHECK: s_mov_b64 s[6:7], s[2:3]
+; CHECK: s_mov_b64 s[4:5], s[0:1]
+
 ; CHECK: SGPRBlocks: 1
-; CHECK: NumSGPRsForWavesPerEU: 13
-define void @max_18_sgprs(i32 addrspace(1)* %out1,
+; CHECK: NumSGPRsForWavesPerEU: 14
+define void @max_14_sgprs(i32 addrspace(1)* %out1,
                           i32 addrspace(1)* %out2,
                           i32 addrspace(1)* %out3,
                           i32 addrspace(1)* %out4,
@@ -14,4 +21,102 @@
   store i32 %four, i32 addrspace(1)* %out4
   ret void
 }
-attributes #0 = {"amdgpu-num-sgpr"="18"}
+
+; private resource: 4
+; scratch wave offset: 1
+; workgroup ids: 3
+; dispatch id: 2
+; queue ptr: 2
+; flat scratch init: 2
+; ---------------------
+; total: 14
+
+; + reserved vcc, flat_scratch = 18
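+; (4 + 1 + 3 + 2 + 2 + 2 = 14 input SGPRs; 14 + 2 + 2 = 18 with the
+; reserved registers)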
+
+; Because we can't handle re-using the last few input registers as the
+; special vcc etc. registers (or decide not to use the unused features
+; once the number of registers is frozen), this ends up using more
+; registers than expected.
+
+; ALL-LABEL: {{^}}max_12_sgprs_14_input_sgprs:
+; TOSGPR: SGPRBlocks: 2
+; TOSGPR: NumSGPRsForWavesPerEU: 18
+
+; TOSMEM: s_mov_b64 s[6:7], s[2:3]
+; TOSMEM: s_mov_b32 s9, s13
+; TOSMEM: s_mov_b64 s[4:5], s[0:1]
+
+; TOSMEM: SGPRBlocks: 2
+; TOSMEM: NumSGPRsForWavesPerEU: 18
+define void @max_12_sgprs_14_input_sgprs(i32 addrspace(1)* %out1,
+                                        i32 addrspace(1)* %out2,
+                                        i32 addrspace(1)* %out3,
+                                        i32 addrspace(1)* %out4,
+                                        i32 %one, i32 %two, i32 %three, i32 %four) #2 {
+  store volatile i32 0, i32* undef
+  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
+  store volatile i32 %x.0, i32 addrspace(1)* undef
+  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
+  store volatile i32 %x.1, i32 addrspace(1)* undef
+  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
+  store volatile i32 %x.2, i32 addrspace(1)* undef
+  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
+  store volatile i64 %x.3, i64 addrspace(1)* undef
+  %x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
+  store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
+  %x.5 = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
+  store volatile i8 addrspace(2)* %x.5, i8 addrspace(2)* addrspace(1)* undef
+
+  store i32 %one, i32 addrspace(1)* %out1
+  store i32 %two, i32 addrspace(1)* %out2
+  store i32 %three, i32 addrspace(1)* %out3
+  store i32 %four, i32 addrspace(1)* %out4
+  ret void
+}
+
+; ALL-LABEL: max_12_sgprs_12_input_sgprs{{$}}
+; Make sure copies for the input buffer are not clobbered. This requires
+; swapping the order in which the registers are copied from what
+; normally happens.
+
+; TOSMEM: s_mov_b64 s[6:7], s[2:3]
+; TOSMEM: s_mov_b64 s[4:5], s[0:1]
+; TOSMEM: s_mov_b32 s3, s11
+
+; ALL: SGPRBlocks: 1
+; ALL: NumSGPRsForWavesPerEU: 16
+define void @max_12_sgprs_12_input_sgprs(i32 addrspace(1)* %out1,
+                                        i32 addrspace(1)* %out2,
+                                        i32 addrspace(1)* %out3,
+                                        i32 addrspace(1)* %out4,
+                                        i32 %one, i32 %two, i32 %three, i32 %four) #2 {
+  store volatile i32 0, i32* undef
+  %x.0 = call i32 @llvm.amdgcn.workgroup.id.x()
+  store volatile i32 %x.0, i32 addrspace(1)* undef
+  %x.1 = call i32 @llvm.amdgcn.workgroup.id.y()
+  store volatile i32 %x.1, i32 addrspace(1)* undef
+  %x.2 = call i32 @llvm.amdgcn.workgroup.id.z()
+  store volatile i32 %x.2, i32 addrspace(1)* undef
+  %x.3 = call i64 @llvm.amdgcn.dispatch.id()
+  store volatile i64 %x.3, i64 addrspace(1)* undef
+  %x.4 = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
+  store volatile i8 addrspace(2)* %x.4, i8 addrspace(2)* addrspace(1)* undef
+
+  store i32 %one, i32 addrspace(1)* %out1
+  store i32 %two, i32 addrspace(1)* %out2
+  store i32 %three, i32 addrspace(1)* %out3
+  store i32 %four, i32 addrspace(1)* %out4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workgroup.id.x() #1
+declare i32 @llvm.amdgcn.workgroup.id.y() #1
+declare i32 @llvm.amdgcn.workgroup.id.z() #1
+declare i64 @llvm.amdgcn.dispatch.id() #1
+declare i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr() #1
+declare i8 addrspace(2)* @llvm.amdgcn.queue.ptr() #1
+
+attributes #0 = { nounwind "amdgpu-num-sgpr"="14" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "amdgpu-num-sgpr"="12" }
+attributes #3 = { nounwind "amdgpu-num-sgpr"="11" }
diff --git a/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll b/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll
index c24cfef..2894730 100644
--- a/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll
+++ b/llvm/test/CodeGen/AMDGPU/private-access-no-objects.ll
@@ -1,6 +1,17 @@
-; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=OPTNONE %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=OPT %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=iceland -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=OPT %s
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=OPTNONE %s
+
+; There are no stack objects, but there is still a private memory
+; access. The private access registers need to be correctly initialized
+; anyway, and shifted down to the end of the used registers.
 
 ; GCN-LABEL: {{^}}store_to_undef:
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 
 ; -O0 should assume spilling, so the input scratch resource descriptor
 ; should be used directly without any copies.
@@ -13,18 +24,30 @@
 }
 
 ; GCN-LABEL: {{^}}store_to_inttoptr:
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 define void @store_to_inttoptr() #0 {
  store volatile i32 0, i32* inttoptr (i32 123 to i32*)
  ret void
 }
 
 ; GCN-LABEL: {{^}}load_from_undef:
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 define void @load_from_undef() #0 {
   %ld = load volatile i32, i32* undef
   ret void
 }
 
 ; GCN-LABEL: {{^}}load_from_inttoptr:
+; OPT-DAG: s_mov_b64 s{{\[}}[[RSRC_LO:[0-9]+]]:{{[0-9]+\]}}, s[0:1]
+; OPT-DAG: s_mov_b64 s{{\[[0-9]+}}:[[RSRC_HI:[0-9]+]]{{\]}}, s[2:3]
+; OPT-DAG: s_mov_b32 [[SOFFSET:s[0-9]+]], s7{{$}}
+; OPT: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[}}[[RSRC_LO]]:[[RSRC_HI]]{{\]}}, [[SOFFSET]] offen{{$}}
 define void @load_from_inttoptr() #0 {
   %ld = load volatile i32, i32* inttoptr (i32 123 to i32*)
   ret void
diff --git a/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll b/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
index 63b1b71..ac06c26 100644
--- a/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-sgpr-spill.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck %s
 
 ; These tests check that the compiler won't crash when it needs to spill
 ; SGPRs.
diff --git a/llvm/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll b/llvm/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll
index cc4b6bc..ff94298 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-alloc-sgpr-init-bug.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=TONGA %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=TONGA %s
 
 ; On Tonga and Iceland, limited SGPR availability means care must be taken to
 ; allocate scratch registers correctly. Check that this test compiles without
diff --git a/llvm/test/CodeGen/AMDGPU/spill-m0.ll b/llvm/test/CodeGen/AMDGPU/spill-m0.ll
index 2f99efd..74e33d1 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-m0.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-m0.ll
@@ -6,6 +6,8 @@
 ; XXX - Why does it like to use vcc?
 
 ; GCN-LABEL: {{^}}spill_m0:
+; TOSMEM: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+
 ; GCN: s_cmp_lg_u32
 
 ; TOVGPR: s_mov_b32 vcc_hi, m0
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll
index be49b69..14c279b 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm.ll
@@ -459,7 +459,7 @@
   br i1 %cc, label %if, label %else
 
 if:
-  store volatile <4 x float> %dtex, <4 x float>* undef
+  store volatile <4 x float> %dtex, <4 x float> addrspace(1)* undef
   unreachable
 
 else: