//===-- SIMachineFunctionInfo.cpp -------- SI Machine Function Info -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"

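// The wavefront size on SI is 64: each VGPR has 64 lanes, so a single VGPR
// can hold up to 64 spilled 32-bit SGPR values (one per lane).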
#define MAX_LANES 64

using namespace llvm;

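// SGPRs are spilled into the lanes of reserved VGPRs with v_writelane_b32 and
// restored with v_readlane_b32, which avoids a round trip through scratch
// memory. This flag exists so that path can be disabled for debugging.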
static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

// Pin the vtable to this file.
void SIMachineFunctionInfo::anchor() {}

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    TIDReg(AMDGPU::NoRegister),
    ScratchRSrcReg(AMDGPU::NoRegister),
    ScratchWaveOffsetReg(AMDGPU::NoRegister),
    PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),
    DispatchPtrUserSGPR(AMDGPU::NoRegister),
    QueuePtrUserSGPR(AMDGPU::NoRegister),
    KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),
    DispatchIDUserSGPR(AMDGPU::NoRegister),
    FlatScratchInitUserSGPR(AMDGPU::NoRegister),
    PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountXUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountYUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountZUserSGPR(AMDGPU::NoRegister),
    WorkGroupIDXSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDYSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
    WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
    PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
    PSInputAddr(0),
    ReturnsVoid(true),
    MaximumWorkGroupSize(0),
    DebuggerReservedVGPRCount(0),
    DebuggerWorkGroupIDStackObjectIndices({{0, 0, 0}}),
    DebuggerWorkItemIDStackObjectIndices({{0, 0, 0}}),
    LDSWaveSpillSize(0),
    PSInputEna(0),
    NumUserSGPRs(0),
    NumSystemSGPRs(0),
    HasSpilledSGPRs(false),
    HasSpilledVGPRs(false),
    HasNonSpillStackObjects(false),
    HasFlatInstructions(false),
    NumSpilledSGPRs(0),
    NumSpilledVGPRs(0),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const Function *F = MF.getFunction();

  PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);

  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

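  // Compute kernels (anything that is not a graphics shader) always need the
  // kernarg segment pointer and at least the X components of the work-group
  // and work-item IDs.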
  if (!AMDGPU::isShader(F->getCallingConv())) {
    KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  }

  if (F->hasFnAttribute("amdgpu-work-group-id-y") || ST.debuggerEmitPrologue())
    WorkGroupIDY = true;

  if (F->hasFnAttribute("amdgpu-work-group-id-z") || ST.debuggerEmitPrologue())
    WorkGroupIDZ = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-y") || ST.debuggerEmitPrologue())
    WorkItemIDY = true;

  if (F->hasFnAttribute("amdgpu-work-item-id-z") || ST.debuggerEmitPrologue())
    WorkItemIDZ = true;

  // X, XY, and XYZ are the only supported combinations, so make sure Y is
  // enabled if Z is.
  if (WorkItemIDZ)
    WorkItemIDY = true;

  bool MaySpill = ST.isVGPRSpillingEnabled(*F);
  bool HasStackObjects = FrameInfo->hasStackObjects();

  if (HasStackObjects || MaySpill)
    PrivateSegmentWaveByteOffset = true;

  if (ST.isAmdHsaOS()) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F->hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F->hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  }

  // We don't need to worry about accessing spills with flat instructions.
  // TODO: On VI where we must use flat for global, we should be able to omit
  // this if it is never used for generic access.
  if (HasStackObjects && ST.getGeneration() >= SISubtarget::SEA_ISLANDS &&
      ST.isAmdHsaOS())
    FlatScratchInit = true;

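  // Compute kernels may raise the group size limit through function
  // attributes; for shader calling conventions a work group is a single
  // wavefront.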
  if (AMDGPU::isCompute(F->getCallingConv()))
    MaximumWorkGroupSize = AMDGPU::getMaximumWorkGroupSize(*F);
  else
    MaximumWorkGroupSize = ST.getWavefrontSize();

  if (ST.debuggerReserveRegs())
    DebuggerReservedVGPRCount = 4;
}

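// The add* helpers below hand out the preloaded user SGPR arguments in
// consecutive registers, starting at the first unallocated user SGPR. The
// private segment buffer is a 128-bit resource descriptor, so it takes four
// SGPRs; the remaining arguments are 64-bit values taking two SGPRs each.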
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  PrivateSegmentBufferUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  NumUserSGPRs += 4;
  return PrivateSegmentBufferUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  DispatchPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  QueuePtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return QueuePtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  KernargSegmentPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return KernargSegmentPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  DispatchIDUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchIDUserSGPR;
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return FlatScratchInitUserSGPR;
}

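// Find (or create) the VGPR lane backing the SGPR spill slot at FrameIndex
// plus SubIdx dwords. Returns a SpilledReg with no VGPR set if SGPR-to-VGPR
// spilling is disabled or if no unused VGPR is available.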
SIMachineFunctionInfo::SpilledReg SIMachineFunctionInfo::getSpilledReg(
  MachineFunction *MF, unsigned FrameIndex, unsigned SubIdx) {
  if (!EnableSpillSGPRToVGPR)
    return SpilledReg();

  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  int64_t Offset = FrameInfo->getObjectOffset(FrameIndex);
  Offset += SubIdx * 4;

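  // One lane VGPR covers 64 dwords (64 * 4 bytes) of spill space: Offset / 4
  // is the spilled dword index, which selects the backing VGPR and the lane
  // within it. E.g. byte offset 260 is dword 65: lane VGPR 1, lane 1.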
  unsigned LaneVGPRIdx = Offset / (64 * 4);
  unsigned Lane = (Offset / 4) % 64;

  struct SpilledReg Spill;
  Spill.Lane = Lane;

  if (!LaneVGPRs.count(LaneVGPRIdx)) {
    unsigned LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass);

    // We have no VGPRs left for spilling SGPRs.
    if (LaneVGPR == AMDGPU::NoRegister)
      return Spill;

    LaneVGPRs[LaneVGPRIdx] = LaneVGPR;

    // Add this register as live-in to all blocks to avoid the machine verifier
    // complaining about use of an undefined physical register.
    for (MachineFunction::iterator BI = MF->begin(), BE = MF->end();
         BI != BE; ++BI) {
      BI->addLiveIn(LaneVGPR);
    }
  }

  Spill.VGPR = LaneVGPRs[LaneVGPRIdx];
  return Spill;
}

unsigned SIMachineFunctionInfo::getMaximumWorkGroupSize(
  const MachineFunction &MF) const {
  return MaximumWorkGroupSize;
}