//===-- SIMachineFunctionInfo.cpp -------- SI Machine Function Info -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

#define MAX_LANES 64

using namespace llvm;

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    TIDReg(AMDGPU::NoRegister),
    ScratchRSrcReg(AMDGPU::NoRegister),
    ScratchWaveOffsetReg(AMDGPU::NoRegister),
    FrameOffsetReg(AMDGPU::NoRegister),
    StackPtrOffsetReg(AMDGPU::NoRegister),
    PrivateSegmentBufferUserSGPR(AMDGPU::NoRegister),
    DispatchPtrUserSGPR(AMDGPU::NoRegister),
    QueuePtrUserSGPR(AMDGPU::NoRegister),
    KernargSegmentPtrUserSGPR(AMDGPU::NoRegister),
    DispatchIDUserSGPR(AMDGPU::NoRegister),
    FlatScratchInitUserSGPR(AMDGPU::NoRegister),
    PrivateSegmentSizeUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountXUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountYUserSGPR(AMDGPU::NoRegister),
    GridWorkGroupCountZUserSGPR(AMDGPU::NoRegister),
    WorkGroupIDXSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDYSystemSGPR(AMDGPU::NoRegister),
    WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
    WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
    PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
    PSInputAddr(0),
    PSInputEnable(0),
    ReturnsVoid(true),
    FlatWorkGroupSizes(0, 0),
    WavesPerEU(0, 0),
    DebuggerWorkGroupIDStackObjectIndices({{0, 0, 0}}),
    DebuggerWorkItemIDStackObjectIndices({{0, 0, 0}}),
    LDSWaveSpillSize(0),
    NumUserSGPRs(0),
    NumSystemSGPRs(0),
    HasSpilledSGPRs(false),
    HasSpilledVGPRs(false),
    HasNonSpillStackObjects(false),
    NumSpilledSGPRs(0),
    NumSpilledVGPRs(0),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const Function *F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(*F);
  WavesPerEU = ST.getWavesPerEU(*F);

  if (!isEntryFunction()) {
    // Non-entry functions have no special inputs for now, other than the
    // registers required for scratch access.
    ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
    ScratchWaveOffsetReg = AMDGPU::SGPR4;
    FrameOffsetReg = AMDGPU::SGPR5;
    StackPtrOffsetReg = AMDGPU::SGPR32;
    return;
  }

  CallingConv::ID CC = F->getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
    KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(*F);
  }

  if (ST.debuggerEmitPrologue()) {
    // Enable everything.
    WorkGroupIDY = true;
    WorkGroupIDZ = true;
    WorkItemIDY = true;
    WorkItemIDZ = true;
  } else {
    if (F->hasFnAttribute("amdgpu-work-group-id-y"))
      WorkGroupIDY = true;

    if (F->hasFnAttribute("amdgpu-work-group-id-z"))
      WorkGroupIDZ = true;

    if (F->hasFnAttribute("amdgpu-work-item-id-y"))
      WorkItemIDY = true;

    if (F->hasFnAttribute("amdgpu-work-item-id-z"))
      WorkItemIDZ = true;
  }

  // X, XY, and XYZ are the only supported combinations, so make sure Y is
  // enabled if Z is.
  if (WorkItemIDZ)
    WorkItemIDY = true;

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  bool MaySpill = ST.isVGPRSpillingEnabled(*F);
  bool HasStackObjects = FrameInfo.hasStackObjects() || FrameInfo.hasCalls();

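  // The scratch wave offset (and, for code object v2, the scratch resource
  // descriptor) is only requested below when the function has stack objects,
  // makes calls, or VGPR spilling is enabled.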
  if (HasStackObjects || MaySpill) {
    PrivateSegmentWaveByteOffset = true;

    // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
        (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
      PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
  }

  if (ST.isAmdCodeObjectV2(MF)) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F->hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F->hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F->hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  } else if (ST.isMesaGfxShader(MF)) {
    if (HasStackObjects || MaySpill)
      ImplicitBufferPtr = true;
  }

  // We don't need to worry about accessing spills with flat instructions.
  // TODO: On VI where we must use flat for global, we should be able to omit
  // this if it is never used for generic access.
  if (HasStackObjects && ST.hasFlatAddressSpace() && ST.isAmdHsaOS())
    FlatScratchInit = true;
}

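// Each of the add* helpers below reserves the next unallocated user SGPRs for
// a preloaded input: getMatchingSuperReg returns the register tuple whose
// first subregister is the next free user SGPR, and NumUserSGPRs is advanced
// by the number of SGPRs that input occupies.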
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  PrivateSegmentBufferUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  NumUserSGPRs += 4;
  return PrivateSegmentBufferUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  DispatchPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  QueuePtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return QueuePtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  KernargSegmentPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return KernargSegmentPtrUserSGPR;
}

unsigned SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  DispatchIDUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return DispatchIDUserSGPR;
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  FlatScratchInitUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return FlatScratchInitUserSGPR;
}

unsigned SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ImplicitBufferPtrUserSGPR = TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);
  NumUserSGPRs += 2;
  return ImplicitBufferPtrUserSGPR;
}

/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
                                                    int FI) {
  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned WaveSize = ST.getWavefrontSize();

  unsigned Size = FrameInfo.getObjectSize(FI);
  assert(Size >= 4 && Size <= 64 && "invalid sgpr spill size");
  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");

  int NumLanes = Size / 4;

  // Make sure to handle the case where a wide SGPR spill may span between two
  // VGPRs.
  for (int I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
    unsigned LaneVGPR;
    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);

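    // Each 4-byte slice of the spilled SGPR maps to one lane of a spill VGPR.
    // When VGPRIndex wraps back to 0, every lane of the current spill VGPR is
    // already in use, so a fresh VGPR has to be found.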
    if (VGPRIndex == 0) {
      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
      if (LaneVGPR == AMDGPU::NoRegister) {
        // We have no VGPRs left for spilling SGPRs. Reset because we won't
        // partially spill the SGPR to VGPRs.
        SGPRToVGPRSpills.erase(FI);
        NumVGPRSpillLanes -= I;
        return false;
      }

      SpillVGPRs.push_back(LaneVGPR);

      // Add this register as live-in to all blocks to avoid the machine
      // verifier complaining about use of an undefined physical register.
      for (MachineBasicBlock &BB : MF)
        BB.addLiveIn(LaneVGPR);
    } else {
      LaneVGPR = SpillVGPRs.back();
    }

    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
  }

  return true;
}

void SIMachineFunctionInfo::removeSGPRToVGPRFrameIndices(MachineFrameInfo &MFI) {
  for (auto &R : SGPRToVGPRSpills)
    MFI.RemoveStackObject(R.first);
}