//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <vector>

#define MAX_LANES 64

using namespace llvm;

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false),
    ImplicitArgPtr(false),
    GITPtrHigh(0xffffffff),
    HighBitsOf32BitAddress(0) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const Function &F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  Occupancy = getMaxWavesPerEU();
  limitOccupancy(MF);
  CallingConv::ID CC = F.getCallingConv();

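  // Entry points get default inputs based on their calling convention:
  // kernels implicitly need the kernarg segment pointer (when they take
  // arguments) plus the X workgroup and workitem IDs, while pixel shaders
  // get their initial PS input address mask from the function.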
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
    if (!F.arg_empty())
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  if (!isEntryFunction()) {
    // Non-entry functions have no special inputs for now, other than the
    // registers required for scratch access.
    ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
    ScratchWaveOffsetReg = AMDGPU::SGPR4;
    FrameOffsetReg = AMDGPU::SGPR5;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    ArgInfo.PrivateSegmentBuffer =
      ArgDescriptor::createRegister(ScratchRSrcReg);
    ArgInfo.PrivateSegmentWaveByteOffset =
      ArgDescriptor::createRegister(ScratchWaveOffsetReg);

    if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    if (F.hasFnAttribute("amdgpu-implicitarg-ptr")) {
      KernargSegmentPtr = true;
      MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
                                 MaxKernArgAlign);
    }
  }

  if (ST.debuggerEmitPrologue()) {
    // Enable everything.
    WorkGroupIDX = true;
    WorkGroupIDY = true;
    WorkGroupIDZ = true;
    WorkItemIDX = true;
    WorkItemIDY = true;
    WorkItemIDZ = true;
  } else {
    if (F.hasFnAttribute("amdgpu-work-group-id-x"))
      WorkGroupIDX = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-y"))
      WorkGroupIDY = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-z"))
      WorkGroupIDZ = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-x"))
      WorkItemIDX = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-y"))
      WorkItemIDY = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-z"))
      WorkItemIDZ = true;
  }
Matt Arsenault26f8f3d2015-11-30 21:16:03 +0000118
Matt Arsenaultefa9f4b2017-04-11 22:29:28 +0000119 const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
Matthias Braunf1caa282017-12-15 22:22:58 +0000120 bool MaySpill = ST.isVGPRSpillingEnabled(F);
Matt Arsenault1cc47f82017-07-18 16:44:56 +0000121 bool HasStackObjects = FrameInfo.hasStackObjects();
Matt Arsenault26f8f3d2015-11-30 21:16:03 +0000122
Matt Arsenaulte15855d2017-07-17 22:35:50 +0000123 if (isEntryFunction()) {
124 // X, XY, and XYZ are the only supported combinations, so make sure Y is
125 // enabled if Z is.
126 if (WorkItemIDZ)
127 WorkItemIDY = true;
Matt Arsenault26f8f3d2015-11-30 21:16:03 +0000128
Matt Arsenaulte15855d2017-07-17 22:35:50 +0000129 if (HasStackObjects || MaySpill) {
130 PrivateSegmentWaveByteOffset = true;
131
Matt Arsenault8623e8d2017-08-03 23:00:29 +0000132 // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
133 if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
134 (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
135 ArgInfo.PrivateSegmentWaveByteOffset
136 = ArgDescriptor::createRegister(AMDGPU::SGPR5);
Matt Arsenaulte15855d2017-07-17 22:35:50 +0000137 }
Marek Olsak584d2c02017-05-04 22:25:20 +0000138 }
139
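  // Code object v2 (the HSA ABI) passes its ABI inputs in user SGPRs; enable
  // them here based on the corresponding function attributes. Mesa graphics
  // shaders instead get a single implicit buffer pointer for scratch access.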
  bool IsCOV2 = ST.isAmdCodeObjectV2(F);
  if (IsCOV2) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F.hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F.hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F.hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  } else if (ST.isMesaGfxShader(F)) {
    if (HasStackObjects || MaySpill)
      ImplicitBufferPtr = true;
  }

  if (F.hasFnAttribute("amdgpu-kernarg-segment-ptr"))
    KernargSegmentPtr = true;

  if (ST.hasFlatAddressSpace() && isEntryFunction() && IsCOV2) {
    // TODO: This could be refined a lot. The attribute is a poor way of
    // detecting calls that may require it before argument lowering.
    if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch"))
      FlatScratchInit = true;
  }

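  // Both of the following values arrive as string function attributes and
  // are parsed as integers; for example, an IR attribute such as
  //   "amdgpu-git-ptr-high"="256"
  // (a hypothetical value, shown only for illustration) would leave
  // GITPtrHigh == 256. An empty or absent attribute keeps the default.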
  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);
}

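/// Clamp the tracked occupancy to the wave limit and to the bound implied by
/// this function's LDS usage.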
void SIMachineFunctionInfo::limitOccupancy(const MachineFunction &MF) {
  limitOccupancy(getMaxWavesPerEU());
  const GCNSubtarget& ST = MF.getSubtarget<GCNSubtarget>();
  limitOccupancy(ST.getOccupancyWithLocalMemSize(getLDSSize(),
                 MF.getFunction()));
}

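// Each of the following add* helpers reserves the next user SGPRs for one
// preloaded ABI input and records it in ArgInfo: the private segment buffer
// occupies a 128-bit tuple (four SGPRs), while the remaining inputs each
// occupy a 64-bit pair (two SGPRs).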
unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  ArgInfo.PrivateSegmentBuffer =
    ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass));
  NumUserSGPRs += 4;
  return ArgInfo.PrivateSegmentBuffer.getRegister();
}

unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchPtr.getRegister();
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.QueuePtr.getRegister();
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  ArgInfo.KernargSegmentPtr
    = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.KernargSegmentPtr.getRegister();
}

unsigned SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchID.getRegister();
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(
    TRI.getMatchingSuperReg(getNextUserSGPR(), AMDGPU::sub0,
                            &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.FlatScratchInit.getRegister();
}

unsigned SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(
    TRI.getMatchingSuperReg(getNextUserSGPR(), AMDGPU::sub0,
                            &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.ImplicitBufferPtr.getRegister();
}

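/// \returns True if \p Reg appears in the null-terminated callee-saved
/// register list \p CSRegs.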
static bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg) {
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (CSRegs[I] == Reg)
      return true;
  }

  return false;
}

/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
                                                    int FI) {
  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned WaveSize = ST.getWavefrontSize();

  unsigned Size = FrameInfo.getObjectSize(FI);
  assert(Size >= 4 && Size <= 64 && "invalid sgpr spill size");
  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");

  int NumLanes = Size / 4;
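  // Each lane holds one 32-bit SGPR, so e.g. a 256-bit SGPR tuple
  // (Size == 32) occupies eight lanes.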

  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  // Make sure to handle the case where a wide SGPR spill may span multiple
  // VGPRs.
  for (int I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
    unsigned LaneVGPR;
    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);

    if (VGPRIndex == 0) {
      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
      if (LaneVGPR == AMDGPU::NoRegister) {
        // We have no VGPRs left for spilling SGPRs. Reset because we will not
        // partially spill the SGPR to VGPRs.
        SGPRToVGPRSpills.erase(FI);
        NumVGPRSpillLanes -= I;
        return false;
      }

      Optional<int> CSRSpillFI;
      if ((FrameInfo.hasCalls() || !isEntryFunction()) && CSRegs &&
          isCalleeSavedReg(CSRegs, LaneVGPR)) {
        CSRSpillFI = FrameInfo.CreateSpillStackObject(4, 4);
      }

      SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, CSRSpillFI));

      // Add this register as live-in to all blocks to avoid the machine
      // verifier complaining about use of an undefined physical register.
      for (MachineBasicBlock &BB : MF)
        BB.addLiveIn(LaneVGPR);
    } else {
      LaneVGPR = SpillVGPRs.back().VGPR;
    }

    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
  }

  return true;
}

void SIMachineFunctionInfo::removeSGPRToVGPRFrameIndices(MachineFrameInfo &MFI) {
  for (auto &R : SGPRToVGPRSpills)
    MFI.RemoveStackObject(R.first);
}

/// \returns VGPR used for \p Dim's work item ID.
unsigned SIMachineFunctionInfo::getWorkItemIDVGPR(unsigned Dim) const {
  switch (Dim) {
  case 0:
    assert(hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case 1:
    assert(hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case 2:
    assert(hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected dimension");
}

MCPhysReg SIMachineFunctionInfo::getNextUserSGPR() const {
  assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
  return AMDGPU::SGPR0 + NumUserSGPRs;
}

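/// \returns The next system SGPR to allocate, which follows all user SGPRs.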
MCPhysReg SIMachineFunctionInfo::getNextSystemSGPR() const {
  return AMDGPU::SGPR0 + NumUserSGPRs + NumSystemSGPRs;
}