//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <vector>

#define MAX_LANES 64
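// Note: a wavefront has 64 lanes on all SI-family targets, so one VGPR can
// hold up to MAX_LANES spilled 32-bit SGPR values (one per lane).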

using namespace llvm;

SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    GridWorkgroupCountX(false),
    GridWorkgroupCountY(false),
    GridWorkgroupCountZ(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false),
    ImplicitArgPtr(false),
    GITPtrHigh(0xffffffff),
    HighBitsOf32BitAddress(0) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const Function &F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  if (!isEntryFunction()) {
    // Non-entry functions have no special inputs for now, other than the
    // registers required for scratch access.
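    // Fixed ABI registers for callable functions: the scratch resource
    // descriptor lives in s[0:3], the scratch wave offset in s4, the frame
    // offset in s5, and the stack pointer in s32.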
    ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
    ScratchWaveOffsetReg = AMDGPU::SGPR4;
    FrameOffsetReg = AMDGPU::SGPR5;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    ArgInfo.PrivateSegmentBuffer =
      ArgDescriptor::createRegister(ScratchRSrcReg);
    ArgInfo.PrivateSegmentWaveByteOffset =
      ArgDescriptor::createRegister(ScratchWaveOffsetReg);

    if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
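    // In kernels, the implicit arguments live at a fixed offset from the
    // start of the kernarg segment, so requesting the implicit argument
    // pointer only requires the kernarg segment pointer.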
    if (F.hasFnAttribute("amdgpu-implicitarg-ptr"))
      KernargSegmentPtr = true;
  }

  CallingConv::ID CC = F.getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL) {
    if (!F.arg_empty())
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
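    // Seed PSInputAddr from the "InitialPSInputAddr" function attribute
    // (0 if the attribute is absent); it records which pixel shader inputs
    // the client expects to be enabled.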
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  if (ST.debuggerEmitPrologue()) {
    // Enable everything.
    WorkGroupIDX = true;
    WorkGroupIDY = true;
    WorkGroupIDZ = true;
    WorkItemIDX = true;
    WorkItemIDY = true;
    WorkItemIDZ = true;
  } else {
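    // Otherwise, only request the IDs that earlier analysis (e.g. the
    // AMDGPUAnnotateKernelFeatures pass) marked as used via function
    // attributes.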
    if (F.hasFnAttribute("amdgpu-work-group-id-x"))
      WorkGroupIDX = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-y"))
      WorkGroupIDY = true;

    if (F.hasFnAttribute("amdgpu-work-group-id-z"))
      WorkGroupIDZ = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-x"))
      WorkItemIDX = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-y"))
      WorkItemIDY = true;

    if (F.hasFnAttribute("amdgpu-work-item-id-z"))
      WorkItemIDZ = true;
  }

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  bool MaySpill = ST.isVGPRSpillingEnabled(F);
  bool HasStackObjects = FrameInfo.hasStackObjects();

  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    if (HasStackObjects || MaySpill) {
      PrivateSegmentWaveByteOffset = true;

      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
        ArgInfo.PrivateSegmentWaveByteOffset
          = ArgDescriptor::createRegister(AMDGPU::SGPR5);
    }
  }

  bool IsCOV2 = ST.isAmdCodeObjectV2(MF);
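  // Code object v2 is the HSA ABI, where the dispatch pointer, queue pointer,
  // and dispatch ID are available as implicit kernel arguments in user SGPRs.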
  if (IsCOV2) {
    if (HasStackObjects || MaySpill)
      PrivateSegmentBuffer = true;

    if (F.hasFnAttribute("amdgpu-dispatch-ptr"))
      DispatchPtr = true;

    if (F.hasFnAttribute("amdgpu-queue-ptr"))
      QueuePtr = true;

    if (F.hasFnAttribute("amdgpu-dispatch-id"))
      DispatchID = true;
  } else if (ST.isMesaGfxShader(MF)) {
    if (HasStackObjects || MaySpill)
      ImplicitBufferPtr = true;
  }

  if (F.hasFnAttribute("amdgpu-kernarg-segment-ptr"))
    KernargSegmentPtr = true;

  if (ST.hasFlatAddressSpace() && isEntryFunction() && IsCOV2) {
    // TODO: This could be refined a lot. The attribute is a poor way of
    // detecting calls that may require it before argument lowering.
    if (HasStackObjects || F.hasFnAttribute("amdgpu-flat-scratch"))
      FlatScratchInit = true;
  }

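  // Both attributes below carry an integer encoded as a string, e.g.
  // "amdgpu-git-ptr-high"="0xffff8000" (example value hypothetical);
  // radix 0 lets consumeInteger autodetect a 0x/0/0b prefix.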
  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);
}

unsigned SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  ArgInfo.PrivateSegmentBuffer =
    ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass));
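  // The buffer resource descriptor is 128 bits wide and therefore occupies
  // four consecutive user SGPRs.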
  NumUserSGPRs += 4;
  return ArgInfo.PrivateSegmentBuffer.getRegister();
}

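// Each of the implicit arguments added below is a 64-bit value, so each
// reserves an aligned pair of user SGPRs.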
unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchPtr.getRegister();
}

unsigned SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.QueuePtr.getRegister();
}

unsigned SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  ArgInfo.KernargSegmentPtr
    = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
      getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.KernargSegmentPtr.getRegister();
}

unsigned SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchID.getRegister();
}

unsigned SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.FlatScratchInit.getRegister();
}

unsigned SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.ImplicitBufferPtr.getRegister();
}

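// CSRegs is a null-terminated array, as returned by
// SIRegisterInfo::getCalleeSavedRegs().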
static bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg) {
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (CSRegs[I] == Reg)
      return true;
  }

  return false;
}

/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
                                                    int FI) {
  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned WaveSize = ST.getWavefrontSize();

  unsigned Size = FrameInfo.getObjectSize(FI);
  assert(Size >= 4 && Size <= 64 && "invalid sgpr spill size");
  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");

  int NumLanes = Size / 4;
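  // Each 32-bit SGPR occupies one VGPR lane, so a Size-byte spill needs
  // Size / 4 lanes.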

  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  // Make sure to handle the case where a wide SGPR spill may span between two
  // VGPRs.
  for (int I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
    unsigned LaneVGPR;
    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);

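    // Index 0 means the current spill VGPR (if any) has all of its WaveSize
    // lanes in use, so a fresh VGPR is needed.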
    if (VGPRIndex == 0) {
      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
      if (LaneVGPR == AMDGPU::NoRegister) {
        // We have no VGPRs left for spilling SGPRs. Reset because we will not
        // partially spill the SGPR to VGPRs.
        SGPRToVGPRSpills.erase(FI);
        NumVGPRSpillLanes -= I;
        return false;
      }

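      // If the chosen VGPR is itself callee-saved, reserve a stack slot so the
      // VGPR can be saved and restored around its use as a spill register.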
      Optional<int> CSRSpillFI;
      if ((FrameInfo.hasCalls() || !isEntryFunction()) && CSRegs &&
          isCalleeSavedReg(CSRegs, LaneVGPR)) {
        CSRSpillFI = FrameInfo.CreateSpillStackObject(4, 4);
      }

      SpillVGPRs.push_back(SGPRSpillVGPRCSR(LaneVGPR, CSRSpillFI));

      // Add this register as live-in to all blocks to avoid the machine
      // verifier complaining about uses of an undefined physical register.
      for (MachineBasicBlock &BB : MF)
        BB.addLiveIn(LaneVGPR);
    } else {
      LaneVGPR = SpillVGPRs.back().VGPR;
    }

    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
  }

  return true;
}

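// Once SGPR spills have been rewritten to VGPR lanes, the stack slots that
// were originally created for them are dead and can be removed from the frame.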
void SIMachineFunctionInfo::removeSGPRToVGPRFrameIndices(MachineFrameInfo &MFI) {
  for (auto &R : SGPRToVGPRSpills)
    MFI.RemoveStackObject(R.first);
}