//===----------------------- SIFrameLowering.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

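// Helpers enumerating the SGPR tuples that may be allocated, capped at the
// subtarget's maximum SGPR count for this function.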
static ArrayRef<MCPhysReg> getAllSGPR128(const GCNSubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

static ArrayRef<MCPhysReg> getAllSGPRs(const GCNSubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

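// Emit kernel prologue code that initializes flat_scratch from the preloaded
// FLAT_SCRATCH_INIT user SGPRs and the scratch wave offset.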
void SIFrameLowering::emitFlatScratchInit(const GCNSubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // We don't need this if we only have spills, since there is no user-facing
  // scratch.

  // TODO: If we know early on that we don't have flat instructions, we can
  // omit this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect whether flat instructions are used at
  // all, this will be used more often than necessary on VI.

  // The debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // If flat scratch is a pointer, do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
    if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
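      // On gfx10+, the flat scratch base is programmed through the
      // FLAT_SCR_LO/HI hardware registers with s_setreg_b32 rather than by
      // writing the FLAT_SCR register pair directly; the immediate selects
      // the hwreg id and a field width of 31 + 1 = 32 bits.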
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
        .addReg(FlatScrInitLo)
        .addReg(ScratchWaveOffsetReg);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), FlatScrInitHi)
        .addReg(FlatScrInitHi)
        .addImm(0);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
        .addReg(FlatScrInitLo)
        .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
                        (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
        .addReg(FlatScrInitHi)
        .addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
                        (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
      return;
    }

    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  assert(ST.getGeneration() < AMDGPUSubtarget::GFX10);

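  // What follows matches the pre-gfx9 flat scratch layout as set up here:
  // FLAT_SCR_LO receives the scratch size in bytes, and FLAT_SCR_HI receives
  // the wave-relative scratch base converted to 256-byte units.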
  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

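// If the scratch resource descriptor is actually used, shift it down from the
// reserved registers at the top of the SGPR file into the first free,
// properly aligned 4-SGPR tuple after the preloaded user SGPRs.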
unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const GCNSubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister ||
      !MRI.isPhysRegUsed(ScratchRsrcReg))
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of
  // those which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers. The only
  // ones we cannot eliminate are the resources required for scratch access.
  // For now we skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
      std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

// Shift down registers reserved for the scratch wave offset.
unsigned SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
    const GCNSubtarget &ST, const SIInstrInfo *TII, const SIRegisterInfo *TRI,
    SIMachineFunctionInfo *MFI, MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  assert(MFI->isEntryFunction());

  // No replacement necessary.
  if (ScratchWaveOffsetReg == AMDGPU::NoRegister ||
      (!hasFP(MF) && !MRI.isPhysRegUsed(ScratchWaveOffsetReg))) {
    return AMDGPU::NoRegister;
  }

  if (ST.hasSGPRInitBug())
    return ScratchWaveOffsetReg;

  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return ScratchWaveOffsetReg;

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset:
  // + 2 because s102 and s103 do not exist on VI
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for the registers reserved for the scratch resource register
  // + 1 for the register reserved for the scratch wave offset. (By excluding
  //     this register from the list to consider, when it is being used for
  //     the scratch wave offset and there are no other free SGPRs, the value
  //     will simply stay in this register.)
  // + 1 if the stack pointer is used
  // ----
  //  13 (+1)
  unsigned ReservedRegCount = 13;

  if (AllSGPRs.size() < ReservedRegCount)
    return ScratchWaveOffsetReg;

  bool HandledScratchWaveOffsetReg =
    ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

  for (MCPhysReg Reg : AllSGPRs.drop_back(ReservedRegCount)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      if (!HandledScratchWaveOffsetReg) {
        HandledScratchWaveOffsetReg = true;

        MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
        if (MFI->getScratchWaveOffsetReg() == MFI->getStackPtrOffsetReg()) {
          assert(!hasFP(MF));
          MFI->setStackPtrOffsetReg(Reg);
        }

        MFI->setScratchWaveOffsetReg(Reg);
        MFI->setFrameOffsetReg(Reg);
        ScratchWaveOffsetReg = Reg;
        break;
      }
    }
  }

  return ScratchWaveOffsetReg;
}

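// Entry (kernel) prologue: initialize flat scratch if needed, pick the final
// scratch resource and wave offset registers, mark them live throughout the
// function, copy in the preloaded input values, materialize the scratch
// resource descriptor, and set up the initial stack pointer.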
void SIFrameLowering::emitEntryFunctionPrologue(MachineFunction &MF,
                                                MachineBasicBlock &MBB) const {
  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or to a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prologue should
  // be emitted after frame indices are eliminated.

  if (MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);

  unsigned ScratchWaveOffsetReg =
    getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = MFI->getPreloadedReg(
    AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdHsaOrMesa(F)) {
    PreloadedPrivateBufferReg = MFI->getPreloadedReg(
      AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = ScratchWaveOffsetReg != AMDGPU::NoRegister &&
                       MRI.isPhysRegUsed(ScratchWaveOffsetReg);
  bool ResourceRegUsed = ScratchRsrcReg != AMDGPU::NoRegister &&
                         MRI.isPhysRegUsed(ScratchRsrcReg);

  // FIXME: Hack to not crash in situations which emitted an error.
  if (PreloadedScratchWaveOffsetReg == AMDGPU::NoRegister)
    return;

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
  MBB.addLiveIn(PreloadedScratchWaveOffsetReg);

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdHsaOrMesa(F) || ST.isMesaGfxShader(F));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdHsaOrMesa(F) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  unsigned SPReg = MFI->getStackPtrOffsetReg();
  assert(SPReg != AMDGPU::SP_REG);

  // FIXME: Remove the isPhysRegUsed checks.
  const bool HasFP = hasFP(MF);

  if (HasFP || OffsetRegUsed) {
    assert(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, HasFP ? RegState::Kill : 0);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed) {
    emitEntryFunctionScratchSetup(ST, MF, MBB, MFI, I,
                                  PreloadedPrivateBufferReg, ScratchRsrcReg);
  }

  if (HasFP) {
    DebugLoc DL;
    const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
    int64_t StackSize = FrameInfo.getStackSize();

    // On kernel entry, the private scratch wave offset is the SP value.
    if (StackSize == 0) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg());
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), SPReg)
        .addReg(MFI->getScratchWaveOffsetReg())
        .addImm(StackSize * ST.getWavefrontSize());
    }
  }
}

// Emit scratch setup code for AMDPAL or Mesa, assuming ResourceRegUsed is set.
void SIFrameLowering::emitEntryFunctionScratchSetup(const GCNSubtarget &ST,
    MachineFunction &MF, MachineBasicBlock &MBB, SIMachineFunctionInfo *MFI,
    MachineBasicBlock::iterator I, unsigned PreloadedPrivateBufferReg,
    unsigned ScratchRsrcReg) const {

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const Function &Fn = MF.getFunction();
  DebugLoc DL;

  if (ST.isAmdPalOS()) {
    // The pointer to the GIT is formed from the offset passed in and either
    // the amdgpu-git-ptr-high function attribute or the top part of the PC.
    unsigned RsrcLo = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
    unsigned RsrcHi = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);
    unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    if (MFI->getGITPtrHigh() != 0xffffffff) {
      BuildMI(MBB, I, DL, SMovB32, RsrcHi)
        .addImm(MFI->getGITPtrHigh())
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    } else {
      const MCInstrDesc &GetPC64 = TII->get(AMDGPU::S_GETPC_B64);
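      // s_getpc_b64 defines both halves of Rsrc01; only the PC's high 32 bits
      // are kept, since RsrcLo is overwritten below with the low GIT address
      // passed in by the caller.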
      BuildMI(MBB, I, DL, GetPC64, Rsrc01);
    }
    auto GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in.
    if (ST.hasMergedShaders()) {
      switch (MF.getFunction().getCallingConv()) {
      case CallingConv::AMDGPU_HS:
      case CallingConv::AMDGPU_GS:
        // Low GIT address is passed in s8 rather than s0 for an LS+HS or
        // ES+GS merged shader on gfx9+.
        GitPtrLo = AMDGPU::SGPR8;
        break;
      default:
        break;
      }
    }
    MF.getRegInfo().addLiveIn(GitPtrLo);
    MBB.addLiveIn(GitPtrLo);
    BuildMI(MBB, I, DL, SMovB32, RsrcLo)
      .addReg(GitPtrLo)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    // We now have the GIT ptr - now get the scratch descriptor from the entry
    // at offset 0 (or offset 16 for a compute shader).
    PointerType *PtrTy =
      PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                       AMDGPUAS::CONSTANT_ADDRESS);
    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
    const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
    auto MMO = MF.getMachineMemOperand(PtrInfo,
                                       MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOInvariant |
                                       MachineMemOperand::MODereferenceable,
                                       16, 4);
    unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
    const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
    unsigned EncodedOffset = AMDGPU::getSMRDEncodedOffset(Subtarget, Offset);
    BuildMI(MBB, I, DL, LoadDwordX4, ScratchRsrcReg)
      .addReg(Rsrc01)
      .addImm(EncodedOffset) // offset
      .addImm(0) // glc
      .addImm(0) // dlc
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine)
      .addMemOperand(MMO);
    return;
  }
  if (ST.isMesaGfxShader(Fn)
      || (PreloadedPrivateBufferReg == AMDGPU::NoRegister)) {
    assert(!ST.isAmdHsaOrMesa(Fn));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and set up the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();

    if (MFI->hasImplicitBufferPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           8, 4);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(MFI->getImplicitBufferPtrUserSGPR())
          .addImm(0) // offset
          .addImm(0) // glc
          .addImm(0) // dlc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

        MF.getRegInfo().addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
        MBB.addLiveIn(MFI->getImplicitBufferPtrUserSGPR());
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack
// pointer, but we would then have to make sure that we were in fact saving at
// least one callee-save register in the prologue, which is additional
// complexity that doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineFunction &MF,
                                                 LivePhysRegs &LiveRegs,
                                                 const TargetRegisterClass &RC) {
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo &TRI = *Subtarget.getRegisterInfo();

  // Mark callee saved registers as used so we will not choose them.
  const MCPhysReg *CSRegs = TRI.getCalleeSavedRegs(&MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned Reg : RC) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }

  return AMDGPU::NoRegister;
}

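// SGPR spill stack objects are materialized in VGPR lanes rather than real
// scratch memory, so they can be supported here alongside the default and
// no-alloc stack IDs.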
bool SIFrameLowering::isSupportedStackID(TargetStackID::Value ID) const {
  switch (ID) {
  case TargetStackID::Default:
  case TargetStackID::NoAlloc:
  case TargetStackID::SGPRSpill:
    return true;
  }
  llvm_unreachable("Invalid TargetStackID::Value");
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction()) {
    emitEntryFunctionPrologue(MF, MBB);
    return;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
  unsigned FramePtrReg = FuncInfo->getFrameOffsetReg();
  LivePhysRegs LiveRegs;

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL;

  bool HasFP = false;
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = NumBytes;

  if (TRI.needsStackRealignment(MF)) {
    HasFP = true;
    const unsigned Alignment = MFI.getMaxAlignment();

    RoundedSize += Alignment;

    LiveRegs.init(TRI);
    LiveRegs.addLiveIns(MBB);

    unsigned ScratchSPReg
      = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                         AMDGPU::SReg_32_XM0RegClass);
    assert(ScratchSPReg != AMDGPU::NoRegister);

    // Align the FP upward from the current SP:
    //   s_add_u32 tmp_reg, s32, (Alignment - 1) * WavefrontSize
    //   s_and_b32 fp, tmp_reg, ~((Alignment * WavefrontSize) - 1)
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), ScratchSPReg)
      .addReg(StackPtrReg)
      .addImm((Alignment - 1) * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_AND_B32), FramePtrReg)
      .addReg(ScratchSPReg, RegState::Kill)
      .addImm(-Alignment * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
    FuncInfo->setIsStackRealigned(true);
  } else if ((HasFP = hasFP(MF))) {
    // If we need a base pointer, set it up here. It's whatever the value of
    // the stack pointer is at this point. Any variable size objects will be
    // allocated after this, so we can still use the base pointer to reference
    // locals.
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::COPY), FramePtrReg)
      .addReg(StackPtrReg)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (HasFP && RoundedSize != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // To avoid clobbering VGPRs in lanes that weren't active on function entry,
  // turn on all lanes before doing the spill to memory.
  unsigned ScratchExecCopy = AMDGPU::NoRegister;

  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    if (ScratchExecCopy == AMDGPU::NoRegister) {
      if (LiveRegs.empty()) {
        LiveRegs.init(TRI);
        LiveRegs.addLiveIns(MBB);
      }

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           *TRI.getWaveMaskRegClass());

      const unsigned OrSaveExec = ST.isWave32() ?
        AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
      BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy)
        .addImm(-1);
    }

    TII->storeRegToStackSlot(MBB, MBBI, Reg.VGPR, true,
                             Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                             &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
      .addReg(ScratchExecCopy);
  }
}

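// Mirror of emitPrologue: reload the SGPR-spill VGPRs with all lanes enabled,
// then undo the stack pointer adjustment made on entry.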
void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (FuncInfo->isEntryFunction())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;

  unsigned ScratchExecCopy = AMDGPU::NoRegister;
  for (const SIMachineFunctionInfo::SGPRSpillVGPRCSR &Reg
         : FuncInfo->getSGPRSpillVGPRs()) {
    if (!Reg.FI.hasValue())
      continue;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    if (ScratchExecCopy == AMDGPU::NoRegister) {
      // See emitPrologue.
      LivePhysRegs LiveRegs(*ST.getRegisterInfo());
      LiveRegs.addLiveIns(MBB);

      ScratchExecCopy
        = findScratchNonCalleeSaveRegister(MF, LiveRegs,
                                           *TRI.getWaveMaskRegClass());

      const unsigned OrSaveExec = ST.isWave32() ?
        AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;

      BuildMI(MBB, MBBI, DL, TII->get(OrSaveExec), ScratchExecCopy)
        .addImm(-1);
    }

    TII->loadRegFromStackSlot(MBB, MBBI, Reg.VGPR,
                              Reg.FI.getValue(), &AMDGPU::VGPR_32RegClass,
                              &TII->getRegisterInfo());
  }

  if (ScratchExecCopy != AMDGPU::NoRegister) {
    // FIXME: Split block and make terminator.
    unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MBBI, DL, TII->get(ExecMov), Exec)
      .addReg(ScratchExecCopy);
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  uint32_t NumBytes = MFI.getStackSize();
  uint32_t RoundedSize = FuncInfo->isStackRealigned() ?
    NumBytes + MFI.getMaxAlignment() : NumBytes;

  if (RoundedSize != 0 && hasFP(MF)) {
    const unsigned StackPtrReg = FuncInfo->getStackPtrOffsetReg();
    BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_SUB_U32), StackPtrReg)
      .addReg(StackPtrReg)
      .addImm(RoundedSize * ST.getWavefrontSize())
      .setMIFlag(MachineInstr::FrameDestroy);
  }
}

// Note: SGPRSpill stack IDs should only be used for SGPR spilling to VGPRs,
// not memory.
static bool allStackObjectsAreDeadOrSGPR(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I) &&
        MFI.getStackID(I) != TargetStackID::SGPRSpill)
      return false;
  }

  return true;
}

int SIFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            unsigned &FrameReg) const {
  const SIRegisterInfo *RI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();

  FrameReg = RI->getFrameRegister(MF);
  return MF.getFrameInfo().getObjectOffset(FI);
}

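// Lower SGPR spills into VGPR lanes before frame offsets are finalized, and
// reserve an emergency scavenging slot if any real stack objects remain.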
void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills
    // are users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging of allocas with spill slots, but for now, according to
    // MachineFrameInfo, isSpillSlot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          assert(MFI.getStackID(FI) == TargetStackID::SGPRSpill);
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }
  }

  FuncInfo->removeSGPRToVGPRFrameIndices(MFI);

  if (!allStackObjectsAreDeadOrSGPR(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    if (FuncInfo->isEntryFunction()) {
      int ScavengeFI = MFI.CreateFixedObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      int ScavengeFI = MFI.CreateStackObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
        TRI.getSpillAlignment(AMDGPU::SGPR_32RegClass),
        false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }
  }
}

void SIFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                           BitVector &SavedRegs,
                                           RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // The SP is specifically managed and we don't want extra spills of it.
  SavedRegs.reset(MFI->getStackPtrOffsetReg());
}

MachineBasicBlock::iterator SIFrameLowering::eliminateCallFramePseudoInstr(
  MachineFunction &MF,
  MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
  int64_t Amount = I->getOperand(0).getImm();
  if (Amount == 0)
    return MBB.erase(I);

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    Amount = alignTo(Amount, Align);
    assert(isUInt<32>(Amount) && "exceeded stack address space size");
    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    unsigned SPReg = MFI->getStackPtrOffsetReg();

    unsigned Op = IsDestroy ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
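    // SPReg tracks a per-wave byte offset into scratch, while Amount is in
    // per-lane bytes, so the adjustment is scaled by the wavefront size (the
    // same scaling used for stack allocations in emitPrologue).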
    BuildMI(MBB, I, DL, TII->get(Op), SPReg)
      .addReg(SPReg)
      .addImm(Amount * ST.getWavefrontSize());
  } else if (CalleePopAmount != 0) {
    llvm_unreachable("is this used?");
  }

  return MBB.erase(I);
}

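// A frame pointer is required when the frame is nonzero-sized and there are
// calls, when the stack must be realigned, and for the usual generic reasons:
// variable-sized objects, a taken frame address, stackmaps/patchpoints, or
// frame pointer elimination being disabled.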
bool SIFrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasCalls()) {
    // All offsets are unsigned, so they need to be addressed in the same
    // direction as stack growth.
    if (MFI.getStackSize() != 0)
      return true;

    // For the entry point, the input wave scratch offset must be copied to
    // the API SP if there are calls.
    if (MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction())
      return true;
  }

  return MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         MFI.hasStackMap() || MFI.hasPatchPoint() ||
         MF.getSubtarget<GCNSubtarget>().getRegisterInfo()
             ->needsStackRealignment(MF) ||
         MF.getTarget().Options.DisableFramePointerElim(MF);
}