//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function so that the particular area isn't present
// in the frame.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) doesn't get created until the main
// function body runs, after the prologue. However, it's depicted here
// for completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | prev_fp, prev_lr                  |
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | other callee-saved registers      |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp(not defined by ABI,
// |.variable-sized.local.variables....|       LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// To access data in a frame, a constant offset from one of the pointers (fp,
// bp, sp) to that data must be computable at compile time. The size of the
// areas with a dotted background cannot be computed at compile time when they
// are present, so all three of fp, bp and sp have to be set up in order to
// reach every frame area, assuming all of the frame areas are non-empty.
//
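// For example, in a function that has both VLAs and over-aligned locals,
// incoming stack arguments are reached through fp, fixed-size locals through
// bp, and the variable-sized areas only through pointers computed at run time
// (outgoing call arguments go through sp).
//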
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
//
// FIXME: also explain the redzone concept.
// FIXME: also explain the concept of reserved call frames.
//
//===----------------------------------------------------------------------===//

#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

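// The red zone (only enabled here via -aarch64-redzone) is a small area
// immediately below sp, 128 bytes in this implementation, that a leaf
// function may use for its stack objects without having to adjust sp at all.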
bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;
  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned NumBytes = AFI->getLocalStackSize();

  return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128);
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  // Retain behavior of always omitting the FP for leaf functions when possible.
  return (MFI.hasCalls() &&
          MF.getTarget().Options.DisableFramePointerElim(MF)) ||
         MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
         MFI.hasStackMap() || MFI.hasPatchPoint() ||
         RegInfo->needsStackRealignment(MF);
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects();
}

MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    int64_t Amount = I->getOperand(0).getImm();
    Amount = alignTo(Amount, Align);
    if (!IsDestroy)
      Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too so
    // this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 24-bits
      // because there's no guaranteed temporary register available.
      //
      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
      // 1) For offset <= 12-bit, we use LSL #0
      // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
      //    LSL #0, and the other uses LSL #12.
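      //    For example, an adjustment of 0x12345 bytes could roughly be
      //    lowered as "sub sp, sp, #0x12, lsl #12" plus "sub sp, sp, #0x345"
      //    (a sketch of the idea; emitFrameOffset picks the exact sequence).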
      //
      // Most call frames will be allocated at the start of a function so
      // this is OK, but it is a limitation that needs dealing with.
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from the
    // stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount,
                    TII);
  }
  return MBB.erase(I);
}

void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const MCRegisterInfo *MRI = STI.getRegisterInfo();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty())
    return;

  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset =
        MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

// Find a scratch register that we can use at the start of the prologue to
// re-align the stack pointer. We avoid using callee-save registers since they
// may appear to be free when this is called from canUseAsPrologue (during
// shrink wrapping), but then no longer be free when this is called from
// emitPrologue.
//
// FIXME: This is a bit conservative, since in the above case we could use one
// of the callee-save registers as a scratch temp to re-align the stack pointer,
// but we would then have to make sure that we were in fact saving at least one
// callee-save register in the prologue, which is additional complexity that
// doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();

  // If MBB is an entry block, use X9 as the scratch register
  if (&MF->front() == MBB)
    return AArch64::X9;

  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  LivePhysRegs LiveRegs(&TRI);
  LiveRegs.addLiveIns(*MBB);

  // Mark callee saved registers as used so we will not choose them.
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(MF);
  for (unsigned i = 0; CSRegs[i]; ++i)
    LiveRegs.addReg(CSRegs[i]);

  // Prefer X9 since it was historically used for the prologue scratch reg.
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  if (LiveRegs.available(MRI, AArch64::X9))
    return AArch64::X9;

  for (unsigned Reg : AArch64::GPR64RegClass) {
    if (LiveRegs.available(MRI, Reg))
      return Reg;
  }
  return AArch64::NoRegister;
}

bool AArch64FrameLowering::canUseAsPrologue(
    const MachineBasicBlock &MBB) const {
  const MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
  const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // Don't need a scratch register if we're not going to re-align the stack.
  if (!RegInfo->needsStackRealignment(*MF))
    return true;
  // Otherwise, we can use any block as long as it has a scratch register
  // available.
  return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
}

bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
    MachineFunction &MF, unsigned StackBumpBytes) const {
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  if (AFI->getLocalStackSize() == 0)
    return false;

  // 512 is the maximum immediate for stp/ldp that will be used for
  // callee-save save/restores
  if (StackBumpBytes >= 512)
    return false;

  if (MFI.hasVarSizedObjects())
    return false;

  if (RegInfo->needsStackRealignment(MF))
    return false;

  // This isn't strictly necessary, but it simplifies things a bit since the
  // current RedZone handling code assumes the SP is adjusted by the
  // callee-save save/restore code.
  if (canUseRedZone(MF))
    return false;

  return true;
}

// Convert callee-save register save/restore instruction to do stack pointer
// decrement/increment to allocate/deallocate the callee-save stack area by
// converting store/load to use pre/post increment version.
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc) {
  unsigned NewOpc;
  bool NewIsUnscaled = false;
  switch (MBBI->getOpcode()) {
  default:
    llvm_unreachable("Unexpected callee-save save/restore opcode!");
  case AArch64::STPXi:
    NewOpc = AArch64::STPXpre;
    break;
  case AArch64::STPDi:
    NewOpc = AArch64::STPDpre;
    break;
  case AArch64::STRXui:
    NewOpc = AArch64::STRXpre;
    NewIsUnscaled = true;
    break;
  case AArch64::STRDui:
    NewOpc = AArch64::STRDpre;
    NewIsUnscaled = true;
    break;
  case AArch64::LDPXi:
    NewOpc = AArch64::LDPXpost;
    break;
  case AArch64::LDPDi:
    NewOpc = AArch64::LDPDpost;
    break;
  case AArch64::LDRXui:
    NewOpc = AArch64::LDRXpost;
    NewIsUnscaled = true;
    break;
  case AArch64::LDRDui:
    NewOpc = AArch64::LDRDpost;
    NewIsUnscaled = true;
    break;
  }

  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  MIB.addReg(AArch64::SP, RegState::Define);

  // Copy all operands other than the immediate offset.
  unsigned OpndIdx = 0;
  for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
       ++OpndIdx)
    MIB.add(MBBI->getOperand(OpndIdx));

  assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
         "Unexpected immediate offset in first/last callee-save save/restore "
         "instruction!");
  assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  assert(CSStackSizeInc % 8 == 0);
  int64_t CSStackSizeIncImm = CSStackSizeInc;
  if (!NewIsUnscaled)
    CSStackSizeIncImm /= 8;
  MIB.addImm(CSStackSizeIncImm);

  MIB.setMIFlags(MBBI->getFlags());
  MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());

  return std::prev(MBB.erase(MBBI));
}

// Fixup callee-save register save/restore instructions to take into account
// combined SP bump by adding the local stack size to the stack offsets.
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
                                              unsigned LocalStackSize) {
  unsigned Opc = MI.getOpcode();
  (void)Opc;
  assert((Opc == AArch64::STPXi || Opc == AArch64::STPDi ||
          Opc == AArch64::STRXui || Opc == AArch64::STRDui ||
          Opc == AArch64::LDPXi || Opc == AArch64::LDPDi ||
          Opc == AArch64::LDRXui || Opc == AArch64::LDRDui) &&
         "Unexpected callee-save save/restore opcode!");

  unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
  assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
         "Unexpected base register in callee-save save/restore instruction!");
  // Last operand is immediate offset that needs fixing.
  MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
  // All generated opcodes have scaled offsets.
  assert(LocalStackSize % 8 == 0);
  OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / 8);
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
  bool HasFP = hasFP(MF);

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

  int NumBytes = (int)MFI.getStackSize();
  if (!AFI->hasStackFrame()) {
    assert(!HasFP && "unexpected function without stack frame but with FP");

    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);

    if (!NumBytes)
      return;
    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (canUseRedZone(MF))
      ++NumRedZoneFunctions;
    else {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

      // Label used to tie together the PROLOG_LABEL and the MachineMoves.
      MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }
    return;
  }

  auto CSStackSize = AFI->getCalleeSavedStackSize();
  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes - CSStackSize);

  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                    MachineInstr::FrameSetup);
    NumBytes = 0;
  } else if (CSStackSize != 0) {
    MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(MBB, MBBI, DL, TII,
                                                     -CSStackSize);
    NumBytes -= CSStackSize;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  // Move past the saves of the callee-saved registers, fixing up the offsets
  // and pre-inc if we decided to combine the callee-save and local stack
  // pointer bump above.
  MachineBasicBlock::iterator End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup)) {
    if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize());
    ++MBBI;
  }
  if (HasFP) {
    // Only set up FP if we actually need to. Frame pointer is fp = sp - 16.
    int FPOffset = CSStackSize - 16;
    if (CombineSPBump)
      FPOffset += AFI->getLocalStackSize();

    // Issue   sub fp, sp, FPOffset  or
    //         mov fp, sp            when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
                    MachineInstr::FrameSetup);
  }

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
    unsigned scratchSPReg = AArch64::SP;

    if (NeedsRealignment) {
      scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
      assert(scratchSPReg != AArch64::NoRegister);
    }

    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
      // the correct value here, as NumBytes also includes padding bytes,
      // which shouldn't be counted here.
      emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

    if (NeedsRealignment) {
      const unsigned Alignment = MFI.getMaxAlignment();
      const unsigned NrBitsToZero = countTrailingZeros(Alignment);
      assert(NrBitsToZero > 1);
      assert(scratchSPReg != AArch64::SP);

      // SUB X9, SP, NumBytes
      //   -- X9 is temporary register, so shouldn't contain any live data here,
      //   -- free to use. This is already produced by emitFrameOffset above.
      // AND SP, X9, 0b11111...0000
      // The logical immediates have a non-trivial encoding. The following
      // formula computes the encoded immediate with all ones but
      // NrBitsToZero zero bits as least significant bits.
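      // For example, with a 32-byte alignment (NrBitsToZero == 5) this yields
      // N = 1, immr = 59, imms = 58, i.e. the 64-bit mask 0xffffffffffffffe0
      // that clears the low 5 bits (a worked example, not an exhaustive spec).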
      uint32_t andMaskEncoded = (1 << 12)                         // = N
                                | ((64 - NrBitsToZero) << 6)      // immr
                                | ((64 - NrBitsToZero - 1) << 0); // imms

      BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
          .addReg(scratchSPReg, RegState::Kill)
          .addImm(andMaskEncoded);
      AFI->setStackRealigned(true);
    }
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  if (RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
  }

  if (needsFrameMoves) {
    const DataLayout &TD = MF.getDataLayout();
    const int StackGrowth = -TD.getPointerSize(0);
    unsigned FramePtr = RegInfo->getFrameRegister(MF);
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    // Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa,bx, [sp, -#offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |                Frame Pointer              |
    // 10024 |                Frame Pointer              |
    //       +-------------------------------------------+
    // 10028 |                Link Register              |
    // 1002c |                Link Register              |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
    //     sp == 10010         ::    >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    // Ltmp1:
    //     .cfi_def_cfa w29, 16
    // Ltmp2:
    //     .cfi_offset w30, -8
    // Ltmp3:
    //     .cfi_offset w29, -16
    // Ltmp4:
    //     .cfi_offset w27, -24
    // Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have (including FP,
    // LR if those are saved).
    emitCalleeSavedFrameMoves(MBB, MBBI);
  }
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri;
  }
  int NumBytes = MFI.getStackSize();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note that
  // in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments, this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (CalleeSavedStackSize)|      |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.

  auto CSStackSize = AFI->getCalleeSavedStackSize();
  bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);

  if (!CombineSPBump && CSStackSize != 0)
    convertCalleeSaveRestoreToSPPrePostIncDec(
        MBB, std::prev(MBB.getFirstTerminator()), DL, TII, CSStackSize);

  // Move past the restores of the callee-saved registers.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
      ++LastPopI;
      break;
    } else if (CombineSPBump)
      fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize());
  }

  // If there is a single SP update, insert it before the ret and we're done.
  if (CombineSPBump) {
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    NumBytes + ArgumentPopSize, TII,
                    MachineInstr::FrameDestroy);
    return;
  }

  NumBytes -= CSStackSize;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    bool RedZone = canUseRedZone(MF);
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer (but we may need to pop stack args for fastcc).
    if (RedZone && ArgumentPopSize == 0)
      return;

    bool NoCalleeSaveRestore = CSStackSize == 0;
    int StackRestoreBytes = RedZone ? 0 : NumBytes;
    if (NoCalleeSaveRestore)
      StackRestoreBytes += ArgumentPopSize;
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
                    StackRestoreBytes, TII, MachineInstr::FrameDestroy);
    // If we were able to combine the local stack pop with the argument pop,
    // then we're done.
    if (NoCalleeSaveRestore || ArgumentPopSize == 0)
      return;
    NumBytes = 0;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (MFI.hasVarSizedObjects() || AFI->isStackRealigned())
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                    -CSStackSize + 16, TII, MachineInstr::FrameDestroy);
  else if (NumBytes)
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII,
                    MachineInstr::FrameDestroy);

  // This must be placed after the callee-save restore code because that code
  // assumes the SP is at the same location as it was after the callee-save save
  // code in the prologue.
  if (ArgumentPopSize)
    emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
                    ArgumentPopSize, TII, MachineInstr::FrameDestroy);
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info. It's the same as what we use for resolving the code-gen
/// references for now. FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                 int FI,
                                                 unsigned &FrameReg) const {
  return resolveFrameIndexReference(MF, FI, FrameReg);
}

int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                     int FI, unsigned &FrameReg,
                                                     bool PreferFP) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  int FPOffset = MFI.getObjectOffset(FI) + 16;
  int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
  bool isFixed = MFI.isFixedObjectIndex(FI);

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs or a dynamically realigned SP (and thus the SP isn't
  // reliable as a base). Make sure useFPForScavengingIndex() does the
  // right thing for the emergency spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame()) {
    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (hasFP(MF) && !RegInfo->hasBasePointer(MF) &&
               !RegInfo->needsStackRealignment(MF)) {
      // Use SP or FP, whichever gives us the best chance of the offset
      // being in range for direct access. If the FPOffset is positive,
      // that'll always be best, as the SP will be even further away.
      // If the FPOffset is negative, we have to keep in mind that the
      // available offset range for negative offsets is smaller than for
      // positive ones. If we have variable sized objects, we're stuck with
      // using the FP regardless, though, as the SP offset is unknown
      // and we don't have a base pointer available. If an offset is
      // available via the FP and the SP, use whichever is closest.
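      // For instance (hypothetical numbers), FPOffset == -200 with
      // Offset == 400 picks the FP (-200 >= -256 and 400 > 200), while
      // FPOffset == -300 falls outside the FP's smaller negative range,
      // so the SP is used instead.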
      if (PreferFP || MFI.hasVarSizedObjects() || FPOffset >= 0 ||
          (FPOffset >= -256 && Offset > -FPOffset))
        UseFP = true;
    }
  }

  assert((isFixed || !RegInfo->needsStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument objects cannot be accessed through the frame pointer");

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return FPOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    FrameReg = AArch64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return Offset;
}

static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  // Do not set a kill flag on values that are also marked as live-in. This
  // happens with the @llvm.returnaddress intrinsic and with arguments passed
  // in callee saved registers.
  // Omitting the kill flags is conservatively correct even if the live-in
  // is not used after all.
  bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
  return getKillRegState(!IsLiveIn);
}

static bool produceCompactUnwindFrame(MachineFunction &MF) {
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  AttributeList Attrs = MF.getFunction()->getAttributes();
  return Subtarget.isTargetMachO() &&
         !(Subtarget.getTargetLowering()->supportSwiftError() &&
           Attrs.hasAttrSomewhere(Attribute::SwiftError));
}

namespace {

struct RegPairInfo {
  unsigned Reg1 = AArch64::NoRegister;
  unsigned Reg2 = AArch64::NoRegister;
  int FrameIdx;
  int Offset;
  bool IsGPR;

  RegPairInfo() = default;

  bool isPaired() const { return Reg2 != AArch64::NoRegister; }
};

} // end anonymous namespace

static void computeCalleeSaveRegisterPairs(
    MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs) {

  if (CSI.empty())
    return;

  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  CallingConv::ID CC = MF.getFunction()->getCallingConv();
  unsigned Count = CSI.size();
  (void)CC;
  // MachO's compact unwind format relies on all registers being stored in
  // pairs.
  assert((!produceCompactUnwindFrame(MF) ||
          CC == CallingConv::PreserveMost ||
          (Count & 1) == 0) &&
         "Odd number of callee-saved regs to spill!");
  unsigned Offset = AFI->getCalleeSavedStackSize();

  for (unsigned i = 0; i < Count; ++i) {
    RegPairInfo RPI;
    RPI.Reg1 = CSI[i].getReg();

    assert(AArch64::GPR64RegClass.contains(RPI.Reg1) ||
           AArch64::FPR64RegClass.contains(RPI.Reg1));
    RPI.IsGPR = AArch64::GPR64RegClass.contains(RPI.Reg1);

    // Add the next reg to the pair if it is in the same register class.
    if (i + 1 < Count) {
      unsigned NextReg = CSI[i + 1].getReg();
      if ((RPI.IsGPR && AArch64::GPR64RegClass.contains(NextReg)) ||
          (!RPI.IsGPR && AArch64::FPR64RegClass.contains(NextReg)))
        RPI.Reg2 = NextReg;
    }

    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert((!RPI.isPaired() ||
            (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
           "Out of order callee saved regs!");

    // MachO's compact unwind format relies on all registers being stored in
    // adjacent register pairs.
    assert((!produceCompactUnwindFrame(MF) ||
            CC == CallingConv::PreserveMost ||
            (RPI.isPaired() &&
             ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
              RPI.Reg1 + 1 == RPI.Reg2))) &&
           "Callee-save registers not saved as adjacent register pair!");

    RPI.FrameIdx = CSI[i].getFrameIdx();

    if (Count * 8 != AFI->getCalleeSavedStackSize() && !RPI.isPaired()) {
      // Round up size of non-pair to pair size if we need to pad the
      // callee-save area to ensure 16-byte alignment.
      Offset -= 16;
      assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16);
      MFI.setObjectAlignment(RPI.FrameIdx, 16);
      AFI->setCalleeSaveStackHasFreeSpace(true);
    } else
      Offset -= RPI.isPaired() ? 16 : 8;
    assert(Offset % 8 == 0);
    RPI.Offset = Offset / 8;
    assert((RPI.Offset >= -64 && RPI.Offset <= 63) &&
           "Offset out of bounds for LDP/STP immediate");

    RegPairs.push_back(RPI);
    if (RPI.isPaired())
      ++i;
  }
}

bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;
    unsigned StrOpc;

    // Issue sequence of spills for cs regs. The first spill may be converted
    // to a pre-decrement store later by emitPrologue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    stp     x22, x21, [sp, #0]     // addImm(+0)
    //    stp     x20, x19, [sp, #16]    // addImm(+2)
    //    stp     fp, lr, [sp, #32]      // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
    if (RPI.IsGPR)
      StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
    else
      StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    MBB.addLiveIn(Reg1);
    if (RPI.isPaired()) {
      MBB.addLiveIn(Reg2);
      MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOStore, 8, 8));
    }
    MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8], where factor*8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOStore, 8, 8));
  }
  return true;
}

bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  DebugLoc DL;
  SmallVector<RegPairInfo, 8> RegPairs;

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs);

  for (auto RPII = RegPairs.begin(), RPIE = RegPairs.end(); RPII != RPIE;
       ++RPII) {
    RegPairInfo RPI = *RPII;
    unsigned Reg1 = RPI.Reg1;
    unsigned Reg2 = RPI.Reg2;

    // Issue sequence of restores for cs regs. The last restore may be converted
    // to a post-increment load later by emitEpilogue if the callee-save stack
    // area allocation can't be combined with the local stack area allocation.
    // For example:
    //    ldp     fp, lr, [sp, #32]      // addImm(+4)
    //    ldp     x20, x19, [sp, #16]    // addImm(+2)
    //    ldp     x22, x21, [sp, #0]     // addImm(+0)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;
    if (RPI.IsGPR)
      LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
    else
      LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
    DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1);
          if (RPI.isPaired())
            dbgs() << ", " << TRI->getName(Reg2);
          dbgs() << ") -> fi#(" << RPI.FrameIdx;
          if (RPI.isPaired())
            dbgs() << ", " << RPI.FrameIdx+1;
          dbgs() << ")\n");

    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (RPI.isPaired()) {
      MIB.addReg(Reg2, getDefRegState(true));
      MIB.addMemOperand(MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx + 1),
          MachineMemOperand::MOLoad, 8, 8));
    }
    MIB.addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(RPI.Offset) // [sp, #offset*8] where the factor*8 is implicit
        .setMIFlag(MachineInstr::FrameDestroy);
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, RPI.FrameIdx),
        MachineMemOperand::MOLoad, 8, 8));
  }
  return true;
}

Matthias Braun02564862015-07-14 17:17:13 +00001104void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
1105 BitVector &SavedRegs,
1106 RegScavenger *RS) const {
1107 // All calls are tail calls in GHC calling conv, and functions have no
1108 // prologue/epilogue.
1109 if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
1110 return;
1111
1112 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
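  // The generic implementation above marks the callee-saved registers that
  // this function actually modifies; the target-specific logic below refines
  // that initial set.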
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned UnspilledCSGPR = AArch64::NoRegister;
  unsigned UnspilledCSGPRPaired = AArch64::NoRegister;

  // When a frame pointer is required, create the frame record by saving the
  // appropriate registers.
  if (hasFP(MF)) {
    SavedRegs.set(AArch64::FP);
    SavedRegs.set(AArch64::LR);
  }

  unsigned BasePointerReg = AArch64::NoRegister;
  if (RegInfo->hasBasePointer(MF))
    BasePointerReg = RegInfo->getBaseRegister();

  bool ExtraCSSpill = false;
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  // Figure out which callee-saved registers to save/restore.
  for (unsigned i = 0; CSRegs[i]; ++i) {
    const unsigned Reg = CSRegs[i];

    // Add the base pointer register to SavedRegs if it is callee-save.
    if (Reg == BasePointerReg)
      SavedRegs.set(Reg);

    bool RegUsed = SavedRegs.test(Reg);
    unsigned PairedReg = CSRegs[i ^ 1];
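    // CSRegs lists pair members at adjacent even/odd indices, so flipping the
    // low bit of i (i + 1 for even i, i - 1 for odd i) selects the register
    // that would share a spill pair with Reg.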
    if (!RegUsed) {
      if (AArch64::GPR64RegClass.contains(Reg) &&
          !RegInfo->isReservedReg(MF, Reg)) {
        UnspilledCSGPR = Reg;
        UnspilledCSGPRPaired = PairedReg;
      }
      continue;
    }

    // MachO's compact unwind format relies on all registers being stored in
    // pairs.
    // FIXME: the usual format is actually better if unwinding isn't needed.
    if (produceCompactUnwindFrame(MF) && !SavedRegs.test(PairedReg)) {
      SavedRegs.set(PairedReg);
      if (AArch64::GPR64RegClass.contains(PairedReg) &&
          !RegInfo->isReservedReg(MF, PairedReg))
        ExtraCSSpill = true;
    }
  }

  DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:";
        for (int Reg = SavedRegs.find_first(); Reg != -1;
             Reg = SavedRegs.find_next(Reg))
          dbgs() << ' ' << PrintReg(Reg, RegInfo);
        dbgs() << "\n";);

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumRegsSpilled = SavedRegs.count();
  bool CanEliminateFrame = NumRegsSpilled == 0;

  // FIXME: Set BigStack if any stack slot references may be out of range.
  // For now, just conservatively estimate based on the unscaled indexing
  // range. This often allocates an unnecessary spill slot, but realistically
  // that's not a big deal at this stage of compilation.
  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled;
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  bool BigStack = (CFSize >= 256);
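  // The 256-byte threshold matches the reach of the unscaled (ldur/stur)
  // addressing mode, whose signed 9-bit immediate covers [-256, 255] bytes;
  // larger frames may need offsets a single unscaled access cannot reach.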
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate whether we might need to scavenge a register at some point in
  // order to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything
  // else here.
  if (BigStack && !ExtraCSSpill) {
    if (UnspilledCSGPR != AArch64::NoRegister) {
      DEBUG(dbgs() << "Spilling " << PrintReg(UnspilledCSGPR, RegInfo)
                   << " to get a scratch register.\n");
      SavedRegs.set(UnspilledCSGPR);
      // MachO's compact unwind format relies on all registers being stored in
      // pairs, so if we need to spill one extra for BigStack, then we need to
      // store the pair.
      if (produceCompactUnwindFrame(MF))
        SavedRegs.set(UnspilledCSGPRPaired);
      ExtraCSSpill = true;
      NumRegsSpilled = SavedRegs.count();
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill) {
      const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
      int FI = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }

  // Round up to register pair alignment to avoid additional SP adjustment
  // instructions.
  AFI->setCalleeSavedStackSize(alignTo(8 * NumRegsSpilled, 16));
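  // For example, five spilled callee-saved registers occupy 5 * 8 = 40 bytes,
  // which rounds up to a 48-byte callee-save area (hypothetical count, shown
  // only to illustrate the alignment).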
}

bool AArch64FrameLowering::enableStackSlotScavenging(
    const MachineFunction &MF) const {
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  return AFI->hasCalleeSaveStackFreeSpace();
}