//===- ARM64FrameLowering.cpp - ARM64 Frame Lowering -----------*- C++ -*-====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "frame-info"
#include "ARM64FrameLowering.h"
#include "ARM64InstrInfo.h"
#include "ARM64MachineFunctionInfo.h"
#include "ARM64Subtarget.h"
#include "ARM64TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> EnableRedZone("arm64-redzone",
                                   cl::desc("enable use of redzone on ARM64"),
                                   cl::init(false), cl::Hidden);

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

static unsigned estimateStackSize(MachineFunction &MF) {
  const MachineFrameInfo *FFI = MF.getFrameInfo();
  int Offset = 0;
  for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
    int FixedOff = -FFI->getObjectOffset(i);
    if (FixedOff > Offset)
      Offset = FixedOff;
  }
  for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
    if (FFI->isDeadObjectIndex(i))
      continue;
    Offset += FFI->getObjectSize(i);
    unsigned Align = FFI->getObjectAlignment(i);
    // Adjust to alignment boundary
    Offset = (Offset + Align - 1) / Align * Align;
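    // For example, Offset = 20 with Align = 8 rounds up to
    // (20 + 7) / 8 * 8 == 24.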
  }
  // This does not include the 16 bytes used for fp and lr.
  return (unsigned)Offset;
}

bool ARM64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;
  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  if (MF.getFunction()->getAttributes().hasAttribute(
          AttributeSet::FunctionIndex, Attribute::NoRedZone))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARM64FunctionInfo *AFI = MF.getInfo<ARM64FunctionInfo>();
  unsigned NumBytes = AFI->getLocalStackSize();

  // Note: currently hasFP() is always true for hasCalls(), but that's an
  // implementation detail of the current code, not a strict requirement,
  // so stay safe here and check both.
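  // A leaf function may keep up to 128 bytes of locals below SP without
  // actually adjusting it; anything larger (or any call or FP use) has to
  // allocate the frame for real.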
  if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
    return false;
  return true;
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool ARM64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

#ifndef NDEBUG
  const TargetRegisterInfo *RegInfo = MF.getTarget().getRegisterInfo();
  assert(!RegInfo->needsStackRealignment(MF) &&
         "No stack realignment on ARM64!");
#endif

  return (MFI->hasCalls() || MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken());
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function. This eliminates the need for
/// add/sub sp brackets around call sites. Returns true if the call frame is
/// included as part of the stack frame.
bool ARM64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

void ARM64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  const ARM64InstrInfo *TII =
      static_cast<const ARM64InstrInfo *>(MF.getTarget().getInstrInfo());
  if (!TFI->hasReservedCallFrame(MF)) {
    // If we have alloca, convert as follows:
    // ADJCALLSTACKDOWN -> sub, sp, sp, amount
    // ADJCALLSTACKUP -> add, sp, sp, amount
    MachineInstr *Old = I;
    DebugLoc DL = Old->getDebugLoc();
    unsigned Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      unsigned Align = TFI->getStackAlignment();
      Amount = (Amount + Align - 1) / Align * Align;
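      // For example, a call site needing 20 bytes of outgoing arguments with a
      // 16-byte stack alignment gets bracketed by "sub sp, sp, #32" and
      // "add sp, sp, #32".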

      // Replace the pseudo instruction with a new instruction...
      unsigned Opc = Old->getOpcode();
      if (Opc == ARM64::ADJCALLSTACKDOWN) {
        emitFrameOffset(MBB, I, DL, ARM64::SP, ARM64::SP, -Amount, TII);
      } else {
        assert(Opc == ARM64::ADJCALLSTACKUP && "expected ADJCALLSTACKUP");
        emitFrameOffset(MBB, I, DL, ARM64::SP, ARM64::SP, Amount, TII);
      }
    }
  }
  MBB.erase(I);
}

void
ARM64FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
                                              MachineBasicBlock::iterator MBBI,
                                              unsigned FramePtr) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const ARM64InstrInfo *TII = TM.getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const DataLayout *TD = MF.getTarget().getDataLayout();
  bool HasFP = hasFP(MF);

  // Calculate amount of bytes used for return address storing.
  int stackGrowth = -TD->getPointerSize(0);

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 2 : 1) * stackGrowth;
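  // With 8-byte pointers this makes stackGrowth -8, so saveAreaOffset is -16
  // when a frame pointer is saved and -8 otherwise.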
  unsigned TotalSkipped = 0;
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset = MFI->getObjectOffset(Info.getFrameIdx()) -
                     getOffsetOfLocalArea() + saveAreaOffset;

    // Don't output a new CFI directive if we're re-saving the frame pointer or
    // link register. This happens when the PrologEpilogInserter has inserted an
    // extra "STP" of the frame pointer and link register -- the "emitPrologue"
    // method automatically generates the directives when frame pointers are
    // used. If we generate CFI directives for the extra "STP"s, the linker will
    // lose track of the correct values for the frame pointer and link register.
    if (HasFP && (FramePtr == Reg || Reg == ARM64::LR)) {
      TotalSkipped += stackGrowth;
      continue;
    }

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
        nullptr, DwarfReg, Offset - TotalSkipped));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

void ARM64FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const ARM64RegisterInfo *RegInfo = TM.getRegisterInfo();
  const ARM64InstrInfo *TII = TM.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  ARM64FunctionInfo *AFI = MF.getInfo<ARM64FunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
  bool HasFP = hasFP(MF);
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  int NumBytes = (int)MFI->getStackSize();
  if (!AFI->hasStackFrame()) {
    assert(!HasFP && "unexpected function without stack frame but with FP");

    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);

    // Label used to tie together the PROLOG_LABEL and the MachineMoves.
    MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();

    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (NumBytes && !canUseRedZone(MF)) {
      emitFrameOffset(MBB, MBBI, DL, ARM64::SP, ARM64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    } else if (NumBytes) {
      ++NumRedZoneFunctions;
    }

    return;
  }

  // Only set up FP if we actually need to.
  int FPOffset = 0;
  if (HasFP) {
    // The first instruction must a) allocate the stack and b) have an
    // immediate that is a negative multiple of 2.
    assert((MBBI->getOpcode() == ARM64::STPXpre ||
            MBBI->getOpcode() == ARM64::STPDpre) &&
           MBBI->getOperand(2).getReg() == ARM64::SP &&
           MBBI->getOperand(3).getImm() < 0 &&
           (MBBI->getOperand(3).getImm() & 1) == 0);

    // The frame pointer is fp = sp - 16. Since the STPXpre subtracts the space
    // required for the callee-saved register area, we get the frame pointer
    // by adding that offset - 16 = -getImm()*8 - 2*8 = -(getImm() + 2) * 8.
    FPOffset = -(MBBI->getOperand(3).getImm() + 2) * 8;
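    // For example, with three callee-saved register pairs the pre-index
    // immediate is -6 (48 bytes), so FPOffset = -(-6 + 2) * 8 = 32 and fp ends
    // up 16 bytes below the incoming sp.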
    assert(FPOffset >= 0 && "Bad Framepointer Offset");
  }

  // Move past the saves of the callee-saved registers.
  while (MBBI->getOpcode() == ARM64::STPXi ||
         MBBI->getOpcode() == ARM64::STPDi ||
         MBBI->getOpcode() == ARM64::STPXpre ||
         MBBI->getOpcode() == ARM64::STPDpre) {
    ++MBBI;
    NumBytes -= 16;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  if (HasFP) {
    // Issue sub fp, sp, FPOffset or
    // mov fp,sp when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, ARM64::FP, ARM64::SP, FPOffset, TII,
                    MachineInstr::FrameSetup);
  }

  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes);

  // Allocate space for the rest of the frame.
  if (NumBytes) {
    // If we're a leaf function, try using the red zone.
    if (!canUseRedZone(MF))
      emitFrameOffset(MBB, MBBI, DL, ARM64::SP, ARM64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  //
  if (RegInfo->hasBasePointer(MF))
    TII->copyPhysReg(MBB, MBBI, DL, ARM64::X19, ARM64::SP, false);

  if (needsFrameMoves) {
    const DataLayout *TD = MF.getTarget().getDataLayout();
    const int StackGrowth = -TD->getPointerSize(0);
    unsigned FramePtr = RegInfo->getFrameRegister(MF);

    // An example of the prologue:
    //
    // .globl __foo
    // .align 2
    // __foo:
    // Ltmp0:
    // .cfi_startproc
    // .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    // .cfi_lsda 16, Lexception33
    //
    // stp xa,bx, [sp, -#offset]!
    // ...
    // stp x28, x27, [sp, #offset-32]
    // stp fp, lr, [sp, #offset-16]
    // add fp, sp, #offset - 16
    // sub sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |               Frame Pointer               |
    // 10024 |               Frame Pointer               |
    //       +-------------------------------------------+
    // 10028 |               Link Register               |
    // 1002c |               Link Register               |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    // [sp] = 10030 :: >>initial value<<
    // sp = 10020 :: stp fp, lr, [sp, #-16]!
    // fp = sp == 10020 :: mov fp, sp
    // [sp] == 10020 :: stp x28, x27, [sp, #-16]!
    // sp == 10010 :: >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    // Ltmp1:
    // .cfi_def_cfa w29, 16
    // Ltmp2:
    // .cfi_offset w30, -8
    // Ltmp3:
    // .cfi_offset w29, -16
    // Ltmp4:
    // .cfi_offset w27, -24
    // Ltmp5:
    // .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);

      // Record the location of the stored LR
      unsigned LR = RegInfo->getDwarfRegNum(ARM64::LR, true);
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, LR, StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);

      // Record the location of the stored FP
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, 2 * StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI->getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Now emit the moves for whatever callee saved regs we have.
    emitCalleeSavedFrameMoves(MBB, MBBI, FramePtr);
  }
}

static bool isCalleeSavedRegister(unsigned Reg, const uint16_t *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
      return true;
  return false;
}

static bool isCSRestore(MachineInstr *MI, const uint16_t *CSRegs) {
  if (MI->getOpcode() == ARM64::LDPXpost ||
      MI->getOpcode() == ARM64::LDPDpost || MI->getOpcode() == ARM64::LDPXi ||
      MI->getOpcode() == ARM64::LDPDi) {
    if (!isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs) ||
        !isCalleeSavedRegister(MI->getOperand(1).getReg(), CSRegs) ||
        MI->getOperand(2).getReg() != ARM64::SP)
      return false;
    return true;
  }

  return false;
}

void ARM64FrameLowering::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARM64InstrInfo *TII =
      static_cast<const ARM64InstrInfo *>(MF.getTarget().getInstrInfo());
  const ARM64RegisterInfo *RegInfo =
      static_cast<const ARM64RegisterInfo *>(MF.getTarget().getRegisterInfo());
  DebugLoc DL = MBBI->getDebugLoc();

  int NumBytes = MFI->getStackSize();
  unsigned NumRestores = 0;
  // Move past the restores of the callee-saved registers.
  MachineBasicBlock::iterator LastPopI = MBBI;
  const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  if (LastPopI != MBB.begin()) {
    do {
      ++NumRestores;
      --LastPopI;
    } while (LastPopI != MBB.begin() && isCSRestore(LastPopI, CSRegs));
    if (!isCSRestore(LastPopI, CSRegs)) {
      ++LastPopI;
      --NumRestores;
    }
  }
  NumBytes -= NumRestores * 16;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer.
    if (!canUseRedZone(MF))
      emitFrameOffset(MBB, LastPopI, DL, ARM64::SP, ARM64::SP, NumBytes, TII);
    return;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
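  // Setting sp = fp - (NumRestores - 1) * 16 drops SP back to the base of the
  // callee-save area; e.g. with three restore pairs, sp = fp - 32, which lines
  // SP up for the ldp sequence that follows.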
  if (NumBytes || MFI->hasVarSizedObjects())
    emitFrameOffset(MBB, LastPopI, DL, ARM64::SP, ARM64::FP,
                    -(NumRestores - 1) * 16, TII, MachineInstr::NoFlags);
}

/// getFrameIndexOffset - Returns the displacement from the frame register to
/// the stack frame of the specified index.
int ARM64FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                            int FI) const {
  unsigned FrameReg;
  return getFrameIndexReference(MF, FI, FrameReg);
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info. It's the same as what we use for resolving the code-gen
/// references for now. FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int ARM64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                               int FI,
                                               unsigned &FrameReg) const {
  return resolveFrameIndexReference(MF, FI, FrameReg);
}

int ARM64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                   int FI, unsigned &FrameReg,
                                                   bool PreferFP) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARM64RegisterInfo *RegInfo =
      static_cast<const ARM64RegisterInfo *>(MF.getTarget().getRegisterInfo());
  const ARM64FunctionInfo *AFI = MF.getInfo<ARM64FunctionInfo>();
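  // Object offsets are relative to the incoming SP. The frame pointer sits 16
  // bytes below that (just past the saved fp/lr pair), hence the +16 below;
  // adding the full stack size instead gives the offset from the adjusted SP.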
  int FPOffset = MFI->getObjectOffset(FI) + 16;
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs (and thus the SP isn't reliable as a base).
  // Make sure useFPForScavengingIndex() does the right thing for the emergency
  // spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame()) {
    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (hasFP(MF) && !RegInfo->hasBasePointer(MF)) {
      // Use SP or FP, whichever gives us the best chance of the offset
      // being in range for direct access. If the FPOffset is positive,
      // that'll always be best, as the SP will be even further away.
      // If the FPOffset is negative, we have to keep in mind that the
      // available offset range for negative offsets is smaller than for
      // positive ones. If we have variable sized objects, we're stuck with
      // using the FP regardless, though, as the SP offset is unknown
      // and we don't have a base pointer available. If an offset is
      // available via the FP and the SP, use whichever is closest.
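      // For example, FPOffset = -40 with Offset = 200 picks the FP (40 is the
      // shorter reach), while FPOffset = -200 with Offset = 40 picks the SP.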
      if (PreferFP || MFI->hasVarSizedObjects() || FPOffset >= 0 ||
          (FPOffset >= -256 && Offset > -FPOffset))
        UseFP = true;
    }
  }

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return FPOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    FrameReg = ARM64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return Offset;
}

static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  if (Reg != ARM64::LR)
    return getKillRegState(true);

  // LR may be referred to later by an @llvm.returnaddress intrinsic.
  bool LRLiveIn = MF.getRegInfo().isLiveIn(ARM64::LR);
  bool LRKill = !(LRLiveIn && MF.getFrameInfo()->isReturnAddressTaken());
  return getKillRegState(LRKill);
}

bool ARM64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  unsigned Count = CSI.size();
  DebugLoc DL;
  assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  for (unsigned i = 0; i < Count; i += 2) {
    unsigned idx = Count - i - 2;
    unsigned Reg1 = CSI[idx].getReg();
    unsigned Reg2 = CSI[idx + 1].getReg();
    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert(CSI[idx].getFrameIdx() + 1 == CSI[idx + 1].getFrameIdx() &&
           "Out of order callee saved regs!");
    unsigned StrOpc;
    assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
    assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
    // Issue sequence of non-sp increment and pi sp spills for cs regs. The
    // first spill is a pre-increment that allocates the stack.
    // For example:
    // stp x22, x21, [sp, #-48]! // addImm(-6)
    // stp x20, x19, [sp, #16]   // addImm(+2)
    // stp fp, lr, [sp, #32]     // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in the epilogue.
    if (ARM64::GPR64RegClass.contains(Reg1)) {
      assert(ARM64::GPR64RegClass.contains(Reg2) &&
             "Expected GPR64 callee-saved register pair!");
      // For first spill use pre-increment store.
      if (i == 0)
        StrOpc = ARM64::STPXpre;
      else
        StrOpc = ARM64::STPXi;
    } else if (ARM64::FPR64RegClass.contains(Reg1)) {
      assert(ARM64::FPR64RegClass.contains(Reg2) &&
             "Expected FPR64 callee-saved register pair!");
      // For first spill use pre-increment store.
      if (i == 0)
        StrOpc = ARM64::STPDpre;
      else
        StrOpc = ARM64::STPDi;
    } else
      llvm_unreachable("Unexpected callee saved register!");
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1) << ", "
                 << TRI->getName(Reg2) << ") -> fi#(" << CSI[idx].getFrameIdx()
                 << ", " << CSI[idx + 1].getFrameIdx() << ")\n");
    // Compute offset: i = 0 => offset = -Count;
    // i = 2 => offset = -(Count - 2) + Count = 2 = i; etc.
    const int Offset = (i == 0) ? -Count : i;
    assert((Offset >= -64 && Offset <= 63) &&
           "Offset out of bounds for STP immediate");
    BuildMI(MBB, MI, DL, TII.get(StrOpc))
        .addReg(Reg2, getPrologueDeath(MF, Reg2))
        .addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(ARM64::SP)
        .addImm(Offset) // [sp, #offset * 8], where factor * 8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
  }
  return true;
}

bool ARM64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  unsigned Count = CSI.size();
  DebugLoc DL;
  assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  for (unsigned i = 0; i < Count; i += 2) {
    unsigned Reg1 = CSI[i].getReg();
    unsigned Reg2 = CSI[i + 1].getReg();
    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    assert(CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx() &&
           "Out of order callee saved regs!");
    // Issue sequence of non-sp increment and sp-pi restores for cs regs. Only
    // the last load is sp-pi post-increment and de-allocates the stack:
    // For example:
    // ldp fp, lr, [sp, #32]   // addImm(+4)
    // ldp x20, x19, [sp, #16] // addImm(+2)
    // ldp x22, x21, [sp], #48 // addImm(+6)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;

    assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
    assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
    if (ARM64::GPR64RegClass.contains(Reg1)) {
      assert(ARM64::GPR64RegClass.contains(Reg2) &&
             "Expected GPR64 callee-saved register pair!");
      if (i == Count - 2)
        LdrOpc = ARM64::LDPXpost;
      else
        LdrOpc = ARM64::LDPXi;
    } else if (ARM64::FPR64RegClass.contains(Reg1)) {
      assert(ARM64::FPR64RegClass.contains(Reg2) &&
             "Expected FPR64 callee-saved register pair!");
      if (i == Count - 2)
        LdrOpc = ARM64::LDPDpost;
      else
        LdrOpc = ARM64::LDPDi;
    } else
      llvm_unreachable("Unexpected callee saved register!");
    DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1) << ", "
                 << TRI->getName(Reg2) << ") -> fi#(" << CSI[i].getFrameIdx()
                 << ", " << CSI[i + 1].getFrameIdx() << ")\n");

    // Compute offset: i = 0 => offset = Count - 2; i = 2 => offset = Count - 4;
    // etc.
    const int Offset = (i == Count - 2) ? Count : Count - i - 2;
    assert((Offset >= -64 && Offset <= 63) &&
           "Offset out of bounds for LDP immediate");
    BuildMI(MBB, MI, DL, TII.get(LdrOpc))
        .addReg(Reg2, getDefRegState(true))
        .addReg(Reg1, getDefRegState(true))
        .addReg(ARM64::SP)
        .addImm(Offset); // [sp], #offset * 8 or [sp, #offset * 8]
                         // where the factor * 8 is implicit
  }
  return true;
}

void ARM64FrameLowering::processFunctionBeforeCalleeSavedScan(
    MachineFunction &MF, RegScavenger *RS) const {
  const ARM64RegisterInfo *RegInfo =
      static_cast<const ARM64RegisterInfo *>(MF.getTarget().getRegisterInfo());
  ARM64FunctionInfo *AFI = MF.getInfo<ARM64FunctionInfo>();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  SmallVector<unsigned, 4> UnspilledCSGPRs;
  SmallVector<unsigned, 4> UnspilledCSFPRs;

  // The frame record needs to be created by saving the appropriate registers
  if (hasFP(MF)) {
    MRI->setPhysRegUsed(ARM64::FP);
    MRI->setPhysRegUsed(ARM64::LR);
  }

  // Spill the BasePtr if it's used. Do this first thing so that the
  // getCalleeSavedRegs() below will get the right answer.
  if (RegInfo->hasBasePointer(MF))
    MRI->setPhysRegUsed(RegInfo->getBaseRegister());

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumGPRSpilled = 0;
  unsigned NumFPRSpilled = 0;
  bool ExtraCSSpill = false;
  bool CanEliminateFrame = true;
  DEBUG(dbgs() << "*** processFunctionBeforeCalleeSavedScan\nUsed CSRs:");
  const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&MF);

  // Check pairs of consecutive callee-saved registers.
  for (unsigned i = 0; CSRegs[i]; i += 2) {
    assert(CSRegs[i + 1] && "Odd number of callee-saved registers!");

    const unsigned OddReg = CSRegs[i];
    const unsigned EvenReg = CSRegs[i + 1];
    assert((ARM64::GPR64RegClass.contains(OddReg) &&
            ARM64::GPR64RegClass.contains(EvenReg)) ^
               (ARM64::FPR64RegClass.contains(OddReg) &&
                ARM64::FPR64RegClass.contains(EvenReg)) &&
           "Register class mismatch!");

    const bool OddRegUsed = MRI->isPhysRegUsed(OddReg);
    const bool EvenRegUsed = MRI->isPhysRegUsed(EvenReg);

    // Early exit if none of the registers in the register pair is actually
    // used.
    if (!OddRegUsed && !EvenRegUsed) {
      if (ARM64::GPR64RegClass.contains(OddReg)) {
        UnspilledCSGPRs.push_back(OddReg);
        UnspilledCSGPRs.push_back(EvenReg);
      } else {
        UnspilledCSFPRs.push_back(OddReg);
        UnspilledCSFPRs.push_back(EvenReg);
      }
      continue;
    }

    unsigned Reg = ARM64::NoRegister;
    // If only one of the registers of the register pair is used, make sure to
    // mark the other one as used as well.
    if (OddRegUsed ^ EvenRegUsed) {
      // Find out which register is the additional spill.
      Reg = OddRegUsed ? EvenReg : OddReg;
      MRI->setPhysRegUsed(Reg);
    }

    DEBUG(dbgs() << ' ' << PrintReg(OddReg, RegInfo));
    DEBUG(dbgs() << ' ' << PrintReg(EvenReg, RegInfo));

    assert(((OddReg == ARM64::LR && EvenReg == ARM64::FP) ||
            (RegInfo->getEncodingValue(OddReg) + 1 ==
             RegInfo->getEncodingValue(EvenReg))) &&
           "Register pair of non-adjacent registers!");
    if (ARM64::GPR64RegClass.contains(OddReg)) {
      NumGPRSpilled += 2;
      // If it's not a reserved register, we can use it in lieu of an
      // emergency spill slot for the register scavenger.
      // FIXME: It would be better to instead keep looking and choose another
      // unspilled register that isn't reserved, if there is one.
      if (Reg != ARM64::NoRegister && !RegInfo->isReservedReg(MF, Reg))
        ExtraCSSpill = true;
    } else
      NumFPRSpilled += 2;

    CanEliminateFrame = false;
  }

  // FIXME: Set BigStack if any stack slot references may be out of range.
  // For now, just conservatively guesstimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned CFSize = estimateStackSize(MF) + 8 * (NumGPRSpilled + NumFPRSpilled);
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  bool BigStack = (CFSize >= 256);
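  // 256 roughly matches the reach of the signed 9-bit unscaled-offset load and
  // store forms, which is what "out of range" means here.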
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack && !ExtraCSSpill) {

    // If we're adding a register to spill here, we have to add two of them
    // to keep the number of regs to spill even.
    assert(((UnspilledCSGPRs.size() & 1) == 0) && "Odd number of registers!");
    unsigned Count = 0;
    while (!UnspilledCSGPRs.empty() && Count < 2) {
      unsigned Reg = UnspilledCSGPRs.back();
      UnspilledCSGPRs.pop_back();
      DEBUG(dbgs() << "Spilling " << PrintReg(Reg, RegInfo)
                   << " to get a scratch register.\n");
      MRI->setPhysRegUsed(Reg);
      ExtraCSSpill = true;
      ++Count;
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill) {
      const TargetRegisterClass *RC = &ARM64::GPR64RegClass;
      int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }
}