//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function so that the particular area isn't present
// in the frame.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) isn't created until the function
// body itself runs, after the prologue. However, it's depicted here
// for completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | prev_fp, prev_lr                  |
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | other callee-saved registers      |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp(not defined by ABI,
// |.variable-sized.local.variables....|       LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// To access data in a frame, a constant offset to it must be computable at
// compile time from one of the pointers (fp, bp, sp). The sizes of the areas
// with a dotted background cannot be computed at compile time if they are
// present, so when all of the frame areas are non-empty, all three of fp, bp
// and sp must be set up to be able to access every part of the frame.
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
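//
// As a rough illustration of the rules above (a sketch, not drawn from any
// particular test case; use() is just a placeholder), a function such as
//
//   void f(int n) {
//     alignas(32) int overaligned[8];            // over-aligned local: fp
//     int *vla = (int *)alloca(n * sizeof(int)); // dynamic size: bp as well
//     use(overaligned, vla);
//   }
//
// ends up needing all three of fp, bp and sp, whereas a leaf function with
// only small, default-aligned locals can usually make do with sp alone.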
//
// FIXME: also explain the redzone concept.
// FIXME: also explain the concept of reserved call frames.
//
//===----------------------------------------------------------------------===//

#include "AArch64FrameLowering.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "frame-info"

static cl::opt<bool> EnableRedZone("aarch64-redzone",
                                   cl::desc("enable use of redzone on AArch64"),
                                   cl::init(false), cl::Hidden);
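// Note (an illustrative usage sketch, not part of the original source): since
// this is a boolean cl::opt, the hidden flag can be switched on from the llc
// command line, e.g. something along the lines of
//   llc -mtriple=aarch64-linux-gnu -aarch64-redzone input.ll
// which lets small leaf functions skip the SP adjustment in emitPrologue.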

STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");

bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
  if (!EnableRedZone)
    return false;
  // Don't use the red zone if the function explicitly asks us not to.
  // This is typically used for kernel code.
  if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  unsigned NumBytes = AFI->getLocalStackSize();

  // Note: currently hasFP() is always true for hasCalls(), but that's an
  // implementation detail of the current code, not a strict requirement,
  // so stay safe here and check both.
  if (MFI->hasCalls() || hasFP(MF) || NumBytes > 128)
    return false;
  return true;
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  return (MFI->hasCalls() || MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasStackMap() ||
          MFI->hasPatchPoint() || RegInfo->needsStackRealignment(MF));
}

/// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
/// not required, we reserve argument space for call sites in the function
/// immediately on entry to the current function.  This eliminates the need for
/// add/sub sp brackets around call sites.  Returns true if the call frame is
/// included as part of the stack frame.
bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

void AArch64FrameLowering::eliminateCallFramePseudoInstr(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator I) const {
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  DebugLoc DL = I->getDebugLoc();
  unsigned Opc = I->getOpcode();
  bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
  uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;

  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  if (!TFI->hasReservedCallFrame(MF)) {
    unsigned Align = getStackAlignment();

    int64_t Amount = I->getOperand(0).getImm();
    Amount = RoundUpToAlignment(Amount, Align);
    if (!IsDestroy)
      Amount = -Amount;

    // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
    // doesn't have to pop anything), then the first operand will be zero too so
    // this adjustment is a no-op.
    if (CalleePopAmount == 0) {
      // FIXME: in-function stack adjustment for calls is limited to 24-bits
      // because there's no guaranteed temporary register available.
      //
      // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
      // 1) For offsets that fit in 12 bits, we use a single instruction with
      //    LSL #0.
      // 2) For offsets between 12 and 24 bits, we use two instructions: one
      //    uses LSL #0, and the other uses LSL #12.
      //
      // Most call frames will be allocated at the start of a function, so
      // this is OK, but it is a limitation that needs dealing with.
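      //
      // As a hypothetical illustration (not from the original comment), an
      // adjustment of 0x12340 bytes would be materialized roughly as:
      //    sub sp, sp, #0x12, lsl #12   // subtract 0x12000
      //    sub sp, sp, #0x340           // subtract the remaining 0x340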
      assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
      emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, Amount, TII);
    }
  } else if (CalleePopAmount != 0) {
    // If the calling convention demands that the callee pops arguments from
    // the stack, we want to add it back if we have a reserved call frame.
    assert(CalleePopAmount < 0xffffff && "call frame too large");
    emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, -CalleePopAmount,
                    TII);
  }
  MBB.erase(I);
}

void AArch64FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    unsigned FramePtr) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty())
    return;

  const DataLayout *TD = MF.getTarget().getDataLayout();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = -TD->getPointerSize(0);

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 2 : 1) * stackGrowth;
  unsigned TotalSkipped = 0;
  for (const auto &Info : CSI) {
    unsigned Reg = Info.getReg();
    int64_t Offset = MFI->getObjectOffset(Info.getFrameIdx()) -
                     getOffsetOfLocalArea() + saveAreaOffset;

    // Don't output a new CFI directive if we're re-saving the frame pointer or
    // link register. This happens when the PrologEpilogInserter has inserted an
    // extra "STP" of the frame pointer and link register -- the "emitPrologue"
    // method automatically generates the directives when frame pointers are
    // used. If we generate CFI directives for the extra "STP"s, the linker will
    // lose track of the correct values for the frame pointer and link register.
    if (HasFP && (FramePtr == Reg || Reg == AArch64::LR)) {
      TotalSkipped += stackGrowth;
      continue;
    }

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
        nullptr, DwarfReg, Offset - TotalSkipped));
    BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex)
        .setMIFlags(MachineInstr::FrameSetup);
  }
}

/// Get FPOffset by analyzing the first instruction.
static int getFPOffsetInPrologue(MachineInstr *MBBI) {
  // The first instruction must a) allocate the stack and b) have an immediate
  // that is a multiple of -2.
  assert(((MBBI->getOpcode() == AArch64::STPXpre ||
           MBBI->getOpcode() == AArch64::STPDpre) &&
          MBBI->getOperand(3).getReg() == AArch64::SP &&
          MBBI->getOperand(4).getImm() < 0 &&
          (MBBI->getOperand(4).getImm() & 1) == 0));

  // The frame pointer is fp = sp - 16. Since the STPXpre subtracts the space
  // required for the callee-saved register area, we get the frame pointer
  // by adding that area's size minus 16:
  //   FPOffset = -getImm()*8 - 16 = -(getImm() + 2) * 8.
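  // For instance (a made-up prologue, not taken from any test case), an
  // opening "stp x24, x23, [sp, #-48]!" has getImm() == -6, giving
  // FPOffset = -(-6 + 2) * 8 = 32, i.e. fp will be set up 32 bytes above
  // the post-store sp.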
  int FPOffset = -(MBBI->getOperand(4).getImm() + 2) * 8;
  assert(FPOffset >= 0 && "Bad Framepointer Offset");
  return FPOffset;
}

static bool isCSSave(MachineInstr *MBBI) {
  return MBBI->getOpcode() == AArch64::STPXi ||
         MBBI->getOpcode() == AArch64::STPDi ||
         MBBI->getOpcode() == AArch64::STPXpre ||
         MBBI->getOpcode() == AArch64::STPDpre;
}

void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.begin();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
  bool HasFP = hasFP(MF);
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

  int NumBytes = (int)MFI->getStackSize();
  if (!AFI->hasStackFrame()) {
    assert(!HasFP && "unexpected function without stack frame but with FP");

    // All of the stack allocation is for locals.
    AFI->setLocalStackSize(NumBytes);

    // Label used to tie together the PROLOG_LABEL and the MachineMoves.
    MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();

    // REDZONE: If the stack size is less than 128 bytes, we don't need
    // to actually allocate.
    if (NumBytes && !canUseRedZone(MF)) {
      emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
                      MachineInstr::FrameSetup);

      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else if (NumBytes) {
      ++NumRedZoneFunctions;
    }

    return;
  }

  // Only set up FP if we actually need to.
  int FPOffset = 0;
  if (HasFP)
    FPOffset = getFPOffsetInPrologue(MBBI);

  // Move past the saves of the callee-saved registers.
  while (isCSSave(MBBI)) {
    ++MBBI;
    NumBytes -= 16;
  }
  assert(NumBytes >= 0 && "Negative stack allocation size!?");
  if (HasFP) {
    // Issue    sub fp, sp, FPOffset  or
    //          mov fp, sp            when FPOffset is zero.
    // Note: All stores of callee-saved registers are marked as "FrameSetup".
    // This code marks the instruction(s) that set the FP also.
    emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
                    MachineInstr::FrameSetup);
  }

  // All of the remaining stack allocations are for locals.
  AFI->setLocalStackSize(NumBytes);

  // Allocate space for the rest of the frame.

  const unsigned Alignment = MFI->getMaxAlignment();
  const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
  unsigned scratchSPReg = AArch64::SP;
  if (NumBytes && NeedsRealignment) {
    // Use the first callee-saved register as a scratch register.
    scratchSPReg = AArch64::X9;
    MF.getRegInfo().setPhysRegUsed(scratchSPReg);
  }

  // If we're a leaf function, try using the red zone.
  if (NumBytes && !canUseRedZone(MF))
    // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
    // the correct value here, as NumBytes also includes padding bytes,
    // which shouldn't be counted here.
    emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
                    MachineInstr::FrameSetup);

  if (NumBytes && NeedsRealignment) {
    const unsigned NrBitsToZero = countTrailingZeros(Alignment);
    assert(NrBitsToZero > 1);
    assert(scratchSPReg != AArch64::SP);

    // SUB X9, SP, NumBytes
    //   -- X9 is a temporary register, so it shouldn't contain any live data
    //      here and is free to use. This is already produced by emitFrameOffset
    //      above.
    // AND SP, X9, 0b11111...0000
    // The logical immediates have a non-trivial encoding. The following
    // formula computes the encoded immediate with all ones but
    // NrBitsToZero zero bits as least significant bits.
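    // For instance (a worked example, not part of the original comment): for
    // a 32-byte alignment, NrBitsToZero == 5, so immr == 59 and imms == 58.
    // That decodes to 59 consecutive ones rotated up to the top bits, i.e.
    // 0xFFFFFFFFFFFFFFE0, which clears the low 5 bits of the scratch register.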
    uint32_t andMaskEncoded = (1 << 12)                         // = N
                              | ((64 - NrBitsToZero) << 6)      // immr
                              | ((64 - NrBitsToZero - 1) << 0); // imms
    BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
        .addReg(scratchSPReg, RegState::Kill)
        .addImm(andMaskEncoded);
  }

  // If we need a base pointer, set it up here. It's whatever the value of the
  // stack pointer is at this point. Any variable size objects will be allocated
  // after this, so we can still use the base pointer to reference locals.
  //
  // FIXME: Clarify FrameSetup flags here.
  // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
  // needed.
  if (RegInfo->hasBasePointer(MF)) {
    TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
                     false);
  }

  if (needsFrameMoves) {
    const DataLayout *TD = MF.getTarget().getDataLayout();
    const int StackGrowth = -TD->getPointerSize(0);
    unsigned FramePtr = RegInfo->getFrameRegister(MF);
    // An example of the prologue:
    //
    //     .globl __foo
    //     .align 2
    //  __foo:
    // Ltmp0:
    //     .cfi_startproc
    //     .cfi_personality 155, ___gxx_personality_v0
    // Leh_func_begin:
    //     .cfi_lsda 16, Lexception33
    //
    //     stp  xa,bx, [sp, -#offset]!
    //     ...
    //     stp  x28, x27, [sp, #offset-32]
    //     stp  fp, lr, [sp, #offset-16]
    //     add  fp, sp, #offset - 16
    //     sub  sp, sp, #1360
    //
    // The Stack:
    //       +-------------------------------------------+
    // 10000 | ........ | ........ | ........ | ........ |
    // 10004 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10008 | ........ | ........ | ........ | ........ |
    // 1000c | ........ | ........ | ........ | ........ |
    //       +===========================================+
    // 10010 |                X28 Register               |
    // 10014 |                X28 Register               |
    //       +-------------------------------------------+
    // 10018 |                X27 Register               |
    // 1001c |                X27 Register               |
    //       +===========================================+
    // 10020 |                Frame Pointer              |
    // 10024 |                Frame Pointer              |
    //       +-------------------------------------------+
    // 10028 |                Link Register              |
    // 1002c |                Link Register              |
    //       +===========================================+
    // 10030 | ........ | ........ | ........ | ........ |
    // 10034 | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    // 10038 | ........ | ........ | ........ | ........ |
    // 1003c | ........ | ........ | ........ | ........ |
    //       +-------------------------------------------+
    //
    //     [sp] = 10030        ::    >>initial value<<
    //     sp = 10020          ::  stp fp, lr, [sp, #-16]!
    //     fp = sp == 10020    ::  mov fp, sp
    //     [sp] == 10020       ::  stp x28, x27, [sp, #-16]!
    //     sp == 10010         ::    >>final value<<
    //
    // The frame pointer (w29) points to address 10020. If we use an offset of
    // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
    // for w27, and -32 for w28:
    //
    //  Ltmp1:
    //     .cfi_def_cfa w29, 16
    //  Ltmp2:
    //     .cfi_offset w30, -8
    //  Ltmp3:
    //     .cfi_offset w29, -16
    //  Ltmp4:
    //     .cfi_offset w27, -24
    //  Ltmp5:
    //     .cfi_offset w28, -32

    if (HasFP) {
      // Define the current CFA rule to use the provided FP.
      unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);

      // Record the location of the stored LR
      unsigned LR = RegInfo->getDwarfRegNum(AArch64::LR, true);
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, LR, StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);

      // Record the location of the stored FP
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, 2 * StackGrowth));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    } else {
      // Encode the stack size of the leaf function.
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, -MFI->getStackSize()));
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex)
          .setMIFlags(MachineInstr::FrameSetup);
    }

    // Now emit the moves for whatever callee saved regs we have.
    emitCalleeSavedFrameMoves(MBB, MBBI, FramePtr);
  }
}

static bool isCalleeSavedRegister(unsigned Reg, const MCPhysReg *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
      return true;
  return false;
}

static bool isCSRestore(MachineInstr *MI, const MCPhysReg *CSRegs) {
  unsigned RtIdx = 0;
  if (MI->getOpcode() == AArch64::LDPXpost ||
      MI->getOpcode() == AArch64::LDPDpost)
    RtIdx = 1;

  if (MI->getOpcode() == AArch64::LDPXpost ||
      MI->getOpcode() == AArch64::LDPDpost ||
      MI->getOpcode() == AArch64::LDPXi || MI->getOpcode() == AArch64::LDPDi) {
    if (!isCalleeSavedRegister(MI->getOperand(RtIdx).getReg(), CSRegs) ||
        !isCalleeSavedRegister(MI->getOperand(RtIdx + 1).getReg(), CSRegs) ||
        MI->getOperand(RtIdx + 2).getReg() != AArch64::SP)
      return false;
    return true;
  }

  return false;
}

void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const AArch64InstrInfo *TII =
      static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  DebugLoc DL;
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri;
  }
  int NumBytes = MFI->getStackSize();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note that
  // in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments; this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                     ---
  //      |                    |                      |
  //      ---------------------|        ---           |
  //      |                    |         |            |
  //      |   CalleeSavedReg   |         |            |
  //      | (NumRestores * 16) |         |            |
  //      |                    |         |            |
  //      ---------------------|         |         NumBytes
  //      |                    |     StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |         |            |
  //      | (covering callee   |         |            |
  //      |       args)        |         |            |
  //      |                    |         |            |
  //      ----------------------        ---          ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.
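  //
  // As a purely illustrative example (numbers invented): with StackSize == 64
  // and a return that needs to pop 16 bytes of argument space,
  // ArgumentPopSize == 16 and NumBytes becomes 64 + 16 == 80 below.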
  NumBytes += ArgumentPopSize;

  unsigned NumRestores = 0;
  // Move past the restores of the callee-saved registers.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  if (LastPopI != MBB.begin()) {
    do {
      ++NumRestores;
      --LastPopI;
    } while (LastPopI != MBB.begin() && isCSRestore(LastPopI, CSRegs));
    if (!isCSRestore(LastPopI, CSRegs)) {
      ++LastPopI;
      --NumRestores;
    }
  }
  NumBytes -= NumRestores * 16;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer.
    if (!canUseRedZone(MF))
      emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes,
                      TII);
    return;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
                    -(NumRestores - 1) * 16, TII, MachineInstr::NoFlags);
}

/// getFrameIndexOffset - Returns the displacement from the frame register to
/// the stack frame of the specified index.
int AArch64FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                              int FI) const {
  unsigned FrameReg;
  return getFrameIndexReference(MF, FI, FrameReg);
}

/// getFrameIndexReference - Provide a base+offset reference to an FI slot for
/// debug info.  It's the same as what we use for resolving the code-gen
/// references for now.  FIXME: This can go wrong when references are
/// SP-relative and simple call frames aren't used.
int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                 int FI,
                                                 unsigned &FrameReg) const {
  return resolveFrameIndexReference(MF, FI, FrameReg);
}

int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF,
                                                     int FI, unsigned &FrameReg,
                                                     bool PreferFP) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  int FPOffset = MFI->getObjectOffset(FI) + 16;
  int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
  bool isFixed = MFI->isFixedObjectIndex(FI);

  // Use frame pointer to reference fixed objects. Use it for locals if
  // there are VLAs or a dynamically realigned SP (and thus the SP isn't
  // reliable as a base). Make sure useFPForScavengingIndex() does the
  // right thing for the emergency spill slot.
  bool UseFP = false;
  if (AFI->hasStackFrame()) {
    // Note: Keeping the following as multiple 'if' statements rather than
    // merging to a single expression for readability.
    //
    // Argument access should always use the FP.
    if (isFixed) {
      UseFP = hasFP(MF);
    } else if (hasFP(MF) && !RegInfo->hasBasePointer(MF) &&
               !RegInfo->needsStackRealignment(MF)) {
      // Use SP or FP, whichever gives us the best chance of the offset
      // being in range for direct access. If the FPOffset is positive,
      // that'll always be best, as the SP will be even further away.
      // If the FPOffset is negative, we have to keep in mind that the
      // available offset range for negative offsets is smaller than for
      // positive ones. If we have variable sized objects, we're stuck with
      // using the FP regardless, though, as the SP offset is unknown
      // and we don't have a base pointer available. If an offset is
      // available via the FP and the SP, use whichever is closest.
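      // For instance (illustrative numbers only): with FPOffset == -48 and
      // Offset == 112, the FP is the closer base (-48 >= -256 and 112 > 48),
      // so the slot is referenced off the FP; with FPOffset == -400 instead,
      // the negative-offset range is exceeded and the SP-relative form wins.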
      if (PreferFP || MFI->hasVarSizedObjects() || FPOffset >= 0 ||
          (FPOffset >= -256 && Offset > -FPOffset))
        UseFP = true;
    }
  }

  assert((isFixed || !RegInfo->needsStackRealignment(MF) || !UseFP) &&
         "In the presence of dynamic stack pointer realignment, "
         "non-argument objects cannot be accessed through the frame pointer");

  if (UseFP) {
    FrameReg = RegInfo->getFrameRegister(MF);
    return FPOffset;
  }

  // Use the base pointer if we have one.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else {
    FrameReg = AArch64::SP;
    // If we're using the red zone for this function, the SP won't actually
    // be adjusted, so the offsets will be negative. They're also all
    // within range of the signed 9-bit immediate instructions.
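    // For example (hypothetical numbers): in a red-zone leaf function with a
    // 32-byte local area, a slot at object offset -16 yields
    // Offset == 16 - 32 == -16, i.e. an access such as "ldur w0, [sp, #-16]".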
    if (canUseRedZone(MF))
      Offset -= AFI->getLocalStackSize();
  }

  return Offset;
}

static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
  if (Reg != AArch64::LR)
    return getKillRegState(true);

  // LR may be referred to later by an @llvm.returnaddress intrinsic.
  bool LRLiveIn = MF.getRegInfo().isLiveIn(AArch64::LR);
  bool LRKill = !(LRLiveIn && MF.getFrameInfo()->isReturnAddressTaken());
  return getKillRegState(LRKill);
}

bool AArch64FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  unsigned Count = CSI.size();
  DebugLoc DL;
  assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  for (unsigned i = 0; i < Count; i += 2) {
    unsigned idx = Count - i - 2;
    unsigned Reg1 = CSI[idx].getReg();
    unsigned Reg2 = CSI[idx + 1].getReg();
    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the store
    // pair instructions directly. Assert if we see anything otherwise.
    //
    // The order of the registers in the list is controlled by
    // getCalleeSavedRegs(), so they will always be in-order, as well.
    assert(CSI[idx].getFrameIdx() + 1 == CSI[idx + 1].getFrameIdx() &&
           "Out of order callee saved regs!");
    unsigned StrOpc;
    assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
    assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
    // Issue sequence of non-sp increment and pi sp spills for cs regs. The
    // first spill is a pre-increment that allocates the stack.
    // For example:
    //    stp     x22, x21, [sp, #-48]!   // addImm(-6)
    //    stp     x20, x19, [sp, #16]     // addImm(+2)
    //    stp     fp, lr, [sp, #32]       // addImm(+4)
    // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for the restores in the epilogue.
    if (AArch64::GPR64RegClass.contains(Reg1)) {
      assert(AArch64::GPR64RegClass.contains(Reg2) &&
             "Expected GPR64 callee-saved register pair!");
      // For first spill use pre-increment store.
      if (i == 0)
        StrOpc = AArch64::STPXpre;
      else
        StrOpc = AArch64::STPXi;
    } else if (AArch64::FPR64RegClass.contains(Reg1)) {
      assert(AArch64::FPR64RegClass.contains(Reg2) &&
             "Expected FPR64 callee-saved register pair!");
      // For first spill use pre-increment store.
      if (i == 0)
        StrOpc = AArch64::STPDpre;
      else
        StrOpc = AArch64::STPDi;
    } else
      llvm_unreachable("Unexpected callee saved register!");
    DEBUG(dbgs() << "CSR spill: (" << TRI->getName(Reg1) << ", "
                 << TRI->getName(Reg2) << ") -> fi#(" << CSI[idx].getFrameIdx()
                 << ", " << CSI[idx + 1].getFrameIdx() << ")\n");
    // Compute offset: i = 0 => offset = -Count;
    //                 i = 2 => offset = -(Count - 2) + Count = 2 = i; etc.
    const int Offset = (i == 0) ? -Count : i;
    assert((Offset >= -64 && Offset <= 63) &&
           "Offset out of bounds for STP immediate");
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
    if (StrOpc == AArch64::STPDpre || StrOpc == AArch64::STPXpre)
      MIB.addReg(AArch64::SP, RegState::Define);

    MBB.addLiveIn(Reg1);
    MBB.addLiveIn(Reg2);
    MIB.addReg(Reg2, getPrologueDeath(MF, Reg2))
        .addReg(Reg1, getPrologueDeath(MF, Reg1))
        .addReg(AArch64::SP)
        .addImm(Offset) // [sp, #offset * 8], where factor * 8 is implicit
        .setMIFlag(MachineInstr::FrameSetup);
  }
  return true;
}

bool AArch64FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  unsigned Count = CSI.size();
  DebugLoc DL;
  assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");

  if (MI != MBB.end())
    DL = MI->getDebugLoc();

  for (unsigned i = 0; i < Count; i += 2) {
    unsigned Reg1 = CSI[i].getReg();
    unsigned Reg2 = CSI[i + 1].getReg();
    // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
    // list to come in sorted by frame index so that we can issue the load
    // pair instructions directly. Assert if we see anything otherwise.
    assert(CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx() &&
           "Out of order callee saved regs!");
    // Issue sequence of non-sp increment and sp-pi restores for cs regs. Only
    // the last load is sp-pi post-increment and de-allocates the stack:
    // For example:
    //    ldp     fp, lr, [sp, #32]       // addImm(+4)
    //    ldp     x20, x19, [sp, #16]     // addImm(+2)
    //    ldp     x22, x21, [sp], #48     // addImm(+6)
    // Note: see comment in spillCalleeSavedRegisters()
    unsigned LdrOpc;

    assert((Count & 1) == 0 && "Odd number of callee-saved regs to spill!");
    assert((i & 1) == 0 && "Odd index for callee-saved reg spill!");
    if (AArch64::GPR64RegClass.contains(Reg1)) {
      assert(AArch64::GPR64RegClass.contains(Reg2) &&
             "Expected GPR64 callee-saved register pair!");
      if (i == Count - 2)
        LdrOpc = AArch64::LDPXpost;
      else
        LdrOpc = AArch64::LDPXi;
    } else if (AArch64::FPR64RegClass.contains(Reg1)) {
      assert(AArch64::FPR64RegClass.contains(Reg2) &&
             "Expected FPR64 callee-saved register pair!");
      if (i == Count - 2)
        LdrOpc = AArch64::LDPDpost;
      else
        LdrOpc = AArch64::LDPDi;
    } else
      llvm_unreachable("Unexpected callee saved register!");
    DEBUG(dbgs() << "CSR restore: (" << TRI->getName(Reg1) << ", "
                 << TRI->getName(Reg2) << ") -> fi#(" << CSI[i].getFrameIdx()
                 << ", " << CSI[i + 1].getFrameIdx() << ")\n");

    // Compute offset: i = 0 => offset = Count - 2; i = 2 => offset = Count - 4;
    // etc.
    const int Offset = (i == Count - 2) ? Count : Count - i - 2;
    assert((Offset >= -64 && Offset <= 63) &&
           "Offset out of bounds for LDP immediate");
    MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
    if (LdrOpc == AArch64::LDPXpost || LdrOpc == AArch64::LDPDpost)
      MIB.addReg(AArch64::SP, RegState::Define);

    MIB.addReg(Reg2, getDefRegState(true))
        .addReg(Reg1, getDefRegState(true))
        .addReg(AArch64::SP)
        .addImm(Offset); // [sp], #offset * 8  or [sp, #offset * 8]
                         // where the factor * 8 is implicit
  }
  return true;
}

void AArch64FrameLowering::processFunctionBeforeCalleeSavedScan(
    MachineFunction &MF, RegScavenger *RS) const {
  const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  SmallVector<unsigned, 4> UnspilledCSGPRs;
  SmallVector<unsigned, 4> UnspilledCSFPRs;

  // The frame record needs to be created by saving the appropriate registers
  if (hasFP(MF)) {
    MRI->setPhysRegUsed(AArch64::FP);
    MRI->setPhysRegUsed(AArch64::LR);
  }

  // Spill the BasePtr if it's used. Do this first thing so that the
  // getCalleeSavedRegs() below will get the right answer.
  if (RegInfo->hasBasePointer(MF))
    MRI->setPhysRegUsed(RegInfo->getBaseRegister());

  if (RegInfo->needsStackRealignment(MF) && !RegInfo->hasBasePointer(MF))
    MRI->setPhysRegUsed(AArch64::X9);

  // If any callee-saved registers are used, the frame cannot be eliminated.
  unsigned NumGPRSpilled = 0;
  unsigned NumFPRSpilled = 0;
  bool ExtraCSSpill = false;
  bool CanEliminateFrame = true;
  DEBUG(dbgs() << "*** processFunctionBeforeCalleeSavedScan\nUsed CSRs:");
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);

  // Check pairs of consecutive callee-saved registers.
  for (unsigned i = 0; CSRegs[i]; i += 2) {
    assert(CSRegs[i + 1] && "Odd number of callee-saved registers!");

    const unsigned OddReg = CSRegs[i];
    const unsigned EvenReg = CSRegs[i + 1];
    assert((AArch64::GPR64RegClass.contains(OddReg) &&
            AArch64::GPR64RegClass.contains(EvenReg)) ^
               (AArch64::FPR64RegClass.contains(OddReg) &&
                AArch64::FPR64RegClass.contains(EvenReg)) &&
           "Register class mismatch!");

    const bool OddRegUsed = MRI->isPhysRegUsed(OddReg);
    const bool EvenRegUsed = MRI->isPhysRegUsed(EvenReg);

    // Early exit if none of the registers in the register pair is actually
    // used.
    if (!OddRegUsed && !EvenRegUsed) {
      if (AArch64::GPR64RegClass.contains(OddReg)) {
        UnspilledCSGPRs.push_back(OddReg);
        UnspilledCSGPRs.push_back(EvenReg);
      } else {
        UnspilledCSFPRs.push_back(OddReg);
        UnspilledCSFPRs.push_back(EvenReg);
      }
      continue;
    }

    unsigned Reg = AArch64::NoRegister;
    // If only one of the registers of the register pair is used, make sure to
    // mark the other one as used as well.
    if (OddRegUsed ^ EvenRegUsed) {
      // Find out which register is the additional spill.
      Reg = OddRegUsed ? EvenReg : OddReg;
      MRI->setPhysRegUsed(Reg);
    }

    DEBUG(dbgs() << ' ' << PrintReg(OddReg, RegInfo));
    DEBUG(dbgs() << ' ' << PrintReg(EvenReg, RegInfo));

    assert(((OddReg == AArch64::LR && EvenReg == AArch64::FP) ||
            (RegInfo->getEncodingValue(OddReg) + 1 ==
             RegInfo->getEncodingValue(EvenReg))) &&
           "Register pair of non-adjacent registers!");
    if (AArch64::GPR64RegClass.contains(OddReg)) {
      NumGPRSpilled += 2;
      // If it's not a reserved register, we can use it in lieu of an
      // emergency spill slot for the register scavenger.
      // FIXME: It would be better to instead keep looking and choose another
      // unspilled register that isn't reserved, if there is one.
      if (Reg != AArch64::NoRegister && !RegInfo->isReservedReg(MF, Reg))
        ExtraCSSpill = true;
    } else
      NumFPRSpilled += 2;

    CanEliminateFrame = false;
  }

  // FIXME: Set BigStack if any stack slot references may be out of range.
  // For now, just conservatively guestimate based on unscaled indexing
  // range. We'll end up allocating an unnecessary spill slot a lot, but
  // realistically that's not a big deal at this stage of the game.
  // The CSR spill slots have not been allocated yet, so estimateStackSize
  // won't include them.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned CFSize =
      MFI->estimateStackSize(MF) + 8 * (NumGPRSpilled + NumFPRSpilled);
  DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
  bool BigStack = (CFSize >= 256);
  if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
    AFI->setHasStackFrame(true);

  // Estimate if we might need to scavenge a register at some point in order
  // to materialize a stack offset. If so, either spill one additional
  // callee-saved register or reserve a special spill slot to facilitate
  // register scavenging. If we already spilled an extra callee-saved register
  // above to keep the number of spills even, we don't need to do anything else
  // here.
  if (BigStack && !ExtraCSSpill) {

    // If we're adding a register to spill here, we have to add two of them
    // to keep the number of regs to spill even.
    assert(((UnspilledCSGPRs.size() & 1) == 0) && "Odd number of registers!");
    unsigned Count = 0;
    while (!UnspilledCSGPRs.empty() && Count < 2) {
      unsigned Reg = UnspilledCSGPRs.back();
      UnspilledCSGPRs.pop_back();
      DEBUG(dbgs() << "Spilling " << PrintReg(Reg, RegInfo)
                   << " to get a scratch register.\n");
      MRI->setPhysRegUsed(Reg);
      ExtraCSSpill = true;
      ++Count;
    }

    // If we didn't find an extra callee-saved register to spill, create
    // an emergency spill slot.
    if (!ExtraCSSpill) {
      const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
      int FI = MFI->CreateStackObject(RC->getSize(), RC->getAlignment(), false);
      RS->addScavengingFrameIndex(FI);
      DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                   << " as the emergency spill slot.\n");
    }
  }
}