//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   unsigned StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified.  Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function?  Normally, this is required only when the function
// has any stack objects.  However, FI resolution actually has another job,
// not apparent from its name: it resolves call-frame-setup/destroy pseudos
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo()->hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register.  This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn() ||
          MFI->hasStackMap() || MFI->hasPatchPoint());
}

static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
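
// For example, these selectors pick the narrowest encoding that fits the
// immediate:
//   getSUBriOpcode(/*IsLP64=*/true, 8)    -> X86::SUB64ri8
//   getSUBriOpcode(/*IsLP64=*/true, 4096) -> X86::SUB64ri32
//   getANDriOpcode(/*IsLP64=*/false, -16) -> X86::AND32ri8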

/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction.  We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo *TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}
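
// For example, at a bare 'RETQ' with no implicit register uses this returns
// RAX, the first entry in the 64-bit list; at a 32-bit return that implicitly
// uses EAX for the return value, EAX is skipped and EDX is typically chosen.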

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check whether or not the terminators of \p MBB need to read EFLAGS.
static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator reads an EFLAGS value that is not defined
      // by a previous terminator.
      if (!MO.isDef())
        return true;
      BreakNext = true;
    }
    if (BreakNext)
      break;
  }
  return false;
}
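
// For example, a block ending in 'JE target; JMP fallthrough' reads EFLAGS
// that no earlier terminator defines, so this returns true; a block ending in
// an unconditional 'JMP' or a plain return does not, so this returns false.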
239
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000240/// emitSPUpdate - Emit a series of instructions to increment / decrement the
241/// stack pointer by a constant value.
Quentin Colombet494eb602015-05-22 18:10:47 +0000242void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
243 MachineBasicBlock::iterator &MBBI,
Reid Kleckner98d78032015-06-18 20:22:12 +0000244 int64_t NumBytes, bool InEpilogue) const {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000245 bool isSub = NumBytes < 0;
246 uint64_t Offset = isSub ? -NumBytes : NumBytes;
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000247
248 uint64_t Chunk = (1LL << 31) - 1;
249 DebugLoc DL = MBB.findDebugLoc(MBBI);
250
251 while (Offset) {
252 if (Offset > Chunk) {
253 // Rather than emit a long series of instructions for large offsets,
254 // load the offset into a register and do one sub/add
255 unsigned Reg = 0;
256
257 if (isSub && !isEAXLiveIn(*MBB.getParent()))
Reid Kleckner3854f7b2015-06-18 18:03:25 +0000258 Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000259 else
Reid Kleckner034ea962015-06-18 20:32:02 +0000260 Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000261
262 if (Reg) {
Reid Kleckner98d78032015-06-18 20:22:12 +0000263 unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000264 BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
265 .addImm(Offset);
266 Opc = isSub
Reid Kleckner3854f7b2015-06-18 18:03:25 +0000267 ? getSUBrrOpcode(Is64Bit)
268 : getADDrrOpcode(Is64Bit);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000269 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
270 .addReg(StackPtr)
271 .addReg(Reg);
272 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
273 Offset = 0;
274 continue;
275 }
276 }
277
David Majnemer3aa0bd82015-02-24 00:11:32 +0000278 uint64_t ThisVal = std::min(Offset, Chunk);
Reid Kleckner3854f7b2015-06-18 18:03:25 +0000279 if (ThisVal == (Is64Bit ? 8 : 4)) {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000280 // Use push / pop instead.
281 unsigned Reg = isSub
Reid Kleckner3854f7b2015-06-18 18:03:25 +0000282 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
Reid Kleckner034ea962015-06-18 20:32:02 +0000283 : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000284 if (Reg) {
Reid Kleckner98d78032015-06-18 20:22:12 +0000285 unsigned Opc = isSub
Reid Kleckner3854f7b2015-06-18 18:03:25 +0000286 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
287 : (Is64Bit ? X86::POP64r : X86::POP32r);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000288 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
289 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
290 if (isSub)
291 MI->setFlag(MachineInstr::FrameSetup);
292 Offset -= ThisVal;
293 continue;
294 }
295 }
296
Reid Kleckner98d78032015-06-18 20:22:12 +0000297 MachineInstrBuilder MI = BuildStackAdjustment(
298 MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000299 if (isSub)
Reid Kleckner98d78032015-06-18 20:22:12 +0000300 MI.setMIFlag(MachineInstr::FrameSetup);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +0000301
302 Offset -= ThisVal;
303 }
304}
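
// For example, emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false) on x86-64
// with RAX free becomes a single 'pushq %rax', while an adjustment larger
// than 2^31 - 1 bytes is materialized into a scratch register first and
// applied with one 'subq %reg, %rsp' / 'addq %reg, %rsp'.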

MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
    int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    UseLEA = STI.useLeaForSP();
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags.  Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = terminatorsNeedFlagsAsInput(MBB);
    // If the assert below breaks, it means we did not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !terminatorsNeedFlagsAsInput(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                         : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}
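
// For example, BuildStackAdjustment(MBB, MBBI, DL, -40, /*InEpilogue=*/false)
// emits 'subq $40, %rsp' (SUB64ri8) on a typical 64-bit subtarget, while the
// LEA path produces the flag-preserving form 'leaq -40(%rsp), %rsp'.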

/// mergeSPUpdatesUp - If the instruction immediately before MBBI is a
/// stack-pointer adjustment, fold its byte count into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = std::prev(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
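
// For example, if the instruction before MBBI is 'subq $32, %rsp', then
// mergeSPUpdates(MBB, MBBI, /*doMergeWithPrevious=*/true) erases it and
// returns -32; the prologue folds that into its own allocation with
// 'NumBytes -= mergeSPUpdates(...)'.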

void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI, DebugLoc DL,
                                MCCFIInstruction CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

void
X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            DebugLoc DL) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    BuildCFI(MBB, MBBI, DL,
             MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
  }
}

/// usesTheStack - This function checks whether any user of EFLAGS
/// copies it.  We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_instr_iterator
         ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
       ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}

void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          DebugLoc DL) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  const char *Symbol;
  if (Is64Bit) {
    if (STI.isTargetCygMing()) {
      Symbol = "___chkstk_ms";
    } else {
      Symbol = "__chkstk";
    }
  } else if (STI.isTargetCygMing())
    Symbol = "_alloca";
  else
    Symbol = "_chkstk";

  MachineInstrBuilder CI;

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers.  x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register.  Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(Symbol);
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
  }

  unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
  unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (Is64Bit) {
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves.  They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
        .addReg(X86::RSP)
        .addReg(X86::RAX);
  }
}
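
// For example, on 64-bit MSVC targets with the large code model this emits
// roughly:
//   movabsq $__chkstk, %r11
//   callq   *%r11
//   subq    %rax, %rsp
// whereas the small code model calls '__chkstk' directly via CALL64pcrel32.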

static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}
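
// For example, calculateSetFPREG(40) == 32: 40 is below the 128-byte cap and
// rounds down to the nearest 16-byte boundary.  calculateSetFPREG(300) == 128:
// the value is clamped to 128, which is already 16-byte aligned.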

// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out.  Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
  unsigned StackAlign = getStackAlignment();
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }
  return MaxAlign;
}

void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          DebugLoc DL,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
              StackPtr)
          .addReg(StackPtr)
          .addImm(Val)
          .setMIFlag(MachineInstr::FrameSetup);

  // The EFLAGS implicit def is dead.
  MI->getOperand(3).setIsDead();
}
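
// For example, with MaxAlign == 32 this emits 'andq $-32, %rsp' on 64-bit
// targets: the mask -32 (0x...ffe0) clears the low five bits, rounding the
// stack pointer down to the next 32-byte boundary.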

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer.  Adjust the stack pointer to
/// allocate space for local variables.  Also emit labels used by the
/// exception handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push  %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov  %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
         .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned.  This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and  $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub  %rax, %rsp
  [else]
      sub  $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
          ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov  %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
  bool NeedsDwarfCFI =
      !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
  unsigned FramePtr = TRI->getFrameRegister(MF);
  const unsigned MachineFramePtr =
      STI.isTarget64BitILP32()
          ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
          : FramePtr;
  unsigned BasePtr = TRI->getBaseRegister();
  DebugLoc DL;

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta && IsWin64Prologue)
    report_fatal_error("Can't handle guaranteed tail call under win64 yet");

  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());

  // The default stack probe size is 4096 if the function has no stackprobesize
  // attribute.
  unsigned StackProbeSize = 4096;
  if (Fn->hasFnAttribute("stack-probe-size"))
    Fn->getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).  We also check that we don't
  // push and pop from the stack.
  if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
      !TRI->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() && // No dynamic alloca.
      !MFI->adjustsStack() &&       // No calls.
      !IsWin64CC &&                 // Win64 has no Red Zone
      !usesTheStack(MF) &&          // Don't push and pop.
      !MF.shouldSplitStack()) {     // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }
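
  // For example, a 64-bit leaf function with no frame pointer, no pushes, and
  // 120 bytes of locals satisfies the checks above: StackSize (120) is not
  // greater than 128, so it becomes max(MinSize, 0) == 0, no SP adjustment is
  // emitted, and the locals live in the red zone below %rsp.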

  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Callee-saved registers are pushed on stack before the stack is
    // realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(MachineFramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
                                  nullptr, DwarfFramePtr, 2 * stackGrowth));
    }

    if (NeedsWinCFI) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsWin64Prologue) {
      // Update EBP with the new base value.
      BuildMI(MBB, MBBI, DL,
              TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
              FramePtr)
          .addReg(StackPtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (NeedsDwarfCFI) {
      // Mark effective beginning of when frame pointer becomes valid.
      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
    }

    // Mark the FramePtr as live-in in every block.
    for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
      I->addLiveIn(MachineFramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         MBBI->getFlag(MachineInstr::FrameSetup) &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    unsigned Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
      StackOffset += stackGrowth;
    }

    if (NeedsWinCFI) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
          MachineInstr::FrameSetup);
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Don't do this for Win64; it needs to realign the stack after the prologue.
  if (!IsWin64Prologue && TRI->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
  }

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two.  This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, true);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
  // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go.  The 64-bit version of
  // __chkstk is only responsible for probing the stack.  The 64-bit prologue
  // is responsible for adjusting the stack pointer.  Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  uint64_t AlignedNumBytes = NumBytes;
  if (IsWin64Prologue && TRI->needsStackRealignment(MF))
    AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
  if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // EAX is live-in, which we can only handle on 32-bit targets; the
      // 64-bit probe sequence would clobber RAX.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      if (isUInt<32>(NumBytes)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      } else if (isInt<32>(NumBytes)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    // Save a pointer to the MI where we set AX.
    MachineBasicBlock::iterator SetRAX = MBBI;
    --SetRAX;

    // Call __chkstk, __chkstk_ms, or __alloca.
    emitStackProbeCall(MF, MBB, MBBI, DL);

    // Apply the frame setup flag to all inserted instrs.
    for (; SetRAX != MBBI; ++SetRAX)
      SetRAX->setFlag(MachineInstr::FrameSetup);

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
  }

  if (NeedsWinCFI && NumBytes)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);

  int SEHFrameOffset = 0;
  if (IsWin64Prologue && HasFP) {
    SEHFrameOffset = calculateSetFPREG(NumBytes);
    if (SEHFrameOffset)
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
                   StackPtr, false, SEHFrameOffset);
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr).addReg(StackPtr);

    if (NeedsWinCFI)
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
          .addImm(FramePtr)
          .addImm(SEHFrameOffset)
          .setMIFlag(MachineInstr::FrameSetup);
  }

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
    const MachineInstr *FrameInstr = &*MBBI;
    ++MBBI;

    if (NeedsWinCFI) {
      int FI;
      if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
        if (X86::FR64RegClass.contains(Reg)) {
          int Offset = getFrameIndexOffset(MF, FI);
          Offset += SEHFrameOffset;

          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
              .addImm(Reg)
              .addImm(Offset)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  }

  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
        .setMIFlag(MachineInstr::FrameSetup);

  // Realign stack after we spilled callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Win64 requires aligning the stack after the prologue.
  if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
  }

  // If we need a base pointer, set it up here.  It's whatever the value
  // of the stack pointer is at this point.  Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (TRI->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
    if (X86FI->getRestoreBasePointer()) {
      // Stash value of base pointer.  Saving RSP instead of EBP shortens
      // dependence chain.  Used by SjLj EH.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
                   FramePtr, true, X86FI->getRestoreBasePointerOffset())
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (X86FI->getHasSEHFramePtrSave()) {
      // Stash the value of the frame pointer relative to the base pointer for
      // Win32 EH, which does the inverse of the above: it recovers the frame
      // pointer from the base pointer rather than the other way around.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), BasePtr, true,
                   getFrameIndexOffset(MF, X86FI->getSEHFramePtrSaveIndex()))
          .addReg(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
                                  nullptr, -StackSize + stackGrowth));
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MBB, MBBI, DL);
  }
}

bool X86FrameLowering::canUseLEAForSPInEpilogue(
    const MachineFunction &MF) const {
  // We can't use LEA instructions for adjusting the stack pointer if this is a
  // leaf function in the Win64 ABI.  Only ADD instructions may be used to
  // deallocate the stack.
  // This means that we can use LEA for SP in two situations:
  // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
  // 2. We *have* a frame pointer which means we are permitted to use LEA.
  return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  unsigned FramePtr = TRI->getFrameRegister(MF);
  unsigned MachineFramePtr =
      Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
                   : FramePtr;

  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinCFI =
      IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    NumBytes = FrameSize - CSSize;

    // Callee-saved registers were pushed on stack before the stack was
    // realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }
  uint64_t SEHStackAllocAmt = NumBytes;

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off!  The same applies when the stack was
  // realigned.
  if (TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (TRI->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
    uint64_t LEAAmount =
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

    // There are only two legal forms of epilogue:
    // - add SEHAllocationSize, %rsp
    // - lea SEHAllocationSize(%FramePtr), %rsp
    //
    // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
    // However, we may use this sequence if we have a frame pointer because the
    // effects of the prologue can safely be undone.
    if (LEAAmount != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, LEAAmount);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
    --MBBI;
  }

  // Windows unwinder will not invoke function's exception handler if IP is
  // either in prologue or in epilogue.  This behavior causes a problem when a
  // call immediately precedes an epilogue, because the return address points
  // into the epilogue.  To cope with that, we insert an epilogue marker here,
  // then replace it with a 'nop' if it ends up immediately after a CALL in the
  // final emitted code.
  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  // Add the return addr area delta back since we are not tail calling.
  int Offset = -1 * X86FI->getTCReturnAddrDelta();
  assert(Offset >= 0 && "TCDelta should never be positive");
  if (Offset) {
    MBBI = MBB.getFirstTerminator();

    // Check for possible merge with preceding ADD instruction.
    Offset += mergeSPUpdates(MBB, MBBI, true);
    emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  // Offset will hold the offset from the stack pointer at function entry to
  // the object.
  // We need to factor in additional offsets applied during the prologue to the
  // frame, base, and stack pointer depending on which is used.
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t StackSize = MFI->getStackSize();
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  int64_t FPDelta = 0;

  if (IsWin64Prologue) {
    assert(!MFI->hasCalls() || (StackSize % 16) == 8);

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    uint64_t NumBytes = FrameSize - CSSize;

    uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
    if (FI && FI == X86FI->getFAIndex())
      return -SEHFrameOffset;

    // FPDelta is the offset from the "traditional" FP location of the old base
    // pointer followed by return address and the location required by the
    // restricted Win64 prologue.
    // Add FPDelta to all offsets below that go through the frame pointer.
    FPDelta = FrameSize - SEHFrameOffset;
    assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
           "FPDelta isn't aligned per the Win64 ABI!");
  }

  if (TRI->hasBasePointer(MF)) {
    assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + SlotSize + FPDelta;
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (TRI->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + SlotSize + FPDelta;
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!HasFP)
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset + FPDelta;
}
1210
1211int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1212 unsigned &FrameReg) const {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001213 // We can't calculate offset from frame pointer if the stack is realigned,
1214 // so enforce usage of stack/base pointer. The base pointer is used when we
1215 // have dynamic allocas in addition to dynamic realignment.
Reid Kleckner034ea962015-06-18 20:32:02 +00001216 if (TRI->hasBasePointer(MF))
1217 FrameReg = TRI->getBaseRegister();
1218 else if (TRI->needsStackRealignment(MF))
1219 FrameReg = TRI->getStackRegister();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001220 else
Reid Kleckner034ea962015-06-18 20:32:02 +00001221 FrameReg = TRI->getFrameRegister(MF);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001222 return getFrameIndexOffset(MF, FI);
1223}
1224
1225// Simplified from getFrameIndexOffset keeping only StackPointer cases
1226int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const {
1227 const MachineFrameInfo *MFI = MF.getFrameInfo();
1228 // Does not include any dynamic realignment.
1229 const uint64_t StackSize = MFI->getStackSize();
1230 {
1231#ifndef NDEBUG
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001232 // Note: LLVM arranges the stack as:
1233 // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
1234 // > "Stack Slots" (<--SP)
1235 // We can always address StackSlots from RSP. We can usually (unless
1236 // needsStackRealignment) address CSRs from RSP, but sometimes need to
1237 // address them from RBP. FixedObjects can be placed anywhere in the stack
1238 // frame depending on their specific requirements (e.g. we can actually
1239 // refer to arguments to the function which are stored in the *caller's*
1240 // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
1241 // AND FixedObjects IFF needsStackRealignment or hasVarSizedObjects.
1242
Reid Kleckner034ea962015-06-18 20:32:02 +00001243 assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001244
1245 // We don't handle tail calls, and shouldn't be seeing them
1246 // either.
1247 int TailCallReturnAddrDelta =
1248 MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
1249 assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
1250#endif
1251 }
1252
1253 // This is how the math works out:
1254 //
1255 // %rsp grows (i.e. gets lower) left to right. Each box below is
1256 // one word (eight bytes). Obj0 is the stack slot we're trying to
1257 // get to.
1258 //
1259 // ----------------------------------
1260 // | BP | Obj0 | Obj1 | ... | ObjN |
1261 // ----------------------------------
1262 // ^ ^ ^ ^
1263 // A B C E
1264 //
1265 // A is the incoming stack pointer.
1266 // (B - A) is the local area offset (-8 for x86-64) [1]
1267 // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
1268 //
1269 // |(E - B)| is the StackSize (absolute value, positive). For a
1270 // stack that grows down, this works out to be (B - E). [3]
1271 //
1272 // E is also the value of %rsp after the stack has been set up, and we
1273 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
1274 // (C - E) == (C - A) - (B - A) + (B - E)
1275 // { Using [1], [2] and [3] above }
1276 // == getObjectOffset - LocalAreaOffset + StackSize
1277 //
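// Plugging in numbers (illustrative only): getObjectOffset(FI) == -16,
// LocalAreaOffset == -8 and StackSize == 40 give
// Offset == -16 - (-8) == -8, so the function returns -8 + 40 == 32,
// i.e. Obj0 lives 32 bytes above the post-prologue %rsp.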
1278
1279 // Get the Offset from the StackPointer
1280 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1281
1282 return Offset + StackSize;
1283}
1284// Simplified from getFrameIndexReference keeping only StackPointer cases
Eric Christopher05b81972015-02-02 17:38:43 +00001285int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
1286 int FI,
1287 unsigned &FrameReg) const {
Reid Kleckner034ea962015-06-18 20:32:02 +00001288 assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001289
Reid Kleckner034ea962015-06-18 20:32:02 +00001290 FrameReg = TRI->getStackRegister();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001291 return getFrameIndexOffsetFromSP(MF, FI);
1292}
1293
1294bool X86FrameLowering::assignCalleeSavedSpillSlots(
1295 MachineFunction &MF, const TargetRegisterInfo *TRI,
1296 std::vector<CalleeSavedInfo> &CSI) const {
1297 MachineFrameInfo *MFI = MF.getFrameInfo();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001298 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1299
1300 unsigned CalleeSavedFrameSize = 0;
1301 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
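// Spill slots are carved out top-down from the local area; a negative
// tail-call return-address delta (see determineCalleeSaves below) moves
// the starting point further down the stack.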
1302
1303 if (hasFP(MF)) {
1304 // emitPrologue always spills the frame register first.
1305 SpillSlotOffset -= SlotSize;
1306 MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1307
1308 // Since emitPrologue and emitEpilogue will handle spilling and restoring of
1309 // the frame register, we can delete it from the CSI list and not have to
1310 // worry about avoiding it later.
Reid Kleckner034ea962015-06-18 20:32:02 +00001311 unsigned FPReg = TRI->getFrameRegister(MF);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001312 for (unsigned i = 0; i < CSI.size(); ++i) {
1313 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
1314 CSI.erase(CSI.begin() + i);
1315 break;
1316 }
1317 }
1318 }
1319
1320 // Assign slots for GPRs. This increases the frame size.
1321 for (unsigned i = CSI.size(); i != 0; --i) {
1322 unsigned Reg = CSI[i - 1].getReg();
1323
1324 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1325 continue;
1326
1327 SpillSlotOffset -= SlotSize;
1328 CalleeSavedFrameSize += SlotSize;
1329
1330 int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
1331 CSI[i - 1].setFrameIdx(SlotIndex);
1332 }
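// Illustration (not from the source): spilling %rbx and %r12 with
// SlotSize == 8 yields two fixed slots, 8 and 16 bytes below the starting
// offset, and CalleeSavedFrameSize == 16.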
1333
1334 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
1335
1336 // Assign slots for XMMs.
1337 for (unsigned i = CSI.size(); i != 0; --i) {
1338 unsigned Reg = CSI[i - 1].getReg();
1339 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
1340 continue;
1341
Reid Kleckner034ea962015-06-18 20:32:02 +00001342 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001343 // Ensure alignment: round the (negative) offset down to the register
// class alignment.
1344 SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
1345 // Allocate the spill slot just below the aligned offset.
1346 SpillSlotOffset -= RC->getSize();
1347 int SlotIndex =
1348 MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
1349 CSI[i - 1].setFrameIdx(SlotIndex);
1350 MFI->ensureMaxAlignment(RC->getAlignment());
1351 }
1352
1353 return true;
1354}
1355
1356bool X86FrameLowering::spillCalleeSavedRegisters(
1357 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1358 const std::vector<CalleeSavedInfo> &CSI,
1359 const TargetRegisterInfo *TRI) const {
1360 DebugLoc DL = MBB.findDebugLoc(MI);
1361
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001362 // Push GPRs. This increases the frame size.
1363 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
1364 for (unsigned i = CSI.size(); i != 0; --i) {
1365 unsigned Reg = CSI[i - 1].getReg();
1366
1367 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
1368 continue;
1369 // Add the callee-saved register as live-in. It's killed at the spill.
1370 MBB.addLiveIn(Reg);
1371
1372 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
1373 .setMIFlag(MachineInstr::FrameSetup);
1374 }
1375
1376 // Spill XMM regs. X86 has no push/pop instructions for XMM registers,
1377 // so store them to the stack frame instead.
1378 for (unsigned i = CSI.size(); i != 0; --i) {
1379 unsigned Reg = CSI[i-1].getReg();
David Majnemera7d908e2015-02-10 19:01:47 +00001380 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001381 continue;
1382 // Add the callee-saved register as live-in. It's killed at the spill.
1383 MBB.addLiveIn(Reg);
1384 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1385
1386 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
1387 TRI);
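// storeRegToStackSlot inserts the spill immediately before MI, so step
// back to mark the new instruction as FrameSetup, then restore the
// iterator.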
1388 --MI;
1389 MI->setFlag(MachineInstr::FrameSetup);
1390 ++MI;
1391 }
1392
1393 return true;
1394}
1395
1396bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
1397 MachineBasicBlock::iterator MI,
1398 const std::vector<CalleeSavedInfo> &CSI,
1399 const TargetRegisterInfo *TRI) const {
1400 if (CSI.empty())
1401 return false;
1402
1403 DebugLoc DL = MBB.findDebugLoc(MI);
1404
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001405 // Reload XMMs from stack frame.
1406 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1407 unsigned Reg = CSI[i].getReg();
1408 if (X86::GR64RegClass.contains(Reg) ||
1409 X86::GR32RegClass.contains(Reg))
1410 continue;
1411
1412 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1413 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
1414 }
1415
1416 // POP GPRs.
1417 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
1418 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1419 unsigned Reg = CSI[i].getReg();
1420 if (!X86::GR64RegClass.contains(Reg) &&
1421 !X86::GR32RegClass.contains(Reg))
1422 continue;
1423
1424 BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
1425 }
1426 return true;
1427}
1428
Matthias Braun02564862015-07-14 17:17:13 +00001429void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
1430 BitVector &SavedRegs,
1431 RegScavenger *RS) const {
1432 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1433
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001434 MachineFrameInfo *MFI = MF.getFrameInfo();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001435
1436 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1437 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1438
1439 if (TailCallReturnAddrDelta < 0) {
1440 // create RETURNADDR area
1441 // arg
1442 // arg
1443 // RETADDR
1444 // { ...
1445 // RETADDR area
1446 // ...
1447 // }
1448 // [EBP]
1449 MFI->CreateFixedObject(-TailCallReturnAddrDelta,
1450 TailCallReturnAddrDelta - SlotSize, true);
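// Restating the call (the delta is negative here): the fixed object is
// -TailCallReturnAddrDelta bytes in size, placed at offset
// TailCallReturnAddrDelta - SlotSize, i.e. immediately below the incoming
// return-address slot.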
1451 }
1452
1453 // Spill the BasePtr if it's used.
Reid Kleckner034ea962015-06-18 20:32:02 +00001454 if (TRI->hasBasePointer(MF))
Matthias Braun02564862015-07-14 17:17:13 +00001455 SavedRegs.set(TRI->getBaseRegister());
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001456}
1457
1458static bool
1459HasNestArgument(const MachineFunction *MF) {
1460 const Function *F = MF->getFunction();
1461 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1462 I != E; I++) {
1463 if (I->hasNestAttr())
1464 return true;
1465 }
1466 return false;
1467}
1468
1469/// GetScratchRegister - Get a temp register for performing work in the
1470 /// segmented stack and the Erlang/HiPE stack prologues. Depending on the
1471 /// platform and the properties of the function, either one or two registers
1472 /// will be needed. Set Primary to true for the first register, false for
/// the second.
1473static unsigned
1474GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
1475 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
1476
1477 // Erlang stuff.
1478 if (CallingConvention == CallingConv::HiPE) {
1479 if (Is64Bit)
1480 return Primary ? X86::R14 : X86::R13;
1481 else
1482 return Primary ? X86::EBX : X86::EDI;
1483 }
1484
1485 if (Is64Bit) {
1486 if (IsLP64)
1487 return Primary ? X86::R11 : X86::R12;
1488 else
1489 return Primary ? X86::R11D : X86::R12D;
1490 }
1491
1492 bool IsNested = HasNestArgument(&MF);
1493
1494 if (CallingConvention == CallingConv::X86_FastCall ||
1495 CallingConvention == CallingConv::Fast) {
1496 if (IsNested)
1497 report_fatal_error("Segmented stacks do not support fastcall with "
1498 "nested functions.");
1499 return Primary ? X86::EAX : X86::ECX;
1500 }
1501 if (IsNested)
1502 return Primary ? X86::EDX : X86::EAX;
1503 return Primary ? X86::ECX : X86::EAX;
1504}
1505
1506// The stack limit in the TCB is set to this many bytes above the actual stack
1507// limit.
1508static const uint64_t kSplitStackAvailable = 256;
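// In other words (inferred from the comparison logic below): frames smaller
// than this can run in the slack the runtime leaves above the recorded
// limit, so the prologue may compare the stack pointer against the limit
// directly instead of first computing SP - StackSize.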
1509
Quentin Colombet61b305e2015-05-05 17:38:16 +00001510void X86FrameLowering::adjustForSegmentedStacks(
1511 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001512 MachineFrameInfo *MFI = MF.getFrameInfo();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001513 uint64_t StackSize;
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001514 unsigned TlsReg, TlsOffset;
1515 DebugLoc DL;
1516
1517 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
1518 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1519 "Scratch register is live-in");
1520
1521 if (MF.getFunction()->isVarArg())
1522 report_fatal_error("Segmented stacks do not support vararg functions.");
1523 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
1524 !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
1525 !STI.isTargetDragonFly())
1526 report_fatal_error("Segmented stacks not supported on this platform.");
1527
1528 // Eventually StackSize will be calculated by a link-time pass, which will
1529 // also decide whether checking code needs to be injected into this
1530 // particular prologue.
1531 StackSize = MFI->getStackSize();
1532
1533 // Do not generate a prologue for functions with a stack of size zero
1534 if (StackSize == 0)
1535 return;
1536
1537 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
1538 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
1539 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1540 bool IsNested = false;
1541
1542 // We need to know if the function has a nest argument only in 64 bit mode.
1543 if (Is64Bit)
1544 IsNested = HasNestArgument(&MF);
1545
1546 // The MOV R10, RAX needs to be in a different block, since the RET we emit
1547 // in allocMBB needs to be the last (terminating) instruction.
1548
Quentin Colombet61b305e2015-05-05 17:38:16 +00001549 for (MachineBasicBlock::livein_iterator i = PrologueMBB.livein_begin(),
1550 e = PrologueMBB.livein_end();
1551 i != e; i++) {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001552 allocMBB->addLiveIn(*i);
1553 checkMBB->addLiveIn(*i);
1554 }
1555
1556 if (IsNested)
1557 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
1558
1559 MF.push_front(allocMBB);
1560 MF.push_front(checkMBB);
1561
1562 // When the frame size is less than 256 we just compare the stack
1563 // boundary directly to the value of the stack pointer, per gcc.
1564 bool CompareStackPointer = StackSize < kSplitStackAvailable;
1565
1566 // Read the limit of the current stacklet from the stack_guard location.
1567 if (Is64Bit) {
1568 if (STI.isTargetLinux()) {
1569 TlsReg = X86::FS;
1570 TlsOffset = IsLP64 ? 0x70 : 0x40;
1571 } else if (STI.isTargetDarwin()) {
1572 TlsReg = X86::GS;
1573 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
1574 } else if (STI.isTargetWin64()) {
1575 TlsReg = X86::GS;
1576 TlsOffset = 0x28; // pvArbitrary, reserved for application use
1577 } else if (STI.isTargetFreeBSD()) {
1578 TlsReg = X86::FS;
1579 TlsOffset = 0x18;
1580 } else if (STI.isTargetDragonFly()) {
1581 TlsReg = X86::FS;
1582 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
1583 } else {
1584 report_fatal_error("Segmented stacks not supported on this platform.");
1585 }
1586
1587 if (CompareStackPointer)
1588 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
1589 else
1590 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
1591 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
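// Together with the CMP below, this emits roughly (a sketch, assuming
// Linux x86-64/LP64, ScratchReg = %r11, frame >= kSplitStackAvailable):
//   lea r11, [rsp - StackSize]
//   cmp r11, fs:[0x70]   ; stacklet limit in the TCB
// The JA emitted further down then skips __morestack when there is
// enough stack.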
1592
1593 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
1594 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1595 } else {
1596 if (STI.isTargetLinux()) {
1597 TlsReg = X86::GS;
1598 TlsOffset = 0x30;
1599 } else if (STI.isTargetDarwin()) {
1600 TlsReg = X86::GS;
1601 TlsOffset = 0x48 + 90*4;
1602 } else if (STI.isTargetWin32()) {
1603 TlsReg = X86::FS;
1604 TlsOffset = 0x14; // pvArbitrary, reserved for application use
1605 } else if (STI.isTargetDragonFly()) {
1606 TlsReg = X86::FS;
1607 TlsOffset = 0x10; // use tls_tcb.tcb_segstack
1608 } else if (STI.isTargetFreeBSD()) {
1609 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
1610 } else {
1611 report_fatal_error("Segmented stacks not supported on this platform.");
1612 }
1613
1614 if (CompareStackPointer)
1615 ScratchReg = X86::ESP;
1616 else
1617 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
1618 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1619
1620 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
1621 STI.isTargetDragonFly()) {
1622 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
1623 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1624 } else if (STI.isTargetDarwin()) {
1625
1626 // TlsOffset doesn't fit into a mod r/m byte, so we need an extra register.
1627 unsigned ScratchReg2;
1628 bool SaveScratch2;
1629 if (CompareStackPointer) {
1630 // The primary scratch register is available for holding the TLS offset.
1631 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
1632 SaveScratch2 = false;
1633 } else {
1634 // Need to use a second register to hold the TLS offset
1635 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
1636
1637 // Unfortunately, with fastcc the second scratch register may hold an
1638 // argument.
1639 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
1640 }
1641
1642 // If Scratch2 is live-in then it needs to be saved.
1643 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
1644 "Scratch register is live-in and not saved");
1645
1646 if (SaveScratch2)
1647 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
1648 .addReg(ScratchReg2, RegState::Kill);
1649
1650 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
1651 .addImm(TlsOffset);
1652 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
1653 .addReg(ScratchReg)
1654 .addReg(ScratchReg2).addImm(1).addReg(0)
1655 .addImm(0)
1656 .addReg(TlsReg);
1657
1658 if (SaveScratch2)
1659 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
1660 }
1661 }
1662
1663 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
1664 // It jumps to normal execution of the function body.
Quentin Colombet61b305e2015-05-05 17:38:16 +00001665 BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001666
1667 // On 32 bit we first push the argument size and then the frame size. On 64
1668 // bit, we pass the stack frame size in r10 and the argument size in r11.
1669 if (Is64Bit) {
1670 // Functions with nested arguments use R10, so it needs to be saved across
1671 // the call to __morestack.
1672
1673 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
1674 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
1675 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
1676 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
1677 const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
1678
1679 if (IsNested)
1680 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
1681
1682 BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
1683 .addImm(StackSize);
1684 BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
1685 .addImm(X86FI->getArgumentStackSize());
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001686 } else {
1687 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1688 .addImm(X86FI->getArgumentStackSize());
1689 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1690 .addImm(StackSize);
1691 }
1692
1693 // __morestack is in libgcc
1694 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
1695 // Under the large code model, we cannot assume that __morestack lives
1696 // within 2^31 bytes of the call site, so we cannot use pc-relative
1697 // addressing. We cannot perform the call via a temporary register,
1698 // as the rax register may be used to store the static chain, and all
1699 // other suitable registers may be either callee-save or used for
1700 // parameter passing. We cannot use the stack at this point either
1701 // because __morestack manipulates the stack directly.
1702 //
1703 // To avoid these issues, perform an indirect call via a read-only memory
1704 // location containing the address.
1705 //
1706 // This solution is not perfect, as it assumes that the .rodata section
1707 // is laid out within 2^31 bytes of each function body, but this seems
1708 // to be sufficient for JIT.
1709 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
1710 .addReg(X86::RIP)
1711 .addImm(0)
1712 .addReg(0)
1713 .addExternalSymbol("__morestack_addr")
1714 .addReg(0);
1715 MF.getMMI().setUsesMorestackAddr(true);
1716 } else {
1717 if (Is64Bit)
1718 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
1719 .addExternalSymbol("__morestack");
1720 else
1721 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
1722 .addExternalSymbol("__morestack");
1723 }
1724
1725 if (IsNested)
1726 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
1727 else
1728 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
1729
Quentin Colombet61b305e2015-05-05 17:38:16 +00001730 allocMBB->addSuccessor(&PrologueMBB);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001731
1732 checkMBB->addSuccessor(allocMBB);
Quentin Colombet61b305e2015-05-05 17:38:16 +00001733 checkMBB->addSuccessor(&PrologueMBB);
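// Resulting control flow (sketch):
//   checkMBB --(JA: enough stack)--> PrologueMBB
//   checkMBB --(fallthrough)--> allocMBB --(after __morestack)--> PrologueMBB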
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001734
1735#ifdef XDEBUG
1736 MF.verify();
1737#endif
1738}
1739
1740 /// Erlang programs may need a special prologue to handle the stack size they
1741 /// might need at runtime. That is because Erlang/OTP does not implement a C
1742 /// stack but uses a custom implementation of a hybrid stack/heap
1743 /// architecture. (For more information see Eric Stenman's Ph.D. thesis:
1744 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
1745///
1746/// CheckStack:
1747/// temp0 = sp - MaxStack
1748/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
1749/// OldStart:
1750/// ...
1751/// IncStack:
1752/// call inc_stack # doubles the stack space
1753/// temp0 = sp - MaxStack
1754/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
Quentin Colombet61b305e2015-05-05 17:38:16 +00001755void X86FrameLowering::adjustForHiPEPrologue(
1756 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001757 MachineFrameInfo *MFI = MF.getFrameInfo();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001758 DebugLoc DL;
1759 // HiPE-specific values
1760 const unsigned HipeLeafWords = 24;
1761 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
1762 const unsigned Guaranteed = HipeLeafWords * SlotSize;
1763 unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
1764 MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
1765 unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
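// A sketch of the arithmetic: MaxStack = this frame's fixed size, plus one
// word per caller argument passed on the stack, plus one extra slot
// (assumed here to account for the return address).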
1766
1767 assert(STI.isTargetLinux() &&
1768 "HiPE prologue is only supported on Linux operating systems.");
1769
1770 // Compute the largest frame this function, as a caller, needs in order to
1771 // fit its callees' frames. This 'MaxStack' is computed from:
1772 //
1773 // a) the fixed frame size, which is the space needed for all spilled temps,
1774 // b) outgoing on-stack parameter areas, and
1775 // c) the minimum stack space this function needs to make available for the
1776 // functions it calls (a tunable ABI property).
1777 if (MFI->hasCalls()) {
1778 unsigned MoreStackForCalls = 0;
1779
1780 for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
1781 MBBI != MBBE; ++MBBI)
1782 for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
1783 MI != ME; ++MI) {
1784 if (!MI->isCall())
1785 continue;
1786
1787 // Get callee operand.
1788 const MachineOperand &MO = MI->getOperand(0);
1789
1790 // Only take account of global function calls (no closures etc.).
1791 if (!MO.isGlobal())
1792 continue;
1793
1794 const Function *F = dyn_cast<Function>(MO.getGlobal());
1795 if (!F)
1796 continue;
1797
1798 // Do not update 'MaxStack' for primitive and built-in functions, as they
1799 // are executed on another stack. These are encoded with names that either
1800 // start with "erlang." or "bif_", or contain neither a "." (as a regular
1801 // <Module>.<Function>.<Arity> would) nor an "_" (as the BIF "suspend_0"
1802 // does).
1803 if (F->getName().find("erlang.") != StringRef::npos ||
1804 F->getName().find("bif_") != StringRef::npos ||
1805 F->getName().find_first_of("._") == StringRef::npos)
1806 continue;
1807
1808 unsigned CalleeStkArity =
1809 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
1810 if (HipeLeafWords - 1 > CalleeStkArity)
1811 MoreStackForCalls = std::max(MoreStackForCalls,
1812 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
1813 }
1814 MaxStack += MoreStackForCalls;
1815 }
1816
1817 // If the stack frame needed is larger than the guaranteed size, runtime
1818 // checks and calls to the "inc_stack_0" BIF are inserted in the assembly
// prologue.
1819 if (MaxStack > Guaranteed) {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001820 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
1821 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
1822
Quentin Colombet61b305e2015-05-05 17:38:16 +00001823 for (MachineBasicBlock::livein_iterator I = PrologueMBB.livein_begin(),
1824 E = PrologueMBB.livein_end();
1825 I != E; I++) {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001826 stackCheckMBB->addLiveIn(*I);
1827 incStackMBB->addLiveIn(*I);
1828 }
1829
1830 MF.push_front(incStackMBB);
1831 MF.push_front(stackCheckMBB);
1832
1833 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
1834 unsigned LEAop, CMPop, CALLop;
1835 if (Is64Bit) {
1836 SPReg = X86::RSP;
1837 PReg = X86::RBP;
1838 LEAop = X86::LEA64r;
1839 CMPop = X86::CMP64rm;
1840 CALLop = X86::CALL64pcrel32;
1841 SPLimitOffset = 0x90;
1842 } else {
1843 SPReg = X86::ESP;
1844 PReg = X86::EBP;
1845 LEAop = X86::LEA32r;
1846 CMPop = X86::CMP32rm;
1847 CALLop = X86::CALLpcrel32;
1848 SPLimitOffset = 0x4c;
1849 }
1850
1851 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
1852 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1853 "HiPE prologue scratch register is live-in");
1854
1855 // Create new MBB for StackCheck:
1856 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
1857 SPReg, false, -MaxStack);
1858 // SPLimitOffset is in a fixed heap location (pointed to by BP).
1859 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
1860 .addReg(ScratchReg), PReg, false, SPLimitOffset);
Quentin Colombet61b305e2015-05-05 17:38:16 +00001861 BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001862
1863 // Create new MBB for IncStack:
1864 BuildMI(incStackMBB, DL, TII.get(CALLop)).
1865 addExternalSymbol("inc_stack_0");
1866 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
1867 SPReg, false, -MaxStack);
1868 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
1869 .addReg(ScratchReg), PReg, false, SPLimitOffset);
1870 BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
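// Emitted sequence (a sketch, 64-bit, where ScratchReg is %r14 for HiPE):
//   stackCheckMBB:  lea r14, [rsp - MaxStack]
//                   cmp r14, [rbp + 0x90]    ; SP_LIMIT(P)
//                   jae PrologueMBB          ; enough stack, continue
//   incStackMBB:    call inc_stack_0
//                   lea r14, [rsp - MaxStack]
//                   cmp r14, [rbp + 0x90]
//                   jle incStackMBB          ; still too small, grow again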
1871
Quentin Colombet61b305e2015-05-05 17:38:16 +00001872 stackCheckMBB->addSuccessor(&PrologueMBB, 99);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001873 stackCheckMBB->addSuccessor(incStackMBB, 1);
Quentin Colombet61b305e2015-05-05 17:38:16 +00001874 incStackMBB->addSuccessor(&PrologueMBB, 99);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001875 incStackMBB->addSuccessor(incStackMBB, 1);
1876 }
1877#ifdef XDEBUG
1878 MF.verify();
1879#endif
1880}
1881
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001882void X86FrameLowering::
1883eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
1884 MachineBasicBlock::iterator I) const {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001885 bool reserveCallFrame = hasReservedCallFrame(MF);
Matthias Braunfa3872e2015-05-18 20:27:55 +00001886 unsigned Opcode = I->getOpcode();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001887 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001888 DebugLoc DL = I->getDebugLoc();
1889 uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
Michael Kuperstein13fbd452015-02-01 16:56:04 +00001890 uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001891 I = MBB.erase(I);
1892
1893 if (!reserveCallFrame) {
1894 // If the stack pointer can be changed after the prologue, turn the
1895 // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
1896 // adjcallstackup instruction into an 'add ESP, <amt>'.
1897 if (Amount == 0)
1898 return;
1899
1900 // We need to keep the stack aligned properly. To do this, we round the
1901 // amount of space needed for the outgoing arguments up to the next
1902 // alignment boundary.
David Majnemer93c22a42015-02-10 00:57:42 +00001903 unsigned StackAlign = getStackAlignment();
1904 Amount = RoundUpToAlignment(Amount, StackAlign);
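// Illustrative numbers: Amount = 20 with StackAlign = 16 rounds up to 32,
// so outgoing-argument adjustments never break the stack alignment.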
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001905
Michael Kuperstein13fbd452015-02-01 16:56:04 +00001906 // Factor out the amount that gets handled inside the sequence
1907 // (pushes of arguments for frame setup, callee pops for frame destroy).
1908 Amount -= InternalAmt;
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001909
Michael Kuperstein13fbd452015-02-01 16:56:04 +00001910 if (Amount) {
Reid Kleckner98d78032015-06-18 20:22:12 +00001911 // Add Amount to SP to destroy a frame, and subtract to setup.
1912 int Offset = isDestroy ? Amount : -Amount;
1913 BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001914 }
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001915 return;
1916 }
1917
Reid Kleckner98d78032015-06-18 20:22:12 +00001918 if (isDestroy && InternalAmt) {
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001919 // If we are performing frame pointer elimination and the callee pops
1920 // something off the stack pointer, reserve that space again. We do this
1921 // until we have more advanced stack pointer tracking ability.
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001922 // We are not tracking the stack pointer adjustment by the callee, so make
1923 // sure we restore the stack pointer immediately after the call; there may
1924 // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
1925 MachineBasicBlock::iterator B = MBB.begin();
1926 while (I != B && !std::prev(I)->isCall())
1927 --I;
Reid Kleckner98d78032015-06-18 20:22:12 +00001928 BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
Michael Kupersteine86aa9a2015-02-01 16:15:07 +00001929 }
1930}
1931
Quentin Colombetaa8020752015-05-27 06:28:41 +00001932bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
1933 assert(MBB.getParent() && "Block is not attached to a function!");
1934
1935 if (canUseLEAForSPInEpilogue(*MBB.getParent()))
1936 return true;
1937
1938 // If we cannot use LEA to adjust SP, we may need to use ADD, which
1939 // clobbers the EFLAGS. Check that none of the terminators reads the
1940 // EFLAGS, and if one uses it, conservatively assume this is not
1941 // safe to insert the epilogue here.
1942 return !terminatorsNeedFlagsAsInput(MBB);
1943}