MachineFunction: Return reference from getFunction(); NFC
The Function attached to a MachineFunction can never be nullptr, so we can return a reference instead of a pointer.
llvm-svn: 320884
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
index 4881928..71526dd 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -63,7 +63,7 @@
SetupMachineFunction(MF);
if (Subtarget->isTargetCOFF()) {
- bool Local = MF.getFunction()->hasLocalLinkage();
+ bool Local = MF.getFunction().hasLocalLinkage();
OutStreamer->BeginCOFFSymbolDef(CurrentFnSym);
OutStreamer->EmitCOFFSymbolStorageClass(
Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL);
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
index b420279..522dc79 100644
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -148,7 +148,7 @@
// is a danger of that being generated.
if (STI->isTargetDarwin() &&
(!MF.getLandingPads().empty() ||
- (MF.getFunction()->needsUnwindTableEntry() && !TFL->hasFP(MF))))
+ (MF.getFunction().needsUnwindTableEntry() && !TFL->hasFP(MF))))
return false;
// It is not valid to change the stack pointer outside the prolog/epilog
@@ -243,7 +243,7 @@
assert(isPowerOf2_32(SlotSize) && "Expect power of 2 stack slot size");
Log2SlotSize = Log2_32(SlotSize);
- if (skipFunction(*MF.getFunction()) || !isLegal(MF))
+ if (skipFunction(MF.getFunction()) || !isLegal(MF))
return false;
unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index 3e1f340..ccb982f 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -177,7 +177,7 @@
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = MF.getDataLayout();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
ArgInfo OrigArg{VReg, Val->getType()};
setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
@@ -334,7 +334,7 @@
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs) const {
MachineFunction &MF = MIRBuilder.getMF();
- const Function &F = *MF.getFunction();
+ const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
auto &DL = F.getParent()->getDataLayout();
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
index a4bb989..489d9d8 100644
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -164,7 +164,7 @@
}
bool X86CmovConverterPass::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (!EnableCmovConverter)
return false;
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
index f32fb9c..f9e1ac3 100644
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -678,7 +678,7 @@
}
bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
if (DisableX86DomainReassignment)
return false;
diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
index 5dfd95f..ab2ef26 100644
--- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp
@@ -222,7 +222,7 @@
case X86::EH_RESTORE: {
// Restore ESP and EBP, and optionally ESI if required.
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction()->getPersonalityFn()));
+ MBB.getParent()->getFunction().getPersonalityFn()));
X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
MBBI->eraseFromParent();
return true;
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index 2f7dd58..01d10fe 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -146,12 +146,12 @@
FunctionPass *llvm::createX86FixupBWInsts() { return new FixupBWInstPass(); }
bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
- if (!FixupBWInsts || skipFunction(*MF.getFunction()))
+ if (!FixupBWInsts || skipFunction(MF.getFunction()))
return false;
this->MF = &MF;
TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
- OptForSize = MF.getFunction()->optForSize();
+ OptForSize = MF.getFunction().optForSize();
MLI = &getAnalysis<MachineLoopInfo>();
LiveRegs.init(TII->getRegisterInfo());
diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index d27974f..b41bf99 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -191,12 +191,12 @@
FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }
bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
- if (skipFunction(*Func.getFunction()))
+ if (skipFunction(Func.getFunction()))
return false;
MF = &Func;
const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
- OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize();
+ OptIncDec = !ST.slowIncDec() || Func.getFunction().optForMinSize();
OptLEA = ST.LEAusesAG() || ST.slowLEA() || ST.slow3OpsLEA();
if (!OptLEA && !OptIncDec)
diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp
index b73a088..9a72e71 100644
--- a/llvm/lib/Target/X86/X86FloatingPoint.cpp
+++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp
@@ -349,7 +349,7 @@
// In regcall convention, some FP registers may not be passed through
// the stack, so they will need to be assigned to the stack first
- if ((Entry->getParent()->getFunction()->getCallingConv() ==
+ if ((Entry->getParent()->getFunction().getCallingConv() ==
CallingConv::X86_RegCall) && (Bundle.Mask && !Bundle.FixCount)) {
// In the register calling convention, up to one FP argument could be
// saved in the first FP register.
@@ -973,7 +973,7 @@
unsigned R = MO.getReg() - X86::FP0;
if (R < 8) {
- if (MF->getFunction()->getCallingConv() != CallingConv::X86_RegCall) {
+ if (MF->getFunction().getCallingConv() != CallingConv::X86_RegCall) {
assert(MO.isDef() && MO.isImplicit());
}
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
index ead877a..80b1cc1 100644
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -148,8 +148,7 @@
const X86RegisterInfo *TRI,
bool Is64Bit) {
const MachineFunction *MF = MBB.getParent();
- const Function *F = MF->getFunction();
- if (!F || MF->callsEHReturn())
+ if (MF->callsEHReturn())
return 0;
const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);
@@ -820,7 +819,7 @@
const MachineFrameInfo &MFI = MF.getFrameInfo();
uint64_t MaxAlign = MFI.getMaxAlignment(); // Desired stack alignment.
unsigned StackAlign = getStackAlignment();
- if (MF.getFunction()->hasFnAttribute("stackrealign")) {
+ if (MF.getFunction().hasFnAttribute("stackrealign")) {
if (MFI.hasCalls())
MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
else if (MaxAlign < SlotSize)
@@ -935,28 +934,28 @@
"MF used frame lowering for wrong subtarget");
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo &MFI = MF.getFrameInfo();
- const Function *Fn = MF.getFunction();
+ const Function &Fn = MF.getFunction();
MachineModuleInfo &MMI = MF.getMMI();
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
bool IsFunclet = MBB.isEHFuncletEntry();
EHPersonality Personality = EHPersonality::Unknown;
- if (Fn->hasPersonalityFn())
- Personality = classifyEHPersonality(Fn->getPersonalityFn());
+ if (Fn.hasPersonalityFn())
+ Personality = classifyEHPersonality(Fn.getPersonalityFn());
bool FnHasClrFunclet =
MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
bool HasFP = hasFP(MF);
- bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
+ bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool NeedsWin64CFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
+ bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
// FIXME: Emit FPO data for EH funclets.
bool NeedsWinFPO =
!IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
bool NeedsDwarfCFI =
- !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ !IsWin64Prologue && (MMI.hasDebugInfo() || Fn.needsUnwindTableEntry());
unsigned FramePtr = TRI->getFrameRegister(MF);
const unsigned MachineFramePtr =
STI.isTarget64BitILP32()
@@ -982,16 +981,16 @@
// The default stack probe size is 4096 if the function has no stackprobesize
// attribute.
unsigned StackProbeSize = 4096;
- if (Fn->hasFnAttribute("stack-probe-size"))
- Fn->getFnAttribute("stack-probe-size")
+ if (Fn.hasFnAttribute("stack-probe-size"))
+ Fn.getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
// Re-align the stack on 64-bit if the x86-interrupt calling convention is
// used and an error code was pushed, since the x86-64 ABI requires a 16-byte
// stack alignment.
- if (Fn->getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
- Fn->arg_size() == 2) {
+ if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
+ Fn.arg_size() == 2) {
StackSize += 8;
MFI.setStackSize(StackSize);
emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false);
@@ -1002,7 +1001,7 @@
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
- if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn.hasFnAttribute(Attribute::NoRedZone) &&
!TRI->needsStackRealignment(MF) &&
!MFI.hasVarSizedObjects() && // No dynamic alloca.
!MFI.adjustsStack() && // No calls.
@@ -1447,7 +1446,7 @@
// 1. The interrupt handling function uses any of the "rep" instructions.
// 2. Interrupt handling function calls another function.
//
- if (Fn->getCallingConv() == CallingConv::X86_INTR)
+ if (Fn.getCallingConv() == CallingConv::X86_INTR)
BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
.setMIFlag(MachineInstr::FrameSetup);
@@ -1508,7 +1507,7 @@
// This is the amount of stack a funclet needs to allocate.
unsigned UsedSize;
EHPersonality Personality =
- classifyEHPersonality(MF.getFunction()->getPersonalityFn());
+ classifyEHPersonality(MF.getFunction().getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
// CLR funclets need to hold enough space to include the PSPSym, at the
// same offset from the stack pointer (immediately after the prolog) as it
@@ -1551,7 +1550,7 @@
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsWin64CFI =
- IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();
+ IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
// Get the number of bytes to allocate from the FrameInfo.
@@ -1981,7 +1980,7 @@
MachineInstr *CatchRet) const {
// SEH shouldn't use catchret.
assert(!isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction()->getPersonalityFn())) &&
+ MBB.getParent()->getFunction().getPersonalityFn())) &&
"SEH should not use CATCHRET");
DebugLoc DL = CatchRet->getDebugLoc();
MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
@@ -2021,9 +2020,9 @@
// Don't restore CSRs before an SEH catchret. SEH except blocks do not form
// funclets. emitEpilogue transforms these to normal jumps.
if (MI->getOpcode() == X86::CATCHRET) {
- const Function *Func = MBB.getParent()->getFunction();
+ const Function &F = MBB.getParent()->getFunction();
bool IsSEH = isAsynchronousEHPersonality(
- classifyEHPersonality(Func->getPersonalityFn()));
+ classifyEHPersonality(F.getPersonalityFn()));
if (IsSEH)
return true;
}
@@ -2095,8 +2094,8 @@
static bool
HasNestArgument(const MachineFunction *MF) {
- const Function *F = MF->getFunction();
- for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+ const Function &F = MF->getFunction();
+ for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; I++) {
if (I->hasNestAttr())
return true;
@@ -2110,7 +2109,7 @@
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
- CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
+ CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
// Erlang stuff.
if (CallingConvention == CallingConv::HiPE) {
@@ -2160,7 +2159,7 @@
assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
"Scratch register is live-in");
- if (MF.getFunction()->isVarArg())
+ if (MF.getFunction().isVarArg())
report_fatal_error("Segmented stacks do not support vararg functions.");
if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
!STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
@@ -2434,8 +2433,8 @@
Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
const unsigned Guaranteed = HipeLeafWords * SlotSize;
- unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
- MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
+ unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
+ MF.getFunction().arg_size() - CCRegisteredArgs : 0;
unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
assert(STI.isTargetLinux() &&
@@ -2649,10 +2648,10 @@
Amount = alignTo(Amount, StackAlign);
MachineModuleInfo &MMI = MF.getMMI();
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
- bool DwarfCFI = !WindowsCFI &&
- (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ bool DwarfCFI = !WindowsCFI &&
+ (MMI.hasDebugInfo() || F.needsUnwindTableEntry());
// If we have any exception handlers in this function, and we adjust
// the SP before calls, we may need to indicate this to the unwinder
@@ -2694,7 +2693,7 @@
StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
if (StackAdjustment) {
- if (!(Fn->optForMinSize() &&
+ if (!(F.optForMinSize() &&
adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
/*InEpilogue=*/false);
@@ -2767,13 +2766,13 @@
bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
// If we may need to emit frameless compact unwind information, give
// up as this is currently broken: PR25614.
- return (MF.getFunction()->hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
+ return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
// The lowering of segmented stack and HiPE only support entry blocks
// as prologue blocks: PR26107.
// This limitation may be lifted if we fix:
// - adjustForSegmentedStacks
// - adjustForHiPEPrologue
- MF.getFunction()->getCallingConv() != CallingConv::HiPE &&
+ MF.getFunction().getCallingConv() != CallingConv::HiPE &&
!MF.shouldSplitStack();
}
@@ -3003,9 +3002,9 @@
// If this function isn't doing Win64-style C++ EH, we don't need to do
// anything.
- const Function *Fn = MF.getFunction();
+ const Function &F = MF.getFunction();
if (!STI.is64Bit() || !MF.hasEHFunclets() ||
- classifyEHPersonality(Fn->getPersonalityFn()) != EHPersonality::MSVC_CXX)
+ classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
return;
// Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 8df8098..a6c7c5f 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -619,8 +619,8 @@
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptFor[Min]Size are used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->optForSize();
- OptForMinSize = MF->getFunction()->optForMinSize();
+ OptForSize = MF->getFunction().optForSize();
+ OptForMinSize = MF->getFunction().optForMinSize();
assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
@@ -753,9 +753,9 @@
void X86DAGToDAGISel::EmitFunctionEntryCode() {
// If this is main, emit special code for main.
- if (const Function *Fn = MF->getFunction())
- if (Fn->hasExternalLinkage() && Fn->getName() == "main")
- emitSpecialCodeForMain();
+ const Function &F = MF->getFunction();
+ if (F.hasExternalLinkage() && F.getName() == "main")
+ emitSpecialCodeForMain();
}
static bool isDispSafeForFrameIndex(int64_t Val) {
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 94714bf..43971c3 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -94,7 +94,7 @@
const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
- DiagnosticInfoUnsupported(*MF.getFunction(), Msg, dl.getDebugLoc()));
+ DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
@@ -1843,8 +1843,8 @@
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
- const Function *F = MF.getFunction();
- if (!F->hasFnAttribute(Attribute::NoImplicitFloat)) {
+ const Function &F = MF.getFunction();
+ if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -1940,7 +1940,7 @@
if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
return;
unsigned ParamRegs = 0;
- if (auto *M = MF->getFunction()->getParent())
+ if (auto *M = MF->getFunction().getParent())
ParamRegs = M->getNumberRegisterParameters();
// Mark the first N int arguments as having reg
@@ -2207,7 +2207,7 @@
// For example, when they are used for argument passing.
bool ShouldDisableCalleeSavedRegister =
CallConv == CallingConv::X86_RegCall ||
- MF.getFunction()->hasFnAttribute("no_caller_saved_registers");
+ MF.getFunction().hasFnAttribute("no_caller_saved_registers");
if (CallConv == CallingConv::X86_INTR && !Outs.empty())
report_fatal_error("X86 interrupts may not return any value");
@@ -2889,8 +2889,8 @@
return None;
}
- const Function *Fn = MF.getFunction();
- bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
+ const Function &F = MF.getFunction();
+ bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
@@ -2923,10 +2923,9 @@
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
- const Function *Fn = MF.getFunction();
- if (Fn->hasExternalLinkage() &&
- Subtarget.isTargetCygMing() &&
- Fn->getName() == "main")
+ const Function &F = MF.getFunction();
+ if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
+ F.getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -3101,7 +3100,7 @@
// Figure out if XMM registers are in use.
assert(!(Subtarget.useSoftFloat() &&
- Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
// 64-bit calling conventions support varargs and register parameters, so we
@@ -3258,7 +3257,7 @@
FuncInfo->setArgumentStackSize(StackSize);
if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
- EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
+ EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
assert(Is64Bit);
// TODO: Add a mechanism to frame lowering that will allow us to indicate
@@ -3275,7 +3274,7 @@
}
if (CallConv == CallingConv::X86_RegCall ||
- Fn->hasFnAttribute("no_caller_saved_registers")) {
+ F.hasFnAttribute("no_caller_saved_registers")) {
MachineRegisterInfo &MRI = MF.getRegInfo();
for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
MRI.disableCalleeSavedRegister(Pair.first);
@@ -3366,7 +3365,7 @@
StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
- auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
@@ -3401,7 +3400,7 @@
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
- MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
+ MF.getFunction().hasStructRetAttr(), CLI.RetTy,
Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
@@ -3747,7 +3746,7 @@
}
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlags =
Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
@@ -3796,10 +3795,10 @@
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
- const Function *CallerFn = MF.getFunction();
+ const Function &CallerFn = MF.getFunction();
EHPersonality Pers =
- CallerFn->hasPersonalityFn()
- ? classifyEHPersonality(CallerFn->getPersonalityFn())
+ CallerFn.hasPersonalityFn()
+ ? classifyEHPersonality(CallerFn.getPersonalityFn())
: EHPersonality::Unknown;
if (isFuncletEHPersonality(Pers))
Mask = RegInfo->getNoPreservedMask();
@@ -4047,15 +4046,15 @@
// If -tailcallopt is specified, make fastcc functions tail-callable.
MachineFunction &MF = DAG.getMachineFunction();
- const Function *CallerF = MF.getFunction();
+ const Function &CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
// perform a tailcall optimization here.
- if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
+ if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
return false;
- CallingConv::ID CallerCC = CallerF->getCallingConv();
+ CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
@@ -4639,7 +4638,7 @@
const SelectionDAG &DAG) const {
// Do not merge to float value size (128 bytes) if no implicit
// float attribute is set.
- bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat) {
@@ -6927,7 +6926,7 @@
// TODO: If multiple splats are generated to load the same constant,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
- bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
@@ -14903,7 +14902,7 @@
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
- bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
@@ -15044,7 +15043,7 @@
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
- const Module *Mod = DAG.getMachineFunction().getFunction()->getParent();
+ const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
auto PtrVT = getPointerTy(DAG.getDataLayout());
@@ -16968,7 +16967,7 @@
// An add of one will be selected as an INC.
if (C->isOne() &&
(!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
@@ -16977,7 +16976,7 @@
// An add of negative one (subtract of one) will be selected as a DEC.
if (C->isAllOnesValue() &&
(!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
@@ -17172,7 +17171,7 @@
// with an immediate. 16 bit immediates are to be avoided.
if ((Op0.getValueType() == MVT::i16 &&
(isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
- !DAG.getMachineFunction().getFunction()->optForMinSize() &&
+ !DAG.getMachineFunction().getFunction().optForMinSize() &&
!Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -19242,8 +19241,8 @@
if (Is64Bit) {
// The 64 bit implementation of segmented stacks needs to clobber both r10
// r11. This makes it impossible to use it along with nested parameters.
- const Function *F = MF.getFunction();
- for (const auto &A : F->args()) {
+ const Function &F = MF.getFunction();
+ for (const auto &A : F.args()) {
if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
@@ -19290,7 +19289,7 @@
SDLoc DL(Op);
if (!Subtarget.is64Bit() ||
- Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv())) {
+ Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
@@ -19344,7 +19343,7 @@
assert(Op.getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
- if (Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
@@ -19375,7 +19374,7 @@
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!Subtarget.useSoftFloat() &&
- !(MF.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat)) &&
+ !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
Subtarget.hasSSE1());
}
@@ -19403,7 +19402,7 @@
// where a va_list is still an i8*.
assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
if (Subtarget.isCallingConvWin64(
- DAG.getMachineFunction().getFunction()->getCallingConv()))
+ DAG.getMachineFunction().getFunction().getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
@@ -23939,7 +23938,7 @@
if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
// Convert to inc/dec if they aren't slow or we are optimizing for size.
if (AllowIncDec && (!Subtarget.slowIncDec() ||
- DAG.getMachineFunction().getFunction()->optForSize())) {
+ DAG.getMachineFunction().getFunction().optForSize())) {
if ((NewOpc == X86ISD::LADD && C->isOne()) ||
(NewOpc == X86ISD::LSUB && C->isAllOnesValue()))
return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N),
@@ -26085,7 +26084,7 @@
int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
- if (!Subtarget.isCallingConvWin64(F->getFunction()->getCallingConv())) {
+ if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
@@ -26728,7 +26727,7 @@
DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
- classifyEHPersonality(MF->getFunction()->getPersonalityFn())) &&
+ classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
@@ -26755,7 +26754,7 @@
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
- const Constant *PerFn = MF->getFunction()->getPersonalityFn();
+ const Constant *PerFn = MF->getFunction().getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
if (IsSEH && Subtarget.is32Bit()) {
@@ -32161,7 +32160,7 @@
// pmulld is supported since SSE41. It is better to use pmulld
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
// the expansion.
- bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
@@ -32354,7 +32353,7 @@
if (!MulConstantOptimization)
return SDValue();
// An imul is usually smaller than the alternative sequence.
- if (DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
@@ -33572,7 +33571,7 @@
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
- bool OptForSize = DAG.getMachineFunction().getFunction()->optForSize();
+ bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// SHLD/SHRD instructions have lower register pressure, but on some
// platforms they have higher latency than the equivalent
@@ -34512,8 +34511,8 @@
if (VT.getSizeInBits() != 64)
return SDValue();
- const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
+ const Function &F = DAG.getMachineFunction().getFunction();
+ bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
!Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
@@ -35388,7 +35387,7 @@
// This takes at least 3 instructions, so favor a library call when operating
// on a scalar and minimizing code size.
- if (!VT.isVector() && DAG.getMachineFunction().getFunction()->optForMinSize())
+ if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
SDValue Op0 = N->getOperand(0);
@@ -38403,7 +38402,7 @@
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
- assert(Entry->getParent()->getFunction()->hasFnAttribute(
+ assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
@@ -38426,8 +38425,8 @@
/// string if not applicable.
StringRef X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
// If the function specifically requests stack probes, emit them.
- if (MF.getFunction()->hasFnAttribute("probe-stack"))
- return MF.getFunction()->getFnAttribute("probe-stack").getValueAsString();
+ if (MF.getFunction().hasFnAttribute("probe-stack"))
+ return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
// Generally, if we aren't on Windows, the platform ABI does not include
// support for stack probes, so don't emit them.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 24a6cf4d..4db969b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1228,8 +1228,8 @@
const SDLoc &dl, SelectionDAG &DAG) const override;
bool supportSplitCSR(MachineFunction *MF) const override {
- return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
- MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
+ return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
}
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
void insertCopiesSplitCSR(
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index a4ddb31..a246359 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -7726,7 +7726,7 @@
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsDwarfCFI =
!IsWin64Prologue &&
- (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry());
+ (MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
if (EmitCFI) {
TFL->BuildCFI(MBB, I, DL,
@@ -8409,7 +8409,7 @@
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
- if (isSlowTwoMemOps && !MF.getFunction()->optForMinSize() &&
+ if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
(MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
MI.getOpcode() == X86::PUSH64r))
@@ -8417,7 +8417,7 @@
// Avoid partial register update stalls unless optimizing for size.
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
unsigned NumOps = MI.getDesc().getNumOperands();
@@ -8586,7 +8586,7 @@
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Don't fold subreg spills, or reloads that use a high subreg.
@@ -8785,7 +8785,7 @@
// Avoid partial register update stalls unless optimizing for size.
// TODO: we should block undef reg update as well.
- if (!MF.getFunction()->optForSize() && hasPartialRegUpdate(MI.getOpcode()))
+ if (!MF.getFunction().optForSize() && hasPartialRegUpdate(MI.getOpcode()))
return nullptr;
// Determine the alignment of the load.
@@ -8881,16 +8881,16 @@
Type *Ty;
unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
- Ty = Type::getFloatTy(MF.getFunction()->getContext());
+ Ty = Type::getFloatTy(MF.getFunction().getContext());
else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
- Ty = Type::getDoubleTy(MF.getFunction()->getContext());
+ Ty = Type::getDoubleTy(MF.getFunction().getContext());
else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()),16);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
else
- Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
+ Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
Opc == X86::AVX512_512_SETALLONES ||
@@ -10691,7 +10691,7 @@
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
@@ -10852,16 +10852,16 @@
bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
// Does the function use a red zone? If it does, then we can't risk messing
// with the stack.
- if (!F->hasFnAttribute(Attribute::NoRedZone))
+ if (!F.hasFnAttribute(Attribute::NoRedZone))
return false;
// If we *don't* want to outline from things that could potentially be deduped
// then return false.
- if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
+ if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
// This function is viable for outlining, so return true.
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 7bc67c7..42e89cb 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -918,11 +918,11 @@
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
- def OptForSize : Predicate<"MF->getFunction()->optForSize()">;
- def OptForMinSize : Predicate<"MF->getFunction()->optForMinSize()">;
- def OptForSpeed : Predicate<"!MF->getFunction()->optForSize()">;
+ def OptForSize : Predicate<"MF->getFunction().optForSize()">;
+ def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">;
+ def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">;
def UseIncDec : Predicate<"!Subtarget->slowIncDec() || "
- "MF->getFunction()->optForSize()">;
+ "MF->getFunction().optForSize()">;
}
def CallImmAddr : Predicate<"Subtarget->isLegalToCallImmediateAddr()">;
diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
index 0b77014..1fc6f07 100644
--- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
+++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp
@@ -672,7 +672,7 @@
bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
- if (DisableX86LEAOpt || skipFunction(*MF.getFunction()))
+ if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
return false;
MRI = &MF.getRegInfo();
@@ -696,7 +696,7 @@
// Remove redundant address calculations. Do it only for -Os/-Oz since only
// a code size gain is expected from this part of the pass.
- if (MF.getFunction()->optForSize())
+ if (MF.getFunction().optForSize())
Changed |= removeRedundantAddrCalc(LEAs);
}
diff --git a/llvm/lib/Target/X86/X86PadShortFunction.cpp b/llvm/lib/Target/X86/X86PadShortFunction.cpp
index f2ee437..1da0fad 100644
--- a/llvm/lib/Target/X86/X86PadShortFunction.cpp
+++ b/llvm/lib/Target/X86/X86PadShortFunction.cpp
@@ -96,10 +96,10 @@
/// runOnMachineFunction - Loop over all of the basic blocks, inserting
/// NOOP instructions before early exits.
bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
- if (skipFunction(*MF.getFunction()))
+ if (skipFunction(MF.getFunction()))
return false;
- if (MF.getFunction()->optForSize()) {
+ if (MF.getFunction().optForSize()) {
return false;
}
diff --git a/llvm/lib/Target/X86/X86RegisterInfo.cpp b/llvm/lib/Target/X86/X86RegisterInfo.cpp
index d690035..bc31e95 100644
--- a/llvm/lib/Target/X86/X86RegisterInfo.cpp
+++ b/llvm/lib/Target/X86/X86RegisterInfo.cpp
@@ -218,13 +218,13 @@
const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
- const Function *F = MF.getFunction();
- if (IsWin64 || (F && F->getCallingConv() == CallingConv::Win64))
+ const Function &F = MF.getFunction();
+ if (IsWin64 || (F.getCallingConv() == CallingConv::Win64))
return &X86::GR64_TCW64RegClass;
else if (Is64Bit)
return &X86::GR64_TCRegClass;
- bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
+ bool hasHipeCC = (F.getCallingConv() == CallingConv::HiPE);
if (hasHipeCC)
return &X86::GR32RegClass;
return &X86::GR32_TCRegClass;
@@ -266,17 +266,17 @@
assert(MF && "MachineFunction required");
const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
- const Function *F = MF->getFunction();
+ const Function &F = MF->getFunction();
bool HasSSE = Subtarget.hasSSE1();
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
bool CallsEHReturn = MF->callsEHReturn();
- CallingConv::ID CC = F->getCallingConv();
+ CallingConv::ID CC = F.getCallingConv();
// If attribute NoCallerSavedRegisters exists then we set X86_INTR calling
// convention because it has the CSR list.
- if (MF->getFunction()->hasFnAttribute("no_caller_saved_registers"))
+ if (MF->getFunction().hasFnAttribute("no_caller_saved_registers"))
CC = CallingConv::X86_INTR;
switch (CC) {
@@ -362,7 +362,7 @@
if (Is64Bit) {
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_SaveList
: CSR_64_SwiftError_SaveList;
@@ -380,7 +380,7 @@
const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
const MachineFunction *MF) const {
assert(MF && "Invalid MachineFunction pointer.");
- if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
+ if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
return nullptr;
@@ -473,9 +473,9 @@
// Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
// callsEHReturn().
if (Is64Bit) {
- const Function *F = MF.getFunction();
+ const Function &F = MF.getFunction();
bool IsSwiftCC = Subtarget.getTargetLowering()->supportSwiftError() &&
- F->getAttributes().hasAttrSomewhere(Attribute::SwiftError);
+ F.getAttributes().hasAttrSomewhere(Attribute::SwiftError);
if (IsSwiftCC)
return IsWin64 ? CSR_Win64_SwiftError_RegMask : CSR_64_SwiftError_RegMask;
return IsWin64 ? CSR_Win64_RegMask : CSR_64_RegMask;
@@ -519,7 +519,7 @@
// Set the base-pointer register and its aliases as reserved if needed.
if (hasBasePointer(MF)) {
- CallingConv::ID CC = MF.getFunction()->getCallingConv();
+ CallingConv::ID CC = MF.getFunction().getCallingConv();
const uint32_t *RegMask = getCallPreservedMask(MF, CC);
if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
report_fatal_error(
diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
index d006556..1e04997 100644
--- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -247,7 +247,7 @@
Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32;
if (Repeats.BytesLeft() > 0 &&
- DAG.getMachineFunction().getFunction()->optForMinSize()) {
+ DAG.getMachineFunction().getFunction().optForMinSize()) {
// When aggressively optimizing for size, avoid generating the code to
// handle BytesLeft.
Repeats.AVT = MVT::i8;
diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 0b67e81..2242628 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -285,7 +285,7 @@
TII = ST.getInstrInfo();
MachineRegisterInfo &MRI = MF.getRegInfo();
EverMadeChange = false;
- IsX86INTR = MF.getFunction()->getCallingConv() == CallingConv::X86_INTR;
+ IsX86INTR = MF.getFunction().getCallingConv() == CallingConv::X86_INTR;
bool FnHasLiveInYmmOrZmm = checkFnHasLiveInYmmOrZmm(MRI);
diff --git a/llvm/lib/Target/X86/X86WinAllocaExpander.cpp b/llvm/lib/Target/X86/X86WinAllocaExpander.cpp
index 8a186e9..1046696 100644
--- a/llvm/lib/Target/X86/X86WinAllocaExpander.cpp
+++ b/llvm/lib/Target/X86/X86WinAllocaExpander.cpp
@@ -279,9 +279,9 @@
SlotSize = TRI->getSlotSize();
StackProbeSize = 4096;
- if (MF.getFunction()->hasFnAttribute("stack-probe-size")) {
+ if (MF.getFunction().hasFnAttribute("stack-probe-size")) {
MF.getFunction()
- ->getFnAttribute("stack-probe-size")
+ .getFnAttribute("stack-probe-size")
.getValueAsString()
.getAsInteger(0, StackProbeSize);
}