Eliminate target hook IsEligibleForTailCallOptimization.
Target-independent isel should always pass along the "tail call" property. Change
the target hook LowerCall's parameter "isTailCall" into a reference. If the target
decides it is impossible to honor the tail call request, it should set isTailCall
to false to keep target-independent isel happy.
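For context, a rough sketch of the caller side under the new contract (simplified;
the surrounding names and setup are assumptions for illustration, not part of this
patch): target-independent isel passes the flag by reference and re-reads it after
the target's LowerCall runs.

  // Hypothetical sketch of the target-independent caller.
  bool isTailCall = ...; // whether the source call was marked "tail"
  // LowerCall may demote isTailCall to false if the target cannot
  // honor the request (e.g. mismatched calling conventions).
  Chain = TLI.LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
                        Outs, Ins, dl, DAG, InVals);
  if (!isTailCall) {
    // The request was demoted: lower as an ordinary call and handle
    // the returned values as usual.
    ...
  }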
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@94626 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 36145af..6e64475 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1436,6 +1436,12 @@
/*AlwaysInline=*/true, NULL, 0, NULL, 0);
}
+/// FuncIsMadeTailCallSafe - Return true if the function is being made into
+/// a tailcall target by changing its ABI.
+static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
+ return PerformTailCallOpt && CC == CallingConv::Fast;
+}
+
SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain,
CallingConv::ID CallConv,
@@ -1446,7 +1452,7 @@
unsigned i) {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
- bool AlwaysUseMutable = X86::IsEligibleForTailCallOpt(CallConv);
+ bool AlwaysUseMutable = FuncIsMadeTailCallSafe(CallConv);
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
EVT ValVT;
@@ -1583,8 +1589,8 @@
}
unsigned StackSize = CCInfo.getNextStackOffset();
- // align stack specially for tail calls
- if (X86::IsEligibleForTailCallOpt(CallConv))
+ // Align stack specially for tail calls.
+ if (FuncIsMadeTailCallSafe(CallConv))
StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes variable number of arguments, make a frame index for
@@ -1770,7 +1776,7 @@
SDValue
X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
- bool isTailCall,
+ bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
@@ -1779,8 +1785,11 @@
bool Is64Bit = Subtarget->is64Bit();
bool IsStructRet = CallIsStructReturn(Outs);
- assert((!isTailCall || X86::IsEligibleForTailCallOpt(CallConv)) &&
- "Call is not eligible for tail call optimization!");
+ if (isTailCall)
+ // Check if it's really possible to do a tail call.
+ isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
+ Ins, DAG);
+
assert(!(isVarArg && CallConv == CallingConv::Fast) &&
"Var args not supported with calling convention fastcc");
@@ -1792,7 +1801,7 @@
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
- if (X86::IsEligibleForTailCallOpt(CallConv))
+ if (FuncIsMadeTailCallSafe(CallConv))
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
@@ -2230,8 +2239,10 @@
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
- return X86::IsEligibleForTailCallOpt(CalleeCC) &&
- DAG.getMachineFunction().getFunction()->getCallingConv() == CalleeCC;
+ if (CalleeCC == CallingConv::Fast &&
+ DAG.getMachineFunction().getFunction()->getCallingConv() == CalleeCC)
+ return true;
+ return false;
}
FastISel *
@@ -2304,10 +2315,6 @@
return false;
}
-bool X86::IsEligibleForTailCallOpt(CallingConv::ID CC) {
- return PerformTailCallOpt && CC == CallingConv::Fast;
-}
-
/// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
/// specific condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 0c83ffb..0642b39 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -362,10 +362,6 @@
/// fit into displacement field of the instruction.
bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement = true);
-
- /// IsEligibleForTailCallOpt - Return true if it's legal to perform tail call
- /// optimization for the given calling convention.
- bool IsEligibleForTailCallOpt(CallingConv::ID CC);
}
//===--------------------------------------------------------------------===//
@@ -550,16 +546,6 @@
return !X86ScalarSSEf64 || VT == MVT::f80;
}
- /// IsEligibleForTailCallOptimization - Check whether the call is eligible
- /// for tail call optimization. Targets which want to do tail call
- /// optimization should implement this function.
- virtual bool
- IsEligibleForTailCallOptimization(SDValue Callee,
- CallingConv::ID CalleeCC,
- bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SelectionDAG& DAG) const;
-
virtual const X86Subtarget* getSubtarget() {
return Subtarget;
}
@@ -637,6 +623,15 @@
ISD::ArgFlagsTy Flags);
// Call lowering helpers.
+
+ /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+ /// for tail call optimization. Targets which want to do tail call
+ /// optimization should implement this function.
+ bool IsEligibleForTailCallOptimization(SDValue Callee,
+ CallingConv::ID CalleeCC,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SelectionDAG& DAG) const;
bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv);
SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
SDValue Chain, bool IsTailCall, bool Is64Bit,
@@ -712,7 +707,7 @@
SmallVectorImpl<SDValue> &InVals);
virtual SDValue
LowerCall(SDValue Chain, SDValue Callee,
- CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
+ CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
DebugLoc dl, SelectionDAG &DAG,
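A target that never emits tail calls can satisfy the new contract by
unconditionally demoting the request at the top of its LowerCall. A minimal
sketch for a hypothetical target (not part of this patch):

  SDValue
  FooTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                               CallingConv::ID CallConv, bool isVarArg,
                               bool &isTailCall,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               DebugLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
    // This target cannot honor tail calls; clear the flag so
    // target-independent isel falls back to an ordinary call.
    isTailCall = false;
    // ... normal call lowering follows ...
  }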