Implement -mno-sse: when SSE is disabled on x86-64, don't spill the XMM
argument registers to the stack for var-args, and reject FP return values
that would come back in SSE registers.
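For illustration only (not part of the patch), a minimal C sketch of the
user-visible effect, assuming the code is built for x86-64 with SSE disabled
(e.g. llvm-gcc -mno-sse, or running the IR through llc -march=x86-64
-mattr=-sse); the functions sum, get_pi, and save_pi below are made up:

    #include <stdarg.h>

    // A variadic function normally spills XMM0-XMM7 into its register save
    // area in the prologue; with SSE disabled that spill is now skipped, and
    // callers set %al to 0 (no XMM registers are used for arguments).
    int sum(int n, ...) {
      va_list ap;
      int i, s = 0;
      va_start(ap, n);
      for (i = 0; i < n; ++i)
        s += va_arg(ap, int);
      va_end(ap);
      return s;
    }

    // Calling a function whose result comes back in an SSE register (f32/f64
    // are returned in XMM0 under the x86-64 ABI) is rejected: codegen prints
    // "SSE register return with SSE disabled" and exits.
    double get_pi(void);                    /* hypothetical external function */
    double stash;
    void save_pi(void) { stash = get_pi(); }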
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@63495 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index db92a0c..6742a90 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -49,7 +49,7 @@
   // weirdly; this is really the sse-regparm calling convention) in which
   // case they use XMM0, otherwise it is the same as the common X86 calling
   // conv.
-  CCIfInReg<CCIfSubtarget<"hasSSE2()",
+  CCIfInReg<CCIfSubtarget<"hasSSE2()",
     CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
   CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
   CCDelegateTo<RetCC_X86Common>
@@ -134,7 +134,8 @@
   // The first 8 FP/Vector arguments are passed in XMM registers.
   CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
-            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>,
+            CCIfSubtarget<"hasSSE1()",
+            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
   // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
   // registers on Darwin.
@@ -223,7 +224,8 @@
   // The first 8 FP/Vector arguments are passed in XMM registers.
   CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
-            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>,
+            CCIfSubtarget<"hasSSE1()",
+            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
   // The first 8 MMX (except for v1i64) vector arguments are passed in XMM
   // registers on Darwin.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 111269f..19d5aed 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1031,6 +1031,7 @@
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   bool isVarArg = TheCall->isVarArg();
+  bool Is64Bit = Subtarget->is64Bit();
   CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
   CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
@@ -1039,7 +1040,14 @@
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
     MVT CopyVT = RVLocs[i].getValVT();
-
+
+    // We can't return FP values in XMM when SSE is disabled (x86-64 or inreg).
+    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
+        ((Is64Bit || TheCall->isInreg()) && !Subtarget->hasSSE1())) {
+      cerr << "SSE register return with SSE disabled\n";
+      exit(1);
+    }
+
     // If this is a call to a function that returns an fp value on the floating
     // point stack, but where we prefer to use the value in xmm registers, copy
     // it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
@@ -1382,6 +1390,13 @@
       unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
                                                        TotalNumXMMRegs);
+      assert((Subtarget->hasSSE1() || !NumXMMRegs) &&
+             "SSE register cannot be used when SSE is disabled!");
+      if (!Subtarget->hasSSE1()) {
+        // SSE is disabled (e.g. for kernel code), so don't spill the XMM
+        // vararg registers to the stack.
+        TotalNumXMMRegs = 0;
+      }
       // For X86-64, if there are vararg parameters that are passed via
       // registers, then we must store them to their spots on the stack so they
       // may be loaded by dereferencing the result of va_next.
@@ -1675,6 +1690,8 @@
       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
     };
     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
+    assert((Subtarget->hasSSE1() || !NumXMMRegs)
+           && "SSE registers cannot be used when SSE is disabled");
     Chain = DAG.getCopyToReg(Chain, X86::AL,
                              DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 7034c55..ff7f72b 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -331,7 +331,7 @@
   // are enabled. These are available on all x86-64 CPUs.
   if (Is64Bit) {
     HasX86_64 = true;
-#if 1
+#if 0
     if (X86SSELevel < SSE2)
       X86SSELevel = SSE2;
 #endif