Don't attempt to tail call optimize vararg calls on Win64.

llvm-svn: 131709
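
As a rough illustration of the new control flow (this is not LLVM code: CallInfo, mayTailCallVarArgs and their fields are hypothetical stand-ins for the real Subtarget and CCState queries in X86TargetLowering::IsEligibleForTailCallOptimization), the vararg sibcall eligibility check after this patch looks roughly like this:

  #include <vector>

  // Hypothetical stand-ins for the Subtarget / CCState queries used in the
  // real IsEligibleForTailCallOptimization.
  struct CallInfo {
    bool isVarArg;
    bool isTargetWin64;
    std::vector<bool> argIsInReg;  // one flag per outgoing argument
  };

  // Sketch of the vararg portion of the check: Win64 vararg calls are
  // rejected up front, before any per-argument location analysis.
  bool mayTailCallVarArgs(const CallInfo &CI) {
    if (CI.isVarArg && !CI.argIsInReg.empty()) {
      // Optimizing for varargs on Win64 is unlikely to be safe without
      // additional testing.
      if (CI.isTargetWin64)
        return false;

      // On other x86-64 targets, a vararg call remains sibcall-eligible
      // only if every outgoing argument is passed in a register.
      for (bool inReg : CI.argIsInReg)
        if (!inReg)
          return false;
    }
    return true;
  }

Because the Win64 case now bails out before AnalyzeCallOperands runs, the Win64 shadow-area allocation that used to precede it in this block is dropped, and the WIN64 checks in vararg_tailcall.ll now expect the calls to stay as callq rather than being turned into jmp.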
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e5156f8..ce1dc09 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2531,17 +2531,18 @@
     return false;
 
   // Do not sibcall optimize vararg calls unless all arguments are passed via
-  // registers
+  // registers.
   if (isVarArg && !Outs.empty()) {
+
+    // Optimizing for varargs on Win64 is unlikely to be safe without
+    // additional testing.
+    if (Subtarget->isTargetWin64())
+      return false;
+
     SmallVector<CCValAssign, 16> ArgLocs;
     CCState CCInfo(CalleeCC, isVarArg, getTargetMachine(),
                    ArgLocs, *DAG.getContext());
 
-    // Allocate shadow area for Win64
-    if (Subtarget->isTargetWin64()) {
-      CCInfo.AllocateStack(32, 8);
-    }
-
     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
       if (!ArgLocs[i].isRegLoc())
diff --git a/llvm/test/CodeGen/X86/vararg_tailcall.ll b/llvm/test/CodeGen/X86/vararg_tailcall.ll
index d50b83a..73d80eb 100644
--- a/llvm/test/CodeGen/X86/vararg_tailcall.ll
+++ b/llvm/test/CodeGen/X86/vararg_tailcall.ll
@@ -12,7 +12,7 @@
 ; X64: @foo
 ; X64: jmp
 ; WIN64: @foo
-; WIN64: jmp
+; WIN64: callq
 define void @foo(i64 %arg) nounwind optsize ssp noredzone {
 entry:
   %call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i64 0, i64 0), i64 %arg) nounwind optsize noredzone
@@ -36,7 +36,7 @@
 ; X64: @foo2
 ; X64: jmp
 ; WIN64: @foo2
-; WIN64: jmp
+; WIN64: callq
 define i8* @foo2(i8* %arg) nounwind optsize ssp noredzone {
 entry:
   %tmp1 = load i8** @sel, align 8, !tbaa !0