Implemented correct stack probing on mingw/cygwin for dynamic allocas.
Also fixed the static case in the presence of EAX being live-in. This fixes PR331

PS: Why did we still not have push/pop instructions? :)


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@36195 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/CBackend/CBackend.cpp b/lib/Target/CBackend/CBackend.cpp
index 83704a3..ac7251f 100644
--- a/lib/Target/CBackend/CBackend.cpp
+++ b/lib/Target/CBackend/CBackend.cpp
@@ -1281,8 +1281,8 @@
   // Alloca is hard to get, and we don't want to include stdlib.h here.
   Out << "/* get a declaration for alloca */\n"
       << "#if defined(__CYGWIN__) || defined(__MINGW32__)\n"
-      << "extern void *_alloca(unsigned long);\n"
-      << "#define alloca(x) _alloca(x)\n"
+      << "#define  alloca(x) __builtin_alloca((x))\n"
+      << "#define _alloca(x) __builtin_alloca((x))\n"    
       << "#elif defined(__APPLE__)\n"
       << "extern void *__builtin_alloca(unsigned long);\n"
       << "#define alloca(x) __builtin_alloca(x)\n"
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 664c7e0..209b17a 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -237,7 +237,10 @@
   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
   if (Subtarget->is64Bit())
     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);
+  if (Subtarget->isTargetCygMing())
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+  else
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
 
   if (X86ScalarSSE) {
     // Set up the FP register classes.
@@ -3401,6 +3404,36 @@
     }
 }
 
+SDOperand X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op,
+                                                     SelectionDAG &DAG) {
+  // Get the inputs.
+  SDOperand Chain = Op.getOperand(0);
+  SDOperand Size  = Op.getOperand(1);
+  // FIXME: Ensure alignment here
+
+  TargetLowering::ArgListTy Args; 
+  TargetLowering::ArgListEntry Entry;
+  MVT::ValueType IntPtr = getPointerTy();
+  MVT::ValueType SPTy = (Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+  const Type *IntPtrTy = getTargetData()->getIntPtrType();
+  
+  Entry.Node    = Size;
+  Entry.Ty      = IntPtrTy;
+  Entry.isInReg = true; // Should pass in EAX
+  Args.push_back(Entry);
+  std::pair<SDOperand, SDOperand> CallResult =
+    LowerCallTo(Chain, IntPtrTy, false, false, CallingConv::C, false,
+                DAG.getExternalSymbol("_alloca", IntPtr), Args, DAG);
+
+  SDOperand SP = DAG.getCopyFromReg(CallResult.second, X86StackPtr, SPTy);
+  
+  std::vector<MVT::ValueType> Tys;
+  Tys.push_back(SPTy);
+  Tys.push_back(MVT::Other);
+  SDOperand Ops[2] = { SP, CallResult.second };
+  return DAG.getNode(ISD::MERGE_VALUES, Tys, Ops, 2);
+}
+
 SDOperand
 X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
   MachineFunction &MF = DAG.getMachineFunction();
@@ -4002,6 +4035,7 @@
   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
+  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
   }
   return SDOperand();
 }
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 7d40e30..8b9c269 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -401,6 +401,7 @@
     SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG);
+    SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerREADCYCLCECOUNTER(SDOperand Op, SelectionDAG &DAG);
     SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG);
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 8e4e7d7..bfdaff6 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -477,6 +477,9 @@
 def POP32r   : I<0x58, AddRegFrm,
                  (ops GR32:$reg), "pop{l} $reg", []>, Imp<[ESP],[ESP]>;
 
+def PUSH32r  : I<0x50, AddRegFrm,
+                 (ops GR32:$reg), "push{l} $reg", []>, Imp<[ESP],[ESP]>;
+
 def MovePCtoStack : I<0, Pseudo, (ops piclabel:$label),
                       "call $label", []>;
 
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 3737c0e..cd2a0d4 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -1039,14 +1039,39 @@
 
   if (NumBytes) {   // adjust stack pointer: ESP -= numbytes
     if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
+      // Check whether EAX is live-in for this function
+      bool isEAXAlive = false;
+      for (MachineFunction::livein_iterator II = MF.livein_begin(),
+             EE = MF.livein_end(); (II != EE) && !isEAXAlive; ++II) {
+        unsigned Reg = II->first;
+        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
+                      Reg == X86::AH || Reg == X86::AL);
+      }
+
       // Function prologue calls _alloca to probe the stack when allocating  
       // more than 4k bytes in one go. Touching the stack at 4K increments is  
       // necessary to ensure that the guard pages used by the OS virtual memory
       // manager are allocated in correct sequence.
-      MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
-      MBB.insert(MBBI, MI);
-      MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
-      MBB.insert(MBBI, MI);
+      if (!isEAXAlive) {
+        MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
+        MBB.insert(MBBI, MI);
+        MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
+        MBB.insert(MBBI, MI);
+      } else {
+        // Save EAX
+        MI = BuildMI(TII.get(X86::PUSH32r), X86::EAX);
+        MBB.insert(MBBI, MI);
+        // Allocate NumBytes-4 bytes on stack. We'll also use 4 already
+        // allocated bytes for EAX.
+        MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
+        MBB.insert(MBBI, MI);
+        MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
+        MBB.insert(MBBI, MI);
+        // Restore EAX
+        MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm), X86::EAX),
+                          StackPtr, NumBytes-4);
+        MBB.insert(MBBI, MI);
+      }
     } else {
       unsigned Opc = (NumBytes < 128) ?
         (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :