Add a parameter to CCState so that it can access the MachineFunction.

No functional change.

Part of PR6965
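
The X86FastISel call sites below now pass the MachineFunction via
*FuncInfo.MF, and TargetLowering::CanLowerReturn grows a matching
parameter. A minimal sketch of the updated CCState constructor, assuming
the declaration lives in include/llvm/CodeGen/CallingConvLower.h and with
illustrative parameter names (the actual header change is in another part
of this commit):

  // CCState now carries the MachineFunction so that calling-convention
  // analysis can inspect per-function state, rather than only the
  // TargetMachine and LLVMContext as before.
  CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
          const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
          LLVMContext &C);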


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@132763 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 5162bc5..3237a10 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -401,7 +401,7 @@
         Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
         continue;
       }
-      
+
       // An array/variable index is always of the form i*S where S is the
       // constant scale size.  See if we can push the scale into immediates.
       uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -469,7 +469,7 @@
     if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
       if (GVar->isThreadLocal())
         return false;
-    
+
     // RIP-relative addresses can't have additional register operands, so if
     // we've already folded stuff into the addressing mode, just force the
     // global value into its own register, which we can use as the basereg.
@@ -704,7 +704,8 @@
 
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector<CCValAssign, 16> ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
+                   I->getContext());
     CCInfo.AnalyzeReturn(Outs, RetCC_X86);
 
     const Value *RV = Ret->getOperand(0);
@@ -936,7 +937,7 @@
 
 bool X86FastISel::X86SelectZExt(const Instruction *I) {
   // Handle zero-extension from i1 to i8, which is common.
-  if (!I->getOperand(0)->getType()->isIntegerTy(1)) 
+  if (!I->getOperand(0)->getType()->isIntegerTy(1))
     return false;
 
   EVT DstVT = TLI.getValueType(I->getType());
@@ -1062,13 +1063,13 @@
         if (OpReg == 0) return false;
         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TestOpc))
           .addReg(OpReg).addImm(1);
-        
+
         unsigned JmpOpc = X86::JNE_4;
         if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
           std::swap(TrueMBB, FalseMBB);
           JmpOpc = X86::JE_4;
         }
-        
+
         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(JmpOpc))
           .addMBB(TrueMBB);
         FastEmitBranch(FalseMBB, DL);
@@ -1336,7 +1337,7 @@
       return false;
 
     uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();
-    
+
     // Get the address of the dest and source addresses.
     X86AddressMode DestAM, SrcAM;
     if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
@@ -1345,7 +1346,7 @@
 
     return TryEmitSmallMemcpy(DestAM, SrcAM, Len);
   }
-      
+
   case Intrinsic::stackprotector: {
     // Emit inline code to store the stack guard onto the stack.
     EVT PtrTy = TLI.getPointerTy();
@@ -1379,7 +1380,7 @@
   case Intrinsic::sadd_with_overflow:
   case Intrinsic::uadd_with_overflow: {
     // FIXME: Should fold immediates.
-    
+
     // Replace "add with overflow" intrinsics with an "add" instruction followed
     // by a seto/setc instruction.
     const Function *Callee = I.getCalledFunction();
@@ -1467,7 +1468,8 @@
   GetReturnInfo(I->getType(), CS.getAttributes().getRetAttributes(),
                 Outs, TLI, &Offsets);
   bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
-                        FTy->isVarArg(), Outs, FTy->getContext());
+                                           *FuncInfo.MF, FTy->isVarArg(),
+                                           Outs, FTy->getContext());
   if (!CanLowerReturn)
     return false;
 
@@ -1535,9 +1537,9 @@
           ArgVal = ConstantExpr::getZExt(CI,Type::getInt32Ty(CI->getContext()));
       }
     }
-    
+
     unsigned ArgReg;
-    
+
     // Passing bools around ends up doing a trunc to i1 and passing it.
     // Codegen this as an argument + "and 1".
     if (ArgVal->getType()->isIntegerTy(1) && isa<TruncInst>(ArgVal) &&
@@ -1546,10 +1548,10 @@
       ArgVal = cast<TruncInst>(ArgVal)->getOperand(0);
       ArgReg = getRegForValue(ArgVal);
       if (ArgReg == 0) return false;
-      
+
       MVT ArgVT;
       if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false;
-      
+
       ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg,
                            ArgVal->hasOneUse(), 1);
     } else {
@@ -1575,7 +1577,8 @@
 
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CC, isVarArg, TM, ArgLocs, I->getParent()->getContext());
+  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
+                 I->getParent()->getContext());
 
   // Allocate shadow area for Win64
   if (Subtarget->isTargetWin64())
@@ -1790,7 +1793,8 @@
   // Now handle call return values.
   SmallVector<unsigned, 4> UsedRegs;
   SmallVector<CCValAssign, 16> RVLocs;
-  CCState CCRetInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
+  CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
+                    I->getParent()->getContext());
   unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
   CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1946,7 +1950,7 @@
       if (AM.BaseType == X86AddressMode::RegBase &&
           AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == 0)
         return AM.Base.Reg;
-      
+
       Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
       unsigned ResultReg = createResultReg(RC);
       addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,