Remove trailing whitespace to reduce patch noise in later commits.

(Note: Eventually, commits like this will be handled via a pre-commit hook that
 does this automagically, as well as expanding tabs to spaces and checking for
 80-col violations.)
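
For illustration only, here is a rough sketch of one way such a check could
work, written as a git-style client-side pre-commit hook in Python. The script
name, file filter, and checks below are hypothetical, not an existing LLVM
hook; it also inspects the files as they sit on disk rather than the staged
blobs, which is good enough for a sketch.

  #!/usr/bin/env python
  # Hypothetical pre-commit hook: reject staged C/C++ files that contain
  # trailing whitespace, tab characters, or lines longer than 80 columns.
  import subprocess, sys

  def staged_files():
      # Ask git which files are added/copied/modified in the index.
      out = subprocess.check_output(
          ["git", "diff", "--cached", "--name-only", "--diff-filter=ACM"])
      return [f for f in out.decode().splitlines()
              if f.endswith((".cpp", ".h"))]

  problems = []
  for path in staged_files():
      for lineno, line in enumerate(open(path), 1):
          text = line.rstrip("\n")
          if text != text.rstrip():
              problems.append("%s:%d: trailing whitespace" % (path, lineno))
          if "\t" in text:
              problems.append("%s:%d: tab character" % (path, lineno))
          if len(text) > 80:
              problems.append("%s:%d: line longer than 80 columns"
                              % (path, lineno))

  if problems:
      print("\n".join(problems))
      sys.exit(1)

Because the hook exits non-zero when it finds a violation, the commit is
aborted until the offending lines are cleaned up.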


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64827 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 31f295c..2c97b99 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -34,30 +34,30 @@
 #include "llvm/Support/CommandLine.h"
 using namespace llvm;
 
-static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc", 
+static cl::opt<bool> EnablePPCPreinc("enable-ppc-preinc",
 cl::desc("enable preincrement load/store generation on PPC (experimental)"),
                                      cl::Hidden);
 
 PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
   : TargetLowering(TM), PPCSubTarget(*TM.getSubtargetImpl()) {
-    
+
   setPow2DivIsCheap();
 
   // Use _setjmp/_longjmp instead of setjmp/longjmp.
   setUseUnderscoreSetJmp(true);
   setUseUnderscoreLongJmp(true);
-    
+
   // Set up the register classes.
   addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
   addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
   addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
-  
+
   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
   setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
 
   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-    
+
   // PowerPC has pre-inc load and stores.
   setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
   setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
@@ -92,7 +92,7 @@
   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
   setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
-  
+
   // We don't support sin/cos/sqrt/fmod/pow
   setOperationAction(ISD::FSIN , MVT::f64, Expand);
   setOperationAction(ISD::FCOS , MVT::f64, Expand);
@@ -104,16 +104,16 @@
   setOperationAction(ISD::FPOW , MVT::f32, Expand);
 
   setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
-  
+
   // If we're enabling GP optimizations, use hardware square root
   if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
     setOperationAction(ISD::FSQRT, MVT::f64, Expand);
     setOperationAction(ISD::FSQRT, MVT::f32, Expand);
   }
-  
+
   setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
   setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
-  
+
   // PowerPC does not have BSWAP, CTPOP or CTTZ
   setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
   setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
@@ -121,29 +121,29 @@
   setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
   setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
   setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
-  
+
   // PowerPC does not have ROTR
   setOperationAction(ISD::ROTR, MVT::i32   , Expand);
   setOperationAction(ISD::ROTR, MVT::i64   , Expand);
-  
+
   // PowerPC does not have Select
   setOperationAction(ISD::SELECT, MVT::i32, Expand);
   setOperationAction(ISD::SELECT, MVT::i64, Expand);
   setOperationAction(ISD::SELECT, MVT::f32, Expand);
   setOperationAction(ISD::SELECT, MVT::f64, Expand);
-  
+
   // PowerPC wants to turn select_cc of FP into fsel when possible.
   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
 
   // PowerPC wants to optimize integer setcc a bit
   setOperationAction(ISD::SETCC, MVT::i32, Custom);
-  
+
   // PowerPC does not have BRCOND which requires SetCC
   setOperationAction(ISD::BRCOND, MVT::Other, Expand);
 
   setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
-  
+
   // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
   setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
 
@@ -162,14 +162,14 @@
   // Support label based line numbers.
   setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
   setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
-  
+
   setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
   setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
   setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
   setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
-  
-  
-  // We want to legalize GlobalAddress and ConstantPool nodes into the 
+
+
+  // We want to legalize GlobalAddress and ConstantPool nodes into the
   // appropriate instructions to materialize the address.
   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
@@ -179,7 +179,7 @@
   setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
   setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
   setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
-  
+
   // RET must be custom lowered, to meet ABI requirements.
   setOperationAction(ISD::RET               , MVT::Other, Custom);
 
@@ -191,24 +191,24 @@
 
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
-  
+
   // VAARG is custom lowered with ELF 32 ABI
   if (TM.getSubtarget<PPCSubtarget>().isELF32_ABI())
     setOperationAction(ISD::VAARG, MVT::Other, Custom);
   else
     setOperationAction(ISD::VAARG, MVT::Other, Expand);
-  
+
   // Use the default implementation.
   setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
-  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand); 
+  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
   setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
 
   // We want to custom lower some of our intrinsics.
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-  
+
   // Comparisons that require checking two conditions.
   setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
   setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
@@ -222,7 +222,7 @@
   setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
   setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
-    
+
   if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
     // They also have instructions for converting between i64 and fp.
     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
@@ -230,12 +230,12 @@
     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
- 
+
     // FIXME: disable this lowered code.  This generates 64-bit register values,
     // and we don't model the fact that the top part is clobbered by calls.  We
     // need to flag these together so that the value isn't live across a call.
     //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
-    
+
     // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
   } else {
@@ -269,7 +269,7 @@
       // add/sub are legal for all supported vector VT's.
       setOperationAction(ISD::ADD , VT, Legal);
       setOperationAction(ISD::SUB , VT, Legal);
-      
+
       // We promote all shuffles to v16i8.
       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
       AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
@@ -287,7 +287,7 @@
       AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
       setOperationAction(ISD::STORE, VT, Promote);
       AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
-      
+
       // No other operations are legal.
       setOperationAction(ISD::MUL , VT, Expand);
       setOperationAction(ISD::SDIV, VT, Expand);
@@ -320,12 +320,12 @@
     setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
     setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
     setOperationAction(ISD::STORE , MVT::v4i32, Legal);
-    
+
     addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
     addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
     addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
     addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
-    
+
     setOperationAction(ISD::MUL, MVT::v4f32, Legal);
     setOperationAction(ISD::MUL, MVT::v4i32, Custom);
     setOperationAction(ISD::MUL, MVT::v8i16, Custom);
@@ -333,16 +333,16 @@
 
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
     setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
-    
+
     setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
     setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
   }
-  
+
   setShiftAmountType(MVT::i32);
   setBooleanContents(ZeroOrOneBooleanContent);
-  
+
   if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
     setStackPointerRegisterToSaveRestore(PPC::X1);
     setExceptionPointerRegister(PPC::X3);
@@ -352,13 +352,13 @@
     setExceptionPointerRegister(PPC::R3);
     setExceptionSelectorRegister(PPC::R4);
   }
-  
+
   // We have target-specific dag combine patterns for the following nodes:
   setTargetDAGCombine(ISD::SINT_TO_FP);
   setTargetDAGCombine(ISD::STORE);
   setTargetDAGCombine(ISD::BR_CC);
   setTargetDAGCombine(ISD::BSWAP);
-  
+
   // Darwin long double math library functions have $LDBL128 appended.
   if (TM.getSubtarget<PPCSubtarget>().isDarwin()) {
     setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
@@ -457,7 +457,7 @@
 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
 /// true if Op is undef or if it matches the specified value.
 static bool isConstantOrUndef(SDValue Op, unsigned Val) {
-  return Op.getOpcode() == ISD::UNDEF || 
+  return Op.getOpcode() == ISD::UNDEF ||
          cast<ConstantSDNode>(Op)->getZExtValue() == Val;
 }
 
@@ -498,13 +498,13 @@
 
 /// isVMerge - Common function, used to match vmrg* shuffles.
 ///
-static bool isVMerge(SDNode *N, unsigned UnitSize, 
+static bool isVMerge(SDNode *N, unsigned UnitSize,
                      unsigned LHSStart, unsigned RHSStart) {
   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
          N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
          "Unsupported merge size!");
-  
+
   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
       if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
@@ -542,9 +542,9 @@
   unsigned i;
   for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
     /*search*/;
-  
+
   if (i == 16) return -1;  // all undef.
-  
+
   // Otherwise, check to see if the rest of the elements are consecutively
   // numbered from this value.
   unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getZExtValue();
@@ -562,7 +562,7 @@
       if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
         return -1;
   }
-  
+
   return ShiftAmt;
 }
 
@@ -573,7 +573,7 @@
   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
          N->getNumOperands() == 16 &&
          (EltSize == 1 || EltSize == 2 || EltSize == 4));
-  
+
   // This is a splat operation if each element of the permute is the same, and
   // if the value doesn't reference the second vector.
   unsigned ElementBase = 0;
@@ -585,14 +585,14 @@
 
   if (cast<ConstantSDNode>(Elt)->getZExtValue() >= 16)
     return false;
-  
+
   // Check that they are consecutive.
   for (unsigned i = 1; i != EltSize; ++i) {
     if (!isa<ConstantSDNode>(N->getOperand(i)) ||
         cast<ConstantSDNode>(N->getOperand(i))->getZExtValue() != i+ElementBase)
       return false;
   }
-  
+
   assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
     if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
@@ -639,31 +639,31 @@
     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
     SDValue UniquedVals[4];
     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
-    
+
     // See if all of the elements in the buildvector agree across.
     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
       if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
       // If the element isn't a constant, bail fully out.
       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
 
-          
+
       if (UniquedVals[i&(Multiple-1)].getNode() == 0)
         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
         return SDValue();  // no match.
     }
-    
+
     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
     // either constant or undef values that are identical for each chunk.  See
     // if these chunks can form into a larger vspltis*.
-    
+
     // Check to see if all of the leading entries are either 0 or -1.  If
     // neither, then this won't fit into the immediate field.
     bool LeadingZero = true;
     bool LeadingOnes = true;
     for (unsigned i = 0; i != Multiple-1; ++i) {
       if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.
-      
+
       LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
       LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
     }
@@ -682,10 +682,10 @@
       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
         return DAG.getTargetConstant(Val, MVT::i32);
     }
-    
+
     return SDValue();
   }
-  
+
   // Check to see if this buildvec has a single non-undef value in its elements.
   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
     if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
@@ -694,9 +694,9 @@
     else if (OpVal != N->getOperand(i))
       return SDValue();
   }
-  
+
   if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.
-  
+
   unsigned ValSizeInBytes = 0;
   uint64_t Value = 0;
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
@@ -712,13 +712,13 @@
   // this splat.  The only case that we could fit the replicated bits into our
   // immediate field for would be zero, and we prefer to use vxor for it.
   if (ValSizeInBytes < ByteSize) return SDValue();
-  
+
   // If the element value is larger than the splat value, cut it in half and
   // check to see if the two halves are equal.  Continue doing this until we
   // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
   while (ValSizeInBytes > ByteSize) {
     ValSizeInBytes >>= 1;
-    
+
     // If the top half equals the bottom half, we're still ok.
     if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
          (Value                        & ((1 << (8*ValSizeInBytes))-1)))
@@ -728,7 +728,7 @@
   // Properly sign extend the value.
   int ShAmt = (4-ByteSize)*8;
   int MaskVal = ((int)Value << ShAmt) >> ShAmt;
-  
+
   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
   if (MaskVal == 0) return SDValue();
 
@@ -749,7 +749,7 @@
 static bool isIntS16Immediate(SDNode *N, short &Imm) {
   if (N->getOpcode() != ISD::Constant)
     return false;
-  
+
   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
   if (N->getValueType(0) == MVT::i32)
     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
@@ -773,14 +773,14 @@
       return false;    // r+i
     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
       return false;    // r+i
-    
+
     Base = N.getOperand(0);
     Index = N.getOperand(1);
     return true;
   } else if (N.getOpcode() == ISD::OR) {
     if (isIntS16Immediate(N.getOperand(1), imm))
       return false;    // r+i can fold it if we can.
-    
+
     // If this is an or of disjoint bitfields, we can codegen this as an add
     // (for better address arithmetic) if the LHS and RHS of the OR are provably
     // disjoint.
@@ -790,7 +790,7 @@
                           APInt::getAllOnesValue(N.getOperand(0)
                             .getValueSizeInBits()),
                           LHSKnownZero, LHSKnownOne);
-    
+
     if (LHSKnownZero.getBoolValue()) {
       DAG.ComputeMaskedBits(N.getOperand(1),
                             APInt::getAllOnesValue(N.getOperand(1)
@@ -805,7 +805,7 @@
       }
     }
   }
-  
+
   return false;
 }
 
@@ -820,7 +820,7 @@
   // If this can be more profitably realized as r+r, fail.
   if (SelectAddressRegReg(N, Disp, Base, DAG))
     return false;
-  
+
   if (N.getOpcode() == ISD::ADD) {
     short imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm)) {
@@ -864,7 +864,7 @@
     }
   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
     // Loading from a constant address.
-    
+
     // If this address fits entirely in a 16-bit sext immediate field, codegen
     // this as "d, 0"
     short Imm;
@@ -878,17 +878,17 @@
     if (CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
       int Addr = (int)CN->getZExtValue();
-      
+
       // Otherwise, break this down into an LIS + disp.
       Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
-      
+
       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
       Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base), 0);
       return true;
     }
   }
-  
+
   Disp = DAG.getTargetConstant(0, getPointerTy());
   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
@@ -907,7 +907,7 @@
   // reg+imm, e.g. where imm = 0.
   if (SelectAddressRegReg(N, Base, Index, DAG))
     return true;
-  
+
   // If the operand is an addition, always emit this as [r+r], since this is
   // better (for code size, and execution, as the memop does the add for free)
   // than emitting an explicit add.
@@ -916,7 +916,7 @@
     Index = N.getOperand(1);
     return true;
   }
-  
+
   // Otherwise, do it the hard way, using R0 as the base register.
   Base = DAG.getRegister(PPC::R0, N.getValueType());
   Index = N;
@@ -934,7 +934,7 @@
   // If this can be more profitably realized as r+r, fail.
   if (SelectAddressRegReg(N, Disp, Base, DAG))
     return false;
-  
+
   if (N.getOpcode() == ISD::ADD) {
     short imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
@@ -986,12 +986,12 @@
         Base = DAG.getRegister(PPC::R0, CN->getValueType(0));
         return true;
       }
-    
+
       // Fold the low-part of 32-bit absolute addresses into addr mode.
       if (CN->getValueType(0) == MVT::i32 ||
           (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
         int Addr = (int)CN->getZExtValue();
-      
+
         // Otherwise, break this down into an LIS + disp.
         Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
         Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
@@ -1001,7 +1001,7 @@
       }
     }
   }
-  
+
   Disp = DAG.getTargetConstant(0, getPointerTy());
   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
@@ -1020,13 +1020,13 @@
                                                   SelectionDAG &DAG) const {
   // Disabled by default for now.
   if (!EnablePPCPreinc) return false;
-  
+
   SDValue Ptr;
   MVT VT;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
-    
+
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     ST = ST;
     Ptr = ST->getBasePtr();
@@ -1037,9 +1037,9 @@
   // PowerPC doesn't have preinc load/store instructions for vectors.
   if (VT.isVector())
     return false;
-  
+
   // TODO: Check reg+reg first.
-  
+
   // LDU/STU use reg+imm*4, others use reg+imm.
   if (VT != MVT::i64) {
     // reg + imm
@@ -1058,8 +1058,8 @@
         LD->getExtensionType() == ISD::SEXTLOAD &&
         isa<ConstantSDNode>(Offset))
       return false;
-  }    
-  
+  }
+
   AM = ISD::PRE_INC;
   return true;
 }
@@ -1068,7 +1068,7 @@
 //  LowerOperation implementation
 //===----------------------------------------------------------------------===//
 
-SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 
+SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) {
   MVT PtrVT = Op.getValueType();
   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
@@ -1079,7 +1079,7 @@
   DebugLoc dl = Op.getDebugLoc();
 
   const TargetMachine &TM = DAG.getTarget();
-  
+
   SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, CPI, Zero);
   SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, CPI, Zero);
 
@@ -1091,14 +1091,14 @@
     // The address of the global is just (hi(&g)+lo(&g)).
     return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
   }
-  
+
   if (TM.getRelocationModel() == Reloc::PIC_) {
     // With PIC, the first instruction is actually "GR+hi(&G)".
     Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
-                     DAG.getNode(PPCISD::GlobalBaseReg, 
+                     DAG.getNode(PPCISD::GlobalBaseReg,
                                  DebugLoc::getUnknownLoc(), PtrVT), Hi);
   }
-  
+
   Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
   return Lo;
 }
@@ -1110,7 +1110,7 @@
   SDValue Zero = DAG.getConstant(0, PtrVT);
   // FIXME there isn't really any debug loc here
   DebugLoc dl = Op.getDebugLoc();
-  
+
   const TargetMachine &TM = DAG.getTarget();
 
   SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, JTI, Zero);
@@ -1124,25 +1124,25 @@
     // The address of the global is just (hi(&g)+lo(&g)).
     return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
   }
-  
+
   if (TM.getRelocationModel() == Reloc::PIC_) {
     // With PIC, the first instruction is actually "GR+hi(&G)".
     Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
-                     DAG.getNode(PPCISD::GlobalBaseReg, 
+                     DAG.getNode(PPCISD::GlobalBaseReg,
                                  DebugLoc::getUnknownLoc(), PtrVT), Hi);
   }
-  
+
   Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
   return Lo;
 }
 
-SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 
+SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                    SelectionDAG &DAG) {
   assert(0 && "TLS not implemented for PPC.");
   return SDValue(); // Not reached
 }
 
-SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 
+SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) {
   MVT PtrVT = Op.getValueType();
   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
@@ -1151,7 +1151,7 @@
   SDValue Zero = DAG.getConstant(0, PtrVT);
   // FIXME there isn't really any debug info here
   DebugLoc dl = GSDN->getDebugLoc();
-  
+
   const TargetMachine &TM = DAG.getTarget();
 
   SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero);
@@ -1165,19 +1165,19 @@
     // The address of the global is just (hi(&g)+lo(&g)).
     return DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
   }
-  
+
   if (TM.getRelocationModel() == Reloc::PIC_) {
     // With PIC, the first instruction is actually "GR+hi(&G)".
     Hi = DAG.getNode(ISD::ADD, dl, PtrVT,
-                     DAG.getNode(PPCISD::GlobalBaseReg, 
+                     DAG.getNode(PPCISD::GlobalBaseReg,
                                  DebugLoc::getUnknownLoc(), PtrVT), Hi);
   }
-  
+
   Lo = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
-  
+
   if (!TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV))
     return Lo;
-  
+
   // If the global is weak or external, we have to go through the lazy
   // resolution stub.
   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Lo, NULL, 0);
@@ -1186,7 +1186,7 @@
 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
   DebugLoc dl = Op.getDebugLoc();
-  
+
   // If we're comparing for equality to zero, expose the fact that this is
   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
   // fold the new nodes.
@@ -1197,20 +1197,20 @@
       if (VT.bitsLT(MVT::i32)) {
         VT = MVT::i32;
         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
-      } 
+      }
       unsigned Log2b = Log2_32(VT.getSizeInBits());
       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
                                 DAG.getConstant(Log2b, MVT::i32));
       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
     }
-    // Leave comparisons against 0 and -1 alone for now, since they're usually 
+    // Leave comparisons against 0 and -1 alone for now, since they're usually
     // optimized.  FIXME: revisit this when we can custom lower all setcc
     // optimizations.
     if (C->isAllOnesValue() || C->isNullValue())
       return SDValue();
   }
-  
+
   // If we have an integer seteq/setne, turn it into a compare against zero
   // by xor'ing the rhs with the lhs, which is faster than setting a
   // condition register, reading it back out, and masking the correct bit.  The
@@ -1219,7 +1219,7 @@
   MVT LHSVT = Op.getOperand(0).getValueType();
   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
     MVT VT = Op.getValueType();
-    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 
+    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                                 Op.getOperand(1));
     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
   }
@@ -1232,7 +1232,7 @@
                               unsigned VarArgsNumGPR,
                               unsigned VarArgsNumFPR,
                               const PPCSubtarget &Subtarget) {
-  
+
   assert(0 && "VAARG in ELF32 ABI not implemented yet!");
   return SDValue(); // Not reached
 }
@@ -1249,7 +1249,7 @@
   const Type *IntPtrTy =
     DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType();
 
-  TargetLowering::ArgListTy Args; 
+  TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
 
   Entry.Ty = IntPtrTy;
@@ -1262,7 +1262,7 @@
 
   Entry.Node = FPtr; Args.push_back(Entry);
   Entry.Node = Nest; Args.push_back(Entry);
-  
+
   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
   std::pair<SDValue, SDValue> CallResult =
     LowerCallTo(Chain, Op.getValueType().getTypeForMVT(), false, false,
@@ -1320,13 +1320,13 @@
 
   SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8);
   SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8);
-  
+
 
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  
+
   SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
   SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
-  
+
   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
 
@@ -1335,22 +1335,22 @@
 
   uint64_t FPROffset = 1;
   SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
-  
+
   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
-  
+
   // Store first byte : number of int regs
   SDValue firstStore = DAG.getStore(Op.getOperand(0), dl, ArgGPR,
                                       Op.getOperand(1), SV, 0);
   uint64_t nextOffset = FPROffset;
   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                   ConstFPROffset);
-  
+
   // Store second byte : number of float regs
   SDValue secondStore =
     DAG.getStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset);
   nextOffset += StackOffset;
   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
-  
+
   // Store second word : arguments given on stack
   SDValue thirdStore =
     DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, SV, nextOffset);
@@ -1374,8 +1374,8 @@
     };
     return FPR;
   }
-  
-  
+
+
   static const unsigned FPR[] = {
     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
     PPC::F8
@@ -1397,7 +1397,7 @@
 }
 
 SDValue
-PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, 
+PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,
                                          SelectionDAG &DAG,
                                          int &VarArgsFrameIndex,
                                          int &VarArgsStackOffset,
@@ -1413,7 +1413,7 @@
   SDValue Root = Op.getOperand(0);
   bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
   DebugLoc dl = Op.getDebugLoc();
-  
+
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   bool isPPC64 = PtrVT == MVT::i64;
   bool isMachoABI = Subtarget.isMachoABI();
@@ -1435,9 +1435,9 @@
     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
   };
-  
+
   static const unsigned *FPR = GetFPR(Subtarget);
-  
+
   static const unsigned VR[] = {
     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
@@ -1448,13 +1448,13 @@
   const unsigned Num_VR_Regs  = array_lengthof( VR);
 
   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
-  
+
   const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
-  
+
   // In 32-bit non-varargs functions, the stack space for vectors is after the
   // stack space for non-vectors.  We do not use this space unless we have
   // too many vectors to fit in registers, something that only occurs in
-  // constructed examples:), but we have to walk the arglist to figure 
+  // constructed examples:), but we have to walk the arglist to figure
   // that out...for the pathological case, compute VecArgOffset as the
   // start of the vector parameter area.  Computing VecArgOffset is the
   // entire point of the following loop.
@@ -1462,7 +1462,7 @@
   // to handle Elf here.
   unsigned VecArgOffset = ArgOffset;
   if (!isVarArg && !isPPC64) {
-    for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e; 
+    for (unsigned ArgNo = 0, e = Op.getNode()->getNumValues()-1; ArgNo != e;
          ++ArgNo) {
       MVT ObjectVT = Op.getValue(ArgNo).getValueType();
       unsigned ObjSize = ObjectVT.getSizeInBits()/8;
@@ -1472,7 +1472,7 @@
       if (Flags.isByVal()) {
         // ObjSize is the true size, ArgSize rounded up to multiple of regs.
         ObjSize = Flags.getByValSize();
-        unsigned ArgSize = 
+        unsigned ArgSize =
                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
         VecArgOffset += ArgSize;
         continue;
@@ -1505,7 +1505,7 @@
   // Add DAG nodes to load the arguments or copy them out of registers.  On
   // entry to a function on PPC, the arguments start after the linkage area,
   // although the first ones are often in registers.
-  // 
+  //
   // In the ELF 32 ABI, GPRs and stack are double word align: an argument
   // represented with two words (long long or double) must be copied to an
   // even GPR_idx value or to an even ArgOffset value.
@@ -1522,7 +1522,7 @@
     ISD::ArgFlagsTy Flags =
       cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
     // See if next argument requires stack alignment in ELF
-    bool Align = Flags.isSplit(); 
+    bool Align = Flags.isSplit();
 
     unsigned CurArgOffset = ArgOffset;
 
@@ -1566,7 +1566,7 @@
           unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
           RegInfo.addLiveIn(GPR[GPR_idx], VReg);
           SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, PtrVT);
-          SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 
+          SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 );
           MemOps.push_back(Store);
           ++GPR_idx;
@@ -1615,7 +1615,7 @@
           ArgSize = PtrByteSize;
         }
         // Stack align in ELF
-        if (needsLoad && Align && isELF32_ABI) 
+        if (needsLoad && Align && isELF32_ABI)
           ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
         // All int arguments reserve stack space in Macho ABI.
         if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
@@ -1649,7 +1649,7 @@
       // All int arguments reserve stack space in Macho ABI.
       if (isMachoABI || needsLoad) ArgOffset += 8;
       break;
-      
+
     case MVT::f32:
     case MVT::f64:
       // Every 4 bytes of argument space consumes one of the GPRs available for
@@ -1671,7 +1671,7 @@
       } else {
         needsLoad = true;
       }
-      
+
       // Stack align in ELF
       if (needsLoad && Align && isELF32_ABI)
         ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
@@ -1713,7 +1713,7 @@
       }
       break;
     }
-    
+
     // We need to load the argument to a virtual register if we determined above
     // that we ran out of physical registers of the appropriate type.
     if (needsLoad) {
@@ -1723,7 +1723,7 @@
       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
       ArgVal = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0);
     }
-    
+
     ArgValues.push_back(ArgVal);
   }
 
@@ -1749,29 +1749,29 @@
   // If the function takes variable number of arguments, make a frame index for
   // the start of the first vararg value... for expansion of llvm.va_start.
   if (isVarArg) {
-    
+
     int depth;
     if (isELF32_ABI) {
       VarArgsNumGPR = GPR_idx;
       VarArgsNumFPR = FPR_idx;
-   
+
       // Make room for Num_GPR_Regs, Num_FPR_Regs and for a possible frame
       // pointer.
       depth = -(Num_GPR_Regs * PtrVT.getSizeInBits()/8 +
                 Num_FPR_Regs * MVT(MVT::f64).getSizeInBits()/8 +
                 PtrVT.getSizeInBits()/8);
-      
+
       VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                                                   ArgOffset);
 
     }
     else
       depth = ArgOffset;
-    
+
     VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                                                depth);
     SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
-    
+
     // In ELF 32 ABI, the fixed integer arguments of a variadic function are
     // stored to the VarArgsFrameIndex on the stack.
     if (isELF32_ABI) {
@@ -1832,13 +1832,13 @@
       }
     }
   }
-  
+
   if (!MemOps.empty())
-    Root = DAG.getNode(ISD::TokenFactor, dl, 
+    Root = DAG.getNode(ISD::TokenFactor, dl,
                        MVT::Other, &MemOps[0], MemOps.size());
 
   ArgValues.push_back(Root);
- 
+
   // Return the new list of results.
   return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
                      &ArgValues[0], ArgValues.size());
@@ -1973,12 +1973,12 @@
 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
   if (!C) return 0;
-  
+
   int Addr = C->getZExtValue();
   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
       (Addr << 6 >> 6) != Addr)
     return 0;  // Top 6 bits have to be sext of immediate.
-  
+
   return DAG.getConstant((int)C->getZExtValue() >> 2,
                          DAG.getTargetLoweringInfo().getPointerTy()).getNode();
 }
@@ -2087,12 +2087,12 @@
 }
 
 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
-/// by "Src" to address "Dst" of size "Size".  Alignment information is 
+/// by "Src" to address "Dst" of size "Size".  Alignment information is
 /// specified by the specific parameter attribute. The copy will be passed as
 /// a byval function parameter.
 /// Sometimes what we are copying is the end of a larger object, the part that
 /// does not fit in registers.
-static SDValue 
+static SDValue
 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                           unsigned Size, DebugLoc dl) {
@@ -2139,20 +2139,20 @@
   SDValue Callee = TheCall->getCallee();
   unsigned NumOps  = TheCall->getNumArgs();
   DebugLoc dl = TheCall->getDebugLoc();
-  
+
   bool isMachoABI = Subtarget.isMachoABI();
   bool isELF32_ABI  = Subtarget.isELF32_ABI();
 
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   bool isPPC64 = PtrVT == MVT::i64;
   unsigned PtrByteSize = isPPC64 ? 8 : 4;
-  
+
   MachineFunction &MF = DAG.getMachineFunction();
 
   // args_to_use will accumulate outgoing args for the PPCISD::CALL case in
   // SelectExpr to use to put the arguments in the appropriate registers.
   std::vector<SDValue> args_to_use;
-  
+
   // Mark this function as potentially containing a function that contains a
   // tail call. As a consequence the frame pointer will be used for dynamicalloc
   // and restoring the caller's stack pointer in this function's epilog. This is
@@ -2173,12 +2173,12 @@
   // Calculate by how many bytes the stack has to be adjusted in case of tail
   // call optimization.
   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
-  
+
   // Adjust the stack pointer for the new arguments...
   // These operations are automatically eliminated by the prolog/epilog pass
   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
   SDValue CallSeqStart = Chain;
-  
+
   // Load the return address and frame pointer so it can be move somewhere else
   // later.
   SDValue LROp, FPOp;
@@ -2192,14 +2192,14 @@
     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
   else
     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
-  
+
   // Figure out which arguments are going to go in registers, and which in
   // memory.  Also, if this is a vararg function, floating point operations
   // must be stored to our stack, and loaded into integer regs as well, if
   // any integer regs are available for argument passing.
   unsigned ArgOffset = PPCFrameInfo::getLinkageSize(isPPC64, isMachoABI);
   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
-  
+
   static const unsigned GPR_32[] = {           // 32-bit registers.
     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
@@ -2209,7 +2209,7 @@
     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
   };
   static const unsigned *FPR = GetFPR(Subtarget);
-  
+
   static const unsigned VR[] = {
     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
@@ -2217,7 +2217,7 @@
   const unsigned NumGPRs = array_lengthof(GPR_32);
   const unsigned NumFPRs = isMachoABI ? 13 : 8;
   const unsigned NumVRs  = array_lengthof( VR);
-  
+
   const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32;
 
   std::vector<std::pair<unsigned, SDValue> > RegsToPass;
@@ -2234,7 +2234,7 @@
     // PtrOff will be used to store the current argument to the stack if a
     // register cannot be found for it.
     SDValue PtrOff;
-    
+
     // Stack align in ELF 32
     if (isELF32_ABI && Align)
       PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
@@ -2261,7 +2261,7 @@
         // Everything else is passed left-justified.
         MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
         if (GPR_idx != NumGPRs) {
-          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 
+          SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                           NULL, 0, VT);
           MemOpChains.push_back(Load.getValue(1));
           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
@@ -2271,7 +2271,7 @@
           SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
           SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
-                                CallSeqStart.getNode()->getOperand(0), 
+                                CallSeqStart.getNode()->getOperand(0),
                                 Flags, DAG, Size, dl);
           // This must go outside the CALLSEQ_START..END.
           SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
@@ -2287,7 +2287,7 @@
       // code assumes it is there, even if it could be put entirely into
       // registers.  (This is not what the doc says.)
       SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
-                            CallSeqStart.getNode()->getOperand(0), 
+                            CallSeqStart.getNode()->getOperand(0),
                             Flags, DAG, Size, dl);
       // This must go outside the CALLSEQ_START..END.
       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
@@ -2392,7 +2392,7 @@
     case MVT::v16i8:
       if (isVarArg) {
         // These go aligned on the stack, or in the corresponding R registers
-        // when within range.  The Darwin PPC ABI doc claims they also go in 
+        // when within range.  The Darwin PPC ABI doc claims they also go in
         // V registers; in fact gcc does this only for arguments that are
         // prototyped, not for those that match the ...  We do it for all
         // arguments, seems to work.
@@ -2403,7 +2403,7 @@
         }
         // We could elide this store in the case where the object fits
         // entirely in R registers.  Maybe later.
-        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 
+        PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                             DAG.getConstant(ArgOffset, PtrVT));
         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0);
         MemOpChains.push_back(Store);
@@ -2470,16 +2470,16 @@
   if (!MemOpChains.empty())
     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         &MemOpChains[0], MemOpChains.size());
-  
+
   // Build a sequence of copy-to-reg nodes chained together with token chain
   // and flag operands which copy the outgoing args into the appropriate regs.
   SDValue InFlag;
   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
-    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 
+    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                              RegsToPass[i].second, InFlag);
     InFlag = Chain.getValue(1);
   }
- 
+
   // With the ELF 32 ABI, set CR6 to true if this is a vararg call.
   if (isVarArg && isELF32_ABI) {
     SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0);
@@ -2517,7 +2517,7 @@
 
   SmallVector<SDValue, 8> Ops;
   unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF;
-  
+
   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
   // node so that legalize doesn't hack it.
@@ -2535,7 +2535,7 @@
     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
                         2 + (InFlag.getNode() != 0));
     InFlag = Chain.getValue(1);
-    
+
     // Copy the callee address into R12/X12 on darwin.
     if (isMachoABI) {
       unsigned Reg = Callee.getValueType() == MVT::i32 ? PPC::R12 : PPC::X12;
@@ -2566,7 +2566,7 @@
   // Add argument registers to the end of the list so that they are known live
   // into the call.
   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
-    Ops.push_back(DAG.getRegister(RegsToPass[i].first, 
+    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                   RegsToPass[i].second.getValueType()));
 
   // When performing tail call optimization the callee pops its arguments off
@@ -2601,13 +2601,13 @@
   unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
   CCState CCInfo(CallerCC, isVarArg, TM, RVLocs);
   CCInfo.AnalyzeCallResult(TheCall, RetCC_PPC);
-  
+
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
     CCValAssign &VA = RVLocs[i];
     MVT VT = VA.getValVT();
     assert(VA.isRegLoc() && "Can only return in registers!");
-    Chain = DAG.getCopyFromReg(Chain, dl, 
+    Chain = DAG.getCopyFromReg(Chain, dl,
                                VA.getLocReg(), VT, InFlag).getValue(1);
     ResultVals.push_back(Chain.getValue(0));
     InFlag = Chain.getValue(2);
@@ -2616,7 +2616,7 @@
   // If the function returns void, just return the chain.
   if (RVLocs.empty())
     return Chain;
-  
+
   // Otherwise, merge everything together with a MERGE_VALUES node.
   ResultVals.push_back(Chain);
   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
@@ -2624,7 +2624,7 @@
   return Res.getValue(Op.getResNo());
 }
 
-SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG, 
+SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG,
                                       TargetMachine &TM) {
   SmallVector<CCValAssign, 16> RVLocs;
   unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
@@ -2632,7 +2632,7 @@
   DebugLoc dl = Op.getDebugLoc();
   CCState CCInfo(CC, isVarArg, TM, RVLocs);
   CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC);
-  
+
   // If this is the first return lowered for this function, add the regs to the
   // liveout set for the function.
   if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
@@ -2672,12 +2672,12 @@
   }
 
   SDValue Flag;
-  
+
   // Copy the result values into the output registers.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
     CCValAssign &VA = RVLocs[i];
     assert(VA.isRegLoc() && "Can only return in registers!");
-    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 
+    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                              Op.getOperand(i*2+1), Flag);
     Flag = Chain.getValue(1);
   }
@@ -2692,7 +2692,7 @@
                                    const PPCSubtarget &Subtarget) {
   // When we pop the dynamic allocation we need to restore the SP link.
   DebugLoc dl = Op.getDebugLoc();
-  
+
   // Get the correct type for pointers.
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
@@ -2704,13 +2704,13 @@
   // Get the operands for the STACKRESTORE.
   SDValue Chain = Op.getOperand(0);
   SDValue SaveSP = Op.getOperand(1);
-  
+
   // Load the old link SP.
   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, NULL, 0);
-  
+
   // Restore the stack pointer.
   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
-  
+
   // Store the old link SP.
   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, NULL, 0);
 }
@@ -2757,11 +2757,11 @@
   if (!FPSI) {
     // Find out what the fix offset of the frame pointer save area.
     int FPOffset = PPCFrameInfo::getFramePointerSaveOffset(IsPPC64, isMachoABI);
-    
+
     // Allocate the frame index for frame pointer save area.
-    FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset); 
+    FPSI = MF.getFrameInfo()->CreateFixedObject(IsPPC64? 8 : 4, FPOffset);
     // Save the result.
-    FI->setFramePointerSaveIndex(FPSI);                      
+    FI->setFramePointerSaveIndex(FPSI);
   }
   return DAG.getFrameIndex(FPSI, PtrVT);
 }
@@ -2772,8 +2772,8 @@
   // Get the inputs.
   SDValue Chain = Op.getOperand(0);
   SDValue Size  = Op.getOperand(1);
-  DebugLoc dl = Op.getDebugLoc(); 
- 
+  DebugLoc dl = Op.getDebugLoc();
+
   // Get the correct type for pointers.
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   // Negate the size.
@@ -2794,18 +2794,18 @@
   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
       !Op.getOperand(2).getValueType().isFloatingPoint())
     return SDValue();
-  
+
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
-  
+
   // Cannot handle SETEQ/SETNE.
   if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDValue();
-  
+
   MVT ResVT = Op.getValueType();
   MVT CmpVT = Op.getOperand(0).getValueType();
   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
   DebugLoc dl = Op.getDebugLoc();
-  
+
   // If the RHS of the comparison is a 0.0, we don't need to do the
   // subtraction at all.
   if (isFloatingPointZero(RHS))
@@ -2829,7 +2829,7 @@
       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
     }
-      
+
   SDValue Cmp;
   switch (CC) {
   default: break;       // SETUO etc aren't handled by fsel.
@@ -2901,15 +2901,15 @@
     return SDValue();
 
   if (Op.getOperand(0).getValueType() == MVT::i64) {
-    SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl, 
+    SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl,
                                MVT::f64, Op.getOperand(0));
     SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
     if (Op.getValueType() == MVT::f32)
-      FP = DAG.getNode(ISD::FP_ROUND, dl, 
+      FP = DAG.getNode(ISD::FP_ROUND, dl,
                        MVT::f32, FP, DAG.getIntPtrConstant(0));
     return FP;
   }
-  
+
   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
          "Unhandled SINT_TO_FP type in custom expander!");
   // Since we only generate this in 64-bit mode, we can take advantage of
@@ -2920,10 +2920,10 @@
   int FrameIdx = FrameInfo->CreateStackObject(8, 8);
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
-  
+
   SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
                                 Op.getOperand(0));
-  
+
   // STD the extended value into the stack slot.
   MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
                        MachineMemOperand::MOStore, 0, 8, 8);
@@ -2932,7 +2932,7 @@
                                 DAG.getMemOperand(MO));
   // Load the value as a double.
   SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0);
-  
+
   // FCFID it and return it.
   SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
   if (Op.getValueType() == MVT::f32)
@@ -3009,14 +3009,14 @@
   assert(Op.getNumOperands() == 3 &&
          VT == Op.getOperand(1).getValueType() &&
          "Unexpected SHL!");
-  
+
   // Expand into a bunch of logical ops.  Note that these ops
   // depend on the PPC behavior for oversized shift amounts.
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
   MVT AmtVT = Amt.getValueType();
-  
+
   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                              DAG.getConstant(BitWidth, AmtVT), Amt);
   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
@@ -3038,14 +3038,14 @@
   assert(Op.getNumOperands() == 3 &&
          VT == Op.getOperand(1).getValueType() &&
          "Unexpected SRL!");
-  
+
   // Expand into a bunch of logical ops.  Note that these ops
   // depend on the PPC behavior for oversized shift amounts.
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
   MVT AmtVT = Amt.getValueType();
-  
+
   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                              DAG.getConstant(BitWidth, AmtVT), Amt);
   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
@@ -3067,13 +3067,13 @@
   assert(Op.getNumOperands() == 3 &&
          VT == Op.getOperand(1).getValueType() &&
          "Unexpected SRA!");
-  
+
   // Expand into a bunch of logical ops, followed by a select_cc.
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
   MVT AmtVT = Amt.getValueType();
-  
+
   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                              DAG.getConstant(BitWidth, AmtVT), Amt);
   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
@@ -3094,7 +3094,7 @@
 //
 
 // If this is a vector of constants or undefs, get the bits.  A bit in
-// UndefBits is set if the corresponding element of the vector is an 
+// UndefBits is set if the corresponding element of the vector is an
 // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
 // zero.   Return true if this is not an array of constants, false if it is.
 //
@@ -3102,11 +3102,11 @@
                                        uint64_t UndefBits[2]) {
   // Start with zero'd results.
   VectorBits[0] = VectorBits[1] = UndefBits[0] = UndefBits[1] = 0;
-  
+
   unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits();
   for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
     SDValue OpVal = BV->getOperand(i);
-    
+
     unsigned PartNo = i >= e/2;     // In the upper 128 bits?
     unsigned SlotNo = e/2 - (i & (e/2-1))-1;  // Which subpiece of the uint64_t.
 
@@ -3125,32 +3125,32 @@
       // Nonconstant element.
       return true;
     }
-    
+
     VectorBits[PartNo] |= EltBits << (SlotNo*EltBitSize);
   }
-  
-  //printf("%llx %llx  %llx %llx\n", 
+
+  //printf("%llx %llx  %llx %llx\n",
   //       VectorBits[0], VectorBits[1], UndefBits[0], UndefBits[1]);
   return false;
 }
 
 // If this is a splat (repetition) of a value across the whole vector, return
 // the smallest size that splats it.  For example, "0x01010101010101..." is a
-// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and 
+// splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
 // SplatSize = 1 byte.
-static bool isConstantSplat(const uint64_t Bits128[2], 
+static bool isConstantSplat(const uint64_t Bits128[2],
                             const uint64_t Undef128[2],
                             unsigned &SplatBits, unsigned &SplatUndef,
                             unsigned &SplatSize) {
-  
+
   // Don't let undefs prevent splats from matching.  See if the top 64-bits are
   // the same as the lower 64-bits, ignoring undefs.
   if ((Bits128[0] & ~Undef128[1]) != (Bits128[1] & ~Undef128[0]))
     return false;  // Can't be a splat if two pieces don't match.
-  
+
   uint64_t Bits64  = Bits128[0] | Bits128[1];
   uint64_t Undef64 = Undef128[0] & Undef128[1];
-  
+
   // Check that the top 32-bits are the same as the lower 32-bits, ignoring
   // undefs.
   if ((Bits64 & (~Undef64 >> 32)) != ((Bits64 >> 32) & ~Undef64))
@@ -3167,7 +3167,7 @@
     SplatSize = 4;
     return true;
   }
-  
+
   uint16_t Bits16  = uint16_t(Bits32)  | uint16_t(Bits32 >> 16);
   uint16_t Undef16 = uint16_t(Undef32) & uint16_t(Undef32 >> 16);
 
@@ -3179,7 +3179,7 @@
     SplatSize = 2;
     return true;
   }
-  
+
   // Otherwise, we have an 8-bit splat.
   SplatBits  = uint8_t(Bits16)  | uint8_t(Bits16 >> 8);
   SplatUndef = uint8_t(Undef16) & uint8_t(Undef16 >> 8);
@@ -3198,13 +3198,13 @@
   };
 
   MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
-  
+
   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
   if (Val == -1)
     SplatSize = 1;
-  
+
   MVT CanonicalVT = VTys[SplatSize-1];
-  
+
   // Build a canonical splat for this value.
   SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType());
   SmallVector<SDValue, 8> Ops;
@@ -3256,28 +3256,28 @@
 // selects to a single instruction, return Op.  Otherwise, if we can codegen
 // this case more efficiently than a constant pool load, lower it to the
 // sequence of ops that should be used.
-SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 
+SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                SelectionDAG &DAG) {
   // If this is a vector of constants or undefs, get the bits.  A bit in
-  // UndefBits is set if the corresponding element of the vector is an 
+  // UndefBits is set if the corresponding element of the vector is an
   // ISD::UNDEF value.  For undefs, the corresponding VectorBits values are
-  // zero. 
+  // zero.
   uint64_t VectorBits[2];
   uint64_t UndefBits[2];
   DebugLoc dl = Op.getDebugLoc();
   if (GetConstantBuildVectorBits(Op.getNode(), VectorBits, UndefBits))
     return SDValue();   // Not a constant vector.
-  
+
   // If this is a splat (repetition) of a value across the whole vector, return
   // the smallest size that splats it.  For example, "0x01010101010101..." is a
-  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and 
+  // splat of 0x01, 0x0101, and 0x01010101.  We return SplatBits = 0x01 and
   // SplatSize = 1 byte.
   unsigned SplatBits, SplatUndef, SplatSize;
   if (isConstantSplat(VectorBits, UndefBits, SplatBits, SplatUndef, SplatSize)){
     bool HasAnyUndefs = (UndefBits[0] | UndefBits[1]) != 0;
-    
+
     // First, handle single instruction cases.
-    
+
     // All zeros?
     if (SplatBits == 0) {
       // Canonicalize all zero vectors to be v4i32.
@@ -3293,10 +3293,10 @@
     int32_t SextVal= int32_t(SplatBits << (32-8*SplatSize)) >> (32-8*SplatSize);
     if (SextVal >= -16 && SextVal <= 15)
       return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
-    
-    
+
+
     // Two instruction sequences.
-    
+
     // If this value is in the range [-32,30] and is even, use:
     //    tmp = VSPLTI[bhw], result = add tmp, tmp
     if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
@@ -3304,18 +3304,18 @@
       Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
       return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
     }
-    
-    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is 
+
+    // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
     // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
     // for fneg/fabs.
     if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
       // Make -1 and vspltisw -1:
       SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
-      
+
       // Make the VSLW intrinsic, computing 0x8000_0000.
-      SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, 
+      SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                        OnesV, DAG, dl);
-      
+
       // xor by OnesV to invert it.
       Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
       return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
@@ -3327,16 +3327,16 @@
       -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
       -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
     };
-    
+
     for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
       // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
       // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
       int i = SplatCsts[idx];
-      
+
       // Figure out what shift amount will be used by altivec if shifted by i in
       // this splat size.
       unsigned TypeShiftAmt = i & (SplatBitSize-1);
-      
+
       // vsplti + shl self.
       if (SextVal == (i << (int)TypeShiftAmt)) {
         SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
@@ -3347,7 +3347,7 @@
         Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
         return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
       }
-      
+
       // vsplti + srl self.
       if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
         SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
@@ -3358,7 +3358,7 @@
         Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
         return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
       }
-      
+
       // vsplti + sra self.
       if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
         SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
@@ -3369,7 +3369,7 @@
         Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
         return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
       }
-      
+
       // vsplti + rol self.
       if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                            ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
@@ -3398,9 +3398,9 @@
         return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
       }
     }
-    
+
     // Three instruction sequences.
-    
+
     // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
     if (SextVal >= 0 && SextVal <= 31) {
       SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
@@ -3416,19 +3416,19 @@
       return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
     }
   }
-    
+
   return SDValue();
 }
 
 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
 /// the specified operations to build the shuffle.
 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
-                                      SDValue RHS, SelectionDAG &DAG, 
+                                      SDValue RHS, SelectionDAG &DAG,
                                       DebugLoc dl) {
   unsigned OpNum = (PFEntry >> 26) & 0x0F;
   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
-  
+
   enum {
     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
     OP_VMRGHW,
@@ -3441,17 +3441,17 @@
     OP_VSLDOI8,
     OP_VSLDOI12
   };
-  
+
   if (OpNum == OP_COPY) {
     if (LHSID == (1*9+2)*9+3) return LHS;
     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
     return RHS;
   }
-  
+
   SDValue OpLHS, OpRHS;
   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
-  
+
   unsigned ShufIdxs[16];
   switch (OpNum) {
   default: assert(0 && "Unknown i32 permute!");
@@ -3493,8 +3493,8 @@
   SDValue Ops[16];
   for (unsigned i = 0; i != 16; ++i)
     Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8);
-  
-  return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(), 
+
+  return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(),
                      OpLHS, OpRHS,
                      DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
 }
@@ -3503,13 +3503,13 @@
 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
 /// return the code it can be lowered into.  Worst case, it can always be
 /// lowered into a vperm.
-SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 
+SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                                  SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
   SDValue V1 = Op.getOperand(0);
   SDValue V2 = Op.getOperand(1);
   SDValue PermMask = Op.getOperand(2);
-  
+
   // Cases that are handled by instructions that take permute immediates
   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
   // selected by the instruction selector.
@@ -3529,7 +3529,7 @@
       return Op;
     }
   }
-  
+
   // Altivec has a variety of "shuffle immediates" that take two vector inputs
   // and produce a fixed permutation.  If any of these match, do not lower to
   // VPERM.
@@ -3543,7 +3543,7 @@
       PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, false) ||
       PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, false))
     return Op;
-  
+
   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
   // perfect shuffle table to emit an optimal matching sequence.
   unsigned PFIndexes[4];
@@ -3553,14 +3553,14 @@
     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
       if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
         continue;   // Undef, ignore it.
-      
-      unsigned ByteSource = 
+
+      unsigned ByteSource =
         cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getZExtValue();
       if ((ByteSource & 3) != j) {
         isFourElementShuffle = false;
         break;
       }
-      
+
       if (EltNo == 8) {
         EltNo = ByteSource/4;
       } else if (EltNo != ByteSource/4) {
@@ -3570,18 +3570,18 @@
     }
     PFIndexes[i] = EltNo;
   }
-    
-  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 
+
+  // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
   // perfect shuffle vector to determine if it is cost effective to do this as
   // discrete instructions, or whether we should use a vperm.
   if (isFourElementShuffle) {
     // Compute the index in the perfect shuffle table.
-    unsigned PFTableIndex = 
+    unsigned PFTableIndex =
       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
-    
+
     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
     unsigned Cost  = (PFEntry >> 30);
-    
+
     // Determining when to avoid vperm is tricky.  Many things affect the cost
     // of vperm, particularly how many times the perm mask needs to be computed.
     // For example, if the perm mask can be hoisted out of a loop or is already
@@ -3590,35 +3590,35 @@
     // the loop requires an extra register.
     //
     // As a compromise, we only emit discrete instructions if the shuffle can be
-    // generated in 3 or fewer operations.  When we have loop information 
+    // generated in 3 or fewer operations.  When we have loop information
     // available, if this block is within a loop, we should avoid using vperm
     // for 3-operation perms and use a constant pool load instead.
-    if (Cost < 3) 
+    if (Cost < 3)
       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
   }
-  
+
   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
   // vector that will get spilled to the constant pool.
   if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
-  
+
   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
   // that it is in input element units, not in bytes.  Convert now.
   MVT EltVT = V1.getValueType().getVectorElementType();
   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
-  
+
   SmallVector<SDValue, 16> ResultMask;
   for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
     unsigned SrcElt;
     if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
       SrcElt = 0;
-    else 
+    else
       SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getZExtValue();
-    
+
     for (unsigned j = 0; j != BytesPerElement; ++j)
       ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
                                            MVT::i8));
   }
-  
+
   SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
                                     &ResultMask[0], ResultMask.size());
   return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
@@ -3649,7 +3649,7 @@
   case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
   case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
   case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
-    
+
     // Normal Comparisons.
   case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
   case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
@@ -3670,7 +3670,7 @@
 
 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
 /// lower, do it, otherwise return null.
-SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 
+SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) {
   // If this is a lowered altivec predicate compare, CompareOpc is set to the
   // opcode number of the comparison.
@@ -3679,7 +3679,7 @@
   bool isDot;
   if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
     return SDValue();    // Don't custom lower most intrinsics.
-  
+
   // If this is a non-dot comparison, make the VCMP node and we are done.
   if (!isDot) {
     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
@@ -3687,7 +3687,7 @@
                                 DAG.getConstant(CompareOpc, MVT::i32));
     return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
   }
-  
+
   // Create the PPCISD altivec 'dot' comparison node.
   SDValue Ops[] = {
     Op.getOperand(2),  // LHS
@@ -3698,13 +3698,13 @@
   VTs.push_back(Op.getOperand(2).getValueType());
   VTs.push_back(MVT::Flag);
   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
-  
+
   // Now that we have the comparison, emit a copy from the CR to a GPR.
   // This is flagged to the above dot comparison.
   SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
                                 DAG.getRegister(PPC::CR6, MVT::i32),
-                                CompNode.getValue(1)); 
-  
+                                CompNode.getValue(1));
+
   // Unpack the result based on how the target uses it.
   unsigned BitNo;   // Bit # of CR6.
   bool InvertBit;   // Invert result?
@@ -3723,14 +3723,14 @@
     BitNo = 2; InvertBit = true;
     break;
   }
-  
+
   // Shift the bit into the low position.
   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
                       DAG.getConstant(8-(3-BitNo), MVT::i32));
   // Isolate the bit.
   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
                       DAG.getConstant(1, MVT::i32));
-  
+
   // If we are supposed to, toggle the bit.
   if (InvertBit)
     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
@@ -3738,7 +3738,7 @@
   return Flags;
 }
 
-SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 
+SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                    SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
   // Create a stack slot that is 16-byte aligned.
@@ -3746,7 +3746,7 @@
   int FrameIdx = FrameInfo->CreateStackObject(16, 16);
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
-  
+
   // Store the input value into Value#0 of the stack slot.
   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
                                  Op.getOperand(0), FIdx, NULL, 0);
@@ -3758,49 +3758,49 @@
   DebugLoc dl = Op.getDebugLoc();
   if (Op.getValueType() == MVT::v4i32) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
-    
+
     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
-    
+
     SDValue RHSSwap =   // = vrlw RHS, 16
       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
-    
+
     // Shrinkify inputs to v8i16.
     LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
     RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
     RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);
-    
+
     // Low parts multiplied together, generating 32-bit results (we ignore the
     // top parts).
     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
                                         LHS, RHS, DAG, dl, MVT::v4i32);
-    
+
     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
     // Shift the high parts up 16 bits.
-    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 
+    HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                               Neg16, DAG, dl);
     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
   } else if (Op.getValueType() == MVT::v8i16) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
-    
+
     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
 
     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                             LHS, RHS, Zero, DAG, dl);
   } else if (Op.getValueType() == MVT::v16i8) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
-    
+
     // Multiply the even 8-bit parts, producing 16-bit sums.
     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
                                            LHS, RHS, DAG, dl, MVT::v8i16);
     EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);
-    
+
     // Multiply the odd 8-bit parts, producing 16-bit sums.
     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
                                           LHS, RHS, DAG, dl, MVT::v8i16);
     OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
-    
+
     // Merge the results together.
     SDValue Ops[16];
     for (unsigned i = 0; i != 8; ++i) {
@@ -3819,23 +3819,23 @@
 ///
 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
   switch (Op.getOpcode()) {
-  default: assert(0 && "Wasn't expecting to be able to lower this!"); 
+  default: assert(0 && "Wasn't expecting to be able to lower this!");
   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
   case ISD::SETCC:              return LowerSETCC(Op, DAG);
   case ISD::TRAMPOLINE:         return LowerTRAMPOLINE(Op, DAG);
-  case ISD::VASTART:            
+  case ISD::VASTART:
     return LowerVASTART(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
                         VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
-  
-  case ISD::VAARG:            
+
+  case ISD::VAARG:
     return LowerVAARG(Op, DAG, VarArgsFrameIndex, VarArgsStackOffset,
                       VarArgsNumGPR, VarArgsNumFPR, PPCSubTarget);
 
   case ISD::FORMAL_ARGUMENTS:
-    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex, 
+    return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex,
                                  VarArgsStackOffset, VarArgsNumGPR,
                                  VarArgsNumFPR, PPCSubTarget);
 
@@ -3863,7 +3863,7 @@
   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
   case ISD::MUL:                return LowerMUL(Op, DAG);
-  
+
   // Frame & Return address.
   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
@@ -3882,7 +3882,7 @@
   case ISD::FP_ROUND_INREG: {
     assert(N->getValueType(0) == MVT::ppcf128);
     assert(N->getOperand(0).getValueType() == MVT::ppcf128);
-    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 
+    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                              MVT::f64, N->getOperand(0),
                              DAG.getIntPtrConstant(0));
     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
@@ -3936,7 +3936,7 @@
 
     // We know the low half is about to be thrown away, so just use something
     // convenient.
-    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128, 
+    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
                                 FPreg, FPreg));
     return;
   }
@@ -3999,7 +3999,7 @@
   BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
   BuildMI(BB, dl, TII->get(PPC::BCC))
-    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);    
+    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
   BB->addSuccessor(loopMBB);
   BB->addSuccessor(exitMBB);
 
@@ -4010,7 +4010,7 @@
 }
 
 MachineBasicBlock *
-PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI, 
+PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
                                             MachineBasicBlock *BB,
                                             bool is8bit,    // operation
                                             unsigned BinOpcode) const {
@@ -4040,7 +4040,7 @@
   exitMBB->transferSuccessors(BB);
 
   MachineRegisterInfo &RegInfo = F->getRegInfo();
-  const TargetRegisterClass *RC = 
+  const TargetRegisterClass *RC =
     is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
               (const TargetRegisterClass *) &PPC::GPRCRegClass;
   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
@@ -4125,7 +4125,7 @@
   BuildMI(BB, dl, TII->get(PPC::STWCX))
     .addReg(Tmp4Reg).addReg(PPC::R0).addReg(PtrReg);
   BuildMI(BB, dl, TII->get(PPC::BCC))
-    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);    
+    .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
   BB->addSuccessor(loopMBB);
   BB->addSuccessor(exitMBB);
 
@@ -4180,15 +4180,15 @@
     // Next, add the true and fallthrough blocks as its successors.
     BB->addSuccessor(copy0MBB);
     BB->addSuccessor(sinkMBB);
-    
+
     //  copy0MBB:
     //   %FalseValue = ...
     //   # fallthrough to sinkMBB
     BB = copy0MBB;
-    
+
     // Update machine-CFG edges
     BB->addSuccessor(sinkMBB);
-    
+
     //  sinkMBB:
     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
     //  ...
@@ -4315,7 +4315,7 @@
     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
     BB->addSuccessor(loop1MBB);
     BB->addSuccessor(exitMBB);
-    
+
     BB = midMBB;
     BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
       .addReg(dest).addReg(ptrA).addReg(ptrB);
@@ -4350,7 +4350,7 @@
     exitMBB->transferSuccessors(BB);
 
     MachineRegisterInfo &RegInfo = F->getRegInfo();
-    const TargetRegisterClass *RC = 
+    const TargetRegisterClass *RC =
       is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
                 (const TargetRegisterClass *) &PPC::GPRCRegClass;
     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
@@ -4459,7 +4459,7 @@
     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
     BB->addSuccessor(loop1MBB);
     BB->addSuccessor(exitMBB);
-    
+
     BB = midMBB;
     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
       .addReg(PPC::R0).addReg(PtrReg);
@@ -4507,7 +4507,7 @@
         return N->getOperand(0);
     }
     break;
-    
+
   case ISD::SINT_TO_FP:
     if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
       if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
@@ -4521,13 +4521,13 @@
             Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
             DCI.AddToWorklist(Val.getNode());
           }
-            
+
           Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
           DCI.AddToWorklist(Val.getNode());
           Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
           DCI.AddToWorklist(Val.getNode());
           if (N->getValueType(0) == MVT::f32) {
-            Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val, 
+            Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
                               DAG.getIntPtrConstant(0));
             DCI.AddToWorklist(Val.getNode());
           }
@@ -4559,7 +4559,7 @@
       DCI.AddToWorklist(Val.getNode());
       return Val;
     }
-    
+
     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
     if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
         N->getOperand(1).getNode()->hasOneUse() &&
@@ -4595,11 +4595,11 @@
       };
       SDValue BSLoad = DAG.getNode(PPCISD::LBRX, dl, VTs, Ops, 4);
 
-      // If this is an i16 load, insert the truncate.  
+      // If this is an i16 load, insert the truncate.
       SDValue ResVal = BSLoad;
       if (N->getValueType(0) == MVT::i16)
         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
-      
+
       // First, combine the bswap away.  This makes the value produced by the
       // load dead.
       DCI.CombineTo(N, ResVal);
@@ -4607,11 +4607,11 @@
       // Next, combine the load away, we give it a bogus result value but a real
       // chain result.  The result value is dead because the bswap is dead.
       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
-      
+
       // Return N so it doesn't get rechecked!
       return SDValue(N, 0);
     }
-    
+
     break;
   case PPCISD::VCMP: {
     // If a VCMPo node already exists with exactly the same operands as this
@@ -4621,10 +4621,10 @@
     if (!N->getOperand(0).hasOneUse() &&
         !N->getOperand(1).hasOneUse() &&
         !N->getOperand(2).hasOneUse()) {
-      
+
       // Scan all of the users of the LHS, looking for VCMPo's that match.
       SDNode *VCMPoNode = 0;
-      
+
       SDNode *LHSN = N->getOperand(0).getNode();
       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
            UI != E; ++UI)
@@ -4635,17 +4635,17 @@
           VCMPoNode = *UI;
           break;
         }
-      
+
       // If there is no VCMPo node, or if the flag value has a single use, don't
       // transform this.
       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
         break;
-        
-      // Look at the (necessarily single) use of the flag value.  If it has a 
+
+      // Look at the (necessarily single) use of the flag value.  If it has a
       // chain, this transformation is more complex.  Note that multiple things
       // could use the value result, which we should ignore.
       SDNode *FlagUser = 0;
-      for (SDNode::use_iterator UI = VCMPoNode->use_begin(); 
+      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
            FlagUser == 0; ++UI) {
         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
         SDNode *User = *UI;
@@ -4656,7 +4656,7 @@
           }
         }
       }
-      
+
       // If the user is a MFCR instruction, we know this is safe.  Otherwise we
       // give up for right now.
       if (FlagUser->getOpcode() == PPCISD::MFCR)
@@ -4673,12 +4673,12 @@
     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
     int CompareOpc;
     bool isDot;
-    
+
     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
         getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
       assert(isDot && "Can't compare against a vector result!");
-      
+
       // If this is a comparison against something other than 0/1, then we know
       // that the condition is never/always true.
       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
@@ -4689,9 +4689,9 @@
         return DAG.getNode(ISD::BR, dl, MVT::Other,
                            N->getOperand(0), N->getOperand(4));
       }
-    
+
       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
-      
+
       // Create the PPCISD altivec 'dot' comparison node.
       std::vector<MVT> VTs;
       SDValue Ops[] = {
@@ -4702,7 +4702,7 @@
       VTs.push_back(LHS.getOperand(2).getValueType());
       VTs.push_back(MVT::Flag);
       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
-      
+
       // Unpack the result based on how the target uses it.
       PPC::Predicate CompOpc;
       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
@@ -4729,7 +4729,7 @@
     break;
   }
   }
-  
+
   return SDValue();
 }
 
@@ -4739,7 +4739,7 @@
 
 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                        const APInt &Mask,
-                                                       APInt &KnownZero, 
+                                                       APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
@@ -4770,7 +4770,7 @@
     case Intrinsic::ppc_altivec_vcmpgtuw_p:
       KnownZero = ~1U;  // All bits but the low one are known to be zero.
       break;
-    }        
+    }
   }
   }
 }
@@ -4778,7 +4778,7 @@
 
 /// getConstraintType - Given a constraint, return the type of
 /// constraint it is for this target.
-PPCTargetLowering::ConstraintType 
+PPCTargetLowering::ConstraintType
 PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
   if (Constraint.size() == 1) {
     switch (Constraint[0]) {
@@ -4794,7 +4794,7 @@
   return TargetLowering::getConstraintType(Constraint);
 }
 
-std::pair<unsigned, const TargetRegisterClass*> 
+std::pair<unsigned, const TargetRegisterClass*>
 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                 MVT VT) const {
   if (Constraint.size() == 1) {
@@ -4811,13 +4811,13 @@
       else if (VT == MVT::f64)
         return std::make_pair(0U, PPC::F8RCRegisterClass);
       break;
-    case 'v': 
+    case 'v':
       return std::make_pair(0U, PPC::VRRCRegisterClass);
     case 'y':   // crrc
       return std::make_pair(0U, PPC::CRRCRegisterClass);
     }
   }
-  
+
   return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
 }
 
@@ -4867,7 +4867,7 @@
       if ((int)Value > 0 && isPowerOf2_32(Value))
         Result = DAG.getTargetConstant(Value, Op.getValueType());
       break;
-    case 'O':  // "O" is the constant zero. 
+    case 'O':  // "O" is the constant zero.
       if (Value == 0)
         Result = DAG.getTargetConstant(Value, Op.getValueType());
       break;
@@ -4879,31 +4879,31 @@
     break;
   }
   }
-  
+
   if (Result.getNode()) {
     Ops.push_back(Result);
     return;
   }
-  
+
   // Handle standard constraint letters.
   TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
 }
 
 // isLegalAddressingMode - Return true if the addressing mode represented
 // by AM is legal for this target, for a load/store of the specified type.
-bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM, 
+bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                               const Type *Ty) const {
   // FIXME: PPC does not allow r+i addressing modes for vectors!
-  
+
   // PPC allows a sign-extended 16-bit immediate field.
   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
     return false;
-  
+
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
-  
-  // PPC only support r+r, 
+
+  // PPC only support r+r,
   switch (AM.Scale) {
   case 0:  // "r+i" or just "i", depending on HasBaseReg.
     break;
@@ -4921,7 +4921,7 @@
     // No other scales are supported.
     return false;
   }
-  
+
   return true;
 }
 
@@ -4934,12 +4934,12 @@
 }
 
 bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const {
-  return false; 
+  return false;
 }
 
 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
-  // Depths > 0 not supported yet! 
+  // Depths > 0 not supported yet!
   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
     return SDValue();
 
@@ -4952,22 +4952,22 @@
   // Make sure the function really does not optimize away the store of the RA
   // to the stack.
   FuncInfo->setLRStoreRequired();
-  return DAG.getLoad(getPointerTy(), dl, 
+  return DAG.getLoad(getPointerTy(), dl,
                      DAG.getEntryNode(), RetAddrFI, NULL, 0);
 }
 
 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
-  // Depths > 0 not supported yet! 
+  // Depths > 0 not supported yet!
   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
     return SDValue();
-  
+
   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   bool isPPC64 = PtrVT == MVT::i64;
-  
+
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects()) 
+  bool is31 = (NoFramePointerElim || MFI->hasVarSizedObjects())
                   && MFI->getStackSize();
 
   if (isPPC64)
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 6bcaade..85a6c0b 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -80,7 +80,7 @@
     setUseUnderscoreSetJmp(true);
     setUseUnderscoreLongJmp(true);
   }
-  
+
   // Set up the register classes.
   addRegisterClass(MVT::i8, X86::GR8RegisterClass);
   addRegisterClass(MVT::i16, X86::GR16RegisterClass);
@@ -90,7 +90,7 @@
 
   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
-  // We don't accept any truncstore of integer registers.  
+  // We don't accept any truncstore of integer registers.
   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
   setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
@@ -234,7 +234,7 @@
   setOperationAction(ISD::FREM             , MVT::f64  , Expand);
   setOperationAction(ISD::FREM             , MVT::f80  , Expand);
   setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);
-  
+
   setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
   setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
   setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
@@ -448,7 +448,7 @@
     if (Fast) {
       setConvertAction(MVT::f32, MVT::f64, Expand);
       setConvertAction(MVT::f32, MVT::f80, Expand);
-      setConvertAction(MVT::f80, MVT::f32, Expand);    
+      setConvertAction(MVT::f80, MVT::f32, Expand);
       setConvertAction(MVT::f64, MVT::f32, Expand);
       // And x87->x87 truncations also.
       setConvertAction(MVT::f80, MVT::f64, Expand);
@@ -473,7 +473,7 @@
     // this though and handle it in InstructionSelectPreprocess so that
     // dagcombine2 can hack on these.
     if (Fast) {
-      setConvertAction(MVT::f80, MVT::f32, Expand);    
+      setConvertAction(MVT::f80, MVT::f32, Expand);
       setConvertAction(MVT::f64, MVT::f32, Expand);
       setConvertAction(MVT::f80, MVT::f64, Expand);
     }
@@ -512,7 +512,7 @@
       TmpFlt2.changeSign();
       addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
     }
-    
+
     if (!UnsafeFPMath) {
       setOperationAction(ISD::FSIN           , MVT::f80  , Expand);
       setOperationAction(ISD::FCOS           , MVT::f80  , Expand);
@@ -761,7 +761,7 @@
     setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
     setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
     setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
-    
+
   }
 
   if (Subtarget->hasSSE41()) {
@@ -791,7 +791,7 @@
   if (Subtarget->hasSSE42()) {
     setOperationAction(ISD::VSETCC,             MVT::v2i64, Custom);
   }
-  
+
   // We want to custom lower some of our intrinsics.
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
 
@@ -926,13 +926,13 @@
 SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
   assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
-  
+
   SmallVector<CCValAssign, 16> RVLocs;
   unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
   bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
   CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
   CCInfo.AnalyzeReturn(Op.getNode(), RetCC_X86);
-    
+
   // If this is the first return lowered for this function, add the regs to the
   // liveout set for the function.
   if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
@@ -941,7 +941,7 @@
         DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
   }
   SDValue Chain = Op.getOperand(0);
-  
+
   // Handle tail call return.
   Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
   if (Chain.getOpcode() == X86ISD::TAILCALL) {
@@ -952,7 +952,7 @@
                (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::EAX ||
                 cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
               TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
-              TargetAddress.getOpcode() == ISD::TargetGlobalAddress) && 
+              TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
              "Expecting an global address, external symbol, or register");
     assert(StackAdjustment.getOpcode() == ISD::Constant &&
            "Expecting a const value");
@@ -966,10 +966,10 @@
     for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
       Operands.push_back(Chain.getOperand(i));
     }
-    return DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, &Operands[0], 
+    return DAG.getNode(X86ISD::TC_RETURN, dl, MVT::Other, &Operands[0],
                        Operands.size());
   }
-  
+
   // Regular return.
   SDValue Flag;
 
@@ -977,13 +977,13 @@
   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
   // Operand #1 = Bytes To Pop
   RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
-  
+
   // Copy the result values into the output registers.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
     CCValAssign &VA = RVLocs[i];
     assert(VA.isRegLoc() && "Can only return in registers!");
     SDValue ValToCopy = Op.getOperand(i*2+1);
-    
+
     // Returns in ST0/ST1 are handled specially: these are pushed as operands to
     // the RET instruction and handled by the FP Stackifier.
     if (VA.getLocReg() == X86::ST0 ||
@@ -1019,14 +1019,14 @@
     Chain = DAG.getCopyToReg(Chain, dl, X86::RAX, Val, Flag);
     Flag = Chain.getValue(1);
   }
-  
+
   RetOps[0] = Chain;  // Update chain.
 
   // Add the flag if we have it.
   if (Flag.getNode())
     RetOps.push_back(Flag);
-  
-  return DAG.getNode(X86ISD::RET_FLAG, dl, 
+
+  return DAG.getNode(X86ISD::RET_FLAG, dl,
                      MVT::Other, &RetOps[0], RetOps.size());
 }
 
@@ -1037,10 +1037,10 @@
 /// being lowered.  The returns a SDNode with the same number of values as the
 /// ISD::CALL.
 SDNode *X86TargetLowering::
-LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall, 
+LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
                 unsigned CallingConv, SelectionDAG &DAG) {
 
-  DebugLoc dl = TheCall->getDebugLoc();  
+  DebugLoc dl = TheCall->getDebugLoc();
   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RVLocs;
   bool isVarArg = TheCall->isVarArg();
@@ -1049,14 +1049,14 @@
   CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
 
   SmallVector<SDValue, 8> ResultVals;
-  
+
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
     CCValAssign &VA = RVLocs[i];
     MVT CopyVT = VA.getValVT();
-  
+
     // If this is x86-64, and we disabled SSE, we can't return FP values
-    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && 
+    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
         ((Is64Bit || TheCall->isInreg()) && !Subtarget->hasSSE1())) {
       cerr << "SSE register return with SSE disabled\n";
       exit(1);
@@ -1070,7 +1070,7 @@
         isScalarFPTypeInSSEReg(VA.getValVT())) {
       CopyVT = MVT::f80;
     }
-    
+
     Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
                                CopyVT, InFlag).getValue(1);
     SDValue Val = Chain.getValue(0);
@@ -1083,7 +1083,7 @@
                         // This truncation won't change the value.
                         DAG.getIntPtrConstant(1));
     }
-    
+
     ResultVals.push_back(Val);
   }
 
@@ -1197,9 +1197,9 @@
 
 /// CallRequiresFnAddressInReg - Check whether the call requires the function
 /// address to be loaded in a register.
-bool 
+bool
 X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
-  return !Is64Bit && IsTailCall &&  
+  return !Is64Bit && IsTailCall &&
     getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
     Subtarget->isPICStyleGOT();
 }
@@ -1208,7 +1208,7 @@
 /// by "Src" to address "Dst" with size and alignment information specified by
 /// the specific parameter attribute. The copy will be passed as a byval
 /// function parameter.
-static SDValue 
+static SDValue
 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                           DebugLoc dl) {
@@ -1229,7 +1229,7 @@
   bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
 
   // FIXME: For now, all byval parameter objects are marked mutable. This can be
-  // changed with more analysis.  
+  // changed with more analysis.
   // In case of tail call optimization mark all arguments mutable. Since they
   // could be overwritten by lowering of arguments in case of a tail call.
   int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
@@ -1246,7 +1246,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
   DebugLoc dl = Op.getDebugLoc();
-  
+
   const Function* Fn = MF.getFunction();
   if (Fn->hasExternalLinkage() &&
       Subtarget->isTargetCygMing() &&
@@ -1255,7 +1255,7 @@
 
   // Decorate the function name.
   FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));
-  
+
   MachineFrameInfo *MFI = MF.getFrameInfo();
   SDValue Root = Op.getOperand(0);
   bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
@@ -1270,7 +1270,7 @@
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
   CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC));
-  
+
   SmallVector<SDValue, 8> ArgValues;
   unsigned LastVal = ~0U;
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
@@ -1280,7 +1280,7 @@
     assert(VA.getValNo() != LastVal &&
            "Don't support value assigned to multiple locs yet");
     LastVal = VA.getValNo();
-    
+
     if (VA.isRegLoc()) {
       MVT RegVT = VA.getLocVT();
       TargetRegisterClass *RC = NULL;
@@ -1315,7 +1315,7 @@
 
       unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
       SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
-      
+
       // If this is an 8 or 16-bit value, it is really passed promoted to 32
       // bits.  Insert an assert[sz]ext to capture this, then truncate to the
       // right size.
@@ -1325,10 +1325,10 @@
       else if (VA.getLocInfo() == CCValAssign::ZExt)
         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                                DAG.getValueType(VA.getValVT()));
-      
+
       if (VA.getLocInfo() != CCValAssign::Full)
         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
-      
+
       // Handle MMX values passed in GPRs.
       if (Is64Bit && RegVT != VA.getLocVT()) {
         if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
@@ -1339,7 +1339,7 @@
           ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), ArgValue);
         }
       }
-      
+
       ArgValues.push_back(ArgValue);
     } else {
       assert(VA.isMemLoc());
@@ -1459,7 +1459,7 @@
                              &MemOps[0], MemOps.size());
     }
   }
-  
+
   ArgValues.push_back(Root);
 
   // Some CCs need callee pop.
@@ -1470,7 +1470,7 @@
     BytesToPopOnReturn  = 0; // Callee pops nothing.
     // If this is an sret function, the return should pop the hidden pointer.
     if (!Is64Bit && CC != CallingConv::Fast && ArgsAreStructReturn(Op))
-      BytesToPopOnReturn = 4;  
+      BytesToPopOnReturn = 4;
     BytesCallerReserves = StackSize;
   }
 
@@ -1506,12 +1506,12 @@
 
 /// EmitTailCallLoadRetAddr - Emit a load of return address if tail call
 /// optimization is performed and it is required.
-SDValue 
-X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, 
+SDValue
+X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                            SDValue &OutRetAddr,
-                                           SDValue Chain, 
-                                           bool IsTailCall, 
-                                           bool Is64Bit, 
+                                           SDValue Chain,
+                                           bool IsTailCall,
+                                           bool Is64Bit,
                                            int FPDiff,
                                            DebugLoc dl) {
   if (!IsTailCall || FPDiff==0) return Chain;
@@ -1527,19 +1527,19 @@
 
 /// EmitTailCallStoreRetAddr - Emit a store of the return adress if tail call
 /// optimization is performed and it is required (FPDiff!=0).
-static SDValue 
-EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, 
+static SDValue
+EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
                          SDValue Chain, SDValue RetAddrFrIdx,
                          bool Is64Bit, int FPDiff, DebugLoc dl) {
   // Store the return address to the appropriate stack slot.
   if (!FPDiff) return Chain;
   // Calculate the new stack slot for the return address.
   int SlotSize = Is64Bit ? 8 : 4;
-  int NewReturnAddrFI = 
+  int NewReturnAddrFI =
     MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
   MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
   SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
-  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, 
+  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                        PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0);
   return Chain;
 }
@@ -1564,7 +1564,7 @@
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
   CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC));
-  
+
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getNextStackOffset();
   if (PerformTailCallOpt && CC == CallingConv::Fast)
@@ -1573,7 +1573,7 @@
   int FPDiff = 0;
   if (IsTailCall) {
     // Lower arguments at fp - stackoffset + fpdiff.
-    unsigned NumBytesCallerPushed = 
+    unsigned NumBytesCallerPushed =
       MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
     FPDiff = NumBytesCallerPushed - NumBytes;
 
@@ -1601,7 +1601,7 @@
     SDValue Arg = TheCall->getArg(i);
     ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
     bool isByVal = Flags.isByVal();
-  
+
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
     default: assert(0 && "Unknown loc info!");
@@ -1616,7 +1616,7 @@
       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
       break;
     }
-    
+
     if (VA.isRegLoc()) {
       if (Is64Bit) {
         MVT RegVT = VA.getLocVT();
@@ -1648,13 +1648,13 @@
         assert(VA.isMemLoc());
         if (StackPtr.getNode() == 0)
           StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy());
-        
+
         MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
                                                Chain, Arg, Flags));
       }
     }
   }
-  
+
   if (!MemOpChains.empty())
     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         &MemOpChains[0], MemOpChains.size());
@@ -1666,17 +1666,17 @@
   // tail call optimization the copies to registers are lowered later.
   if (!IsTailCall)
     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
-      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 
+      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                                RegsToPass[i].second, InFlag);
       InFlag = Chain.getValue(1);
     }
 
   // ELF / PIC requires GOT in the EBX register before function calls via PLT
-  // GOT pointer.  
+  // GOT pointer.
   if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
     Chain = DAG.getCopyToReg(Chain, dl, X86::EBX,
-                             DAG.getNode(X86ISD::GlobalBaseReg, 
-                                         DebugLoc::getUnknownLoc(), 
+                             DAG.getNode(X86ISD::GlobalBaseReg,
+                                         DebugLoc::getUnknownLoc(),
                                          getPointerTy()),
                              InFlag);
     InFlag = Chain.getValue(1);
@@ -1713,9 +1713,9 @@
       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
     };
     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
-    assert((Subtarget->hasSSE1() || !NumXMMRegs) 
+    assert((Subtarget->hasSSE1() || !NumXMMRegs)
            && "SSE registers cannot be used when SSE is disabled");
-    
+
     Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
                              DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
     InFlag = Chain.getValue(1);
@@ -1745,7 +1745,7 @@
           // Copy relative to framepointer.
           SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
           if (StackPtr.getNode() == 0)
-            StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, 
+            StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr,
                                           getPointerTy());
           Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
 
@@ -1756,7 +1756,7 @@
           MemOpChains2.push_back(
             DAG.getStore(Chain, dl, Arg, FIN,
                          PseudoSourceValue::getFixedStack(FI), 0));
-        }            
+        }
       }
     }
 
@@ -1766,7 +1766,7 @@
 
     // Copy arguments to their registers.
     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
-      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 
+      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                                RegsToPass[i].second, InFlag);
       InFlag = Chain.getValue(1);
     }
@@ -1792,13 +1792,13 @@
     unsigned Opc = Is64Bit ? X86::R9 : X86::EAX;
 
     Chain = DAG.getCopyToReg(Chain,  dl,
-                             DAG.getRegister(Opc, getPointerTy()), 
+                             DAG.getRegister(Opc, getPointerTy()),
                              Callee,InFlag);
     Callee = DAG.getRegister(Opc, getPointerTy());
     // Add register as live out.
     DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
   }
- 
+
   // Returns a chain & a flag for retval copy to use.
   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
   SmallVector<SDValue, 8> Ops;
@@ -1807,12 +1807,12 @@
     Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                            DAG.getIntPtrConstant(0, true), InFlag);
     InFlag = Chain.getValue(1);
- 
+
     // Returns a chain & a flag for retval copy to use.
     NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
     Ops.clear();
   }
-  
+
   Ops.push_back(Chain);
   Ops.push_back(Callee);
 
@@ -1824,7 +1824,7 @@
   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                   RegsToPass[i].second.getValueType()));
-  
+
   // Add an implicit use GOT pointer in EBX.
   if (!IsTailCall && !Is64Bit &&
       getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
@@ -1839,11 +1839,11 @@
     Ops.push_back(InFlag);
 
   if (IsTailCall) {
-    assert(InFlag.getNode() && 
+    assert(InFlag.getNode() &&
            "Flag must be set. Depend on flag being set in LowerRET");
     Chain = DAG.getNode(X86ISD::TAILCALL, dl,
                         TheCall->getVTList(), &Ops[0], Ops.size());
-      
+
     return SDValue(Chain.getNode(), Op.getResNo());
   }
 
@@ -1861,7 +1861,7 @@
     NumBytesForCalleeToPush = 4;
   else
     NumBytesForCalleeToPush = 0;  // Callee pops nothing.
-  
+
   // Returns a flag for retval copy to use.
   Chain = DAG.getCALLSEQ_END(Chain,
                              DAG.getIntPtrConstant(NumBytes, true),
@@ -1901,7 +1901,7 @@
 //    arg1
 //    arg2
 //    RETADDR
-//    [ new RETADDR 
+//    [ new RETADDR
 //      move area ]
 //    (possible EBP)
 //    ESI
@@ -1910,13 +1910,13 @@
 
 /// GetAlignedArgumentStackSize - Make the stack size align e.g 16n + 12 aligned
 /// for a 16 byte align requirement.
-unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, 
+unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
                                                         SelectionDAG& DAG) {
   MachineFunction &MF = DAG.getMachineFunction();
   const TargetMachine &TM = MF.getTarget();
   const TargetFrameInfo &TFI = *TM.getFrameInfo();
   unsigned StackAlignment = TFI.getStackAlignment();
-  uint64_t AlignMask = StackAlignment - 1; 
+  uint64_t AlignMask = StackAlignment - 1;
   int64_t Offset = StackSize;
   uint64_t SlotSize = TD->getPointerSize();
   if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
@@ -1924,7 +1924,7 @@
     Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
   } else {
     // Mask out lower bits, add stackalignment once plus the 12 bytes.
-    Offset = ((~AlignMask) & Offset) + StackAlignment + 
+    Offset = ((~AlignMask) & Offset) + StackAlignment +
       (StackAlignment-SlotSize);
   }
   return Offset;
@@ -2038,7 +2038,7 @@
     case ISD::SETUGE: return X86::COND_AE;
     }
   }
-  
+
   // First determine if it is required or is profitable to flip the operands.
 
   // If LHS is a foldable load, but RHS is not, flip the condition.
@@ -2896,7 +2896,7 @@
     SDValue Arg = Mask.getOperand(i);
     if (Arg.getOpcode() == ISD::UNDEF)
       continue;
-    
+
     unsigned Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
     if (Idx < NumElems) {
       unsigned Opc = V1.getNode()->getOpcode();
@@ -2922,7 +2922,7 @@
 static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG,
                              DebugLoc dl) {
   assert(VT.isVector() && "Expected a vector type");
-  
+
   // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
   // type.  This ensures they get CSE'd.
   SDValue Vec;
@@ -2943,7 +2943,7 @@
 ///
 static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
   assert(VT.isVector() && "Expected a vector type");
-  
+
   // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
   // type.  This ensures they get CSE'd.
   SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
@@ -2993,13 +2993,13 @@
   MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
   for (unsigned i = 1; i != NumElems; ++i)
     MaskVec.push_back(DAG.getConstant(i, BaseVT));
-  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, 
+  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
                      &MaskVec[0], MaskVec.size());
 }
 
 /// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
 /// of specified width.
-static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG, 
+static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG,
                               DebugLoc dl) {
   MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
   MVT BaseVT = MaskVT.getVectorElementType();
@@ -3008,7 +3008,7 @@
     MaskVec.push_back(DAG.getConstant(i,            BaseVT));
     MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
   }
-  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, 
+  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
                      &MaskVec[0], MaskVec.size());
 }
 
@@ -3024,7 +3024,7 @@
     MaskVec.push_back(DAG.getConstant(i + Half,            BaseVT));
     MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
   }
-  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, 
+  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
                      &MaskVec[0], MaskVec.size());
 }
 
@@ -3040,7 +3040,7 @@
   MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
   for (unsigned i = 1; i != NumElems; ++i)
     MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT));
-  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, 
+  return DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
                      &MaskVec[0], MaskVec.size());
 }
 
@@ -3113,7 +3113,7 @@
     assert(NumElems == 4);
     SDValue Cst0 = DAG.getTargetConstant(0, MVT::i32);
     SDValue Cst1 = DAG.getTargetConstant(1, MVT::i32);
-    Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, 
+    Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                        Cst0, Cst1, Cst0, Cst1);
   }
 
@@ -3237,7 +3237,7 @@
       SDValue ThisElt(0, 0), LastElt(0, 0);
       bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
       if (LastIsNonZero) {
-        LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl, 
+        LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
                               MVT::i16, Op.getOperand(i-1));
       }
       if (ThisIsNonZero) {
@@ -3279,7 +3279,7 @@
           V = DAG.getUNDEF(MVT::v8i16);
         First = false;
       }
-      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, 
+      V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
                       MVT::v8i16, V, Op.getOperand(i),
                       DAG.getIntPtrConstant(i));
     }
@@ -3354,7 +3354,7 @@
   if (NumNonZero == 1 && NumElems <= 4) {
     unsigned Idx = CountTrailingZeros_32(NonZeros);
     SDValue Item = Op.getOperand(Idx);
-    
+
     // If this is an insertion of an i64 value on x86-32, and if the top bits of
     // the value are obviously zero, truncate the value to i32 and do the
     // insertion that way.  Only do this if the value is non-constant or if the
@@ -3366,18 +3366,18 @@
         // Handle MMX and SSE both.
         MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
         unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
-        
+
         // Truncate the value (which may itself be a constant) to i32, and
         // convert it to a vector with movd (S2V+shuffle to zero extend).
         Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
         Item = getShuffleVectorZeroOrUndef(Item, 0, true,
                                            Subtarget->hasSSE2(), DAG);
-        
+
         // Now we have our 32-bit value zero extended in the low element of
         // a vector.  If Idx != 0, swizzle it into place.
         if (Idx != 0) {
-          SDValue Ops[] = { 
+          SDValue Ops[] = {
             Item, DAG.getUNDEF(Item.getValueType()),
             getSwapEltZeroMask(VecElts, Idx, DAG, dl)
           };
@@ -3386,7 +3386,7 @@
         return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item);
       }
     }
-    
+
     // If we have a constant or non-constant insertion into the low element of
     // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
     // the rest of the elements.  This will be matched as movd/movq/movss/movsd
@@ -3406,11 +3406,11 @@
         isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) {
       unsigned NumBits = VT.getSizeInBits();
       return getVShift(true, VT,
-                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, 
+                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                    VT, Op.getOperand(1)),
                        NumBits/2, DAG, *this, dl);
     }
-    
+
     if (IsAllConstants) // Otherwise, it's better to do a constpool load.
       return SDValue();
 
@@ -3421,7 +3421,7 @@
     // place.
     if (EVTBits == 32) {
       Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
-      
+
       // Turn it into a shuffle of zero and zero-extended scalar to vector.
       Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
                                          Subtarget->hasSSE2(), DAG);
@@ -3440,7 +3440,7 @@
   // Splat is obviously ok. Let legalizer expand it to a shuffle.
   if (Values.size() == 1)
     return SDValue();
-  
+
   // A vector full of immediates; various special cases are already
   // handled, so this is best done with a single constant-pool load.
   if (IsAllConstants)
@@ -3647,9 +3647,9 @@
       if (AnyOutOrder) {
         for (unsigned i = 4; i != 8; ++i)
           MaskVec.push_back(DAG.getConstant(i, MaskEVT));
-        SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT, 
+        SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVT,
                                    &MaskVec[0], 8);
-        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16, 
+        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16,
                            NewV, NewV, Mask);
       }
     }
@@ -3682,9 +3682,9 @@
       }
 
       if (AnyOutOrder) {
-        SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, 
+        SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl,
                                    MaskVT, &MaskVec[0], 8);
-        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16, 
+        NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v8i16,
                            NewV, NewV, Mask);
       }
     }
@@ -3894,7 +3894,7 @@
 
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                      DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
-                                 DAG.getNode(ISD::BIT_CONVERT, dl, 
+                                 DAG.getNode(ISD::BIT_CONVERT, dl,
                                              OpVT, SrcOp)));
 }
 
@@ -3992,7 +3992,7 @@
       Mask1[2] = DAG.getConstant(HiIndex & 1 ? 6 : 4, MaskEVT);
       Mask1[3] = DAG.getConstant(HiIndex & 1 ? 4 : 6, MaskEVT);
       return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V1, V2,
-                         DAG.getNode(ISD::BUILD_VECTOR, dl, 
+                         DAG.getNode(ISD::BUILD_VECTOR, dl,
                                      MaskVT, &Mask1[0], 4));
     } else {
       Mask1[0] = DAG.getConstant(HiIndex & 1 ? 2 : 0, MaskEVT);
@@ -4008,7 +4008,7 @@
           DAG.getConstant(cast<ConstantSDNode>(Mask1[3])->getZExtValue()+4,
                           MaskEVT);
       return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, VT, V2, V1,
-                         DAG.getNode(ISD::BUILD_VECTOR, dl, 
+                         DAG.getNode(ISD::BUILD_VECTOR, dl,
                                      MaskVT, &Mask1[0], 4));
     }
   }
@@ -4105,7 +4105,7 @@
     SDValue NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG,
                                             *this, dl);
     if (NewOp.getNode())
-      return DAG.getNode(ISD::BIT_CONVERT, dl, VT, 
+      return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                          LowerVECTOR_SHUFFLE(NewOp, DAG));
   } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
     // FIXME: Figure out a cleaner way to do this.
@@ -4138,7 +4138,7 @@
   SDValue ShVal;
   bool isShift = isVectorShift(Op, PermMask, DAG, isLeft, ShVal, ShAmt);
   if (isShift && ShVal.hasOneUse()) {
-    // If the shifted value has multiple uses, it may be cheaper to use 
+    // If the shifted value has multiple uses, it may be cheaper to use
     // v_set0 + movlhps or movhlps, etc.
     MVT EVT = VT.getVectorElementType();
     ShAmt *= EVT.getSizeInBits();
@@ -4177,7 +4177,7 @@
   // 1,1,1,1 -> v8i16 though.
   V1IsSplat = isSplatVector(V1.getNode());
   V2IsSplat = isSplatVector(V2.getNode());
-  
+
   // Canonicalize the splat or undef, if present, to be on the RHS.
   if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
     Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
@@ -4328,7 +4328,7 @@
          User->getValueType(0) != MVT::i32))
       return SDValue();
     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, 
+                                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
                                               Op.getOperand(0)),
                                               Op.getOperand(1));
     return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
@@ -4361,7 +4361,7 @@
     if (Idx == 0)
       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                                     DAG.getNode(ISD::BIT_CONVERT, dl, 
+                                     DAG.getNode(ISD::BIT_CONVERT, dl,
                                                  MVT::v4i32, Vec),
                                      Op.getOperand(1)));
     // Transform it so it match pextrw which produces a 32-bit result.
@@ -4413,7 +4413,7 @@
                                  &IdxVec[0], IdxVec.size());
     SDValue Vec = Op.getOperand(0);
     Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, Vec.getValueType(),
-                      Vec, DAG.getUNDEF(Vec.getValueType()), 
+                      Vec, DAG.getUNDEF(Vec.getValueType()),
                       Mask);
     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                        DAG.getIntPtrConstant(0));
@@ -4448,9 +4448,9 @@
     //  zero here.  The DAG Combiner may combine an extract_elt index into these
     //  bits.  For example (insert (extract, 3), 2) could be matched by putting
     //  the '3' into bits [7:6] of X86ISD::INSERTPS.
-    // Bits [5:4] of the constant are the destination select.  This is the 
+    // Bits [5:4] of the constant are the destination select.  This is the
     //  value of the incoming immediate.
-    // Bits [3:0] of the constant are the zero mask.  The DAG Combiner may 
+    // Bits [3:0] of the constant are the zero mask.  The DAG Combiner may
     //   combine either bitwise AND or insert of float 0.0 to set these bits.
     N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
     return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
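The comments above describe the 8-bit immediate that X86ISD::INSERTPS eventually carries. As a minimal sketch, assuming the standard SSE4.1 insertps encoding (makeInsertPSImm is a hypothetical helper, not part of this lowering):

  // bits [7:6] = source element select, bits [5:4] = destination element
  // select, bits [3:0] = zero mask.
  static unsigned makeInsertPSImm(unsigned SrcElt, unsigned DstElt,
                                  unsigned ZeroMask) {
    return ((SrcElt & 0x3) << 6) | ((DstElt & 0x3) << 4) | (ZeroMask & 0xF);
  }

The lowering above only fills bits [5:4] (hence the "<< 4"); the source select and zero mask are left at zero so the DAG combiner can fold them in later, as the comments note.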
@@ -4564,7 +4564,7 @@
                          DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                          Result);
   }
-  
+
   // For Darwin & Mingw32, external and weak symbols are indirect, so we want to
   // load the value at address GV, not the value of GV itself. This means that
   // the GlobalAddress must be in the base or index register of the address, not
@@ -4718,7 +4718,7 @@
   if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
       !Subtarget->isPICStyleRIPRel()) {
     Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
-                         DAG.getNode(X86ISD::GlobalBaseReg, 
+                         DAG.getNode(X86ISD::GlobalBaseReg,
                                      DebugLoc::getUnknownLoc(),
                                      getPointerTy()),
                          Result);
@@ -4747,7 +4747,7 @@
 }
 
 /// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
-/// take a 2 x i32 value to shift plus a shift amount. 
+/// take a 2 x i32 value to shift plus a shift amount.
 SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
   MVT VT = Op.getValueType();
@@ -4758,7 +4758,7 @@
   SDValue ShOpHi = Op.getOperand(1);
   SDValue ShAmt  = Op.getOperand(2);
   SDValue Tmp1 = isSRA ?
-    DAG.getNode(ISD::SRA, dl, VT, ShOpHi, 
+    DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                 DAG.getConstant(VTBits - 1, MVT::i8)) :
     DAG.getConstant(0, VT);
 
@@ -4797,14 +4797,14 @@
   MVT SrcVT = Op.getOperand(0).getValueType();
   assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
          "Unknown SINT_TO_FP to lower!");
-  
+
   // These are really Legal; caller falls through into that case.
   if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
     return SDValue();
-  if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && 
+  if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
       Subtarget->is64Bit())
     return SDValue();
-  
+
   DebugLoc dl = Op.getDebugLoc();
   unsigned Size = SrcVT.getSizeInBits()/8;
   MachineFunction &MF = DAG.getMachineFunction();
@@ -4911,12 +4911,12 @@
   MaskVec.push_back(DAG.getConstant(4, MVT::i32));
   MaskVec.push_back(DAG.getConstant(1, MVT::i32));
   MaskVec.push_back(DAG.getConstant(5, MVT::i32));
-  SDValue UnpcklMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, 
+  SDValue UnpcklMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                                    &MaskVec[0], MaskVec.size());
   SmallVector<SDValue, 4> MaskVec2;
   MaskVec2.push_back(DAG.getConstant(1, MVT::i32));
   MaskVec2.push_back(DAG.getConstant(0, MVT::i32));
-  SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, 
+  SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32,
                                  &MaskVec2[0], MaskVec2.size());
 
   SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
@@ -5027,7 +5027,7 @@
          "Unknown FP_TO_SINT to lower!");
 
   // These are really Legal.
-  if (Op.getValueType() == MVT::i32 && 
+  if (Op.getValueType() == MVT::i32 &&
       isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
     return std::make_pair(SDValue(), SDValue());
   if (Subtarget->is64Bit() &&
@@ -5076,7 +5076,7 @@
   std::pair<SDValue,SDValue> Vals = FP_TO_SINTHelper(Op, DAG);
   SDValue FIST = Vals.first, StackSlot = Vals.second;
   if (FIST.getNode() == 0) return SDValue();
-  
+
   // Load the result.
   return DAG.getLoad(Op.getValueType(), Op.getDebugLoc(),
                      FIST, StackSlot, NULL, 0);
@@ -5137,7 +5137,7 @@
   if (VT.isVector()) {
     return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                        DAG.getNode(ISD::XOR, dl, MVT::v2i64,
-                    DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, 
+                    DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
                                 Op.getOperand(0)),
                     DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask)));
   } else {
@@ -5223,7 +5223,7 @@
   SDValue Op1 = Op.getOperand(1);
   DebugLoc dl = Op.getDebugLoc();
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
-  
+
   // Lower (X & (1 << N)) == 0 to BT(X, N).
   // Lower ((X >>u N) & 1) != 0 to BT(X, N).
   // Lower ((X >>s N) & 1) != 0 to BT(X, N).
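The three patterns in the comments above all ask the same question, "is bit N of X set?", which is exactly what BT leaves in the carry flag. A minimal scalar sketch of the equivalence (hypothetical helpers assuming <cstdint>, not the lowering itself):

  static bool bitViaAndMask(uint32_t X, unsigned N) { return (X & (1u << N)) != 0; }
  static bool bitViaShift  (uint32_t X, unsigned N) { return ((X >> N) & 1u) != 0; }
  // BT X, N copies bit N of X into CF; SETC/JC (or SETNC/JNC for the "== 0"
  // form) then materializes the result.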
@@ -5278,7 +5278,7 @@
 
   bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
   unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
-    
+
   SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
   return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                      DAG.getConstant(X86CC, MVT::i8), Cond);
@@ -5305,7 +5305,7 @@
     default: break;
     case ISD::SETOEQ:
     case ISD::SETEQ:  SSECC = 0; break;
-    case ISD::SETOGT: 
+    case ISD::SETOGT:
     case ISD::SETGT: Swap = true; // Fallthrough
     case ISD::SETLT:
     case ISD::SETOLT: SSECC = 1; break;
@@ -5344,13 +5344,13 @@
     // Handle all other FP comparisons here.
     return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
   }
-  
+
   // We are handling one of the integer comparisons here.  Since SSE only has
   // GT and EQ comparisons for integer, swapping operands and multiple
   // operations may be required for some comparisons.
   unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
   bool Swap = false, Invert = false, FlipSigns = false;
-  
+
   switch (VT.getSimpleVT()) {
   default: break;
   case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
@@ -5358,7 +5358,7 @@
   case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
   case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
   }
-  
+
   switch (SetCCOpcode) {
   default: break;
   case ISD::SETNE:  Invert = true;
@@ -5374,7 +5374,7 @@
   }
   if (Swap)
     std::swap(Op0, Op1);
-  
+
   // Since SSE has no unsigned integer comparisons, we need to flip  the sign
   // bits of the inputs before performing those operations.
   if (FlipSigns) {
@@ -5387,7 +5387,7 @@
     Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SignVec);
     Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SignVec);
   }
-  
+
   SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
 
   // If the logical-not of the result is required, perform that now.
@@ -5419,12 +5419,12 @@
     SDValue Cmp = Cond.getOperand(1);
     unsigned Opc = Cmp.getOpcode();
     MVT VT = Op.getValueType();
-    
+
     bool IllegalFPCMov = false;
     if (VT.isFloatingPoint() && !VT.isVector() &&
         !isScalarFPTypeInSSEReg(VT))  // FPStack?
       IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
-    
+
     if ((isX86LogicalCmp(Opc) && !IllegalFPCMov) || Opc == X86ISD::BT) { // FIXME
       Cond = Cmp;
       addTest = false;
@@ -5433,7 +5433,7 @@
 
   if (addTest) {
     CC = DAG.getConstant(X86::COND_NE, MVT::i8);
-    Cond= DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond, 
+    Cond= DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
   }
 
@@ -5493,7 +5493,7 @@
            Cond.getOpcode() == X86ISD::UMUL)
     Cond = LowerXALUO(Cond, DAG);
 #endif
-  
+
   // If condition flag is set by a X86ISD::CMP, then use it as the condition
   // setting operand in place of the X86ISD::SETCC.
   if (Cond.getOpcode() == X86ISD::SETCC) {
@@ -5585,7 +5585,7 @@
 
   if (addTest) {
     CC = DAG.getConstant(X86::COND_NE, MVT::i8);
-    Cond= DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond, 
+    Cond= DAG.getNode(X86ISD::CMP, dl, MVT::i32, Cond,
                       DAG.getConstant(0, MVT::i8));
   }
   return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
@@ -5665,7 +5665,7 @@
         V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
       MVT IntPtr = getPointerTy();
       const Type *IntPtrTy = TD->getIntPtrType();
-      TargetLowering::ArgListTy Args; 
+      TargetLowering::ArgListTy Args;
       TargetLowering::ArgListEntry Entry;
       Entry.Node = Dst;
       Entry.Ty = IntPtrTy;
@@ -5673,8 +5673,8 @@
       Entry.Node = Size;
       Args.push_back(Entry);
       std::pair<SDValue,SDValue> CallResult =
-        LowerCallTo(Chain, Type::VoidTy, false, false, false, false, 
-                    CallingConv::C, false, 
+        LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
+                    CallingConv::C, false,
                     DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl);
       return CallResult.second;
     }
@@ -5735,11 +5735,11 @@
     InFlag = Chain.getValue(1);
   }
 
-  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX : 
+  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
                                                               X86::ECX,
                             Count, InFlag);
   InFlag = Chain.getValue(1);
-  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI : 
+  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
                                                               X86::EDI,
                             Dst, InFlag);
   InFlag = Chain.getValue(1);
@@ -5757,7 +5757,7 @@
     MVT CVT = Count.getValueType();
     SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
                                DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
-    Chain  = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX : 
+    Chain  = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
                                                              X86::ECX,
                               Left, InFlag);
     InFlag = Chain.getValue(1);
@@ -5791,7 +5791,7 @@
                                       SDValue Size, unsigned Align,
                                       bool AlwaysInline,
                                       const Value *DstSV, uint64_t DstSVOff,
-                                      const Value *SrcSV, uint64_t SrcSVOff) {  
+                                      const Value *SrcSV, uint64_t SrcSVOff) {
   // This requires the copy size to be a constant, preferably
   // within a subtarget-specific limit.
   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
@@ -5816,15 +5816,15 @@
   unsigned BytesLeft = SizeVal % UBytes;
 
   SDValue InFlag(0, 0);
-  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX : 
+  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RCX :
                                                               X86::ECX,
                             Count, InFlag);
   InFlag = Chain.getValue(1);
-  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI : 
+  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RDI :
                                                              X86::EDI,
                             Dst, InFlag);
   InFlag = Chain.getValue(1);
-  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI : 
+  Chain  = DAG.getCopyToReg(Chain, dl, Subtarget->is64Bit() ? X86::RSI :
                                                               X86::ESI,
                             Src, InFlag);
   InFlag = Chain.getValue(1);
@@ -5844,7 +5844,7 @@
     MVT DstVT = Dst.getValueType();
     MVT SrcVT = Src.getValueType();
     MVT SizeVT = Size.getValueType();
-    Results.push_back(DAG.getMemcpy(Chain, dl, 
+    Results.push_back(DAG.getMemcpy(Chain, dl,
                                     DAG.getNode(ISD::ADD, dl, DstVT, Dst,
                                                 DAG.getConstant(Offset, DstVT)),
                                     DAG.getNode(ISD::ADD, dl, SrcVT, Src,
@@ -5855,7 +5855,7 @@
                                     SrcSV, SrcSVOff + Offset));
   }
 
-  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      &Results[0], Results.size());
 }
 
@@ -5884,7 +5884,7 @@
   MemOps.push_back(Store);
 
   // Store fp_offset
-  FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), 
+  FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                     FIN, DAG.getIntPtrConstant(4));
   Store = DAG.getStore(Op.getOperand(0), dl,
                        DAG.getConstant(VarArgsFPOffset, MVT::i32),
@@ -5892,19 +5892,19 @@
   MemOps.push_back(Store);
 
   // Store ptr to overflow_arg_area
-  FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), 
+  FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                     FIN, DAG.getIntPtrConstant(4));
   SDValue OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
   Store = DAG.getStore(Op.getOperand(0), dl, OVFIN, FIN, SV, 0);
   MemOps.push_back(Store);
 
   // Store ptr to reg_save_area.
-  FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), 
+  FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                     FIN, DAG.getIntPtrConstant(8));
   SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
   Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 0);
   MemOps.push_back(Store);
-  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                      &MemOps[0], MemOps.size());
 }
 
@@ -6142,14 +6142,14 @@
       DAG.getConstant(TD->getPointerSize(),
                       Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
     return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
-                       DAG.getNode(ISD::ADD, dl, getPointerTy(), 
+                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                    FrameAddr, Offset),
                        NULL, 0);
   }
 
   // Just load the return address.
   SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
-  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 
+  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                      RetAddrFI, NULL, 0);
 }
 
@@ -6227,31 +6227,31 @@
     OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                 Addr, TrmpAddr, 0);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                        DAG.getConstant(2, MVT::i64));
     OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, TrmpAddr, 2, false, 2);
 
     // Load the 'nest' parameter value into R10.
     // R10 is specified in X86CallingConv.td
     OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                        DAG.getConstant(10, MVT::i64));
     OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                 Addr, TrmpAddr, 10);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                        DAG.getConstant(12, MVT::i64));
     OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 12, false, 2);
 
     // Jump to the nested function.
     OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                        DAG.getConstant(20, MVT::i64));
     OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                 Addr, TrmpAddr, 20);
 
     unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                        DAG.getConstant(22, MVT::i64));
     OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                 TrmpAddr, 22);
@@ -6306,27 +6306,27 @@
     SDValue OutChains[4];
     SDValue Addr, Disp;
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                        DAG.getConstant(10, MVT::i32));
     Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
 
     const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
     const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
-    OutChains[0] = DAG.getStore(Root, dl, 
+    OutChains[0] = DAG.getStore(Root, dl,
                                 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                 Trmp, TrmpAddr, 0);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                        DAG.getConstant(1, MVT::i32));
     OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 1, false, 1);
 
     const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                        DAG.getConstant(5, MVT::i32));
     OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                 TrmpAddr, 5, false, 1);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, 
+    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                        DAG.getConstant(6, MVT::i32));
     OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, TrmpAddr, 6, false, 1);
 
@@ -6463,7 +6463,7 @@
   MVT VT = Op.getValueType();
   assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
   DebugLoc dl = Op.getDebugLoc();
-  
+
   //  ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32);
   //  ulong2 Bhi = __builtin_ia32_psrlqi128( b, 32);
   //  ulong2 AloBlo = __builtin_ia32_pmuludq128( a, b );
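The builtins in the comment above spell out schoolbook multiplication on 32-bit halves; the Ahi*Bhi partial product is never formed because it lies entirely above bit 63. A scalar sketch of the identity being used (an illustration assuming <cstdint>, not the DAG code itself):

  static uint64_t Mul64ViaPmuludq(uint64_t A, uint64_t B) {
    uint64_t AloBlo = (A & 0xffffffffULL) * (B & 0xffffffffULL); // pmuludq(a, b)
    uint64_t AloBhi = (A & 0xffffffffULL) * (B >> 32);           // pmuludq(a, b >> 32)
    uint64_t AhiBlo = (A >> 32) * (B & 0xffffffffULL);           // pmuludq(a >> 32, b)
    return AloBlo + ((AloBhi + AhiBlo) << 32);  // Ahi*Bhi only affects bits >= 64
  }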
@@ -6476,7 +6476,7 @@
 
   SDValue A = Op.getOperand(0);
   SDValue B = Op.getOperand(1);
-  
+
   SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                        DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
                        A, DAG.getConstant(32, MVT::i32));
@@ -6567,7 +6567,7 @@
   case MVT::i8:  Reg = X86::AL;  size = 1; break;
   case MVT::i16: Reg = X86::AX;  size = 2; break;
   case MVT::i32: Reg = X86::EAX; size = 4; break;
-  case MVT::i64: 
+  case MVT::i64:
     assert(Subtarget->is64Bit() && "Node not type legal!");
     Reg = X86::RAX; size = 8;
     break;
@@ -6581,7 +6581,7 @@
                     cpIn.getValue(1) };
   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
   SDValue Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, dl, Tys, Ops, 5);
-  SDValue cpOut = 
+  SDValue cpOut =
     DAG.getCopyFromReg(Result.getValue(0), dl, Reg, T, Result.getValue(1));
   return cpOut;
 }
@@ -6725,7 +6725,7 @@
     SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
     SDValue TheChain = N->getOperand(0);
     SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
-    SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32, 
+    SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
                                      rd.getValue(1));
     SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
                                      eax.getValue(2));
@@ -6871,14 +6871,14 @@
 
 // isLegalAddressingMode - Return true if the addressing mode represented
 // by AM is legal for this target, for a load/store of the specified type.
-bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, 
+bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                               const Type *Ty) const {
   // X86 supports extremely general addressing modes.
-  
+
   // X86 allows a sign-extended 32-bit immediate field as a displacement.
   if (AM.BaseOffs <= -(1LL << 32) || AM.BaseOffs >= (1LL << 32)-1)
     return false;
-  
+
   if (AM.BaseGV) {
     // We can only fold this if we don't need an extra load.
     if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
@@ -6897,7 +6897,7 @@
         return false;
     }
   }
-  
+
   switch (AM.Scale) {
   case 0:
   case 1:
@@ -6917,7 +6917,7 @@
   default:  // Other stuff never works.
     return false;
   }
-  
+
   return true;
 }
 
@@ -6971,7 +6971,7 @@
   if (NumElts == 4) {
     return (isMOVLMask(&BVOps[0], 4)  ||
             isCommutedMOVL(&BVOps[0], 4, true) ||
-            isSHUFPMask(&BVOps[0], 4) || 
+            isSHUFPMask(&BVOps[0], 4) ||
             isCommutedSHUFP(&BVOps[0], 4));
   }
   return false;
@@ -7007,7 +7007,7 @@
   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
   MachineFunction::iterator MBBIter = MBB;
   ++MBBIter;
-  
+
   /// First build the CFG
   MachineFunction *F = MBB->getParent();
   MachineBasicBlock *thisMBB = MBB;
@@ -7015,17 +7015,17 @@
   MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
   F->insert(MBBIter, newMBB);
   F->insert(MBBIter, nextMBB);
-  
+
   // Move all successors of thisMBB to nextMBB
   nextMBB->transferSuccessors(thisMBB);
-    
+
   // Update thisMBB to fall through to newMBB
   thisMBB->addSuccessor(newMBB);
-  
+
   // newMBB jumps to itself and falls through to nextMBB
   newMBB->addSuccessor(nextMBB);
   newMBB->addSuccessor(newMBB);
-  
+
   // Insert instructions into newMBB based on incoming instruction
   assert(bInstr->getNumOperands() < 8 && "unexpected number of operands");
   DebugLoc dl = bInstr->getDebugLoc();
@@ -7038,7 +7038,7 @@
   // x86 address has 4 operands: base, index, scale, and displacement
   int lastAddrIndx = 3; // [0,3]
   int valArgIndx = 4;
-  
+
   unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
   MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1);
   for (int i=0; i <= lastAddrIndx; ++i)
@@ -7048,7 +7048,7 @@
   if (invSrc) {
     MIB = BuildMI(newMBB, dl, TII->get(notOpc), tt).addReg(t1);
   }
-  else 
+  else
     tt = t1;
 
   unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
@@ -7064,7 +7064,7 @@
 
   MIB = BuildMI(newMBB, dl, TII->get(copyOpc), EAXreg);
   MIB.addReg(t1);
-  
+
   MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc));
   for (int i=0; i <= lastAddrIndx; ++i)
     (*MIB).addOperand(*argOpers[i]);
@@ -7074,7 +7074,7 @@
 
   MIB = BuildMI(newMBB, dl, TII->get(copyOpc), destOper.getReg());
   MIB.addReg(EAXreg);
-  
+
   // insert branch
   BuildMI(newMBB, dl, TII->get(X86::JNE)).addMBB(newMBB);
 
@@ -7114,7 +7114,7 @@
   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
   MachineFunction::iterator MBBIter = MBB;
   ++MBBIter;
-  
+
   /// First build the CFG
   MachineFunction *F = MBB->getParent();
   MachineBasicBlock *thisMBB = MBB;
@@ -7122,17 +7122,17 @@
   MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
   F->insert(MBBIter, newMBB);
   F->insert(MBBIter, nextMBB);
-  
+
   // Move all successors of thisMBB to nextMBB
   nextMBB->transferSuccessors(thisMBB);
-    
+
   // Update thisMBB to fall through to newMBB
   thisMBB->addSuccessor(newMBB);
-  
+
   // newMBB jumps to itself and falls through to nextMBB
   newMBB->addSuccessor(nextMBB);
   newMBB->addSuccessor(newMBB);
-  
+
   DebugLoc dl = bInstr->getDebugLoc();
   // Insert instructions into newMBB based on incoming instruction
   // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
@@ -7145,7 +7145,7 @@
 
   // x86 address has 4 operands: base, index, scale, and displacement
   int lastAddrIndx = 3; // [0,3]
-  
+
   unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
   MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1);
   for (int i=0; i <= lastAddrIndx; ++i)
@@ -7172,7 +7172,7 @@
 
   unsigned tt1 = F->getRegInfo().createVirtualRegister(RC);
   unsigned tt2 = F->getRegInfo().createVirtualRegister(RC);
-  if (invSrc) {  
+  if (invSrc) {
     MIB = BuildMI(newMBB, dl, TII->get(NotOpc), tt1).addReg(t1);
     MIB = BuildMI(newMBB, dl, TII->get(NotOpc), tt2).addReg(t2);
   } else {
@@ -7210,7 +7210,7 @@
   MIB.addReg(t5);
   MIB = BuildMI(newMBB, dl, TII->get(copyOpc), X86::ECX);
   MIB.addReg(t6);
-  
+
   MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B));
   for (int i=0; i <= lastAddrIndx; ++i)
     (*MIB).addOperand(*argOpers[i]);
@@ -7222,7 +7222,7 @@
   MIB.addReg(X86::EAX);
   MIB = BuildMI(newMBB, dl, TII->get(copyOpc), t4);
   MIB.addReg(X86::EDX);
-  
+
   // insert branch
   BuildMI(newMBB, dl, TII->get(X86::JNE)).addMBB(newMBB);
 
@@ -7239,7 +7239,7 @@
   //   thisMBB:
   //   newMBB:
   //     ld t1 = [min/max.addr]
-  //     mov t2 = [min/max.val] 
+  //     mov t2 = [min/max.val]
   //     cmp  t1, t2
   //     cmov[cond] t2 = t1
   //     mov EAX = t1
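The pseudo-code above is a compare-exchange retry loop: load the old value, compute the min/max against the operand, and try to publish it with lock cmpxchg, branching back to newMBB until the exchange succeeds. A rough scalar analogue of the semantics (a sketch assuming <atomic> and <algorithm>, not the MachineInstr sequence built below):

  static int32_t AtomicMin32(std::atomic<int32_t> &Addr, int32_t Val) {
    int32_t Old = Addr.load();
    for (;;) {
      int32_t New = std::min(Old, Val);           // cmp t1, t2 + cmov[cond]
      if (Addr.compare_exchange_weak(Old, New))   // lock cmpxchg, EAX implicit
        return Old;                               // the result is the old value
      // On failure Old has been refreshed and the loop (newMBB) retries.
    }
  }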
@@ -7251,7 +7251,7 @@
   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
   MachineFunction::iterator MBBIter = MBB;
   ++MBBIter;
-  
+
   /// First build the CFG
   MachineFunction *F = MBB->getParent();
   MachineBasicBlock *thisMBB = MBB;
@@ -7259,17 +7259,17 @@
   MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
   F->insert(MBBIter, newMBB);
   F->insert(MBBIter, nextMBB);
-  
+
   // Move all successors of thisMBB to nextMBB
   nextMBB->transferSuccessors(thisMBB);
-  
+
   // Update thisMBB to fall through to newMBB
   thisMBB->addSuccessor(newMBB);
-  
+
   // newMBB jumps to newMBB and falls through to nextMBB
   newMBB->addSuccessor(nextMBB);
   newMBB->addSuccessor(newMBB);
-  
+
   DebugLoc dl = mInstr->getDebugLoc();
   // Insert instructions into newMBB based on incoming instruction
   assert(mInstr->getNumOperands() < 8 && "unexpected number of operands");
@@ -7278,11 +7278,11 @@
   int numArgs = mInstr->getNumOperands() - 1;
   for (int i=0; i < numArgs; ++i)
     argOpers[i] = &mInstr->getOperand(i+1);
-  
+
   // x86 address has 4 operands: base, index, scale, and displacement
   int lastAddrIndx = 3; // [0,3]
   int valArgIndx = 4;
-  
+
   unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
   MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1);
   for (int i=0; i <= lastAddrIndx; ++i)
@@ -7292,11 +7292,11 @@
   assert((argOpers[valArgIndx]->isReg() ||
           argOpers[valArgIndx]->isImm()) &&
          "invalid operand");
-  
-  unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);  
+
+  unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
   if (argOpers[valArgIndx]->isReg())
     MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
-  else 
+  else
     MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2);
   (*MIB).addOperand(*argOpers[valArgIndx]);
 
@@ -7320,10 +7320,10 @@
   MIB.addReg(t3);
   assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
   (*MIB).addMemOperand(*F, *mInstr->memoperands_begin());
-  
+
   MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), destOper.getReg());
   MIB.addReg(X86::EAX);
-  
+
   // insert branch
   BuildMI(newMBB, dl, TII->get(X86::JNE)).addMBB(newMBB);
 
@@ -7414,7 +7414,7 @@
     // Load the old value of the high byte of the control word...
     unsigned OldCW =
       F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
-    addFrameReference(BuildMI(BB, dl, TII->get(X86::MOV16rm), OldCW), 
+    addFrameReference(BuildMI(BB, dl, TII->get(X86::MOV16rm), OldCW),
                       CWFrameIdx);
 
     // Set the high part to be round to zero...
@@ -7475,19 +7475,19 @@
   }
   case X86::ATOMAND32:
     return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
-                                               X86::AND32ri, X86::MOV32rm, 
+                                               X86::AND32ri, X86::MOV32rm,
                                                X86::LCMPXCHG32, X86::MOV32rr,
                                                X86::NOT32r, X86::EAX,
                                                X86::GR32RegisterClass);
   case X86::ATOMOR32:
-    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, 
-                                               X86::OR32ri, X86::MOV32rm, 
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
+                                               X86::OR32ri, X86::MOV32rm,
                                                X86::LCMPXCHG32, X86::MOV32rr,
                                                X86::NOT32r, X86::EAX,
                                                X86::GR32RegisterClass);
   case X86::ATOMXOR32:
     return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
-                                               X86::XOR32ri, X86::MOV32rm, 
+                                               X86::XOR32ri, X86::MOV32rm,
                                                X86::LCMPXCHG32, X86::MOV32rr,
                                                X86::NOT32r, X86::EAX,
                                                X86::GR32RegisterClass);
@@ -7513,7 +7513,7 @@
                                                X86::NOT16r, X86::AX,
                                                X86::GR16RegisterClass);
   case X86::ATOMOR16:
-    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, 
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
                                                X86::OR16ri, X86::MOV16rm,
                                                X86::LCMPXCHG16, X86::MOV16rr,
                                                X86::NOT16r, X86::AX,
@@ -7546,7 +7546,7 @@
                                                X86::NOT8r, X86::AL,
                                                X86::GR8RegisterClass);
   case X86::ATOMOR8:
-    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, 
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
                                                X86::OR8ri, X86::MOV8rm,
                                                X86::LCMPXCHG8, X86::MOV8rr,
                                                X86::NOT8r, X86::AL,
@@ -7567,19 +7567,19 @@
   // This group is for 64-bit host.
   case X86::ATOMAND64:
     return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
-                                               X86::AND64ri32, X86::MOV64rm, 
+                                               X86::AND64ri32, X86::MOV64rm,
                                                X86::LCMPXCHG64, X86::MOV64rr,
                                                X86::NOT64r, X86::RAX,
                                                X86::GR64RegisterClass);
   case X86::ATOMOR64:
-    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, 
-                                               X86::OR64ri32, X86::MOV64rm, 
+    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
+                                               X86::OR64ri32, X86::MOV64rm,
                                                X86::LCMPXCHG64, X86::MOV64rr,
                                                X86::NOT64r, X86::RAX,
                                                X86::GR64RegisterClass);
   case X86::ATOMXOR64:
     return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
-                                               X86::XOR64ri32, X86::MOV64rm, 
+                                               X86::XOR64ri32, X86::MOV64rm,
                                                X86::LCMPXCHG64, X86::MOV64rr,
                                                X86::NOT64r, X86::RAX,
                                                X86::GR64RegisterClass);
@@ -7600,37 +7600,37 @@
 
   // This group does 64-bit operations on a 32-bit host.
   case X86::ATOMAND6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::AND32rr, X86::AND32rr,
                                                X86::AND32ri, X86::AND32ri,
                                                false);
   case X86::ATOMOR6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::OR32rr, X86::OR32rr,
                                                X86::OR32ri, X86::OR32ri,
                                                false);
   case X86::ATOMXOR6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::XOR32rr, X86::XOR32rr,
                                                X86::XOR32ri, X86::XOR32ri,
                                                false);
   case X86::ATOMNAND6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::AND32rr, X86::AND32rr,
                                                X86::AND32ri, X86::AND32ri,
                                                true);
   case X86::ATOMADD6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::ADD32rr, X86::ADC32rr,
                                                X86::ADD32ri, X86::ADC32ri,
                                                false);
   case X86::ATOMSUB6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::SUB32rr, X86::SBB32rr,
                                                X86::SUB32ri, X86::SBB32ri,
                                                false);
   case X86::ATOMSWAP6432:
-    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                                X86::MOV32rr, X86::MOV32rr,
                                                X86::MOV32ri, X86::MOV32ri,
                                                false);
@@ -7751,7 +7751,7 @@
   LoadSDNode *LD = cast<LoadSDNode>(Base);
   if (isBaseAlignmentOfN(16, Base->getOperand(1).getNode(), TLI))
     return DAG.getLoad(VT, dl, LD->getChain(), LD->getBasePtr(),
-                       LD->getSrcValue(), LD->getSrcValueOffset(), 
+                       LD->getSrcValue(), LD->getSrcValueOffset(),
                        LD->isVolatile());
   return DAG.getLoad(VT, dl, LD->getChain(), LD->getBasePtr(),
                      LD->getSrcValue(), LD->getSrcValueOffset(),
@@ -7794,7 +7794,7 @@
 
   // Transform it into VZEXT_LOAD addr.
   LoadSDNode *LD = cast<LoadSDNode>(Base);
-  
+
   // Load must not be an extload.
   if (LD->getExtensionType() != ISD::NON_EXTLOAD)
     return SDValue();
@@ -7810,7 +7810,7 @@
   TLO.CombineTo(SDValue(Base, 1), ResNode.getValue(1));
   DCI.CommitTargetLoweringOpt(TLO);
   return ResNode;
-}                                           
+}
 
 /// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
@@ -7895,11 +7895,11 @@
   // so we have no knowledge of the shift amount.
   if (!Subtarget->hasSSE2())
     return SDValue();
-    
+
   MVT VT = N->getValueType(0);
   if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
     return SDValue();
-    
+
   SDValue ShAmtOp = N->getOperand(1);
   MVT EltVT = VT.getVectorElementType();
   DebugLoc dl = N->getDebugLoc();
@@ -8017,14 +8017,14 @@
       DebugLoc dl = N->getDebugLoc();
       // If we are a 64-bit capable x86, lower to a single movq load/store pair.
       if (Subtarget->is64Bit()) {
-        SDValue NewLd = DAG.getLoad(MVT::i64, dl, Ld->getChain(), 
-                                      Ld->getBasePtr(), Ld->getSrcValue(), 
+        SDValue NewLd = DAG.getLoad(MVT::i64, dl, Ld->getChain(),
+                                      Ld->getBasePtr(), Ld->getSrcValue(),
                                       Ld->getSrcValueOffset(), Ld->isVolatile(),
                                       Ld->getAlignment());
         SDValue NewChain = NewLd.getValue(1);
         if (TokenFactorIndex != -1) {
           Ops.push_back(NewChain);
-          NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Ops[0], 
+          NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Ops[0],
                                  Ops.size());
         }
         return DAG.getStore(NewChain, dl, NewLd, St->getBasePtr(),
@@ -8042,14 +8042,14 @@
                                    Ld->isVolatile(), Ld->getAlignment());
       SDValue HiLd = DAG.getLoad(MVT::i32, dl, Ld->getChain(), HiAddr,
                                    Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
-                                   Ld->isVolatile(), 
+                                   Ld->isVolatile(),
                                    MinAlign(Ld->getAlignment(), 4));
 
       SDValue NewChain = LoLd.getValue(1);
       if (TokenFactorIndex != -1) {
         Ops.push_back(LoLd);
         Ops.push_back(HiLd);
-        NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Ops[0], 
+        NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Ops[0],
                                Ops.size());
       }
 
@@ -8063,7 +8063,7 @@
       SDValue HiSt = DAG.getStore(NewChain, dl, HiLd, HiAddr,
                                     St->getSrcValue(),
                                     St->getSrcValueOffset() + 4,
-                                    St->isVolatile(), 
+                                    St->isVolatile(),
                                     MinAlign(St->getAlignment(), 4));
       return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoSt, HiSt);
     }
@@ -8184,7 +8184,7 @@
     if (Subtarget->hasSSE1())
       return "x";
   }
-  
+
   return TargetLowering::LowerXConstraint(ConstraintVT);
 }
 
@@ -8196,7 +8196,7 @@
                                                      std::vector<SDValue>&Ops,
                                                      SelectionDAG &DAG) const {
   SDValue Result(0, 0);
-  
+
   switch (Constraint) {
   default: break;
   case 'I':
@@ -8262,7 +8262,7 @@
     // an optional displacement) to be used with 'i'.
     GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
     int64_t Offset = 0;
-    
+
     // Match either (GA) or (GA+C)
     if (GA) {
       Offset = GA->getOffset();
@@ -8280,9 +8280,9 @@
           C = 0, GA = 0;
       }
     }
-    
+
     if (GA) {
-      if (hasMemory) 
+      if (hasMemory)
         Op = LowerGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
                                 Offset, DAG);
       else
@@ -8296,7 +8296,7 @@
     return;
   }
   }
-  
+
   if (Result.getNode()) {
     Ops.push_back(Result);
     return;
@@ -8346,7 +8346,7 @@
       if (VT == MVT::i16)
         return std::make_pair(0U, X86::GR16RegisterClass);
       if (VT == MVT::i32 || !Subtarget->is64Bit())
-        return std::make_pair(0U, X86::GR32RegisterClass);  
+        return std::make_pair(0U, X86::GR32RegisterClass);
       return std::make_pair(0U, X86::GR64RegisterClass);
     case 'f':  // FP Stack registers.
       // If SSE is enabled for this VT, use f80 to ensure the isel moves the
@@ -8386,7 +8386,7 @@
       break;
     }
   }
-  
+
   // Use the default implementation in TargetLowering to convert the register
   // constraint into a member of a register class.
   std::pair<unsigned, const TargetRegisterClass*> Res;
@@ -8498,24 +8498,24 @@
   assert(VT.isVector());
   if (isTypeLegal(VT))
     return VT;
-  
+
   // TODO: In computeRegisterProperty, we can compute the list of legal vector
   //       type based on element type.  This would speed up our search (though
   //       it may not be worth it since the size of the list is relatively
   //       small).
   MVT EltVT = VT.getVectorElementType();
   unsigned NElts = VT.getVectorNumElements();
-  
+
   // On X86, it makes sense to widen any vector wider than 1
   if (NElts <= 1)
     return MVT::Other;
-  
-  for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE; 
+
+  for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
        nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
     MVT SVT = (MVT::SimpleValueType)nVT;
-    
-    if (isTypeLegal(SVT) && 
-        SVT.getVectorElementType() == EltVT && 
+
+    if (isTypeLegal(SVT) &&
+        SVT.getVectorElementType() == EltVT &&
         SVT.getVectorNumElements() > NElts)
       return SVT;
   }