Rename MVT to EVT, in preparation for splitting SimpleValueType out into its own struct type.
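
As a rough illustration of where this rename is headed, here is a standalone sketch (not the actual LLVM headers; the names sketch::NewEVT, isSimple, and getSimpleVT below are made up for this note): the plain enumerators stay in a SimpleValueType enum, and a small value-type struct wraps that enum so it can later carry information about types the enum cannot describe. Call sites like the ones touched in this patch keep comparing against enumerators; only the spelled-out type name changes.

  // Standalone sketch; compiles on its own and does not reuse LLVM code.
  #include <cassert>

  namespace sketch {

  // The enumerators that would be split out into their own type.
  enum SimpleValueType { i1, i8, i16, i32, f32, f64, Other,
                         INVALID_SIMPLE_VALUE_TYPE };

  // A minimal value-type struct in the spirit of the planned split: today it
  // only wraps the enum, but as a struct it has room to grow extra state for
  // types the enum cannot represent.
  struct NewEVT {
    SimpleValueType V;

    NewEVT(SimpleValueType SVT = INVALID_SIMPLE_VALUE_TYPE) : V(SVT) {}

    bool isSimple() const { return V != INVALID_SIMPLE_VALUE_TYPE; }
    SimpleValueType getSimpleVT() const { assert(isSimple()); return V; }

    bool operator==(NewEVT RHS) const { return V == RHS.V; }
    bool operator!=(NewEVT RHS) const { return V != RHS.V; }
  };

  } // namespace sketch

  int main() {
    // Usage mirrors the call sites in this patch: compare against the
    // enumerators, switch on getSimpleVT(), and so on.
    sketch::NewEVT LoadedVT = sketch::i16;
    assert(LoadedVT == sketch::i16 && LoadedVT != sketch::i32);
    return 0;
  }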


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78610 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index 823ae2f..0757529 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -63,7 +63,7 @@
 
  /// getI32Imm - Return a target constant with the specified value, of type i32.
   inline SDValue getI32Imm(unsigned Imm) {
-    return CurDAG->getTargetConstant(Imm, MVT::i32);
+    return CurDAG->getTargetConstant(Imm, EVT::i32);
   }
 
   SDNode *Select(SDValue Op);
@@ -156,13 +156,13 @@
   BaseReg = N.getOperand(0);
   unsigned ShImmVal = 0;
   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
-    ShReg = CurDAG->getRegister(0, MVT::i32);
+    ShReg = CurDAG->getRegister(0, EVT::i32);
     ShImmVal = RHS->getZExtValue() & 31;
   } else {
     ShReg = N.getOperand(1);
   }
   Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
-                                  MVT::i32);
+                                  EVT::i32);
   return true;
 }
 
@@ -185,7 +185,7 @@
           Base = Offset = N.getOperand(0);
           Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                             ARM_AM::lsl),
-                                          MVT::i32);
+                                          EVT::i32);
           return true;
         }
       }
@@ -200,10 +200,10 @@
     } else if (N.getOpcode() == ARMISD::Wrapper) {
       Base = N.getOperand(0);
     }
-    Offset = CurDAG->getRegister(0, MVT::i32);
+    Offset = CurDAG->getRegister(0, EVT::i32);
     Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                       ARM_AM::no_shift),
-                                    MVT::i32);
+                                    EVT::i32);
     return true;
   }
   
@@ -218,7 +218,7 @@
           int FI = cast<FrameIndexSDNode>(Base)->getIndex();
           Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
         }
-        Offset = CurDAG->getRegister(0, MVT::i32);
+        Offset = CurDAG->getRegister(0, EVT::i32);
 
         ARM_AM::AddrOpc AddSub = ARM_AM::add;
         if (RHSC < 0) {
@@ -227,7 +227,7 @@
         }
         Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                           ARM_AM::no_shift),
-                                        MVT::i32);
+                                        EVT::i32);
         return true;
       }
     }
@@ -270,7 +270,7 @@
   }
   
   Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
-                                  MVT::i32);
+                                  EVT::i32);
   return true;
 }
 
@@ -285,10 +285,10 @@
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
     int Val = (int)C->getZExtValue();
     if (Val >= 0 && Val < 0x1000) { // 12 bits.
-      Offset = CurDAG->getRegister(0, MVT::i32);
+      Offset = CurDAG->getRegister(0, EVT::i32);
       Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                         ARM_AM::no_shift),
-                                      MVT::i32);
+                                      EVT::i32);
       return true;
     }
   }
@@ -308,7 +308,7 @@
   }
 
   Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
-                                  MVT::i32);
+                                  EVT::i32);
   return true;
 }
 
@@ -320,7 +320,7 @@
     // X - C  is canonicalize to X + -C, no need to handle it here.
     Base = N.getOperand(0);
     Offset = N.getOperand(1);
-    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
+    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),EVT::i32);
     return true;
   }
   
@@ -330,8 +330,8 @@
       int FI = cast<FrameIndexSDNode>(N)->getIndex();
       Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
     }
-    Offset = CurDAG->getRegister(0, MVT::i32);
-    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
+    Offset = CurDAG->getRegister(0, EVT::i32);
+    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),EVT::i32);
     return true;
   }
   
@@ -345,21 +345,21 @@
         int FI = cast<FrameIndexSDNode>(Base)->getIndex();
         Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
       }
-      Offset = CurDAG->getRegister(0, MVT::i32);
+      Offset = CurDAG->getRegister(0, EVT::i32);
 
       ARM_AM::AddrOpc AddSub = ARM_AM::add;
       if (RHSC < 0) {
         AddSub = ARM_AM::sub;
         RHSC = - RHSC;
       }
-      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
+      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),EVT::i32);
       return true;
     }
   }
   
   Base = N.getOperand(0);
   Offset = N.getOperand(1);
-  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
+  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), EVT::i32);
   return true;
 }
 
@@ -374,21 +374,21 @@
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
     int Val = (int)C->getZExtValue();
     if (Val >= 0 && Val < 256) {
-      Offset = CurDAG->getRegister(0, MVT::i32);
-      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
+      Offset = CurDAG->getRegister(0, EVT::i32);
+      Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), EVT::i32);
       return true;
     }
   }
 
   Offset = N;
-  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
+  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), EVT::i32);
   return true;
 }
 
 bool ARMDAGToDAGISel::SelectAddrMode4(SDValue Op, SDValue N,
                                       SDValue &Addr, SDValue &Mode) {
   Addr = N;
-  Mode = CurDAG->getTargetConstant(0, MVT::i32);
+  Mode = CurDAG->getTargetConstant(0, EVT::i32);
   return true;
 }
 
@@ -403,7 +403,7 @@
       Base = N.getOperand(0);
     }
     Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
-                                       MVT::i32);
+                                       EVT::i32);
     return true;
   }
   
@@ -426,7 +426,7 @@
           RHSC = - RHSC;
         }
         Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
-                                           MVT::i32);
+                                           EVT::i32);
         return true;
       }
     }
@@ -434,7 +434,7 @@
   
   Base = N;
   Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
-                                     MVT::i32);
+                                     EVT::i32);
   return true;
 }
 
@@ -443,8 +443,8 @@
                                       SDValue &Opc) {
   Addr = N;
   // The optional writeback is handled in ARMLoadStoreOpt.
-  Update = CurDAG->getRegister(0, MVT::i32);
-  Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), MVT::i32);
+  Update = CurDAG->getRegister(0, EVT::i32);
+  Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), EVT::i32);
   return true;
 }
 
@@ -454,7 +454,7 @@
     Offset = N.getOperand(0);
     SDValue N1 = N.getOperand(1);
     Label  = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
-                                       MVT::i32);
+                                       EVT::i32);
     return true;
   }
   return false;
@@ -493,8 +493,8 @@
 
   if (N.getOpcode() != ISD::ADD) {
     Base = (N.getOpcode() == ARMISD::Wrapper) ? N.getOperand(0) : N;
-    Offset = CurDAG->getRegister(0, MVT::i32);
-    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+    Offset = CurDAG->getRegister(0, EVT::i32);
+    OffImm = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
 
@@ -504,8 +504,8 @@
   if ((LHSR && LHSR->getReg() == ARM::SP) ||
       (RHSR && RHSR->getReg() == ARM::SP)) {
     Base = N;
-    Offset = CurDAG->getRegister(0, MVT::i32);
-    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+    Offset = CurDAG->getRegister(0, EVT::i32);
+    OffImm = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
 
@@ -516,8 +516,8 @@
       RHSC /= Scale;
       if (RHSC >= 0 && RHSC < 32) {
         Base = N.getOperand(0);
-        Offset = CurDAG->getRegister(0, MVT::i32);
-        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+        Offset = CurDAG->getRegister(0, EVT::i32);
+        OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
         return true;
       }
     }
@@ -525,7 +525,7 @@
 
   Base = N.getOperand(0);
   Offset = N.getOperand(1);
-  OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+  OffImm = CurDAG->getTargetConstant(0, EVT::i32);
   return true;
 }
 
@@ -552,7 +552,7 @@
   if (N.getOpcode() == ISD::FrameIndex) {
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
     Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
-    OffImm = CurDAG->getTargetConstant(0, MVT::i32);
+    OffImm = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
 
@@ -573,7 +573,7 @@
             int FI = cast<FrameIndexSDNode>(Base)->getIndex();
             Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
           }
-          OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+          OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
           return true;
         }
       }
@@ -612,7 +612,7 @@
     if (N.getOpcode() == ISD::FrameIndex) {
       int FI = cast<FrameIndexSDNode>(N)->getIndex();
       Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
-      OffImm  = CurDAG->getTargetConstant(0, MVT::i32);
+      OffImm  = CurDAG->getTargetConstant(0, EVT::i32);
       return true;
     }
     return false;
@@ -629,7 +629,7 @@
         int FI = cast<FrameIndexSDNode>(Base)->getIndex();
         Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
       }
-      OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+      OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
       return true;
     }
   }
@@ -652,7 +652,7 @@
           int FI = cast<FrameIndexSDNode>(Base)->getIndex();
           Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
         }
-        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+        OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
         return true;
       }
     }
@@ -671,8 +671,8 @@
     int RHSC = (int)RHS->getZExtValue();
     if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
       OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
-        ? CurDAG->getTargetConstant(RHSC, MVT::i32)
-        : CurDAG->getTargetConstant(-RHSC, MVT::i32);
+        ? CurDAG->getTargetConstant(RHSC, EVT::i32)
+        : CurDAG->getTargetConstant(-RHSC, EVT::i32);
       return true;
     }
   }
@@ -688,7 +688,7 @@
       if (((RHSC & 0x3) == 0) &&
           ((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
         Base   = N.getOperand(0);
-        OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
+        OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
         return true;
       }
     }
@@ -697,7 +697,7 @@
       int RHSC = (int)RHS->getZExtValue();
       if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
         Base   = N.getOperand(0);
-        OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
+        OffImm = CurDAG->getTargetConstant(-RHSC, EVT::i32);
         return true;
       }
     }
@@ -719,8 +719,8 @@
       if (Base.getOpcode() == ISD::TargetConstantPool)
         return false;  // We want to select t2LDRpci instead.
     }
-    OffReg = CurDAG->getRegister(0, MVT::i32);
-    ShImm  = CurDAG->getTargetConstant(0, MVT::i32);
+    OffReg = CurDAG->getRegister(0, EVT::i32);
+    ShImm  = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
 
@@ -736,8 +736,8 @@
   // Thumb2 does not support (R - R) or (R - (R << [1,2,3])).
   if (N.getOpcode() == ISD::SUB) {
     Base = N;
-    OffReg = CurDAG->getRegister(0, MVT::i32);
-    ShImm  = CurDAG->getTargetConstant(0, MVT::i32);
+    OffReg = CurDAG->getRegister(0, EVT::i32);
+    ShImm  = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
 
@@ -771,7 +771,7 @@
     }
   }
   
-  ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
+  ShImm = CurDAG->getTargetConstant(ShAmt, EVT::i32);
 
   return true;
 }
@@ -780,7 +780,7 @@
 
 /// getAL - Returns a ARMCC::AL immediate node.
 static inline SDValue getAL(SelectionDAG *CurDAG) {
-  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
+  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, EVT::i32);
 }
 
 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDValue Op) {
@@ -789,22 +789,22 @@
   if (AM == ISD::UNINDEXED)
     return NULL;
 
-  MVT LoadedVT = LD->getMemoryVT();
+  EVT LoadedVT = LD->getMemoryVT();
   SDValue Offset, AMOpc;
   bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
   unsigned Opcode = 0;
   bool Match = false;
-  if (LoadedVT == MVT::i32 &&
+  if (LoadedVT == EVT::i32 &&
       SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
     Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
     Match = true;
-  } else if (LoadedVT == MVT::i16 &&
+  } else if (LoadedVT == EVT::i16 &&
              SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
     Match = true;
     Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
       ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
       : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
-  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
+  } else if (LoadedVT == EVT::i8 || LoadedVT == EVT::i1) {
     if (LD->getExtensionType() == ISD::SEXTLOAD) {
       if (SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
         Match = true;
@@ -822,9 +822,9 @@
     SDValue Chain = LD->getChain();
     SDValue Base = LD->getBasePtr();
     SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
-                     CurDAG->getRegister(0, MVT::i32), Chain };
-    return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
-                                 MVT::Other, Ops, 6);
+                     CurDAG->getRegister(0, EVT::i32), Chain };
+    return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), EVT::i32, EVT::i32,
+                                 EVT::Other, Ops, 6);
   }
 
   return NULL;
@@ -836,7 +836,7 @@
   if (AM == ISD::UNINDEXED)
     return NULL;
 
-  MVT LoadedVT = LD->getMemoryVT();
+  EVT LoadedVT = LD->getMemoryVT();
   bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
   SDValue Offset;
   bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
@@ -844,17 +844,17 @@
   bool Match = false;
   if (SelectT2AddrModeImm8Offset(Op, LD->getOffset(), Offset)) {
     switch (LoadedVT.getSimpleVT()) {
-    case MVT::i32:
+    case EVT::i32:
       Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
       break;
-    case MVT::i16:
+    case EVT::i16:
       if (isSExtLd)
         Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
       else
         Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
       break;
-    case MVT::i8:
-    case MVT::i1:
+    case EVT::i8:
+    case EVT::i1:
       if (isSExtLd)
         Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
       else
@@ -870,9 +870,9 @@
     SDValue Chain = LD->getChain();
     SDValue Base = LD->getBasePtr();
     SDValue Ops[]= { Base, Offset, getAL(CurDAG),
-                     CurDAG->getRegister(0, MVT::i32), Chain };
-    return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
-                                 MVT::Other, Ops, 5);
+                     CurDAG->getRegister(0, EVT::i32), Chain };
+    return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), EVT::i32, EVT::i32,
+                                 EVT::Other, Ops, 5);
   }
 
   return NULL;
@@ -881,11 +881,11 @@
 SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) {
   SDNode *N = Op.getNode();
   DebugLoc dl = N->getDebugLoc();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   SDValue Chain = Op.getOperand(0);
   SDValue Size = Op.getOperand(1);
   SDValue Align = Op.getOperand(2);
-  SDValue SP = CurDAG->getRegister(ARM::SP, MVT::i32);
+  SDValue SP = CurDAG->getRegister(ARM::SP, EVT::i32);
   int32_t AlignVal = cast<ConstantSDNode>(Align)->getSExtValue();
   if (AlignVal < 0)
     // We need to align the stack. Use Thumb1 tAND which is the only thumb
@@ -900,8 +900,8 @@
   // tSUBspi - immediate is between 0 ... 508 inclusive.
   if (C <= 508 && ((C & 3) == 0))
     // FIXME: tSUBspi encode scale 4 implicitly.
-    return CurDAG->SelectNodeTo(N, ARM::tSUBspi_, VT, MVT::Other, SP,
-                                CurDAG->getTargetConstant(C/4, MVT::i32),
+    return CurDAG->SelectNodeTo(N, ARM::tSUBspi_, VT, EVT::Other, SP,
+                                CurDAG->getTargetConstant(C/4, EVT::i32),
                                 Chain);
 
   if (Subtarget->isThumb1Only()) {
@@ -909,22 +909,22 @@
     // should have negated the size operand already. FIXME: We can't insert
     // new target independent node at this stage so we are forced to negate
     // it earlier. Is there a better solution? 
-    return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, MVT::Other, SP, Size,
+    return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, EVT::Other, SP, Size,
                                 Chain);
   } else if (Subtarget->isThumb2()) {
     if (isC && Predicate_t2_so_imm(Size.getNode())) {
       // t2SUBrSPi
-      SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
-      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi_, VT, MVT::Other, Ops, 3);
+      SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, EVT::i32), Chain };
+      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi_, VT, EVT::Other, Ops, 3);
     } else if (isC && Predicate_imm0_4095(Size.getNode())) {
       // t2SUBrSPi12
-      SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
-      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi12_, VT, MVT::Other, Ops, 3);
+      SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, EVT::i32), Chain };
+      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi12_, VT, EVT::Other, Ops, 3);
     } else {
       // t2SUBrSPs
       SDValue Ops[] = { SP, Size,
                         getI32Imm(ARM_AM::getSORegOpc(ARM_AM::lsl,0)), Chain };
-      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPs_, VT, MVT::Other, Ops, 4);
+      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPs_, VT, EVT::Other, Ops, 4);
     }
   }
 
@@ -964,21 +964,21 @@
 
       SDNode *ResNode;
       if (Subtarget->isThumb1Only()) {
-        SDValue Pred = CurDAG->getTargetConstant(0xEULL, MVT::i32);
-        SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
+        SDValue Pred = CurDAG->getTargetConstant(0xEULL, EVT::i32);
+        SDValue PredReg = CurDAG->getRegister(0, EVT::i32);
         SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
-        ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
+        ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, EVT::i32, EVT::Other,
                                         Ops, 4);
       } else {
         SDValue Ops[] = {
           CPIdx, 
-          CurDAG->getRegister(0, MVT::i32),
-          CurDAG->getTargetConstant(0, MVT::i32),
+          CurDAG->getRegister(0, EVT::i32),
+          CurDAG->getTargetConstant(0, EVT::i32),
           getAL(CurDAG),
-          CurDAG->getRegister(0, MVT::i32),
+          CurDAG->getRegister(0, EVT::i32),
           CurDAG->getEntryNode()
         };
-        ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
+        ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, EVT::i32, EVT::Other,
                                       Ops, 6);
       }
       ReplaceUses(Op, SDValue(ResNode, 0));
@@ -990,19 +990,19 @@
   }
   case ISD::ConstantFP: {
     ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(N);
-    MVT VT = CFP->getValueType(0);
+    EVT VT = CFP->getValueType(0);
     ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
     SDValue CPIdx = CurDAG->getTargetConstantPool(LLVMC, TLI.getPointerTy());
     SDNode *ResNode;
     SDValue Ops[] = {
       CPIdx, 
-      CurDAG->getTargetConstant(0, MVT::i32),
+      CurDAG->getTargetConstant(0, EVT::i32),
       getAL(CurDAG),
-      CurDAG->getRegister(0, MVT::i32),
+      CurDAG->getRegister(0, EVT::i32),
       CurDAG->getEntryNode()
     };
-    unsigned Opc = (VT == MVT::f32) ? ARM::FLDS : ARM::FLDD;
-    ResNode=CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Ops, 5);
+    unsigned Opc = (VT == EVT::f32) ? ARM::FLDS : ARM::FLDD;
+    ResNode=CurDAG->getTargetNode(Opc, dl, VT, EVT::Other, Ops, 5);
     ReplaceUses(Op, SDValue(ResNode, 0));
     return NULL;
   }
@@ -1011,15 +1011,15 @@
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
     if (Subtarget->isThumb1Only()) {
-      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
-                                  CurDAG->getTargetConstant(0, MVT::i32));
+      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, EVT::i32, TFI,
+                                  CurDAG->getTargetConstant(0, EVT::i32));
     } else {
       unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
                       ARM::t2ADDri : ARM::ADDri);
-      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
-                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
-                        CurDAG->getRegister(0, MVT::i32) };
-      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
+      SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, EVT::i32),
+                        getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
+                        CurDAG->getRegister(0, EVT::i32) };
+      return CurDAG->SelectNodeTo(N, Opc, EVT::i32, Ops, 5);
     }
   }
   case ARMISD::DYN_ALLOC:
@@ -1036,14 +1036,14 @@
           break;
         SDValue V = Op.getOperand(0);
         ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
-        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
-        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
+        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, EVT::i32);
+        SDValue Reg0 = CurDAG->getRegister(0, EVT::i32);
         if (Subtarget->isThumb()) {
           SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
-          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
+          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, EVT::i32, Ops, 6);
         } else {
           SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
-          return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
+          return CurDAG->SelectNodeTo(N, ARM::ADDrs, EVT::i32, Ops, 7);
         }
       }
       if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
@@ -1052,35 +1052,35 @@
           break;
         SDValue V = Op.getOperand(0);
         ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
-        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
-        SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
+        SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, EVT::i32);
+        SDValue Reg0 = CurDAG->getRegister(0, EVT::i32);
         if (Subtarget->isThumb()) {
           SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0 };
-          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 5);
+          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, EVT::i32, Ops, 5);
         } else {
           SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
-          return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
+          return CurDAG->SelectNodeTo(N, ARM::RSBrs, EVT::i32, Ops, 7);
         }
       }
     }
     break;
   case ARMISD::FMRRD:
-    return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32,
+    return CurDAG->getTargetNode(ARM::FMRRD, dl, EVT::i32, EVT::i32,
                                  Op.getOperand(0), getAL(CurDAG),
-                                 CurDAG->getRegister(0, MVT::i32));
+                                 CurDAG->getRegister(0, EVT::i32));
   case ISD::UMUL_LOHI: {
     if (Subtarget->isThumb1Only())
       break;
     if (Subtarget->isThumb()) {
       SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
-                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
-                        CurDAG->getRegister(0, MVT::i32) };
-      return CurDAG->getTargetNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
+                        getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
+                        CurDAG->getRegister(0, EVT::i32) };
+      return CurDAG->getTargetNode(ARM::t2UMULL, dl, EVT::i32, EVT::i32, Ops,4);
     } else {
       SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
-                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
-                        CurDAG->getRegister(0, MVT::i32) };
-      return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
+                        getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
+                        CurDAG->getRegister(0, EVT::i32) };
+      return CurDAG->getTargetNode(ARM::UMULL, dl, EVT::i32, EVT::i32, Ops, 5);
     }
   }
   case ISD::SMUL_LOHI: {
@@ -1088,13 +1088,13 @@
       break;
     if (Subtarget->isThumb()) {
       SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
-                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
-      return CurDAG->getTargetNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
+                        getAL(CurDAG), CurDAG->getRegister(0, EVT::i32) };
+      return CurDAG->getTargetNode(ARM::t2SMULL, dl, EVT::i32, EVT::i32, Ops,4);
     } else {
       SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
-                        getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
-                        CurDAG->getRegister(0, MVT::i32) };
-      return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
+                        getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
+                        CurDAG->getRegister(0, EVT::i32) };
+      return CurDAG->getTargetNode(ARM::SMULL, dl, EVT::i32, EVT::i32, Ops, 5);
     }
   }
   case ISD::LOAD: {
@@ -1134,10 +1134,10 @@
 
     SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                cast<ConstantSDNode>(N2)->getZExtValue()),
-                               MVT::i32);
+                               EVT::i32);
     SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
-    SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, MVT::Other, 
-                                            MVT::Flag, Ops, 5);
+    SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, EVT::Other, 
+                                            EVT::Flag, Ops, 5);
     Chain = SDValue(ResNode, 0);
     if (Op.getNode()->getNumValues() == 2) {
       InFlag = SDValue(ResNode, 1);
@@ -1147,7 +1147,7 @@
     return NULL;
   }
   case ARMISD::CMOV: {
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     SDValue N0 = Op.getOperand(0);
     SDValue N1 = Op.getOperand(1);
     SDValue N2 = Op.getOperand(2);
@@ -1156,7 +1156,7 @@
     assert(N2.getOpcode() == ISD::Constant);
     assert(N3.getOpcode() == ISD::Register);
 
-    if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
+    if (!Subtarget->isThumb1Only() && VT == EVT::i32) {
       // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
       // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
       // Pattern complexity = 18  cost = 1  size = 0
@@ -1178,21 +1178,21 @@
             break;
           }
           SDValue SOShImm =
-            CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
+            CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), EVT::i32);
           SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                    cast<ConstantSDNode>(N2)->getZExtValue()),
-                                   MVT::i32);
+                                   EVT::i32);
           SDValue Ops[] = { N0, CPTmp0, SOShImm, Tmp2, N3, InFlag };
-          return CurDAG->SelectNodeTo(Op.getNode(), Opc, MVT::i32,Ops, 6);
+          return CurDAG->SelectNodeTo(Op.getNode(), Opc, EVT::i32,Ops, 6);
         }
       } else {
         if (SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) {
           SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                    cast<ConstantSDNode>(N2)->getZExtValue()),
-                                   MVT::i32);
+                                   EVT::i32);
           SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag };
           return CurDAG->SelectNodeTo(Op.getNode(),
-                                      ARM::MOVCCs, MVT::i32, Ops, 7);
+                                      ARM::MOVCCs, EVT::i32, Ops, 7);
         }
       }
 
@@ -1207,25 +1207,25 @@
           if (Predicate_t2_so_imm(N3.getNode())) {
             SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
                                      cast<ConstantSDNode>(N1)->getZExtValue()),
-                                     MVT::i32);
+                                     EVT::i32);
             SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                      cast<ConstantSDNode>(N2)->getZExtValue()),
-                                     MVT::i32);
+                                     EVT::i32);
             SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
             return CurDAG->SelectNodeTo(Op.getNode(),
-                                        ARM::t2MOVCCi, MVT::i32, Ops, 5);
+                                        ARM::t2MOVCCi, EVT::i32, Ops, 5);
           }
         } else {
           if (Predicate_so_imm(N3.getNode())) {
             SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
                                      cast<ConstantSDNode>(N1)->getZExtValue()),
-                                     MVT::i32);
+                                     EVT::i32);
             SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                      cast<ConstantSDNode>(N2)->getZExtValue()),
-                                     MVT::i32);
+                                     EVT::i32);
             SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
             return CurDAG->SelectNodeTo(Op.getNode(),
-                                        ARM::MOVCCi, MVT::i32, Ops, 5);
+                                        ARM::MOVCCi, EVT::i32, Ops, 5);
           }
         }
       }
@@ -1242,28 +1242,28 @@
     // Also FCPYScc and FCPYDcc.
     SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                cast<ConstantSDNode>(N2)->getZExtValue()),
-                               MVT::i32);
+                               EVT::i32);
     SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
     unsigned Opc = 0;
     switch (VT.getSimpleVT()) {
     default: assert(false && "Illegal conditional move type!");
       break;
-    case MVT::i32:
+    case EVT::i32:
       Opc = Subtarget->isThumb()
         ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr)
         : ARM::MOVCCr;
       break;
-    case MVT::f32:
+    case EVT::f32:
       Opc = ARM::FCPYScc;
       break;
-    case MVT::f64:
+    case EVT::f64:
       Opc = ARM::FCPYDcc;
       break; 
     }
     return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
   }
   case ARMISD::CNEG: {
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     SDValue N0 = Op.getOperand(0);
     SDValue N1 = Op.getOperand(1);
     SDValue N2 = Op.getOperand(2);
@@ -1274,16 +1274,16 @@
 
     SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                                cast<ConstantSDNode>(N2)->getZExtValue()),
-                               MVT::i32);
+                               EVT::i32);
     SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
     unsigned Opc = 0;
     switch (VT.getSimpleVT()) {
     default: assert(false && "Illegal conditional move type!");
       break;
-    case MVT::f32:
+    case EVT::f32:
       Opc = ARM::FNEGScc;
       break;
-    case MVT::f64:
+    case EVT::f64:
       Opc = ARM::FNEGDcc;
       break;
     }
@@ -1328,11 +1328,11 @@
     SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
     SDValue Ops[] = { Tmp1, Tmp2, Chain };
     return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
-                                 MVT::Other, Ops, 3);
+                                 EVT::Other, Ops, 3);
   }
 
   case ISD::VECTOR_SHUFFLE: {
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
 
     // Match 128-bit splat to VDUPLANEQ.  (This could be done with a Pat in
     // ARMInstrNEON.td but it is awkward because the shuffle mask needs to be
@@ -1345,22 +1345,22 @@
         Op.getOperand(1).getOpcode() == ISD::UNDEF) {
       unsigned LaneVal = SVOp->getSplatIndex();
 
-      MVT HalfVT;
+      EVT HalfVT;
       unsigned Opc = 0;
       switch (VT.getVectorElementType().getSimpleVT()) {
       default: llvm_unreachable("unhandled VDUP splat type");
-      case MVT::i8:  Opc = ARM::VDUPLN8q;  HalfVT = MVT::v8i8; break;
-      case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
-      case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
-      case MVT::f32: Opc = ARM::VDUPLNfq;  HalfVT = MVT::v2f32; break;
+      case EVT::i8:  Opc = ARM::VDUPLN8q;  HalfVT = EVT::v8i8; break;
+      case EVT::i16: Opc = ARM::VDUPLN16q; HalfVT = EVT::v4i16; break;
+      case EVT::i32: Opc = ARM::VDUPLN32q; HalfVT = EVT::v2i32; break;
+      case EVT::f32: Opc = ARM::VDUPLNfq;  HalfVT = EVT::v2f32; break;
       }
 
       // The source operand needs to be changed to a subreg of the original
       // 128-bit operand, and the lane number needs to be adjusted accordingly.
       unsigned NumElts = VT.getVectorNumElements() / 2;
       unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
-      SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
-      SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
+      SDValue SR = CurDAG->getTargetConstant(SRVal, EVT::i32);
+      SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, EVT::i32);
       SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
                                              dl, HalfVT, N->getOperand(0), SR);
       return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
@@ -1374,16 +1374,16 @@
     if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
         return NULL;
     unsigned Opc = 0;
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     switch (VT.getSimpleVT()) {
     default: llvm_unreachable("unhandled VLD2D type");
-    case MVT::v8i8:  Opc = ARM::VLD2d8; break;
-    case MVT::v4i16: Opc = ARM::VLD2d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VLD2d32; break;
+    case EVT::v8i8:  Opc = ARM::VLD2d8; break;
+    case EVT::v4i16: Opc = ARM::VLD2d16; break;
+    case EVT::v2f32:
+    case EVT::v2i32: Opc = ARM::VLD2d32; break;
     }
     const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc };
-    return CurDAG->getTargetNode(Opc, dl, VT, VT, MVT::Other, Ops, 3);
+    return CurDAG->getTargetNode(Opc, dl, VT, VT, EVT::Other, Ops, 3);
   }
 
   case ARMISD::VLD3D: {
@@ -1391,16 +1391,16 @@
     if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
         return NULL;
     unsigned Opc = 0;
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     switch (VT.getSimpleVT()) {
     default: llvm_unreachable("unhandled VLD3D type");
-    case MVT::v8i8:  Opc = ARM::VLD3d8; break;
-    case MVT::v4i16: Opc = ARM::VLD3d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VLD3d32; break;
+    case EVT::v8i8:  Opc = ARM::VLD3d8; break;
+    case EVT::v4i16: Opc = ARM::VLD3d16; break;
+    case EVT::v2f32:
+    case EVT::v2i32: Opc = ARM::VLD3d32; break;
     }
     const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc };
-    return CurDAG->getTargetNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 3);
+    return CurDAG->getTargetNode(Opc, dl, VT, VT, VT, EVT::Other, Ops, 3);
   }
 
   case ARMISD::VLD4D: {
@@ -1408,17 +1408,17 @@
     if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
         return NULL;
     unsigned Opc = 0;
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     switch (VT.getSimpleVT()) {
     default: llvm_unreachable("unhandled VLD4D type");
-    case MVT::v8i8:  Opc = ARM::VLD4d8; break;
-    case MVT::v4i16: Opc = ARM::VLD4d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VLD4d32; break;
+    case EVT::v8i8:  Opc = ARM::VLD4d8; break;
+    case EVT::v4i16: Opc = ARM::VLD4d16; break;
+    case EVT::v2f32:
+    case EVT::v2i32: Opc = ARM::VLD4d32; break;
     }
     const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc };
-    std::vector<MVT> ResTys(4, VT);
-    ResTys.push_back(MVT::Other);
+    std::vector<EVT> ResTys(4, VT);
+    ResTys.push_back(EVT::Other);
     return CurDAG->getTargetNode(Opc, dl, ResTys, Ops, 3);
   }
 
@@ -1429,14 +1429,14 @@
     unsigned Opc = 0;
     switch (N->getOperand(2).getValueType().getSimpleVT()) {
     default: llvm_unreachable("unhandled VST2D type");
-    case MVT::v8i8:  Opc = ARM::VST2d8; break;
-    case MVT::v4i16: Opc = ARM::VST2d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST2d32; break;
+    case EVT::v8i8:  Opc = ARM::VST2d8; break;
+    case EVT::v4i16: Opc = ARM::VST2d16; break;
+    case EVT::v2f32:
+    case EVT::v2i32: Opc = ARM::VST2d32; break;
     }
     const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
                             N->getOperand(2), N->getOperand(3) };
-    return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 5);
+    return CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 5);
   }
 
   case ARMISD::VST3D: {
@@ -1446,15 +1446,15 @@
     unsigned Opc = 0;
     switch (N->getOperand(2).getValueType().getSimpleVT()) {
     default: llvm_unreachable("unhandled VST3D type");
-    case MVT::v8i8:  Opc = ARM::VST3d8; break;
-    case MVT::v4i16: Opc = ARM::VST3d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST3d32; break;
+    case EVT::v8i8:  Opc = ARM::VST3d8; break;
+    case EVT::v4i16: Opc = ARM::VST3d16; break;
+    case EVT::v2f32:
+    case EVT::v2i32: Opc = ARM::VST3d32; break;
     }
     const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
                             N->getOperand(2), N->getOperand(3),
                             N->getOperand(4) };
-    return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 6);
+    return CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 6);
   }
 
   case ARMISD::VST4D: {
@@ -1464,20 +1464,20 @@
     unsigned Opc = 0;
     switch (N->getOperand(2).getValueType().getSimpleVT()) {
     default: llvm_unreachable("unhandled VST4D type");
-    case MVT::v8i8:  Opc = ARM::VST4d8; break;
-    case MVT::v4i16: Opc = ARM::VST4d16; break;
-    case MVT::v2f32:
-    case MVT::v2i32: Opc = ARM::VST4d32; break;
+    case EVT::v8i8:  Opc = ARM::VST4d8; break;
+    case EVT::v4i16: Opc = ARM::VST4d16; break;
+    case EVT::v2f32:
+    case EVT::v2i32: Opc = ARM::VST4d32; break;
     }
     const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
                             N->getOperand(2), N->getOperand(3),
                             N->getOperand(4), N->getOperand(5) };
-    return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 7);
+    return CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 7);
   }
 
   case ISD::INTRINSIC_WO_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
-    MVT VT = N->getValueType(0);
+    EVT VT = N->getValueType(0);
     unsigned Opc = 0;
 
     // Match intrinsics that return multiple values.
@@ -1488,14 +1488,14 @@
     case Intrinsic::arm_neon_vtrnf:
       switch (VT.getSimpleVT()) {
       default: return NULL;
-      case MVT::v8i8:  Opc = ARM::VTRNd8; break;
-      case MVT::v4i16: Opc = ARM::VTRNd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VTRNd32; break;
-      case MVT::v16i8: Opc = ARM::VTRNq8; break;
-      case MVT::v8i16: Opc = ARM::VTRNq16; break;
-      case MVT::v4f32:
-      case MVT::v4i32: Opc = ARM::VTRNq32; break;
+      case EVT::v8i8:  Opc = ARM::VTRNd8; break;
+      case EVT::v4i16: Opc = ARM::VTRNd16; break;
+      case EVT::v2f32:
+      case EVT::v2i32: Opc = ARM::VTRNd32; break;
+      case EVT::v16i8: Opc = ARM::VTRNq8; break;
+      case EVT::v8i16: Opc = ARM::VTRNq16; break;
+      case EVT::v4f32:
+      case EVT::v4i32: Opc = ARM::VTRNq32; break;
       }
       return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1),
                                    N->getOperand(2));
@@ -1504,14 +1504,14 @@
     case Intrinsic::arm_neon_vuzpf:
       switch (VT.getSimpleVT()) {
       default: return NULL;
-      case MVT::v8i8:  Opc = ARM::VUZPd8; break;
-      case MVT::v4i16: Opc = ARM::VUZPd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VUZPd32; break;
-      case MVT::v16i8: Opc = ARM::VUZPq8; break;
-      case MVT::v8i16: Opc = ARM::VUZPq16; break;
-      case MVT::v4f32:
-      case MVT::v4i32: Opc = ARM::VUZPq32; break;
+      case EVT::v8i8:  Opc = ARM::VUZPd8; break;
+      case EVT::v4i16: Opc = ARM::VUZPd16; break;
+      case EVT::v2f32:
+      case EVT::v2i32: Opc = ARM::VUZPd32; break;
+      case EVT::v16i8: Opc = ARM::VUZPq8; break;
+      case EVT::v8i16: Opc = ARM::VUZPq16; break;
+      case EVT::v4f32:
+      case EVT::v4i32: Opc = ARM::VUZPq32; break;
       }
       return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1),
                                    N->getOperand(2));
@@ -1520,14 +1520,14 @@
     case Intrinsic::arm_neon_vzipf:
       switch (VT.getSimpleVT()) {
       default: return NULL;
-      case MVT::v8i8:  Opc = ARM::VZIPd8; break;
-      case MVT::v4i16: Opc = ARM::VZIPd16; break;
-      case MVT::v2f32:
-      case MVT::v2i32: Opc = ARM::VZIPd32; break;
-      case MVT::v16i8: Opc = ARM::VZIPq8; break;
-      case MVT::v8i16: Opc = ARM::VZIPq16; break;
-      case MVT::v4f32:
-      case MVT::v4i32: Opc = ARM::VZIPq32; break;
+      case EVT::v8i8:  Opc = ARM::VZIPd8; break;
+      case EVT::v4i16: Opc = ARM::VZIPd16; break;
+      case EVT::v2f32:
+      case EVT::v2i32: Opc = ARM::VZIPd32; break;
+      case EVT::v16i8: Opc = ARM::VZIPq8; break;
+      case EVT::v8i16: Opc = ARM::VZIPq16; break;
+      case EVT::v4f32:
+      case EVT::v4i32: Opc = ARM::VZIPq32; break;
       }
       return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1),
                                    N->getOperand(2));
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8678e03..aedddaa 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -41,25 +41,25 @@
 #include "llvm/Support/MathExtras.h"
 using namespace llvm;
 
-static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State);
-static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State);
-static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State);
-static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                        CCValAssign::LocInfo &LocInfo,
                                        ISD::ArgFlagsTy &ArgFlags,
                                        CCState &State);
 
-void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
-                                       MVT PromotedBitwiseVT) {
+void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
+                                       EVT PromotedBitwiseVT) {
   if (VT != PromotedLdStVT) {
     setOperationAction(ISD::LOAD, VT.getSimpleVT(), Promote);
     AddPromotedToType (ISD::LOAD, VT.getSimpleVT(),
@@ -70,10 +70,10 @@
                        PromotedLdStVT.getSimpleVT());
   }
 
-  MVT ElemTy = VT.getVectorElementType();
-  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
+  EVT ElemTy = VT.getVectorElementType();
+  if (ElemTy != EVT::i64 && ElemTy != EVT::f64)
     setOperationAction(ISD::VSETCC, VT.getSimpleVT(), Custom);
-  if (ElemTy == MVT::i8 || ElemTy == MVT::i16)
+  if (ElemTy == EVT::i8 || ElemTy == EVT::i16)
     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
   setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
   setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
@@ -99,14 +99,14 @@
   }
 }
 
-void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
+void ARMTargetLowering::addDRTypeForNEON(EVT VT) {
   addRegisterClass(VT, ARM::DPRRegisterClass);
-  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
+  addTypeForNEON(VT, EVT::f64, EVT::v2i32);
 }
 
-void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
+void ARMTargetLowering::addQRTypeForNEON(EVT VT) {
   addRegisterClass(VT, ARM::QPRRegisterClass);
-  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
+  addTypeForNEON(VT, EVT::v2f64, EVT::v4i32);
 }
 
 static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
@@ -202,29 +202,29 @@
   setLibcallName(RTLIB::SRA_I128, 0);
 
   if (Subtarget->isThumb1Only())
-    addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
+    addRegisterClass(EVT::i32, ARM::tGPRRegisterClass);
   else
-    addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
+    addRegisterClass(EVT::i32, ARM::GPRRegisterClass);
   if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
-    addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
-    addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
+    addRegisterClass(EVT::f32, ARM::SPRRegisterClass);
+    addRegisterClass(EVT::f64, ARM::DPRRegisterClass);
 
-    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+    setTruncStoreAction(EVT::f64, EVT::f32, Expand);
   }
 
   if (Subtarget->hasNEON()) {
-    addDRTypeForNEON(MVT::v2f32);
-    addDRTypeForNEON(MVT::v8i8);
-    addDRTypeForNEON(MVT::v4i16);
-    addDRTypeForNEON(MVT::v2i32);
-    addDRTypeForNEON(MVT::v1i64);
+    addDRTypeForNEON(EVT::v2f32);
+    addDRTypeForNEON(EVT::v8i8);
+    addDRTypeForNEON(EVT::v4i16);
+    addDRTypeForNEON(EVT::v2i32);
+    addDRTypeForNEON(EVT::v1i64);
 
-    addQRTypeForNEON(MVT::v4f32);
-    addQRTypeForNEON(MVT::v2f64);
-    addQRTypeForNEON(MVT::v16i8);
-    addQRTypeForNEON(MVT::v8i16);
-    addQRTypeForNEON(MVT::v4i32);
-    addQRTypeForNEON(MVT::v2i64);
+    addQRTypeForNEON(EVT::v4f32);
+    addQRTypeForNEON(EVT::v2f64);
+    addQRTypeForNEON(EVT::v16i8);
+    addQRTypeForNEON(EVT::v8i16);
+    addQRTypeForNEON(EVT::v4i32);
+    addQRTypeForNEON(EVT::v2i64);
 
     setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
     setTargetDAGCombine(ISD::SHL);
@@ -238,137 +238,137 @@
   computeRegisterProperties();
 
   // ARM does not have f32 extending load.
-  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
 
   // ARM does not have i1 sign extending load.
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
 
   // ARM supports all 4 flavors of integer indexed load / store.
   if (!Subtarget->isThumb1Only()) {
     for (unsigned im = (unsigned)ISD::PRE_INC;
          im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
-      setIndexedLoadAction(im,  MVT::i1,  Legal);
-      setIndexedLoadAction(im,  MVT::i8,  Legal);
-      setIndexedLoadAction(im,  MVT::i16, Legal);
-      setIndexedLoadAction(im,  MVT::i32, Legal);
-      setIndexedStoreAction(im, MVT::i1,  Legal);
-      setIndexedStoreAction(im, MVT::i8,  Legal);
-      setIndexedStoreAction(im, MVT::i16, Legal);
-      setIndexedStoreAction(im, MVT::i32, Legal);
+      setIndexedLoadAction(im,  EVT::i1,  Legal);
+      setIndexedLoadAction(im,  EVT::i8,  Legal);
+      setIndexedLoadAction(im,  EVT::i16, Legal);
+      setIndexedLoadAction(im,  EVT::i32, Legal);
+      setIndexedStoreAction(im, EVT::i1,  Legal);
+      setIndexedStoreAction(im, EVT::i8,  Legal);
+      setIndexedStoreAction(im, EVT::i16, Legal);
+      setIndexedStoreAction(im, EVT::i32, Legal);
     }
   }
 
   // i64 operation support.
   if (Subtarget->isThumb1Only()) {
-    setOperationAction(ISD::MUL,     MVT::i64, Expand);
-    setOperationAction(ISD::MULHU,   MVT::i32, Expand);
-    setOperationAction(ISD::MULHS,   MVT::i32, Expand);
-    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
-    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+    setOperationAction(ISD::MUL,     EVT::i64, Expand);
+    setOperationAction(ISD::MULHU,   EVT::i32, Expand);
+    setOperationAction(ISD::MULHS,   EVT::i32, Expand);
+    setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
+    setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
   } else {
-    setOperationAction(ISD::MUL,     MVT::i64, Expand);
-    setOperationAction(ISD::MULHU,   MVT::i32, Expand);
+    setOperationAction(ISD::MUL,     EVT::i64, Expand);
+    setOperationAction(ISD::MULHU,   EVT::i32, Expand);
     if (!Subtarget->hasV6Ops())
-      setOperationAction(ISD::MULHS, MVT::i32, Expand);
+      setOperationAction(ISD::MULHS, EVT::i32, Expand);
   }
-  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRL,       MVT::i64, Custom);
-  setOperationAction(ISD::SRA,       MVT::i64, Custom);
+  setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRL,       EVT::i64, Custom);
+  setOperationAction(ISD::SRA,       EVT::i64, Custom);
 
   // ARM does not have ROTL.
-  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
-  setOperationAction(ISD::CTTZ,  MVT::i32, Expand);
-  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+  setOperationAction(ISD::ROTL,  EVT::i32, Expand);
+  setOperationAction(ISD::CTTZ,  EVT::i32, Expand);
+  setOperationAction(ISD::CTPOP, EVT::i32, Expand);
   if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
-    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+    setOperationAction(ISD::CTLZ, EVT::i32, Expand);
 
   // Only ARMv6 has BSWAP.
   if (!Subtarget->hasV6Ops())
-    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+    setOperationAction(ISD::BSWAP, EVT::i32, Expand);
 
   // These are expanded into libcalls.
-  setOperationAction(ISD::SDIV,  MVT::i32, Expand);
-  setOperationAction(ISD::UDIV,  MVT::i32, Expand);
-  setOperationAction(ISD::SREM,  MVT::i32, Expand);
-  setOperationAction(ISD::UREM,  MVT::i32, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+  setOperationAction(ISD::SDIV,  EVT::i32, Expand);
+  setOperationAction(ISD::UDIV,  EVT::i32, Expand);
+  setOperationAction(ISD::SREM,  EVT::i32, Expand);
+  setOperationAction(ISD::UREM,  EVT::i32, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i32, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i32, Expand);
 
   // Support label based line numbers.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
 
-  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
-  setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
-  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i32,   Custom);
+  setOperationAction(ISD::ConstantPool,  EVT::i32,   Custom);
+  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, EVT::i32, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
 
   // Use the default implementation.
-  setOperationAction(ISD::VASTART,            MVT::Other, Custom);
-  setOperationAction(ISD::VAARG,              MVT::Other, Expand);
-  setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
-  setOperationAction(ISD::VAEND,              MVT::Other, Expand);
-  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
+  setOperationAction(ISD::VASTART,            EVT::Other, Custom);
+  setOperationAction(ISD::VAARG,              EVT::Other, Expand);
+  setOperationAction(ISD::VACOPY,             EVT::Other, Expand);
+  setOperationAction(ISD::VAEND,              EVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE,          EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE,       EVT::Other, Expand);
   if (Subtarget->isThumb())
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Custom);
   else
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
-  setOperationAction(ISD::MEMBARRIER,         MVT::Other, Expand);
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Expand);
+  setOperationAction(ISD::MEMBARRIER,         EVT::Other, Expand);
 
   if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i16, Expand);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i8,  Expand);
   }
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
   if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
     // Turn f64->i64 into FMRRD, i64 -> f64 to FMDRR iff target supports vfp2.
-    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
+    setOperationAction(ISD::BIT_CONVERT, EVT::i64, Custom);
 
   // We want to custom lower some of our intrinsics.
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, EVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_W_CHAIN, EVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_VOID, EVT::Other, Custom);
 
-  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
-  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
-  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
-  setOperationAction(ISD::SELECT,    MVT::i32, Expand);
-  setOperationAction(ISD::SELECT,    MVT::f32, Expand);
-  setOperationAction(ISD::SELECT,    MVT::f64, Expand);
-  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+  setOperationAction(ISD::SETCC,     EVT::i32, Expand);
+  setOperationAction(ISD::SETCC,     EVT::f32, Expand);
+  setOperationAction(ISD::SETCC,     EVT::f64, Expand);
+  setOperationAction(ISD::SELECT,    EVT::i32, Expand);
+  setOperationAction(ISD::SELECT,    EVT::f32, Expand);
+  setOperationAction(ISD::SELECT,    EVT::f64, Expand);
+  setOperationAction(ISD::SELECT_CC, EVT::i32, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::f32, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::f64, Custom);
 
-  setOperationAction(ISD::BRCOND,    MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
-  setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
-  setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
-  setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
+  setOperationAction(ISD::BRCOND,    EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,     EVT::i32,   Custom);
+  setOperationAction(ISD::BR_CC,     EVT::f32,   Custom);
+  setOperationAction(ISD::BR_CC,     EVT::f64,   Custom);
+  setOperationAction(ISD::BR_JT,     EVT::Other, Custom);
 
   // We don't support sin/cos/fmod/copysign/pow
-  setOperationAction(ISD::FSIN,      MVT::f64, Expand);
-  setOperationAction(ISD::FSIN,      MVT::f32, Expand);
-  setOperationAction(ISD::FCOS,      MVT::f32, Expand);
-  setOperationAction(ISD::FCOS,      MVT::f64, Expand);
-  setOperationAction(ISD::FREM,      MVT::f64, Expand);
-  setOperationAction(ISD::FREM,      MVT::f32, Expand);
+  setOperationAction(ISD::FSIN,      EVT::f64, Expand);
+  setOperationAction(ISD::FSIN,      EVT::f32, Expand);
+  setOperationAction(ISD::FCOS,      EVT::f32, Expand);
+  setOperationAction(ISD::FCOS,      EVT::f64, Expand);
+  setOperationAction(ISD::FREM,      EVT::f64, Expand);
+  setOperationAction(ISD::FREM,      EVT::f32, Expand);
   if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
-    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f64, Custom);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f32, Custom);
   }
-  setOperationAction(ISD::FPOW,      MVT::f64, Expand);
-  setOperationAction(ISD::FPOW,      MVT::f32, Expand);
+  setOperationAction(ISD::FPOW,      EVT::f64, Expand);
+  setOperationAction(ISD::FPOW,      EVT::f32, Expand);
 
   // int <-> fp are custom expanded into bit_convert + ARMISD ops.
   if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
-    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
-    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
-    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+    setOperationAction(ISD::SINT_TO_FP, EVT::i32, Custom);
+    setOperationAction(ISD::UINT_TO_FP, EVT::i32, Custom);
+    setOperationAction(ISD::FP_TO_UINT, EVT::i32, Custom);
+    setOperationAction(ISD::FP_TO_SINT, EVT::i32, Custom);
   }
 
   // We have target-specific dag combine patterns for the following nodes:
@@ -549,7 +549,7 @@
 #include "ARMGenCallingConv.inc"
 
 // APCS f64 is in register pairs, possibly split to stack
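 // The helper below consumes registers from RegList (R0-R3) for each 32-bit
 // half; CC_ARM_APCS_Custom_f64 calls it once for an f64 and twice for the
 // two halves of a v2f64.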
-static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool f64AssignAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                           CCValAssign::LocInfo &LocInfo,
                           CCState &State, bool CanFail) {
   static const unsigned RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
@@ -579,20 +579,20 @@
   return true;
 }
 
-static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State) {
   if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
     return false;
-  if (LocVT == MVT::v2f64 &&
+  if (LocVT == EVT::v2f64 &&
       !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
     return false;
   return true;  // we handled it
 }
 
 // AAPCS f64 is in aligned register pairs
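 // (R0:R1 or R2:R3, matching HiRegList/LoRegList below)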
-static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool f64AssignAAPCS(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                            CCValAssign::LocInfo &LocInfo,
                            CCState &State, bool CanFail) {
   static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
@@ -622,19 +622,19 @@
   return true;
 }
 
-static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State) {
   if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
     return false;
-  if (LocVT == MVT::v2f64 &&
+  if (LocVT == EVT::v2f64 &&
       !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
     return false;
   return true;  // we handled it
 }
 
-static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool f64RetAssign(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                          CCValAssign::LocInfo &LocInfo, CCState &State) {
   static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
   static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
@@ -654,18 +654,18 @@
   return true;
 }
 
-static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
   if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
     return false;
-  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
+  if (LocVT == EVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
     return false;
   return true;  // we handled it
 }
 
-static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                        CCValAssign::LocInfo &LocInfo,
                                        ISD::ArgFlagsTy &ArgFlags,
                                        CCState &State) {
@@ -725,33 +725,33 @@
     SDValue Val;
     if (VA.needsCustom()) {
       // Handle f64 or half of a v2f64.
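       // The two i32 halves arrive in GPRs; FMDRR glues them back into an f64,
       // and for v2f64 each rebuilt f64 is inserted into the vector with
       // INSERT_VECTOR_ELT.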
-      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
+      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), EVT::i32,
                                       InFlag);
       Chain = Lo.getValue(1);
       InFlag = Lo.getValue(2);
       VA = RVLocs[++i]; // skip ahead to next loc
-      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
+      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), EVT::i32,
                                       InFlag);
       Chain = Hi.getValue(1);
       InFlag = Hi.getValue(2);
-      Val = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
+      Val = DAG.getNode(ARMISD::FMDRR, dl, EVT::f64, Lo, Hi);
 
-      if (VA.getLocVT() == MVT::v2f64) {
-        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
-        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
-                          DAG.getConstant(0, MVT::i32));
+      if (VA.getLocVT() == EVT::v2f64) {
+        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, EVT::v2f64);
+        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v2f64, Vec, Val,
+                          DAG.getConstant(0, EVT::i32));
 
         VA = RVLocs[++i]; // skip ahead to next loc
-        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
+        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), EVT::i32, InFlag);
         Chain = Lo.getValue(1);
         InFlag = Lo.getValue(2);
         VA = RVLocs[++i]; // skip ahead to next loc
-        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
+        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), EVT::i32, InFlag);
         Chain = Hi.getValue(1);
         InFlag = Hi.getValue(2);
-        Val = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
-        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
-                          DAG.getConstant(1, MVT::i32));
+        Val = DAG.getNode(ARMISD::FMDRR, dl, EVT::f64, Lo, Hi);
+        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v2f64, Vec, Val,
+                          DAG.getConstant(1, EVT::i32));
       }
     } else {
       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
@@ -784,7 +784,7 @@
 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                           DebugLoc dl) {
-  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), EVT::i32);
   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                        /*AlwaysInline=*/false, NULL, 0, NULL, 0);
 }
@@ -815,7 +815,7 @@
                                          ISD::ArgFlagsTy Flags) {
 
   SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
-                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
+                              DAG.getVTList(EVT::i32, EVT::i32), Arg);
   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
 
   if (NextVA.isRegLoc())
@@ -858,7 +858,7 @@
   // These operations are automatically eliminated by the prolog/epilog pass
   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
 
-  SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);
+  SDValue StackPtr = DAG.getRegister(ARM::SP, EVT::i32);
 
   RegsToPassVector RegsToPass;
   SmallVector<SDValue, 8> MemOpChains;
@@ -892,11 +892,11 @@
 
     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
     if (VA.needsCustom()) {
-      if (VA.getLocVT() == MVT::v2f64) {
-        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
-                                  DAG.getConstant(0, MVT::i32));
-        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
-                                  DAG.getConstant(1, MVT::i32));
+      if (VA.getLocVT() == EVT::v2f64) {
+        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64, Arg,
+                                  DAG.getConstant(0, EVT::i32));
+        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64, Arg,
+                                  DAG.getConstant(1, EVT::i32));
 
         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
@@ -930,7 +930,7 @@
   }
 
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token chain
@@ -962,10 +962,10 @@
       ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
                                                            ARMCP::CPStub, 4);
       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
-      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, CPAddr);
       Callee = DAG.getLoad(getPointerTy(), dl,
                            DAG.getEntryNode(), CPAddr, NULL, 0);
-      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
+      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, EVT::i32);
       Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                            getPointerTy(), Callee, PICLabel);
    } else
@@ -981,10 +981,10 @@
       ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex,
                                                            ARMCP::CPStub, 4);
       SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
-      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, CPAddr);
       Callee = DAG.getLoad(getPointerTy(), dl,
                            DAG.getEntryNode(), CPAddr, NULL, 0);
-      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
+      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, EVT::i32);
       Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
                            getPointerTy(), Callee, PICLabel);
     } else
@@ -1005,7 +1005,7 @@
   }
   if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
     // implicit def LR - LR mustn't be allocated as GPR:$dst of CALL_NOLINK
-    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
+    Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(EVT::i32),InFlag);
     InFlag = Chain.getValue(1);
   }
 
@@ -1022,7 +1022,7 @@
   if (InFlag.getNode())
     Ops.push_back(InFlag);
   // Returns a chain and a flag for retval copy to use.
-  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
+  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(EVT::Other, EVT::Flag),
                       &Ops[0], Ops.size());
   InFlag = Chain.getValue(1);
 
@@ -1082,12 +1082,12 @@
     }
 
     if (VA.needsCustom()) {
-      if (VA.getLocVT() == MVT::v2f64) {
+      if (VA.getLocVT() == EVT::v2f64) {
         // Extract the first half and return it in two registers.
-        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
-                                   DAG.getConstant(0, MVT::i32));
+        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64, Arg,
+                                   DAG.getConstant(0, EVT::i32));
         SDValue HalfGPRs = DAG.getNode(ARMISD::FMRRD, dl,
-                                       DAG.getVTList(MVT::i32, MVT::i32), Half);
+                                       DAG.getVTList(EVT::i32, EVT::i32), Half);
 
         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag);
         Flag = Chain.getValue(1);
@@ -1098,13 +1098,13 @@
         VA = RVLocs[++i]; // skip ahead to next loc
 
         // Extract the 2nd half and fall through to handle it as an f64 value.
-        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
-                          DAG.getConstant(1, MVT::i32));
+        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64, Arg,
+                          DAG.getConstant(1, EVT::i32));
       }
       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
       // available.
       SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
-                                  DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
+                                  DAG.getVTList(EVT::i32, EVT::i32), &Arg, 1);
       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
       Flag = Chain.getValue(1);
       VA = RVLocs[++i]; // skip ahead to next loc
@@ -1120,9 +1120,9 @@
 
   SDValue result;
   if (Flag.getNode())
-    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+    result = DAG.getNode(ARMISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
   else // Return Void
-    result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
+    result = DAG.getNode(ARMISD::RET_FLAG, dl, EVT::Other, Chain);
 
   return result;
 }
@@ -1134,7 +1134,7 @@
 // be used to form addressing mode. These wrapped nodes will be selected
 // into MOVi.
 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   // FIXME there is no actual debug info here
   DebugLoc dl = Op.getDebugLoc();
   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
@@ -1145,7 +1145,7 @@
   else
     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                     CP->getAlignment());
-  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
+  return DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, Res);
 }
 
 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
@@ -1153,17 +1153,17 @@
 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                  SelectionDAG &DAG) {
   DebugLoc dl = GA->getDebugLoc();
-  MVT PtrVT = getPointerTy();
+  EVT PtrVT = getPointerTy();
   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
   ARMConstantPoolValue *CPV =
     new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                              PCAdj, "tlsgd", true);
   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
-  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
+  Argument = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, Argument);
   Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, NULL, 0);
   SDValue Chain = Argument.getValue(1);
 
-  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
+  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, EVT::i32);
   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
 
   // call __tls_get_addr.
@@ -1189,7 +1189,7 @@
   DebugLoc dl = GA->getDebugLoc();
   SDValue Offset;
   SDValue Chain = DAG.getEntryNode();
-  MVT PtrVT = getPointerTy();
+  EVT PtrVT = getPointerTy();
   // Get the Thread Pointer
   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
 
@@ -1200,11 +1200,11 @@
       new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue,
                                PCAdj, "gottpoff", true);
     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
-    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
+    Offset = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, Offset);
     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
     Chain = Offset.getValue(1);
 
-    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
+    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, EVT::i32);
     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
 
     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
@@ -1213,7 +1213,7 @@
     ARMConstantPoolValue *CPV =
       new ARMConstantPoolValue(GV, ARMCP::CPValue, "tpoff");
     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
-    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
+    Offset = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, Offset);
     Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, NULL, 0);
   }
 
@@ -1238,7 +1238,7 @@
 
 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                  SelectionDAG &DAG) {
-  MVT PtrVT = getPointerTy();
+  EVT PtrVT = getPointerTy();
   DebugLoc dl = Op.getDebugLoc();
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
@@ -1247,7 +1247,7 @@
     ARMConstantPoolValue *CPV =
       new ARMConstantPoolValue(GV, ARMCP::CPValue, UseGOTOFF ? "GOTOFF":"GOT");
     SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
-    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, CPAddr);
     SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                                  CPAddr, NULL, 0);
     SDValue Chain = Result.getValue(1);
@@ -1258,7 +1258,7 @@
     return Result;
   } else {
     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
-    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, CPAddr);
     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
   }
 }
@@ -1276,7 +1276,7 @@
 
 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                     SelectionDAG &DAG) {
-  MVT PtrVT = getPointerTy();
+  EVT PtrVT = getPointerTy();
   DebugLoc dl = Op.getDebugLoc();
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
   Reloc::Model RelocM = getTargetMachine().getRelocationModel();
@@ -1293,13 +1293,13 @@
                                                          Kind, PCAdj);
     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
   }
-  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, CPAddr);
 
   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
   SDValue Chain = Result.getValue(1);
 
   if (RelocM == Reloc::PIC_) {
-    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
+    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, EVT::i32);
     Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
   }
   if (IsIndirect)
@@ -1312,23 +1312,23 @@
                                                     SelectionDAG &DAG){
   assert(Subtarget->isTargetELF() &&
          "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
-  MVT PtrVT = getPointerTy();
+  EVT PtrVT = getPointerTy();
   DebugLoc dl = Op.getDebugLoc();
   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
   ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_",
                                                        ARMPCLabelIndex,
                                                        ARMCP::CPValue, PCAdj);
   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
-  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
+  CPAddr = DAG.getNode(ARMISD::Wrapper, dl, EVT::i32, CPAddr);
   SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, NULL, 0);
-  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
+  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, EVT::i32);
   return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
 }
 
 static SDValue LowerNeonVLDIntrinsic(SDValue Op, SelectionDAG &DAG,
                                      unsigned Opcode) {
   SDNode *Node = Op.getNode();
-  MVT VT = Node->getValueType(0);
+  EVT VT = Node->getValueType(0);
   DebugLoc dl = Op.getDebugLoc();
 
   if (!VT.is64BitVector())
@@ -1342,7 +1342,7 @@
 static SDValue LowerNeonVSTIntrinsic(SDValue Op, SelectionDAG &DAG,
                                      unsigned Opcode, unsigned NumVecs) {
   SDNode *Node = Op.getNode();
-  MVT VT = Node->getOperand(3).getValueType();
+  EVT VT = Node->getOperand(3).getValueType();
   DebugLoc dl = Op.getDebugLoc();
 
   if (!VT.is64BitVector())
@@ -1353,7 +1353,7 @@
   Ops.push_back(Node->getOperand(2));
   for (unsigned N = 0; N < NumVecs; ++N)
     Ops.push_back(Node->getOperand(N + 3));
-  return DAG.getNode(Opcode, dl, MVT::Other, Ops.data(), Ops.size());
+  return DAG.getNode(Opcode, dl, EVT::Other, Ops.data(), Ops.size());
 }
 
 SDValue
@@ -1389,11 +1389,11 @@
   switch (IntNo) {
   default: return SDValue();    // Don't custom lower most intrinsics.
   case Intrinsic::arm_thread_pointer: {
-    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
   }
   case Intrinsic::eh_sjlj_setjmp:
-    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, MVT::i32, Op.getOperand(1));
+    return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, EVT::i32, Op.getOperand(1));
   }
 }
 
@@ -1402,7 +1402,7 @@
   // vastart just stores the address of the VarArgsFrameIndex slot into the
   // memory location argument.
   DebugLoc dl = Op.getDebugLoc();
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
@@ -1412,7 +1412,7 @@
 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
   SDNode *Node = Op.getNode();
   DebugLoc dl = Node->getDebugLoc();
-  MVT VT = Node->getValueType(0);
+  EVT VT = Node->getValueType(0);
   SDValue Chain = Op.getOperand(0);
   SDValue Size  = Op.getOperand(1);
   SDValue Align = Op.getOperand(2);
@@ -1445,7 +1445,7 @@
       Size = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, VT), Size);
   }
 
-  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
+  SDVTList VTList = DAG.getVTList(VT, EVT::Other);
   SDValue Ops1[] = { Chain, Size, Align };
   SDValue Res = DAG.getNode(ARMISD::DYN_ALLOC, dl, VTList, Ops1, 3);
   Chain = Res.getValue(1);
@@ -1470,7 +1470,7 @@
 
   // Transform the arguments stored in physical registers into virtual ones.
   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
-  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
+  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, EVT::i32);
 
   SDValue ArgValue2;
   if (NextVA.isMemLoc()) {
@@ -1480,13 +1480,13 @@
 
     // Create load node to retrieve arguments from the stack.
     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
-    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
+    ArgValue2 = DAG.getLoad(EVT::i32, dl, Root, FIN, NULL, 0);
   } else {
     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
-    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
+    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, EVT::i32);
   }
 
-  return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, ArgValue, ArgValue2);
+  return DAG.getNode(ARMISD::FMDRR, dl, EVT::f64, ArgValue, ArgValue2);
 }
 
 SDValue
@@ -1517,24 +1517,24 @@
 
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
-      MVT RegVT = VA.getLocVT();
+      EVT RegVT = VA.getLocVT();
 
       SDValue ArgValue;
       if (VA.needsCustom()) {
         // f64 and vector types are split up into multiple registers or
         // combinations of registers and stack slots.
-        RegVT = MVT::i32;
+        RegVT = EVT::i32;
 
-        if (VA.getLocVT() == MVT::v2f64) {
+        if (VA.getLocVT() == EVT::v2f64) {
           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                    Chain, DAG, dl);
           VA = ArgLocs[++i]; // skip ahead to next loc
           SDValue ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                    Chain, DAG, dl);
-          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
-          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
+          ArgValue = DAG.getNode(ISD::UNDEF, dl, EVT::v2f64);
+          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v2f64,
                                  ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
-          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
+          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v2f64,
                                  ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
         } else
           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
@@ -1542,13 +1542,13 @@
       } else {
         TargetRegisterClass *RC;
 
-        if (RegVT == MVT::f32)
+        if (RegVT == EVT::f32)
           RC = ARM::SPRRegisterClass;
-        else if (RegVT == MVT::f64)
+        else if (RegVT == EVT::f64)
           RC = ARM::DPRRegisterClass;
-        else if (RegVT == MVT::v2f64)
+        else if (RegVT == EVT::v2f64)
           RC = ARM::QPRRegisterClass;
-        else if (RegVT == MVT::i32)
+        else if (RegVT == EVT::i32)
           RC = (AFI->isThumb1OnlyFunction() ?
                 ARM::tGPRRegisterClass : ARM::GPRRegisterClass);
         else
@@ -1586,7 +1586,7 @@
 
       // sanity check
       assert(VA.isMemLoc());
-      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
+      assert(VA.getValVT() != EVT::i64 && "i64 should already be lowered");
 
       unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
       int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());
@@ -1629,14 +1629,14 @@
           RC = ARM::GPRRegisterClass;
 
         unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
-        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
         MemOps.push_back(Store);
         FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                           DAG.getConstant(4, getPointerTy()));
       }
       if (!MemOps.empty())
-        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+        Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                             &MemOps[0], MemOps.size());
     } else
       // This will point to the next argument passed via stack.
@@ -1682,28 +1682,28 @@
       case ISD::SETGE:
         if (isLegalCmpImmediate(C-1, isThumb1Only)) {
           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
-          RHS = DAG.getConstant(C-1, MVT::i32);
+          RHS = DAG.getConstant(C-1, EVT::i32);
         }
         break;
       case ISD::SETULT:
       case ISD::SETUGE:
         if (C > 0 && isLegalCmpImmediate(C-1, isThumb1Only)) {
           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
-          RHS = DAG.getConstant(C-1, MVT::i32);
+          RHS = DAG.getConstant(C-1, EVT::i32);
         }
         break;
       case ISD::SETLE:
       case ISD::SETGT:
         if (isLegalCmpImmediate(C+1, isThumb1Only)) {
           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
-          RHS = DAG.getConstant(C+1, MVT::i32);
+          RHS = DAG.getConstant(C+1, EVT::i32);
         }
         break;
       case ISD::SETULE:
       case ISD::SETUGT:
         if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb1Only)) {
           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
-          RHS = DAG.getConstant(C+1, MVT::i32);
+          RHS = DAG.getConstant(C+1, EVT::i32);
         }
         break;
       }
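       // Each rewrite above (e.g. turning x < C into x <= C-1) is guarded by
       // isLegalCmpImmediate, so the adjusted constant is always encodable.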
@@ -1722,8 +1722,8 @@
     CompareType = ARMISD::CMPZ;
     break;
   }
-  ARMCC = DAG.getConstant(CondCode, MVT::i32);
-  return DAG.getNode(CompareType, dl, MVT::Flag, LHS, RHS);
+  ARMCC = DAG.getConstant(CondCode, EVT::i32);
+  return DAG.getNode(CompareType, dl, EVT::Flag, LHS, RHS);
 }
 
 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
@@ -1731,15 +1731,15 @@
                          DebugLoc dl) {
   SDValue Cmp;
   if (!isFloatingPointZero(RHS))
-    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Flag, LHS, RHS);
+    Cmp = DAG.getNode(ARMISD::CMPFP, dl, EVT::Flag, LHS, RHS);
   else
-    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Flag, LHS);
-  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Flag, Cmp);
+    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, EVT::Flag, LHS);
+  return DAG.getNode(ARMISD::FMSTAT, dl, EVT::Flag, Cmp);
 }
 
 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *ST) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   SDValue LHS = Op.getOperand(0);
   SDValue RHS = Op.getOperand(1);
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
@@ -1747,9 +1747,9 @@
   SDValue FalseVal = Op.getOperand(3);
   DebugLoc dl = Op.getDebugLoc();
 
-  if (LHS.getValueType() == MVT::i32) {
+  if (LHS.getValueType() == EVT::i32) {
     SDValue ARMCC;
-    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+    SDValue CCR = DAG.getRegister(ARM::CPSR, EVT::i32);
     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
   }
@@ -1758,13 +1758,13 @@
   if (FPCCToARMCC(CC, CondCode, CondCode2))
     std::swap(TrueVal, FalseVal);
 
-  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
-  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+  SDValue ARMCC = DAG.getConstant(CondCode, EVT::i32);
+  SDValue CCR = DAG.getRegister(ARM::CPSR, EVT::i32);
   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
   SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                                  ARMCC, CCR, Cmp);
   if (CondCode2 != ARMCC::AL) {
-    SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
+    SDValue ARMCC2 = DAG.getConstant(CondCode2, EVT::i32);
     // FIXME: Needs another CMP because flag can have but one use.
     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
     Result = DAG.getNode(ARMISD::CMOV, dl, VT,
@@ -1782,28 +1782,28 @@
   SDValue   Dest = Op.getOperand(4);
   DebugLoc dl = Op.getDebugLoc();
 
-  if (LHS.getValueType() == MVT::i32) {
+  if (LHS.getValueType() == EVT::i32) {
     SDValue ARMCC;
-    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+    SDValue CCR = DAG.getRegister(ARM::CPSR, EVT::i32);
     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
-    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
+    return DAG.getNode(ARMISD::BRCOND, dl, EVT::Other,
                        Chain, Dest, ARMCC, CCR,Cmp);
   }
 
-  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
+  assert(LHS.getValueType() == EVT::f32 || LHS.getValueType() == EVT::f64);
   ARMCC::CondCodes CondCode, CondCode2;
   if (FPCCToARMCC(CC, CondCode, CondCode2))
     // Swap the LHS/RHS of the comparison if needed.
     std::swap(LHS, RHS);
 
   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
-  SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
-  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
-  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDValue ARMCC = DAG.getConstant(CondCode, EVT::i32);
+  SDValue CCR = DAG.getRegister(ARM::CPSR, EVT::i32);
+  SDVTList VTList = DAG.getVTList(EVT::Other, EVT::Flag);
   SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp };
   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
   if (CondCode2 != ARMCC::AL) {
-    ARMCC = DAG.getConstant(CondCode2, MVT::i32);
+    ARMCC = DAG.getConstant(CondCode2, EVT::i32);
     SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) };
     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5);
   }
@@ -1816,12 +1816,12 @@
   SDValue Index = Op.getOperand(2);
   DebugLoc dl = Op.getDebugLoc();
 
-  MVT PTy = getPointerTy();
+  EVT PTy = getPointerTy();
   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
   ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
   SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
-  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
+  Table = DAG.getNode(ARMISD::WrapperJT, dl, EVT::i32, JTI, UId);
   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
   if (Subtarget->isThumb2()) {
@@ -1829,18 +1829,18 @@
     // which does another jump to the destination. This also makes it easier
     // to translate it to TBB / TBH later.
     // FIXME: This might not work if the function is extremely large.
-    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
+    return DAG.getNode(ARMISD::BR2_JT, dl, EVT::Other, Chain,
                        Addr, Op.getOperand(2), JTI, UId);
   }
   if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
-    Addr = DAG.getLoad((MVT)MVT::i32, dl, Chain, Addr, NULL, 0);
+    Addr = DAG.getLoad((EVT)EVT::i32, dl, Chain, Addr, NULL, 0);
     Chain = Addr.getValue(1);
     Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
-    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
+    return DAG.getNode(ARMISD::BR_JT, dl, EVT::Other, Chain, Addr, JTI, UId);
   } else {
     Addr = DAG.getLoad(PTy, dl, Chain, Addr, NULL, 0);
     Chain = Addr.getValue(1);
-    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
+    return DAG.getNode(ARMISD::BR_JT, dl, EVT::Other, Chain, Addr, JTI, UId);
   }
 }
 
@@ -1848,17 +1848,17 @@
   DebugLoc dl = Op.getDebugLoc();
   unsigned Opc =
     Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI;
-  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
-  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+  Op = DAG.getNode(Opc, dl, EVT::f32, Op.getOperand(0));
+  return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Op);
 }
 
 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
   unsigned Opc =
     Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF;
 
-  Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
+  Op = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, Op.getOperand(0));
   return DAG.getNode(Opc, dl, VT, Op);
 }
 
@@ -1867,19 +1867,19 @@
   SDValue Tmp0 = Op.getOperand(0);
   SDValue Tmp1 = Op.getOperand(1);
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
-  MVT SrcVT = Tmp1.getValueType();
+  EVT VT = Op.getValueType();
+  EVT SrcVT = Tmp1.getValueType();
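   // copysign(Tmp0, Tmp1): take |Tmp0| and let CNEG negate it when the VFP
   // compare of Tmp1 against 0.0 yields LT.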
   SDValue AbsVal = DAG.getNode(ISD::FABS, dl, VT, Tmp0);
   SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG, dl);
-  SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32);
-  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+  SDValue ARMCC = DAG.getConstant(ARMCC::LT, EVT::i32);
+  SDValue CCR = DAG.getRegister(ARM::CPSR, EVT::i32);
   return DAG.getNode(ARMISD::CNEG, dl, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp);
 }
 
 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
   MFI->setFrameAddressIsTaken(true);
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
   unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin())
@@ -1914,7 +1914,7 @@
   unsigned BytesLeft = SizeVal & 3;
   unsigned NumMemOps = SizeVal >> 2;
   unsigned EmittedNumMemOps = 0;
-  MVT VT = MVT::i32;
+  EVT VT = EVT::i32;
   unsigned VTSize = 4;
   unsigned i = 0;
   const unsigned MAX_LOADS_IN_LDM = 6;
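   // The main loop below copies in batches of up to MAX_LOADS_IN_LDM i32 loads
   // followed by the matching stores; the 1-3 leftover bytes (SizeVal & 3) are
   // copied afterwards with i16/i8 accesses.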
@@ -1929,23 +1929,23 @@
     for (i = 0;
          i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
       Loads[i] = DAG.getLoad(VT, dl, Chain,
-                             DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
-                                         DAG.getConstant(SrcOff, MVT::i32)),
+                             DAG.getNode(ISD::ADD, dl, EVT::i32, Src,
+                                         DAG.getConstant(SrcOff, EVT::i32)),
                              SrcSV, SrcSVOff + SrcOff);
       TFOps[i] = Loads[i].getValue(1);
       SrcOff += VTSize;
     }
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &TFOps[0], i);
 
     for (i = 0;
          i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
       TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
-                           DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
-                                       DAG.getConstant(DstOff, MVT::i32)),
+                           DAG.getNode(ISD::ADD, dl, EVT::i32, Dst,
+                                       DAG.getConstant(DstOff, EVT::i32)),
                            DstSV, DstSVOff + DstOff);
       DstOff += VTSize;
     }
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &TFOps[0], i);
 
     EmittedNumMemOps += i;
   }
@@ -1958,69 +1958,69 @@
   i = 0;
   while (BytesLeft) {
     if (BytesLeft >= 2) {
-      VT = MVT::i16;
+      VT = EVT::i16;
       VTSize = 2;
     } else {
-      VT = MVT::i8;
+      VT = EVT::i8;
       VTSize = 1;
     }
 
     Loads[i] = DAG.getLoad(VT, dl, Chain,
-                           DAG.getNode(ISD::ADD, dl, MVT::i32, Src,
-                                       DAG.getConstant(SrcOff, MVT::i32)),
+                           DAG.getNode(ISD::ADD, dl, EVT::i32, Src,
+                                       DAG.getConstant(SrcOff, EVT::i32)),
                            SrcSV, SrcSVOff + SrcOff);
     TFOps[i] = Loads[i].getValue(1);
     ++i;
     SrcOff += VTSize;
     BytesLeft -= VTSize;
   }
-  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+  Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &TFOps[0], i);
 
   i = 0;
   BytesLeft = BytesLeftSave;
   while (BytesLeft) {
     if (BytesLeft >= 2) {
-      VT = MVT::i16;
+      VT = EVT::i16;
       VTSize = 2;
     } else {
-      VT = MVT::i8;
+      VT = EVT::i8;
       VTSize = 1;
     }
 
     TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
-                            DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
-                                        DAG.getConstant(DstOff, MVT::i32)),
+                            DAG.getNode(ISD::ADD, dl, EVT::i32, Dst,
+                                        DAG.getConstant(DstOff, EVT::i32)),
                             DstSV, DstSVOff + DstOff);
     ++i;
     DstOff += VTSize;
     BytesLeft -= VTSize;
   }
-  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &TFOps[0], i);
+  return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &TFOps[0], i);
 }
 
 static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
   SDValue Op = N->getOperand(0);
   DebugLoc dl = N->getDebugLoc();
-  if (N->getValueType(0) == MVT::f64) {
+  if (N->getValueType(0) == EVT::f64) {
     // Turn i64->f64 into FMDRR.
-    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
-                             DAG.getConstant(0, MVT::i32));
-    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
-                             DAG.getConstant(1, MVT::i32));
-    return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
+    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Op,
+                             DAG.getConstant(0, EVT::i32));
+    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Op,
+                             DAG.getConstant(1, EVT::i32));
+    return DAG.getNode(ARMISD::FMDRR, dl, EVT::f64, Lo, Hi);
   }
 
   // Turn f64->i64 into FMRRD.
   SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
-                            DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
+                            DAG.getVTList(EVT::i32, EVT::i32), &Op, 1);
 
   // Merge the pieces into a single i64 value.
-  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
+  return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, Cvt, Cvt.getValue(1));
 }
 
 /// getZeroVector - Returns a vector of specified type with all zero elements.
 ///
-static SDValue getZeroVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
+static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   assert(VT.isVector() && "Expected a vector type");
 
   // Zero vectors are used to represent vector negation and in those cases
@@ -2030,35 +2030,35 @@
   // the future, always build zero vectors as <4 x i32> or <2 x i32> bitcasted
   // to their dest type.  This ensures they get CSE'd.
   SDValue Vec;
-  SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
+  SDValue Cst = DAG.getTargetConstant(0, EVT::i32);
   if (VT.getSizeInBits() == 64)
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i32, Cst, Cst);
   else
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32, Cst, Cst, Cst, Cst);
 
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
 }
 
 /// getOnesVector - Returns a vector of specified type with all bits set.
 ///
-static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
+static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   assert(VT.isVector() && "Expected a vector type");
 
   // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
   // type.  This ensures they get CSE'd.
   SDValue Vec;
-  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
+  SDValue Cst = DAG.getTargetConstant(~0U, EVT::i32);
   if (VT.getSizeInBits() == 64)
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i32, Cst, Cst);
   else
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32, Cst, Cst, Cst, Cst);
 
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
 }
 
 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                           const ARMSubtarget *ST) {
-  MVT VT = N->getValueType(0);
+  EVT VT = N->getValueType(0);
   DebugLoc dl = N->getDebugLoc();
 
   // Lower vector shifts on NEON to use VSHL.
@@ -2068,7 +2068,7 @@
     // Left shifts translate directly to the vshiftu intrinsic.
     if (N->getOpcode() == ISD::SHL)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
+                         DAG.getConstant(Intrinsic::arm_neon_vshiftu, EVT::i32),
                          N->getOperand(0), N->getOperand(1));
 
     assert((N->getOpcode() == ISD::SRA ||
@@ -2077,7 +2077,7 @@
     // NEON uses the same intrinsics for both left and right shifts.  For
     // right shifts, the shift amounts are negative, so negate the vector of
     // shift amounts.
-    MVT ShiftVT = N->getOperand(1).getValueType();
+    EVT ShiftVT = N->getOperand(1).getValueType();
     SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                        getZeroVector(ShiftVT, DAG, dl),
                                        N->getOperand(1));
@@ -2085,11 +2085,11 @@
                                Intrinsic::arm_neon_vshifts :
                                Intrinsic::arm_neon_vshiftu);
     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(vshiftInt, MVT::i32),
+                       DAG.getConstant(vshiftInt, EVT::i32),
                        N->getOperand(0), NegatedCount);
   }
 
-  assert(VT == MVT::i64 &&
+  assert(VT == EVT::i64 &&
          (N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
          "Unknown shift to lower!");
 
@@ -2102,21 +2102,21 @@
   if (ST->isThumb1Only()) return SDValue();
 
   // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
-  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
-                             DAG.getConstant(0, MVT::i32));
-  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
-                             DAG.getConstant(1, MVT::i32));
+  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, N->getOperand(0),
+                             DAG.getConstant(0, EVT::i32));
+  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, N->getOperand(0),
+                             DAG.getConstant(1, EVT::i32));
 
   // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
   // captures the result into a carry flag.
   unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
-  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
+  Hi = DAG.getNode(Opc, dl, DAG.getVTList(EVT::i32, EVT::Flag), &Hi, 1);
 
   // The low part is an ARMISD::RRX operand, which shifts the carry in.
-  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
+  Lo = DAG.getNode(ARMISD::RRX, dl, EVT::i32, Lo, Hi.getValue(1));
 
   // Merge the pieces into a single i64 value.
- return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
+ return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, Lo, Hi);
 }
 
 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
@@ -2128,7 +2128,7 @@
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
   SDValue CC = Op.getOperand(2);
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
   DebugLoc dl = Op.getDebugLoc();
 
@@ -2227,13 +2227,13 @@
   case 8:
     // Any 1-byte value is OK.
     assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
-    return DAG.getTargetConstant(SplatBits, MVT::i8);
+    return DAG.getTargetConstant(SplatBits, EVT::i8);
 
   case 16:
     // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
     if ((SplatBits & ~0xff) == 0 ||
         (SplatBits & ~0xff00) == 0)
-      return DAG.getTargetConstant(SplatBits, MVT::i16);
+      return DAG.getTargetConstant(SplatBits, EVT::i16);
     break;
 
   case 32:
@@ -2245,15 +2245,15 @@
         (SplatBits & ~0xff00) == 0 ||
         (SplatBits & ~0xff0000) == 0 ||
         (SplatBits & ~0xff000000) == 0)
-      return DAG.getTargetConstant(SplatBits, MVT::i32);
+      return DAG.getTargetConstant(SplatBits, EVT::i32);
 
     if ((SplatBits & ~0xffff) == 0 &&
         ((SplatBits | SplatUndef) & 0xff) == 0xff)
-      return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
+      return DAG.getTargetConstant(SplatBits | 0xff, EVT::i32);
 
     if ((SplatBits & ~0xffffff) == 0 &&
         ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
-      return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
+      return DAG.getTargetConstant(SplatBits | 0xffff, EVT::i32);
 
     // Note: there are a few 32-bit splat values (specifically: 00ffff00,
     // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
@@ -2273,7 +2273,7 @@
         return SDValue();
       BitMask <<= 8;
     }
-    return DAG.getTargetConstant(Val, MVT::i64);
+    return DAG.getTargetConstant(Val, EVT::i64);
   }
 
   default:
@@ -2311,7 +2311,7 @@
   assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
          "Only possible block sizes for VREV are: 16, 32, 64");
 
-  MVT VT = N->getValueType(0);
+  EVT VT = N->getValueType(0);
   unsigned NumElts = VT.getVectorNumElements();
   unsigned EltSz = VT.getVectorElementType().getSizeInBits();
   unsigned BlockElts = N->getMaskElt(0) + 1;
@@ -2328,7 +2328,7 @@
   return true;
 }
 
-static SDValue BuildSplat(SDValue Val, MVT VT, SelectionDAG &DAG, DebugLoc dl) {
+static SDValue BuildSplat(SDValue Val, EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   // Canonicalize all-zeros and all-ones vectors.
   ConstantSDNode *ConstVal = dyn_cast<ConstantSDNode>(Val.getNode());
   if (ConstVal->isNullValue())
@@ -2336,22 +2336,22 @@
   if (ConstVal->isAllOnesValue())
     return getOnesVector(VT, DAG, dl);
 
-  MVT CanonicalVT;
+  EVT CanonicalVT;
   if (VT.is64BitVector()) {
     switch (Val.getValueType().getSizeInBits()) {
-    case 8:  CanonicalVT = MVT::v8i8; break;
-    case 16: CanonicalVT = MVT::v4i16; break;
-    case 32: CanonicalVT = MVT::v2i32; break;
-    case 64: CanonicalVT = MVT::v1i64; break;
+    case 8:  CanonicalVT = EVT::v8i8; break;
+    case 16: CanonicalVT = EVT::v4i16; break;
+    case 32: CanonicalVT = EVT::v2i32; break;
+    case 64: CanonicalVT = EVT::v1i64; break;
     default: llvm_unreachable("unexpected splat element type"); break;
     }
   } else {
     assert(VT.is128BitVector() && "unknown splat vector size");
     switch (Val.getValueType().getSizeInBits()) {
-    case 8:  CanonicalVT = MVT::v16i8; break;
-    case 16: CanonicalVT = MVT::v8i16; break;
-    case 32: CanonicalVT = MVT::v4i32; break;
-    case 64: CanonicalVT = MVT::v2i64; break;
+    case 8:  CanonicalVT = EVT::v16i8; break;
+    case 16: CanonicalVT = EVT::v8i16; break;
+    case 32: CanonicalVT = EVT::v4i32; break;
+    case 64: CanonicalVT = EVT::v2i64; break;
     default: llvm_unreachable("unexpected splat element type"); break;
     }
   }
@@ -2370,7 +2370,7 @@
   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
   assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
 
   APInt SplatBits, SplatUndef;
   unsigned SplatBitSize;
@@ -2411,14 +2411,14 @@
 }
 
 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
-  assert((VT == MVT::i8 || VT == MVT::i16) &&
+  assert((VT == EVT::i8 || VT == EVT::i16) &&
          "unexpected type for custom-lowering vector extract");
   SDValue Vec = Op.getOperand(0);
   SDValue Lane = Op.getOperand(1);
-  Op = DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
-  Op = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Op, DAG.getValueType(VT));
+  Op = DAG.getNode(ARMISD::VGETLANEu, dl, EVT::i32, Vec, Lane);
+  Op = DAG.getNode(ISD::AssertZext, dl, EVT::i32, Op, DAG.getValueType(VT));
   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
 }
 
@@ -2428,16 +2428,16 @@
   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
          "unexpected CONCAT_VECTORS");
   DebugLoc dl = Op.getDebugLoc();
-  SDValue Val = DAG.getUNDEF(MVT::v2f64);
+  SDValue Val = DAG.getUNDEF(EVT::v2f64);
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
   if (Op0.getOpcode() != ISD::UNDEF)
-    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
-                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
+    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v2f64, Val,
+                      DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, Op0),
                       DAG.getIntPtrConstant(0));
   if (Op1.getOpcode() != ISD::UNDEF)
-    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
-                      DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
+    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v2f64, Val,
+                      DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, Op1),
                       DAG.getIntPtrConstant(1));
   return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
 }
@@ -2652,7 +2652,7 @@
                             TargetLowering::DAGCombinerInfo &DCI) {
   SelectionDAG &DAG = DCI.DAG;
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  MVT VT = N->getValueType(0);
+  EVT VT = N->getValueType(0);
   unsigned Opc = N->getOpcode();
   bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC;
   SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1);
@@ -2680,7 +2680,7 @@
              cast<ConstantSDNode>(RHS)->isNullValue()) {
     std::swap(LHS, RHS);
     SDValue Op0 = Slct.getOperand(0);
-    MVT OpVT = isSlctCC ? Op0.getValueType() :
+    EVT OpVT = isSlctCC ? Op0.getValueType() :
                           Op0.getOperand(0).getValueType();
     bool isInt = OpVT.isInteger();
     CC = ISD::getSetCCInverse(CC, isInt);
@@ -2775,7 +2775,7 @@
 /// operand of a vector shift left operation.  That value must be in the range:
 ///   0 <= Value < ElementBits for a left shift; or
 ///   0 <= Value <= ElementBits for a long left shift.
-static bool isVShiftLImm(SDValue Op, MVT VT, bool isLong, int64_t &Cnt) {
+static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
   unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
   if (! getVShiftImm(Op, ElementBits, Cnt))
@@ -2789,7 +2789,7 @@
 /// absolute value must be in the range:
 ///   1 <= |Value| <= ElementBits for a right shift; or
 ///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
-static bool isVShiftRImm(SDValue Op, MVT VT, bool isNarrow, bool isIntrinsic,
+static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                          int64_t &Cnt) {
   assert(VT.isVector() && "vector shift count is not a vector type");
   unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
@@ -2830,7 +2830,7 @@
   case Intrinsic::arm_neon_vqrshiftns:
   case Intrinsic::arm_neon_vqrshiftnu:
   case Intrinsic::arm_neon_vqrshiftnsu: {
-    MVT VT = N->getOperand(1).getValueType();
+    EVT VT = N->getOperand(1).getValueType();
     int64_t Cnt;
     unsigned VShiftOpc = 0;
 
@@ -2930,11 +2930,11 @@
     }
 
     return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
-                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
+                       N->getOperand(1), DAG.getConstant(Cnt, EVT::i32));
   }
 
   case Intrinsic::arm_neon_vshiftins: {
-    MVT VT = N->getOperand(1).getValueType();
+    EVT VT = N->getOperand(1).getValueType();
     int64_t Cnt;
     unsigned VShiftOpc = 0;
 
@@ -2948,7 +2948,7 @@
 
     return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0),
                        N->getOperand(1), N->getOperand(2),
-                       DAG.getConstant(Cnt, MVT::i32));
+                       DAG.getConstant(Cnt, EVT::i32));
   }
 
   case Intrinsic::arm_neon_vqrshifts:
@@ -2967,7 +2967,7 @@
 /// their values after they get legalized to loads from a constant pool.
 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
-  MVT VT = N->getValueType(0);
+  EVT VT = N->getValueType(0);
 
   // Nothing to be done for scalar shifts.
   if (! VT.isVector())
@@ -2982,7 +2982,7 @@
   case ISD::SHL:
     if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
       return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0),
-                         DAG.getConstant(Cnt, MVT::i32));
+                         DAG.getConstant(Cnt, EVT::i32));
     break;
 
   case ISD::SRA:
@@ -2991,7 +2991,7 @@
       unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
                             ARMISD::VSHRs : ARMISD::VSHRu);
       return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0),
-                         DAG.getConstant(Cnt, MVT::i32));
+                         DAG.getConstant(Cnt, EVT::i32));
     }
   }
   return SDValue();
@@ -3010,12 +3010,12 @@
   if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
     SDValue Vec = N0.getOperand(0);
     SDValue Lane = N0.getOperand(1);
-    MVT VT = N->getValueType(0);
-    MVT EltVT = N0.getValueType();
+    EVT VT = N->getValueType(0);
+    EVT EltVT = N0.getValueType();
     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
-    if (VT == MVT::i32 &&
-        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
+    if (VT == EVT::i32 &&
+        (EltVT == EVT::i8 || EltVT == EVT::i16) &&
         TLI.isTypeLegal(Vec.getValueType())) {
 
       unsigned Opc = 0;
@@ -3060,7 +3060,7 @@
 /// isLegalAddressImmediate - Return true if the integer value can be used
 /// as the offset of the target addressing mode for load / store of the
 /// given type.
-static bool isLegalAddressImmediate(int64_t V, MVT VT,
+static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                     const ARMSubtarget *Subtarget) {
   if (V == 0)
     return true;
@@ -3075,15 +3075,15 @@
     unsigned Scale = 1;
     switch (VT.getSimpleVT()) {
     default: return false;
-    case MVT::i1:
-    case MVT::i8:
+    case EVT::i1:
+    case EVT::i8:
       // Scale == 1;
       break;
-    case MVT::i16:
+    case EVT::i16:
       // Scale == 2;
       Scale = 2;
       break;
-    case MVT::i32:
+    case EVT::i32:
       // Scale == 4;
       Scale = 4;
       break;
@@ -3099,16 +3099,16 @@
     V = - V;
   switch (VT.getSimpleVT()) {
   default: return false;
-  case MVT::i1:
-  case MVT::i8:
-  case MVT::i32:
+  case EVT::i1:
+  case EVT::i8:
+  case EVT::i32:
     // +- imm12
     return V == (V & ((1LL << 12) - 1));
-  case MVT::i16:
+  case EVT::i16:
     // +- imm8
     return V == (V & ((1LL << 8) - 1));
-  case MVT::f32:
-  case MVT::f64:
+  case EVT::f32:
+  case EVT::f64:
     if (!Subtarget->hasVFP2())
       return false;
     if ((V & 3) != 0)
@@ -3122,7 +3122,7 @@
 /// by AM is legal for this target, for a load/store of the specified type.
 bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                               const Type *Ty) const {
-  MVT VT = getValueType(Ty, true);
+  EVT VT = getValueType(Ty, true);
   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
     return false;
 
@@ -3148,10 +3148,10 @@
     int Scale = AM.Scale;
     switch (VT.getSimpleVT()) {
     default: return false;
-    case MVT::i1:
-    case MVT::i8:
-    case MVT::i32:
-    case MVT::i64:
+    case EVT::i1:
+    case EVT::i8:
+    case EVT::i32:
+    case EVT::i64:
       // This assumes i64 is legalized to a pair of i32. If not (i.e.
       // ldrd / strd are used, then its address mode is same as i16.
       // r + r
@@ -3160,13 +3160,13 @@
         return true;
       // r + r << imm
       return isPowerOf2_32(Scale & ~1);
-    case MVT::i16:
+    case EVT::i16:
       // r + r
       if (((unsigned)AM.HasBaseReg + Scale) <= 2)
         return true;
       return false;
 
-    case MVT::isVoid:
+    case EVT::isVoid:
       // Note, we allow "void" uses (basically, uses that aren't loads or
       // stores), because arm allows folding a scale into many arithmetic
       // operations.  This should be made more precise and revisited later.
@@ -3180,14 +3180,14 @@
   return true;
 }
 
-static bool getARMIndexedAddressParts(SDNode *Ptr, MVT VT,
+static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                       bool isSEXTLoad, SDValue &Base,
                                       SDValue &Offset, bool &isInc,
                                       SelectionDAG &DAG) {
   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
     return false;
 
-  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
+  if (VT == EVT::i16 || ((VT == EVT::i8 || VT == EVT::i1) && isSEXTLoad)) {
     // AddressingMode 3
     Base = Ptr->getOperand(0);
     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
@@ -3202,7 +3202,7 @@
     isInc = (Ptr->getOpcode() == ISD::ADD);
     Offset = Ptr->getOperand(1);
     return true;
-  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
+  } else if (VT == EVT::i32 || VT == EVT::i8 || VT == EVT::i1) {
     // AddressingMode 2
     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
       int RHSC = (int)RHS->getZExtValue();
@@ -3238,7 +3238,7 @@
   return false;
 }
 
-static bool getT2IndexedAddressParts(SDNode *Ptr, MVT VT,
+static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
@@ -3274,7 +3274,7 @@
   if (Subtarget->isThumb1Only())
     return false;
 
-  MVT VT;
+  EVT VT;
   SDValue Ptr;
   bool isSEXTLoad = false;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
@@ -3313,7 +3313,7 @@
   if (Subtarget->isThumb1Only())
     return false;
 
-  MVT VT;
+  EVT VT;
   SDValue Ptr;
   bool isSEXTLoad = false;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
@@ -3383,7 +3383,7 @@
 
 std::pair<unsigned, const TargetRegisterClass*>
 ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
-                                                MVT VT) const {
+                                                EVT VT) const {
   if (Constraint.size() == 1) {
     // GCC RS6000 Constraint Letters
     switch (Constraint[0]) {
@@ -3395,9 +3395,9 @@
     case 'r':
       return std::make_pair(0U, ARM::GPRRegisterClass);
     case 'w':
-      if (VT == MVT::f32)
+      if (VT == EVT::f32)
         return std::make_pair(0U, ARM::SPRRegisterClass);
-      if (VT == MVT::f64)
+      if (VT == EVT::f64)
         return std::make_pair(0U, ARM::DPRRegisterClass);
       break;
     }
@@ -3407,7 +3407,7 @@
 
 std::vector<unsigned> ARMTargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT VT) const {
+                                  EVT VT) const {
   if (Constraint.size() != 1)
     return std::vector<unsigned>();
 
@@ -3423,7 +3423,7 @@
                                  ARM::R8, ARM::R9, ARM::R10, ARM::R11,
                                  ARM::R12, ARM::LR, 0);
   case 'w':
-    if (VT == MVT::f32)
+    if (VT == EVT::f32)
       return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                    ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                    ARM::S8, ARM::S9, ARM::S10, ARM::S11,
@@ -3432,7 +3432,7 @@
                                    ARM::S20,ARM::S21,ARM::S22,ARM::S23,
                                    ARM::S24,ARM::S25,ARM::S26,ARM::S27,
                                    ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
-    if (VT == MVT::f64)
+    if (VT == EVT::f64)
       return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                    ARM::D4, ARM::D5, ARM::D6, ARM::D7,
                                    ARM::D8, ARM::D9, ARM::D10,ARM::D11,
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 4649c18..648d388 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -194,10 +194,10 @@
     ConstraintType getConstraintType(const std::string &Constraint) const;
     std::pair<unsigned, const TargetRegisterClass*>
       getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const;
+                                   EVT VT) const;
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                      MVT VT) const;
+                                      EVT VT) const;
 
     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
     /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is
@@ -225,9 +225,9 @@
     ///
     unsigned ARMPCLabelIndex;
 
-    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
-    void addDRTypeForNEON(MVT VT);
-    void addQRTypeForNEON(MVT VT);
+    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
+    void addDRTypeForNEON(EVT VT);
+    void addQRTypeForNEON(EVT VT);
 
     typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
     void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 9789a3b..5190492 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -129,13 +129,13 @@
 // so_imm_neg_XFORM - Return a so_imm value packed into the format described for
 // so_imm_neg def below.
 def so_imm_neg_XFORM : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
+  return CurDAG->getTargetConstant(-(int)N->getZExtValue(), EVT::i32);
 }]>;
 
 // so_imm_not_XFORM - Return a so_imm value packed into the format described for
 // so_imm_not def below.
 def so_imm_not_XFORM : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
+  return CurDAG->getTargetConstant(~(int)N->getZExtValue(), EVT::i32);
 }]>;
 
 // rot_imm predicate - True if the 32-bit immediate is equal to 8, 16, or 24.
@@ -254,12 +254,12 @@
 
 def so_imm2part_1 : SDNodeXForm<imm, [{
   unsigned V = ARM_AM::getSOImmTwoPartFirst((unsigned)N->getZExtValue());
-  return CurDAG->getTargetConstant(V, MVT::i32);
+  return CurDAG->getTargetConstant(V, EVT::i32);
 }]>;
 
 def so_imm2part_2 : SDNodeXForm<imm, [{
   unsigned V = ARM_AM::getSOImmTwoPartSecond((unsigned)N->getZExtValue());
-  return CurDAG->getTargetConstant(V, MVT::i32);
+  return CurDAG->getTargetConstant(V, EVT::i32);
 }]>;
 
 
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 77bea68..a028db7 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -277,33 +277,33 @@
 // Extract D sub-registers of Q registers.
 // (arm_dsubreg_0 is 5; arm_dsubreg_1 is 6)
 def DSubReg_i8_reg  : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
+  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, EVT::i32);
 }]>;
 def DSubReg_i16_reg : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
+  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, EVT::i32);
 }]>;
 def DSubReg_i32_reg : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
+  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, EVT::i32);
 }]>;
 def DSubReg_f64_reg : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
+  return CurDAG->getTargetConstant(5 + N->getZExtValue(), EVT::i32);
 }]>;
 
 // Extract S sub-registers of Q registers.
 // (arm_ssubreg_0 is 1; arm_ssubreg_1 is 2; etc.)
 def SSubReg_f32_reg : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(1 + N->getZExtValue(), MVT::i32);
+  return CurDAG->getTargetConstant(1 + N->getZExtValue(), EVT::i32);
 }]>;
 
 // Translate lane numbers from Q registers to D subregs.
 def SubReg_i8_lane  : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
+  return CurDAG->getTargetConstant(N->getZExtValue() & 7, EVT::i32);
 }]>;
 def SubReg_i16_lane : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
+  return CurDAG->getTargetConstant(N->getZExtValue() & 3, EVT::i32);
 }]>;
 def SubReg_i32_lane : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
+  return CurDAG->getTargetConstant(N->getZExtValue() & 1, EVT::i32);
 }]>;
 
 //===----------------------------------------------------------------------===//
@@ -1772,7 +1772,7 @@
 
 def SHUFFLE_get_splat_lane : SDNodeXForm<vector_shuffle, [{
   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
-  return CurDAG->getTargetConstant(SVOp->getSplatIndex(), MVT::i32);
+  return CurDAG->getTargetConstant(SVOp->getSplatIndex(), EVT::i32);
 }]>;
 
 def splat_lane : PatFrag<(ops node:$lhs, node:$rhs),
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index 25dbddf..7d27af2 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -19,10 +19,10 @@
                       [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
 
 def imm_neg_XFORM : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
+  return CurDAG->getTargetConstant(-(int)N->getZExtValue(), EVT::i32);
 }]>;
 def imm_comp_XFORM : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
+  return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), EVT::i32);
 }]>;
 
 
@@ -58,12 +58,12 @@
 
 def thumb_immshifted_val : SDNodeXForm<imm, [{
   unsigned V = ARM_AM::getThumbImmNonShiftedVal((unsigned)N->getZExtValue());
-  return CurDAG->getTargetConstant(V, MVT::i32);
+  return CurDAG->getTargetConstant(V, EVT::i32);
 }]>;
 
 def thumb_immshifted_shamt : SDNodeXForm<imm, [{
   unsigned V = ARM_AM::getThumbImmValShift((unsigned)N->getZExtValue());
-  return CurDAG->getTargetConstant(V, MVT::i32);
+  return CurDAG->getTargetConstant(V, EVT::i32);
 }]>;
 
 // Define Thumb specific addressing modes.
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index 5499763..f7ea0d3 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -37,12 +37,12 @@
 
 // t2_so_imm_not_XFORM - Return the complement of a t2_so_imm value
 def t2_so_imm_not_XFORM : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
+  return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), EVT::i32);
 }]>;
 
 // t2_so_imm_neg_XFORM - Return the negation of a t2_so_imm value
 def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(-((int)N->getZExtValue()), MVT::i32);
+  return CurDAG->getTargetConstant(-((int)N->getZExtValue()), EVT::i32);
 }]>;
 
 // t2_so_imm - Match a 32-bit immediate operand, which is an
@@ -97,11 +97,11 @@
 /// Split a 32-bit immediate into two 16 bit parts.
 def t2_lo16 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() & 0xffff,
-                                   MVT::i32);
+                                   EVT::i32);
 }]>;
 
 def t2_hi16 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
+  return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, EVT::i32);
 }]>;
 
 def t2_lo16AllZero : PatLeaf<(i32 imm), [{
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.cpp b/lib/Target/ARM/Thumb1RegisterInfo.cpp
index 872f1d3..f862114 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.cpp
+++ b/lib/Target/ARM/Thumb1RegisterInfo.cpp
@@ -67,7 +67,7 @@
 }
 
 const TargetRegisterClass*
-Thumb1RegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, MVT VT) const {
+Thumb1RegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, EVT VT) const {
   if (isARMLowRegister(Reg))
     return ARM::tGPRRegisterClass;
   switch (Reg) {
diff --git a/lib/Target/ARM/Thumb1RegisterInfo.h b/lib/Target/ARM/Thumb1RegisterInfo.h
index 05b6ae9..c45c261 100644
--- a/lib/Target/ARM/Thumb1RegisterInfo.h
+++ b/lib/Target/ARM/Thumb1RegisterInfo.h
@@ -38,7 +38,7 @@
 
   /// Code Generation virtual methods...
   const TargetRegisterClass *
-    getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const;
+    getPhysicalRegisterRegClass(unsigned Reg, EVT VT = EVT::Other) const;
 
   bool requiresRegisterScavenging(const MachineFunction &MF) const;
 
diff --git a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
index 6b2cf70..d4b4e93 100644
--- a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
+++ b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp
@@ -152,7 +152,7 @@
     /// getI64Imm - Return a target constant with the specified value, of type
     /// i64.
     inline SDValue getI64Imm(int64_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i64);
+      return CurDAG->getTargetConstant(Imm, EVT::i64);
     }
 
     // Select - Convert the specified operand from a target-independent to a
@@ -251,8 +251,8 @@
 
   case ISD::FrameIndex: {
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
-    return CurDAG->SelectNodeTo(N, Alpha::LDA, MVT::i64,
-                                CurDAG->getTargetFrameIndex(FI, MVT::i32),
+    return CurDAG->SelectNodeTo(N, Alpha::LDA, EVT::i64,
+                                CurDAG->getTargetFrameIndex(FI, EVT::i32),
                                 getI64Imm(0));
   }
   case ISD::GLOBAL_OFFSET_TABLE:
@@ -272,16 +272,16 @@
     Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, N0, 
                                  Chain.getValue(1));
     SDNode *CNode =
-      CurDAG->getTargetNode(Alpha::JSRs, dl, MVT::Other, MVT::Flag, 
+      CurDAG->getTargetNode(Alpha::JSRs, dl, EVT::Other, EVT::Flag, 
                             Chain, Chain.getValue(1));
-    Chain = CurDAG->getCopyFromReg(Chain, dl, Alpha::R27, MVT::i64, 
+    Chain = CurDAG->getCopyFromReg(Chain, dl, Alpha::R27, EVT::i64, 
                                    SDValue(CNode, 1));
-    return CurDAG->SelectNodeTo(N, Alpha::BISr, MVT::i64, Chain, Chain);
+    return CurDAG->SelectNodeTo(N, Alpha::BISr, EVT::i64, Chain, Chain);
   }
 
   case ISD::READCYCLECOUNTER: {
     SDValue Chain = N->getOperand(0);
-    return CurDAG->getTargetNode(Alpha::RPCC, dl, MVT::i64, MVT::Other,
+    return CurDAG->getTargetNode(Alpha::RPCC, dl, EVT::i64, EVT::Other,
                                  Chain);
   }
 
@@ -290,7 +290,7 @@
     
     if (uval == 0) {
       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
-                                                Alpha::R31, MVT::i64);
+                                                Alpha::R31, EVT::i64);
       ReplaceUses(Op, Result);
       return NULL;
     }
@@ -306,17 +306,17 @@
       break; //(zext (LDAH (LDA)))
     //Else use the constant pool
     ConstantInt *C = ConstantInt::get(Type::Int64Ty, uval);
-    SDValue CPI = CurDAG->getTargetConstantPool(C, MVT::i64);
-    SDNode *Tmp = CurDAG->getTargetNode(Alpha::LDAHr, dl, MVT::i64, CPI,
+    SDValue CPI = CurDAG->getTargetConstantPool(C, EVT::i64);
+    SDNode *Tmp = CurDAG->getTargetNode(Alpha::LDAHr, dl, EVT::i64, CPI,
                                         SDValue(getGlobalBaseReg(), 0));
-    return CurDAG->SelectNodeTo(N, Alpha::LDQr, MVT::i64, MVT::Other, 
+    return CurDAG->SelectNodeTo(N, Alpha::LDQr, EVT::i64, EVT::Other, 
                                 CPI, SDValue(Tmp, 0), CurDAG->getEntryNode());
   }
   case ISD::TargetConstantFP:
   case ISD::ConstantFP: {
     ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N);
-    bool isDouble = N->getValueType(0) == MVT::f64;
-    MVT T = isDouble ? MVT::f64 : MVT::f32;
+    bool isDouble = N->getValueType(0) == EVT::f64;
+    EVT T = isDouble ? EVT::f64 : EVT::f32;
     if (CN->getValueAPF().isPosZero()) {
       return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYST : Alpha::CPYSS,
                                   T, CurDAG->getRegister(Alpha::F31, T),
@@ -359,18 +359,18 @@
       };
       SDValue tmp1 = N->getOperand(rev?1:0);
       SDValue tmp2 = N->getOperand(rev?0:1);
-      SDNode *cmp = CurDAG->getTargetNode(Opc, dl, MVT::f64, tmp1, tmp2);
+      SDNode *cmp = CurDAG->getTargetNode(Opc, dl, EVT::f64, tmp1, tmp2);
       if (inv) 
         cmp = CurDAG->getTargetNode(Alpha::CMPTEQ, dl, 
-                                    MVT::f64, SDValue(cmp, 0), 
-                                    CurDAG->getRegister(Alpha::F31, MVT::f64));
+                                    EVT::f64, SDValue(cmp, 0), 
+                                    CurDAG->getRegister(Alpha::F31, EVT::f64));
       switch(CC) {
       case ISD::SETUEQ: case ISD::SETULT: case ISD::SETULE:
       case ISD::SETUNE: case ISD::SETUGT: case ISD::SETUGE:
        {
-         SDNode* cmp2 = CurDAG->getTargetNode(Alpha::CMPTUN, dl, MVT::f64,
+         SDNode* cmp2 = CurDAG->getTargetNode(Alpha::CMPTUN, dl, EVT::f64,
                                               tmp1, tmp2);
-         cmp = CurDAG->getTargetNode(Alpha::ADDT, dl, MVT::f64, 
+         cmp = CurDAG->getTargetNode(Alpha::ADDT, dl, EVT::f64, 
                                      SDValue(cmp2, 0), SDValue(cmp, 0));
          break;
        }
@@ -378,9 +378,9 @@
       }
 
       SDNode* LD = CurDAG->getTargetNode(Alpha::FTOIT, dl,
-                                         MVT::i64, SDValue(cmp, 0));
-      return CurDAG->getTargetNode(Alpha::CMPULT, dl, MVT::i64, 
-                                   CurDAG->getRegister(Alpha::R31, MVT::i64),
+                                         EVT::i64, SDValue(cmp, 0));
+      return CurDAG->getTargetNode(Alpha::CMPULT, dl, EVT::i64, 
+                                   CurDAG->getRegister(Alpha::R31, EVT::i64),
                                    SDValue(LD,0));
     }
     break;
@@ -406,10 +406,10 @@
       
       if (get_zapImm(mask)) {
         SDValue Z = 
-          SDValue(CurDAG->getTargetNode(Alpha::ZAPNOTi, dl, MVT::i64,
+          SDValue(CurDAG->getTargetNode(Alpha::ZAPNOTi, dl, EVT::i64,
                                           N->getOperand(0).getOperand(0),
                                           getI64Imm(get_zapImm(mask))), 0);
-        return CurDAG->getTargetNode(Alpha::SRLr, dl, MVT::i64, Z, 
+        return CurDAG->getTargetNode(Alpha::SRLr, dl, EVT::i64, Z, 
                                      getI64Imm(sval));
       }
     }
@@ -434,14 +434,14 @@
      SDValue GOT = SDValue(getGlobalBaseReg(), 0);
      Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R29, GOT, InFlag);
      InFlag = Chain.getValue(1);
-     Chain = SDValue(CurDAG->getTargetNode(Alpha::BSR, dl, MVT::Other, 
-                                           MVT::Flag, Addr.getOperand(0), 
+     Chain = SDValue(CurDAG->getTargetNode(Alpha::BSR, dl, EVT::Other, 
+                                           EVT::Flag, Addr.getOperand(0), 
                                            Chain, InFlag), 0);
    } else {
      Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, Addr, InFlag);
      InFlag = Chain.getValue(1);
-     Chain = SDValue(CurDAG->getTargetNode(Alpha::JSR, dl, MVT::Other,
-                                             MVT::Flag, Chain, InFlag), 0);
+     Chain = SDValue(CurDAG->getTargetNode(Alpha::JSR, dl, EVT::Other,
+                                             EVT::Flag, Chain, InFlag), 0);
    }
    InFlag = Chain.getValue(1);
 
diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp
index c8817b1..1033753 100644
--- a/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -46,114 +46,114 @@
   : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
   // Set up the TargetLowering object.
   //I am having problems with shr n i8 1
-  setShiftAmountType(MVT::i64);
+  setShiftAmountType(EVT::i64);
   setBooleanContents(ZeroOrOneBooleanContent);
   
   setUsesGlobalOffsetTable(true);
   
-  addRegisterClass(MVT::i64, Alpha::GPRCRegisterClass);
-  addRegisterClass(MVT::f64, Alpha::F8RCRegisterClass);
-  addRegisterClass(MVT::f32, Alpha::F4RCRegisterClass);
+  addRegisterClass(EVT::i64, Alpha::GPRCRegisterClass);
+  addRegisterClass(EVT::f64, Alpha::F8RCRegisterClass);
+  addRegisterClass(EVT::f32, Alpha::F4RCRegisterClass);
 
   // We want to custom lower some of our intrinsics.
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, EVT::Other, Custom);
 
-  setLoadExtAction(ISD::EXTLOAD, MVT::i1,  Promote);
-  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, EVT::i1,  Promote);
+  setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
   
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1,  Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1,  Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i32, Expand);
   
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1,  Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8,  Expand);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1,  Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i8,  Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i16, Expand);
 
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 
-  //  setOperationAction(ISD::BRIND,        MVT::Other,   Expand);
-  setOperationAction(ISD::BR_JT,        MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,        MVT::Other, Expand);
-  setOperationAction(ISD::SELECT_CC,    MVT::Other, Expand);  
+  //  setOperationAction(ISD::BRIND,        EVT::Other,   Expand);
+  setOperationAction(ISD::BR_JT,        EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,        EVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC,    EVT::Other, Expand);  
 
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
-  setOperationAction(ISD::FREM, MVT::f32, Expand);
-  setOperationAction(ISD::FREM, MVT::f64, Expand);
+  setOperationAction(ISD::FREM, EVT::f32, Expand);
+  setOperationAction(ISD::FREM, EVT::f64, Expand);
   
-  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
-  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i64, Expand);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i64, Custom);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i64, Expand);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i64, Custom);
 
   if (!TM.getSubtarget<AlphaSubtarget>().hasCT()) {
-    setOperationAction(ISD::CTPOP    , MVT::i64  , Expand);
-    setOperationAction(ISD::CTTZ     , MVT::i64  , Expand);
-    setOperationAction(ISD::CTLZ     , MVT::i64  , Expand);
+    setOperationAction(ISD::CTPOP    , EVT::i64  , Expand);
+    setOperationAction(ISD::CTTZ     , EVT::i64  , Expand);
+    setOperationAction(ISD::CTLZ     , EVT::i64  , Expand);
   }
-  setOperationAction(ISD::BSWAP    , MVT::i64, Expand);
-  setOperationAction(ISD::ROTL     , MVT::i64, Expand);
-  setOperationAction(ISD::ROTR     , MVT::i64, Expand);
+  setOperationAction(ISD::BSWAP    , EVT::i64, Expand);
+  setOperationAction(ISD::ROTL     , EVT::i64, Expand);
+  setOperationAction(ISD::ROTR     , EVT::i64, Expand);
   
-  setOperationAction(ISD::SREM     , MVT::i64, Custom);
-  setOperationAction(ISD::UREM     , MVT::i64, Custom);
-  setOperationAction(ISD::SDIV     , MVT::i64, Custom);
-  setOperationAction(ISD::UDIV     , MVT::i64, Custom);
+  setOperationAction(ISD::SREM     , EVT::i64, Custom);
+  setOperationAction(ISD::UREM     , EVT::i64, Custom);
+  setOperationAction(ISD::SDIV     , EVT::i64, Custom);
+  setOperationAction(ISD::UDIV     , EVT::i64, Custom);
 
-  setOperationAction(ISD::ADDC     , MVT::i64, Expand);
-  setOperationAction(ISD::ADDE     , MVT::i64, Expand);
-  setOperationAction(ISD::SUBC     , MVT::i64, Expand);
-  setOperationAction(ISD::SUBE     , MVT::i64, Expand);
+  setOperationAction(ISD::ADDC     , EVT::i64, Expand);
+  setOperationAction(ISD::ADDE     , EVT::i64, Expand);
+  setOperationAction(ISD::SUBC     , EVT::i64, Expand);
+  setOperationAction(ISD::SUBE     , EVT::i64, Expand);
 
-  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i64, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i64, Expand);
 
-  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
-  setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
-  setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
+  setOperationAction(ISD::SRL_PARTS, EVT::i64, Custom);
+  setOperationAction(ISD::SRA_PARTS, EVT::i64, Expand);
+  setOperationAction(ISD::SHL_PARTS, EVT::i64, Expand);
 
   // We don't support sin/cos/sqrt/pow
-  setOperationAction(ISD::FSIN , MVT::f64, Expand);
-  setOperationAction(ISD::FCOS , MVT::f64, Expand);
-  setOperationAction(ISD::FSIN , MVT::f32, Expand);
-  setOperationAction(ISD::FCOS , MVT::f32, Expand);
+  setOperationAction(ISD::FSIN , EVT::f64, Expand);
+  setOperationAction(ISD::FCOS , EVT::f64, Expand);
+  setOperationAction(ISD::FSIN , EVT::f32, Expand);
+  setOperationAction(ISD::FCOS , EVT::f32, Expand);
 
-  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
-  setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+  setOperationAction(ISD::FSQRT, EVT::f64, Expand);
+  setOperationAction(ISD::FSQRT, EVT::f32, Expand);
 
-  setOperationAction(ISD::FPOW , MVT::f32, Expand);
-  setOperationAction(ISD::FPOW , MVT::f64, Expand);
+  setOperationAction(ISD::FPOW , EVT::f32, Expand);
+  setOperationAction(ISD::FPOW , EVT::f64, Expand);
 
-  setOperationAction(ISD::SETCC, MVT::f32, Promote);
+  setOperationAction(ISD::SETCC, EVT::f32, Promote);
 
-  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote);
+  setOperationAction(ISD::BIT_CONVERT, EVT::f32, Promote);
 
   // We don't have line number support yet.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
-  setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
-  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
+  setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
+  setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
 
   // Not implemented yet.
-  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 
-  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
+  setOperationAction(ISD::STACKSAVE, EVT::Other, Expand); 
+  setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64, Expand);
 
   // We want to legalize GlobalAddress and ConstantPool and
   // ExternalSymbols nodes into the appropriate instructions to
   // materialize the address.
-  setOperationAction(ISD::GlobalAddress,  MVT::i64, Custom);
-  setOperationAction(ISD::ConstantPool,   MVT::i64, Custom);
-  setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
+  setOperationAction(ISD::GlobalAddress,  EVT::i64, Custom);
+  setOperationAction(ISD::ConstantPool,   EVT::i64, Custom);
+  setOperationAction(ISD::ExternalSymbol, EVT::i64, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i64, Custom);
 
-  setOperationAction(ISD::VASTART, MVT::Other, Custom);
-  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
-  setOperationAction(ISD::VACOPY,  MVT::Other, Custom);
-  setOperationAction(ISD::VAARG,   MVT::Other, Custom);
-  setOperationAction(ISD::VAARG,   MVT::i32,   Custom);
+  setOperationAction(ISD::VASTART, EVT::Other, Custom);
+  setOperationAction(ISD::VAEND,   EVT::Other, Expand);
+  setOperationAction(ISD::VACOPY,  EVT::Other, Custom);
+  setOperationAction(ISD::VAARG,   EVT::Other, Custom);
+  setOperationAction(ISD::VAARG,   EVT::i32,   Custom);
 
-  setOperationAction(ISD::JumpTable, MVT::i64, Custom);
-  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+  setOperationAction(ISD::JumpTable, EVT::i64, Custom);
+  setOperationAction(ISD::JumpTable, EVT::i32, Custom);
 
   setStackPointerRegisterToSaveRestore(Alpha::R30);
 
@@ -168,8 +168,8 @@
   computeRegisterProperties();
 }
 
-MVT::SimpleValueType AlphaTargetLowering::getSetCCResultType(MVT VT) const {
-  return MVT::i64;
+EVT::SimpleValueType AlphaTargetLowering::getSetCCResultType(EVT VT) const {
+  return EVT::i64;
 }
 
 const char *AlphaTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -196,16 +196,16 @@
 }
 
 static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
   SDValue Zero = DAG.getConstant(0, PtrVT);
   // FIXME there isn't really any debug info here
   DebugLoc dl = Op.getDebugLoc();
   
-  SDValue Hi = DAG.getNode(AlphaISD::GPRelHi,  dl, MVT::i64, JTI,
-                             DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
-  SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, JTI, Hi);
+  SDValue Hi = DAG.getNode(AlphaISD::GPRelHi,  dl, EVT::i64, JTI,
+                             DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
+  SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, EVT::i64, JTI, Hi);
   return Lo;
 }
 
@@ -285,7 +285,7 @@
       assert(VA.isMemLoc());
 
       if (StackPtr.getNode() == 0)
-        StackPtr = DAG.getCopyFromReg(Chain, dl, Alpha::R30, MVT::i64);
+        StackPtr = DAG.getCopyFromReg(Chain, dl, Alpha::R30, EVT::i64);
 
       SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                    StackPtr,
@@ -299,7 +299,7 @@
   // Transform all store nodes into one single node because all store nodes are
   // independent of each other.
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token chain and
@@ -313,7 +313,7 @@
   }
 
   // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(Callee);
@@ -406,27 +406,27 @@
   
   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
     SDValue argt;
-    MVT ObjectVT = Ins[ArgNo].VT;
+    EVT ObjectVT = Ins[ArgNo].VT;
     SDValue ArgVal;
 
     if (ArgNo  < 6) {
       switch (ObjectVT.getSimpleVT()) {
       default:
         assert(false && "Invalid value type!");
-      case MVT::f64:
+      case EVT::f64:
         args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo], 
                                       &Alpha::F8RCRegClass);
         ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
         break;
-      case MVT::f32:
+      case EVT::f32:
         args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo], 
                                       &Alpha::F4RCRegClass);
         ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
         break;
-      case MVT::i64:
+      case EVT::i64:
         args_int[ArgNo] = AddLiveIn(MF, args_int[ArgNo], 
                                     &Alpha::GPRCRegClass);
-        ArgVal = DAG.getCopyFromReg(Chain, dl, args_int[ArgNo], MVT::i64);
+        ArgVal = DAG.getCopyFromReg(Chain, dl, args_int[ArgNo], EVT::i64);
         break;
       }
     } else { //more args
@@ -435,7 +435,7 @@
 
       // Create the SelectionDAG nodes corresponding to a load
       //from this parameter
-      SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
+      SDValue FIN = DAG.getFrameIndex(FI, EVT::i64);
       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0);
     }
     InVals.push_back(ArgVal);
@@ -448,22 +448,22 @@
     for (int i = 0; i < 6; ++i) {
       if (TargetRegisterInfo::isPhysicalRegister(args_int[i]))
         args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass);
-      SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], MVT::i64);
+      SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], EVT::i64);
       int FI = MFI->CreateFixedObject(8, -8 * (6 - i));
       if (i == 0) VarArgsBase = FI;
-      SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64);
+      SDValue SDFI = DAG.getFrameIndex(FI, EVT::i64);
       LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0));
 
       if (TargetRegisterInfo::isPhysicalRegister(args_float[i]))
         args_float[i] = AddLiveIn(MF, args_float[i], &Alpha::F8RCRegClass);
-      argt = DAG.getCopyFromReg(Chain, dl, args_float[i], MVT::f64);
+      argt = DAG.getCopyFromReg(Chain, dl, args_float[i], EVT::f64);
       FI = MFI->CreateFixedObject(8, - 8 * (12 - i));
-      SDFI = DAG.getFrameIndex(FI, MVT::i64);
+      SDFI = DAG.getFrameIndex(FI, EVT::i64);
       LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0));
     }
 
     //Set up a token factor with all the stack traffic
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &LS[0], LS.size());
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &LS[0], LS.size());
   }
 
   return Chain;
@@ -478,7 +478,7 @@
   SDValue Copy = DAG.getCopyToReg(Chain, dl, Alpha::R26,
                                   DAG.getNode(AlphaISD::GlobalRetAddr,
                                               DebugLoc::getUnknownLoc(),
-                                              MVT::i64),
+                                              EVT::i64),
                                   SDValue());
   switch (Outs.size()) {
   default:
@@ -487,7 +487,7 @@
     break;
     //return SDValue(); // ret void is legal
   case 1: {
-    MVT ArgVT = Outs[0].Val.getValueType();
+    EVT ArgVT = Outs[0].Val.getValueType();
     unsigned ArgReg;
     if (ArgVT.isInteger())
       ArgReg = Alpha::R0;
@@ -502,7 +502,7 @@
     break;
   }
   case 2: {
-    MVT ArgVT = Outs[0].Val.getValueType();
+    EVT ArgVT = Outs[0].Val.getValueType();
     unsigned ArgReg1, ArgReg2;
     if (ArgVT.isInteger()) {
       ArgReg1 = Alpha::R0;
@@ -528,7 +528,7 @@
   }
   }
   return DAG.getNode(AlphaISD::RET_FLAG, dl, 
-                     MVT::Other, Copy, Copy.getValue(1));
+                     EVT::Other, Copy, Copy.getValue(1));
 }
 
 void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
@@ -538,26 +538,26 @@
   const Value *VAListS = cast<SrcValueSDNode>(N->getOperand(2))->getValue();
   DebugLoc dl = N->getDebugLoc();
 
-  SDValue Base = DAG.getLoad(MVT::i64, dl, Chain, VAListP, VAListS, 0);
-  SDValue Tmp = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
-                              DAG.getConstant(8, MVT::i64));
-  SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Base.getValue(1),
-                                    Tmp, NULL, 0, MVT::i32);
-  DataPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Base, Offset);
+  SDValue Base = DAG.getLoad(EVT::i64, dl, Chain, VAListP, VAListS, 0);
+  SDValue Tmp = DAG.getNode(ISD::ADD, dl, EVT::i64, VAListP,
+                              DAG.getConstant(8, EVT::i64));
+  SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, EVT::i64, Base.getValue(1),
+                                    Tmp, NULL, 0, EVT::i32);
+  DataPtr = DAG.getNode(ISD::ADD, dl, EVT::i64, Base, Offset);
   if (N->getValueType(0).isFloatingPoint())
   {
     //if fp && Offset < 6*8, then subtract 6*8 from DataPtr
-    SDValue FPDataPtr = DAG.getNode(ISD::SUB, dl, MVT::i64, DataPtr,
-                                      DAG.getConstant(8*6, MVT::i64));
-    SDValue CC = DAG.getSetCC(dl, MVT::i64, Offset,
-                                DAG.getConstant(8*6, MVT::i64), ISD::SETLT);
-    DataPtr = DAG.getNode(ISD::SELECT, dl, MVT::i64, CC, FPDataPtr, DataPtr);
+    SDValue FPDataPtr = DAG.getNode(ISD::SUB, dl, EVT::i64, DataPtr,
+                                      DAG.getConstant(8*6, EVT::i64));
+    SDValue CC = DAG.getSetCC(dl, EVT::i64, Offset,
+                                DAG.getConstant(8*6, EVT::i64), ISD::SETLT);
+    DataPtr = DAG.getNode(ISD::SELECT, dl, EVT::i64, CC, FPDataPtr, DataPtr);
   }
 
-  SDValue NewOffset = DAG.getNode(ISD::ADD, dl, MVT::i64, Offset,
-                                    DAG.getConstant(8, MVT::i64));
+  SDValue NewOffset = DAG.getNode(ISD::ADD, dl, EVT::i64, Offset,
+                                    DAG.getConstant(8, EVT::i64));
   Chain = DAG.getTruncStore(Offset.getValue(1), dl, NewOffset, Tmp, NULL, 0,
-                            MVT::i32);
+                            EVT::i32);
 }
 
 /// LowerOperation - Provide custom lowering hooks for some operations.
@@ -573,7 +573,7 @@
     switch (IntNo) {
     default: break;    // Don't custom lower most intrinsics.
     case Intrinsic::alpha_umulh:
-      return DAG.getNode(ISD::MULHU, dl, MVT::i64, 
+      return DAG.getNode(ISD::MULHU, dl, EVT::i64, 
                          Op.getOperand(1), Op.getOperand(2));
     }
   }
@@ -582,23 +582,23 @@
     SDValue ShOpLo = Op.getOperand(0);
     SDValue ShOpHi = Op.getOperand(1);
     SDValue ShAmt  = Op.getOperand(2);
-    SDValue bm = DAG.getNode(ISD::SUB, dl, MVT::i64, 
-			     DAG.getConstant(64, MVT::i64), ShAmt);
-    SDValue BMCC = DAG.getSetCC(dl, MVT::i64, bm,
-                                DAG.getConstant(0, MVT::i64), ISD::SETLE);
+    SDValue bm = DAG.getNode(ISD::SUB, dl, EVT::i64, 
+                             DAG.getConstant(64, EVT::i64), ShAmt);
+    SDValue BMCC = DAG.getSetCC(dl, EVT::i64, bm,
+                                DAG.getConstant(0, EVT::i64), ISD::SETLE);
     // if 64 - shAmt <= 0
-    SDValue Hi_Neg = DAG.getConstant(0, MVT::i64);
-    SDValue ShAmt_Neg = DAG.getNode(ISD::SUB, dl, MVT::i64,
-				    DAG.getConstant(0, MVT::i64), bm);
-    SDValue Lo_Neg = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpHi, ShAmt_Neg);
+    SDValue Hi_Neg = DAG.getConstant(0, EVT::i64);
+    SDValue ShAmt_Neg = DAG.getNode(ISD::SUB, dl, EVT::i64,
+                                    DAG.getConstant(0, EVT::i64), bm);
+    SDValue Lo_Neg = DAG.getNode(ISD::SRL, dl, EVT::i64, ShOpHi, ShAmt_Neg);
     // else
-    SDValue carries = DAG.getNode(ISD::SHL, dl, MVT::i64, ShOpHi, bm);
-    SDValue Hi_Pos =  DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpHi, ShAmt);
-    SDValue Lo_Pos = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpLo, ShAmt);
-    Lo_Pos = DAG.getNode(ISD::OR, dl, MVT::i64, Lo_Pos, carries);
+    SDValue carries = DAG.getNode(ISD::SHL, dl, EVT::i64, ShOpHi, bm);
+    SDValue Hi_Pos =  DAG.getNode(ISD::SRL, dl, EVT::i64, ShOpHi, ShAmt);
+    SDValue Lo_Pos = DAG.getNode(ISD::SRL, dl, EVT::i64, ShOpLo, ShAmt);
+    Lo_Pos = DAG.getNode(ISD::OR, dl, EVT::i64, Lo_Pos, carries);
     // Merge
-    SDValue Hi = DAG.getNode(ISD::SELECT, dl, MVT::i64, BMCC, Hi_Neg, Hi_Pos);
-    SDValue Lo = DAG.getNode(ISD::SELECT, dl, MVT::i64, BMCC, Lo_Neg, Lo_Pos);
+    SDValue Hi = DAG.getNode(ISD::SELECT, dl, EVT::i64, BMCC, Hi_Neg, Hi_Pos);
+    SDValue Lo = DAG.getNode(ISD::SELECT, dl, EVT::i64, BMCC, Lo_Neg, Lo_Pos);
     SDValue Ops[2] = { Lo, Hi };
     return DAG.getMergeValues(Ops, 2, dl);
   }			
@@ -608,35 +608,35 @@
 
 
   case ISD::SINT_TO_FP: {
-    assert(Op.getOperand(0).getValueType() == MVT::i64 &&
+    assert(Op.getOperand(0).getValueType() == EVT::i64 &&
            "Unhandled SINT_TO_FP type in custom expander!");
     SDValue LD;
-    bool isDouble = Op.getValueType() == MVT::f64;
-    LD = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op.getOperand(0));
+    bool isDouble = Op.getValueType() == EVT::f64;
+    LD = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, Op.getOperand(0));
     SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
-                               isDouble?MVT::f64:MVT::f32, LD);
+                               isDouble?EVT::f64:EVT::f32, LD);
     return FP;
   }
   case ISD::FP_TO_SINT: {
-    bool isDouble = Op.getOperand(0).getValueType() == MVT::f64;
+    bool isDouble = Op.getOperand(0).getValueType() == EVT::f64;
     SDValue src = Op.getOperand(0);
 
     if (!isDouble) //Promote
-      src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, src);
+      src = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, src);
     
-    src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
+    src = DAG.getNode(AlphaISD::CVTTQ_, dl, EVT::f64, src);
 
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, src);
+    return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i64, src);
   }
   case ISD::ConstantPool: {
     ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
     Constant *C = CP->getConstVal();
-    SDValue CPI = DAG.getTargetConstantPool(C, MVT::i64, CP->getAlignment());
+    SDValue CPI = DAG.getTargetConstantPool(C, EVT::i64, CP->getAlignment());
     // FIXME there isn't really any debug info here
     
-    SDValue Hi = DAG.getNode(AlphaISD::GPRelHi,  dl, MVT::i64, CPI,
-                               DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
-    SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, CPI, Hi);
+    SDValue Hi = DAG.getNode(AlphaISD::GPRelHi,  dl, EVT::i64, CPI,
+                               DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
+    SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, EVT::i64, CPI, Hi);
     return Lo;
   }
   case ISD::GlobalTLSAddress:
@@ -644,31 +644,31 @@
   case ISD::GlobalAddress: {
     GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
     GlobalValue *GV = GSDN->getGlobal();
-    SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i64, GSDN->getOffset());
+    SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i64, GSDN->getOffset());
     // FIXME there isn't really any debug info here
 
     //    if (!GV->hasWeakLinkage() && !GV->isDeclaration() && !GV->hasLinkOnceLinkage()) {
     if (GV->hasLocalLinkage()) {
-      SDValue Hi = DAG.getNode(AlphaISD::GPRelHi,  dl, MVT::i64, GA,
-                                DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
-      SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, GA, Hi);
+      SDValue Hi = DAG.getNode(AlphaISD::GPRelHi,  dl, EVT::i64, GA,
+                                DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
+      SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, EVT::i64, GA, Hi);
       return Lo;
     } else
-      return DAG.getNode(AlphaISD::RelLit, dl, MVT::i64, GA, 
-                         DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
+      return DAG.getNode(AlphaISD::RelLit, dl, EVT::i64, GA, 
+                         DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
   }
   case ISD::ExternalSymbol: {
-    return DAG.getNode(AlphaISD::RelLit, dl, MVT::i64, 
+    return DAG.getNode(AlphaISD::RelLit, dl, EVT::i64, 
                        DAG.getTargetExternalSymbol(cast<ExternalSymbolSDNode>(Op)
-                                                   ->getSymbol(), MVT::i64),
-                       DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
+                                                   ->getSymbol(), EVT::i64),
+                       DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
   }
 
   case ISD::UREM:
   case ISD::SREM:
     //Expand only on constant case
     if (Op.getOperand(1).getOpcode() == ISD::Constant) {
-      MVT VT = Op.getNode()->getValueType(0);
+      EVT VT = Op.getNode()->getValueType(0);
       SDValue Tmp1 = Op.getNode()->getOpcode() == ISD::UREM ?
         BuildUDIV(Op.getNode(), DAG, NULL) :
         BuildSDIV(Op.getNode(), DAG, NULL);
@@ -692,8 +692,8 @@
       }
       SDValue Tmp1 = Op.getOperand(0),
         Tmp2 = Op.getOperand(1),
-        Addr = DAG.getExternalSymbol(opstr, MVT::i64);
-      return DAG.getNode(AlphaISD::DivCall, dl, MVT::i64, Addr, Tmp1, Tmp2);
+        Addr = DAG.getExternalSymbol(opstr, EVT::i64);
+      return DAG.getNode(AlphaISD::DivCall, dl, EVT::i64, Addr, Tmp1, Tmp2);
     }
     break;
 
@@ -702,9 +702,9 @@
     LowerVAARG(Op.getNode(), Chain, DataPtr, DAG);
 
     SDValue Result;
-    if (Op.getValueType() == MVT::i32)
-      Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Chain, DataPtr,
-                              NULL, 0, MVT::i32);
+    if (Op.getValueType() == EVT::i32)
+      Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, EVT::i64, Chain, DataPtr,
+                              NULL, 0, EVT::i32);
     else
       Result = DAG.getLoad(Op.getValueType(), dl, Chain, DataPtr, NULL, 0);
     return Result;
@@ -718,13 +718,13 @@
     
     SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP, SrcS, 0);
     SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP, DestS, 0);
-    SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP, 
-                               DAG.getConstant(8, MVT::i64));
-    Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Result, 
-                         NP, NULL,0, MVT::i32);
-    SDValue NPD = DAG.getNode(ISD::ADD, dl, MVT::i64, DestP,
-                                DAG.getConstant(8, MVT::i64));
-    return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD, NULL, 0, MVT::i32);
+    SDValue NP = DAG.getNode(ISD::ADD, dl, EVT::i64, SrcP, 
+                               DAG.getConstant(8, EVT::i64));
+    Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, EVT::i64, Result, 
+                         NP, NULL,0, EVT::i32);
+    SDValue NPD = DAG.getNode(ISD::ADD, dl, EVT::i64, DestP,
+                                DAG.getConstant(8, EVT::i64));
+    return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD, NULL, 0, EVT::i32);
   }
   case ISD::VASTART: {
     SDValue Chain = Op.getOperand(0);
@@ -732,16 +732,16 @@
     const Value *VAListS = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
     
     // vastart stores the address of the VarArgsBase and VarArgsOffset
-    SDValue FR  = DAG.getFrameIndex(VarArgsBase, MVT::i64);
+    SDValue FR  = DAG.getFrameIndex(VarArgsBase, EVT::i64);
     SDValue S1  = DAG.getStore(Chain, dl, FR, VAListP, VAListS, 0);
-    SDValue SA2 = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
-                                DAG.getConstant(8, MVT::i64));
-    return DAG.getTruncStore(S1, dl, DAG.getConstant(VarArgsOffset, MVT::i64),
-                             SA2, NULL, 0, MVT::i32);
+    SDValue SA2 = DAG.getNode(ISD::ADD, dl, EVT::i64, VAListP,
+                                DAG.getConstant(8, EVT::i64));
+    return DAG.getTruncStore(S1, dl, DAG.getConstant(VarArgsOffset, EVT::i64),
+                             SA2, NULL, 0, EVT::i32);
   }
   case ISD::RETURNADDR:        
     return DAG.getNode(AlphaISD::GlobalRetAddr, DebugLoc::getUnknownLoc(),
-                       MVT::i64);
+                       EVT::i64);
       //FIXME: implement
   case ISD::FRAMEADDR:          break;
   }
@@ -753,7 +753,7 @@
                                              SmallVectorImpl<SDValue>&Results,
                                              SelectionDAG &DAG) {
   DebugLoc dl = N->getDebugLoc();
-  assert(N->getValueType(0) == MVT::i32 &&
+  assert(N->getValueType(0) == EVT::i32 &&
          N->getOpcode() == ISD::VAARG &&
          "Unknown node to custom promote!");
 
@@ -784,7 +784,7 @@
 
 std::vector<unsigned> AlphaTargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT VT) const {
+                                  EVT VT) const {
   if (Constraint.size() == 1) {
     switch (Constraint[0]) {
     default: break;  // Unknown constriant letter
diff --git a/lib/Target/Alpha/AlphaISelLowering.h b/lib/Target/Alpha/AlphaISelLowering.h
index 4e1de33..f18d6fa 100644
--- a/lib/Target/Alpha/AlphaISelLowering.h
+++ b/lib/Target/Alpha/AlphaISelLowering.h
@@ -67,7 +67,7 @@
     explicit AlphaTargetLowering(TargetMachine &TM);
     
     /// getSetCCResultType - Get the SETCC result ValueType
-    virtual MVT::SimpleValueType getSetCCResultType(MVT VT) const;
+    virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///
@@ -92,7 +92,7 @@
 
     std::vector<unsigned> 
       getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                        MVT VT) const;
+                                        EVT VT) const;
 
     bool hasITOF() { return useITOF; }
 
diff --git a/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp b/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
index 062b22a..3bf8fc5 100644
--- a/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
+++ b/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
@@ -94,9 +94,9 @@
     // Selects to ADDpp FI, 0 which in turn will become ADDimm7 SP, imm or ADDpp
     // SP, Px
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
-    SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
-    return CurDAG->SelectNodeTo(N, BF::ADDpp, MVT::i32, TFI,
-                                CurDAG->getTargetConstant(0, MVT::i32));
+    SDValue TFI = CurDAG->getTargetFrameIndex(FI, EVT::i32);
+    return CurDAG->SelectNodeTo(N, BF::ADDpp, EVT::i32, TFI,
+                                CurDAG->getTargetConstant(0, EVT::i32));
   }
   }
 
@@ -109,8 +109,8 @@
                                           SDValue &Offset) {
   FrameIndexSDNode *FIN = 0;
   if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
-    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
+    Offset = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
   if (Addr.getOpcode() == ISD::ADD) {
@@ -119,8 +119,8 @@
         (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) &&
         (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
       // Constant positive word offset from frame index
-      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
       return true;
     }
   }
@@ -179,9 +179,9 @@
         SDNode *Copy =
           DAG.getTargetNode(TargetInstrInfo::COPY_TO_REGCLASS,
                             NI->getDebugLoc(),
-                            MVT::i32,
+                            EVT::i32,
                             UI.getUse().get(),
-                            DAG.getTargetConstant(BF::DRegClassID, MVT::i32));
+                            DAG.getTargetConstant(BF::DRegClassID, EVT::i32));
         UpdateNodeOperand(DAG, *UI, UI.getOperandNo(), SDValue(Copy, 0));
       }
     }
diff --git a/lib/Target/Blackfin/BlackfinISelLowering.cpp b/lib/Target/Blackfin/BlackfinISelLowering.cpp
index 8b83791..f40d958 100644
--- a/lib/Target/Blackfin/BlackfinISelLowering.cpp
+++ b/lib/Target/Blackfin/BlackfinISelLowering.cpp
@@ -40,92 +40,92 @@
 
 BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
   : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
-  setShiftAmountType(MVT::i16);
+  setShiftAmountType(EVT::i16);
   setBooleanContents(ZeroOrOneBooleanContent);
   setStackPointerRegisterToSaveRestore(BF::SP);
   setIntDivIsCheap(false);
 
   // Set up the legal register classes.
-  addRegisterClass(MVT::i32, BF::DRegisterClass);
-  addRegisterClass(MVT::i16, BF::D16RegisterClass);
+  addRegisterClass(EVT::i32, BF::DRegisterClass);
+  addRegisterClass(EVT::i16, BF::D16RegisterClass);
 
   computeRegisterProperties();
 
   // Blackfin doesn't have i1 loads or stores
-  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
 
-  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
-  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
+  setOperationAction(ISD::JumpTable,     EVT::i32, Custom);
 
-  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
-  setOperationAction(ISD::BR_JT,     MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC, EVT::Other, Expand);
+  setOperationAction(ISD::BR_JT,     EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,     EVT::Other, Expand);
 
   // i16 registers don't do much
-  setOperationAction(ISD::AND,   MVT::i16, Promote);
-  setOperationAction(ISD::OR,    MVT::i16, Promote);
-  setOperationAction(ISD::XOR,   MVT::i16, Promote);
-  setOperationAction(ISD::CTPOP, MVT::i16, Promote);
+  setOperationAction(ISD::AND,   EVT::i16, Promote);
+  setOperationAction(ISD::OR,    EVT::i16, Promote);
+  setOperationAction(ISD::XOR,   EVT::i16, Promote);
+  setOperationAction(ISD::CTPOP, EVT::i16, Promote);
   // The expansion of CTLZ/CTTZ uses AND/OR, so we might as well promote
   // immediately.
-  setOperationAction(ISD::CTLZ,  MVT::i16, Promote);
-  setOperationAction(ISD::CTTZ,  MVT::i16, Promote);
-  setOperationAction(ISD::SETCC, MVT::i16, Promote);
+  setOperationAction(ISD::CTLZ,  EVT::i16, Promote);
+  setOperationAction(ISD::CTTZ,  EVT::i16, Promote);
+  setOperationAction(ISD::SETCC, EVT::i16, Promote);
 
   // Blackfin has no division
-  setOperationAction(ISD::SDIV,    MVT::i16, Expand);
-  setOperationAction(ISD::SDIV,    MVT::i32, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::SREM,    MVT::i16, Expand);
-  setOperationAction(ISD::SREM,    MVT::i32, Expand);
-  setOperationAction(ISD::UDIV,    MVT::i16, Expand);
-  setOperationAction(ISD::UDIV,    MVT::i32, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::UREM,    MVT::i16, Expand);
-  setOperationAction(ISD::UREM,    MVT::i32, Expand);
+  setOperationAction(ISD::SDIV,    EVT::i16, Expand);
+  setOperationAction(ISD::SDIV,    EVT::i32, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i16, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i32, Expand);
+  setOperationAction(ISD::SREM,    EVT::i16, Expand);
+  setOperationAction(ISD::SREM,    EVT::i32, Expand);
+  setOperationAction(ISD::UDIV,    EVT::i16, Expand);
+  setOperationAction(ISD::UDIV,    EVT::i32, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i16, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i32, Expand);
+  setOperationAction(ISD::UREM,    EVT::i16, Expand);
+  setOperationAction(ISD::UREM,    EVT::i32, Expand);
 
-  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::MULHU,     MVT::i32, Expand);
-  setOperationAction(ISD::MULHS,     MVT::i32, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::MULHU,     EVT::i32, Expand);
+  setOperationAction(ISD::MULHS,     EVT::i32, Expand);
 
   // No carry-in operations.
-  setOperationAction(ISD::ADDE, MVT::i32, Custom);
-  setOperationAction(ISD::SUBE, MVT::i32, Custom);
+  setOperationAction(ISD::ADDE, EVT::i32, Custom);
+  setOperationAction(ISD::SUBE, EVT::i32, Custom);
 
   // Blackfin has no intrinsics for these particular operations.
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
-  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+  setOperationAction(ISD::MEMBARRIER, EVT::Other, Expand);
+  setOperationAction(ISD::BSWAP, EVT::i32, Expand);
 
-  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
 
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
   // i32 has native CTPOP, but not CTLZ/CTTZ
-  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
-  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+  setOperationAction(ISD::CTLZ, EVT::i32, Expand);
+  setOperationAction(ISD::CTTZ, EVT::i32, Expand);
 
   // READCYCLECOUNTER needs special type legalization.
-  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
+  setOperationAction(ISD::READCYCLECOUNTER, EVT::i64, Custom);
 
   // We don't have line number support yet.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
-  setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
-  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
-  setOperationAction(ISD::DECLARE, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
+  setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
+  setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
+  setOperationAction(ISD::DECLARE, EVT::Other, Expand);
 
   // Use the default implementation.
-  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
-  setOperationAction(ISD::VAEND, MVT::Other, Expand);
-  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+  setOperationAction(ISD::VACOPY, EVT::Other, Expand);
+  setOperationAction(ISD::VAEND, EVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE, EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
 }
 
 const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -137,10 +137,10 @@
   }
 }
 
-MVT::SimpleValueType BlackfinTargetLowering::getSetCCResultType(MVT VT) const {
+EVT::SimpleValueType BlackfinTargetLowering::getSetCCResultType(EVT VT) const {
   // SETCC always sets the CC register. Technically that is an i1 register, but
   // that type is not legal, so we treat it as an i32 register.
-  return MVT::i32;
+  return EVT::i32;
 }
 
 SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
@@ -148,16 +148,16 @@
   DebugLoc DL = Op.getDebugLoc();
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
 
-  Op = DAG.getTargetGlobalAddress(GV, MVT::i32);
-  return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
+  Op = DAG.getTargetGlobalAddress(GV, EVT::i32);
+  return DAG.getNode(BFISD::Wrapper, DL, EVT::i32, Op);
 }
 
 SDValue BlackfinTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
   DebugLoc DL = Op.getDebugLoc();
   int JTI = cast<JumpTableSDNode>(Op)->getIndex();
 
-  Op = DAG.getTargetJumpTable(JTI, MVT::i32);
-  return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
+  Op = DAG.getTargetJumpTable(JTI, EVT::i32);
+  return DAG.getNode(BFISD::Wrapper, DL, EVT::i32, Op);
 }
 
 SDValue
@@ -181,7 +181,7 @@
     CCValAssign &VA = ArgLocs[i];
 
     if (VA.isRegLoc()) {
-      MVT RegVT = VA.getLocVT();
+      EVT RegVT = VA.getLocVT();
       TargetRegisterClass *RC = VA.getLocReg() == BF::P0 ?
         BF::PRegisterClass : BF::DRegisterClass;
       assert(RC->contains(VA.getLocReg()) && "Unexpected regclass in CCState");
@@ -209,7 +209,7 @@
       assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
       unsigned ObjSize = VA.getLocVT().getStoreSizeInBits()/8;
       int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset());
-      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+      SDValue FIN = DAG.getFrameIndex(FI, EVT::i32);
       InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
     }
   }
@@ -268,9 +268,9 @@
   }
 
   if (Flag.getNode()) {
-    return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+    return DAG.getNode(BFISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
   } else {
-    return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain);
+    return DAG.getNode(BFISD::RET_FLAG, dl, EVT::Other, Chain);
   }
 }
 
@@ -325,10 +325,10 @@
       assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
       int Offset = VA.getLocMemOffset();
       assert(Offset%4 == 0 && "Unaligned LocMemOffset");
-      assert(VA.getLocVT()==MVT::i32 && "Illegal CCValAssign type");
-      SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, MVT::i32);
+      assert(VA.getLocVT()==EVT::i32 && "Illegal CCValAssign type");
+      SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, EVT::i32);
       SDValue OffsetN = DAG.getIntPtrConstant(Offset);
-      OffsetN = DAG.getNode(ISD::ADD, dl, MVT::i32, SPN, OffsetN);
+      OffsetN = DAG.getNode(ISD::ADD, dl, EVT::i32, SPN, OffsetN);
       MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, OffsetN,
                                          PseudoSourceValue::getStack(),
                                          Offset));
@@ -338,7 +338,7 @@
   // Transform all store nodes into one single node because
   // all store nodes are independent of each other.
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token
@@ -356,13 +356,13 @@
   // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
   // Likewise ExternalSymbol -> TargetExternalSymbol.
   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
-    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
+    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i32);
   else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
-    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
+    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i32);
 
-  std::vector<MVT> NodeTys;
-  NodeTys.push_back(MVT::Other);   // Returns a chain
-  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
+  std::vector<EVT> NodeTys;
+  NodeTys.push_back(EVT::Other);   // Returns a chain
+  NodeTys.push_back(EVT::Flag);    // Returns a flag for retval copy to use.
   SDValue Ops[] = { Chain, Callee, InFlag };
   Chain = DAG.getNode(BFISD::CALL, dl, NodeTys, Ops,
                       InFlag.getNode() ? 3 : 2);
@@ -423,25 +423,25 @@
   unsigned Opcode = Op.getOpcode()==ISD::ADDE ? BF::ADD : BF::SUB;
 
   // zext incoming carry flag in AC0 to 32 bits
-  SDNode* CarryIn = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, MVT::i32,
+  SDNode* CarryIn = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, EVT::i32,
                                       /* flag= */ Op.getOperand(2));
-  CarryIn = DAG.getTargetNode(BF::MOVECC_zext, dl, MVT::i32,
+  CarryIn = DAG.getTargetNode(BF::MOVECC_zext, dl, EVT::i32,
                               SDValue(CarryIn, 0));
 
   // Add operands, produce sum and carry flag
-  SDNode *Sum = DAG.getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
+  SDNode *Sum = DAG.getTargetNode(Opcode, dl, EVT::i32, EVT::Flag,
                                   Op.getOperand(0), Op.getOperand(1));
 
   // Store intermediate carry from Sum
-  SDNode* Carry1 = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, MVT::i32,
+  SDNode* Carry1 = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, EVT::i32,
                                      /* flag= */ SDValue(Sum, 1));
 
   // Add incoming carry, again producing an output flag
-  Sum = DAG.getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
+  Sum = DAG.getTargetNode(Opcode, dl, EVT::i32, EVT::Flag,
                           SDValue(Sum, 0), SDValue(CarryIn, 0));
 
   // Update AC0 with the intermediate carry, producing a flag.
-  SDNode *CarryOut = DAG.getTargetNode(BF::OR_ac0_cc, dl, MVT::Flag,
+  SDNode *CarryOut = DAG.getTargetNode(BF::OR_ac0_cc, dl, EVT::Flag,
                                        SDValue(Carry1, 0));
 
   // Compose (i32, flag) pair
@@ -480,10 +480,10 @@
     // CYCLES2. Reading CYCLES will latch the value of CYCLES2, so we must read
     // CYCLES2 last.
     SDValue TheChain = N->getOperand(0);
-    SDValue lo = DAG.getCopyFromReg(TheChain, dl, BF::CYCLES, MVT::i32);
-    SDValue hi = DAG.getCopyFromReg(lo.getValue(1), dl, BF::CYCLES2, MVT::i32);
+    SDValue lo = DAG.getCopyFromReg(TheChain, dl, BF::CYCLES, EVT::i32);
+    SDValue hi = DAG.getCopyFromReg(lo.getValue(1), dl, BF::CYCLES2, EVT::i32);
     // Use a buildpair to merge the two 32-bit values into a 64-bit one.
-    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, lo, hi));
+    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, lo, hi));
     // Outgoing chain. If we were to use the chain from lo instead, it would be
     // possible to entirely eliminate the CYCLES2 read in (i32 (trunc
     // readcyclecounter)). Unfortunately this could possibly delay the CYCLES2
@@ -549,7 +549,7 @@
 /// getRegForInlineAsmConstraint - Return register no and class for a C_Register
 /// constraint.
 std::pair<unsigned, const TargetRegisterClass*> BlackfinTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
+getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
   typedef std::pair<unsigned, const TargetRegisterClass*> Pair;
   using namespace BF;
 
@@ -559,7 +559,7 @@
   switch (Constraint[0]) {
     // Standard constraints
   case 'r':
-    return Pair(0U, VT == MVT::i16 ? D16RegisterClass : DPRegisterClass);
+    return Pair(0U, VT == EVT::i16 ? D16RegisterClass : DPRegisterClass);
 
     // Blackfin-specific constraints
   case 'a': return Pair(0U, PRegisterClass);
@@ -585,7 +585,7 @@
 }
 
 std::vector<unsigned> BlackfinTargetLowering::
-getRegClassForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
+getRegClassForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
   using namespace BF;
 
   if (Constraint.size() != 1)
diff --git a/lib/Target/Blackfin/BlackfinISelLowering.h b/lib/Target/Blackfin/BlackfinISelLowering.h
index a0e88a9..1cbb98d 100644
--- a/lib/Target/Blackfin/BlackfinISelLowering.h
+++ b/lib/Target/Blackfin/BlackfinISelLowering.h
@@ -33,7 +33,7 @@
     int VarArgsFrameOffset;   // Frame offset to start of varargs area.
   public:
     BlackfinTargetLowering(TargetMachine &TM);
-    virtual MVT::SimpleValueType getSetCCResultType(MVT VT) const;
+    virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
     virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
     virtual void ReplaceNodeResults(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
@@ -43,10 +43,10 @@
 
     ConstraintType getConstraintType(const std::string &Constraint) const;
     std::pair<unsigned, const TargetRegisterClass*>
-    getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+    getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                      MVT VT) const;
+                                      EVT VT) const;
     virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
     const char *getTargetNodeName(unsigned Opcode) const;
     unsigned getFunctionAlignment(const Function *F) const;
diff --git a/lib/Target/Blackfin/BlackfinInstrInfo.td b/lib/Target/Blackfin/BlackfinInstrInfo.td
index b0a2cc1..6fc34a8 100644
--- a/lib/Target/Blackfin/BlackfinInstrInfo.td
+++ b/lib/Target/Blackfin/BlackfinInstrInfo.td
@@ -42,21 +42,21 @@
 
 def trailingZeros_xform : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingZeros(),
-                                   MVT::i32);
+                                   EVT::i32);
 }]>;
 
 def trailingOnes_xform : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingOnes(),
-                                   MVT::i32);
+                                   EVT::i32);
 }]>;
 
 def LO16 : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant((unsigned short)N->getZExtValue(), MVT::i16);
+  return CurDAG->getTargetConstant((unsigned short)N->getZExtValue(), EVT::i16);
 }]>;
 
 def HI16 : SDNodeXForm<imm, [{
   // Transformation function: shift the immediate value down into the low bits.
-  return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 16, MVT::i16);
+  return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 16, EVT::i16);
 }]>;
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/Blackfin/BlackfinRegisterInfo.cpp b/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
index 86a9550..b8fc6ac 100644
--- a/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
+++ b/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
@@ -87,7 +87,7 @@
 }
 
 const TargetRegisterClass*
-BlackfinRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, MVT VT) const {
+BlackfinRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
   assert(isPhysicalRegister(reg) && "reg must be a physical register");
 
   // Pick the smallest register class of the right type that contains
@@ -96,7 +96,7 @@
   for (regclass_iterator I = regclass_begin(), E = regclass_end();
        I != E; ++I) {
     const TargetRegisterClass* RC = *I;
-    if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
+    if ((VT == EVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
         (!BestRC || RC->getNumRegs() < BestRC->getNumRegs()))
       BestRC = RC;
   }
diff --git a/lib/Target/Blackfin/BlackfinRegisterInfo.h b/lib/Target/Blackfin/BlackfinRegisterInfo.h
index 8af65a3..83abc2e 100644
--- a/lib/Target/Blackfin/BlackfinRegisterInfo.h
+++ b/lib/Target/Blackfin/BlackfinRegisterInfo.h
@@ -62,7 +62,7 @@
     }
 
     const TargetRegisterClass *getPhysicalRegisterRegClass(unsigned reg,
-                                                           MVT VT) const;
+                                                           EVT VT) const;
 
     bool hasFP(const MachineFunction &MF) const;
 
diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
index cca5288..dd7009f 100644
--- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
+++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
@@ -103,11 +103,11 @@
   bool
   isIntS16Immediate(ConstantSDNode *CN, short &Imm)
   {
-    MVT vt = CN->getValueType(0);
+    EVT vt = CN->getValueType(0);
     Imm = (short) CN->getZExtValue();
-    if (vt.getSimpleVT() >= MVT::i1 && vt.getSimpleVT() <= MVT::i16) {
+    if (vt.getSimpleVT() >= EVT::i1 && vt.getSimpleVT() <= EVT::i16) {
       return true;
-    } else if (vt == MVT::i32) {
+    } else if (vt == EVT::i32) {
       int32_t i_val = (int32_t) CN->getZExtValue();
       short s_val = (short) i_val;
       return i_val == s_val;
@@ -132,8 +132,8 @@
   static bool
   isFPS16Immediate(ConstantFPSDNode *FPN, short &Imm)
   {
-    MVT vt = FPN->getValueType(0);
-    if (vt == MVT::f32) {
+    EVT vt = FPN->getValueType(0);
+    if (vt == EVT::f32) {
       int val = FloatToBits(FPN->getValueAPF().convertToFloat());
       int sval = (int) ((val << 16) >> 16);
       Imm = (short) val;
@@ -154,34 +154,34 @@
   }
 
   //===------------------------------------------------------------------===//
-  //! MVT to "useful stuff" mapping structure:
+  //! EVT to "useful stuff" mapping structure:
 
   struct valtype_map_s {
-    MVT VT;
+    EVT VT;
     unsigned ldresult_ins;      /// LDRESULT instruction (0 = undefined)
     bool ldresult_imm;          /// LDRESULT instruction requires immediate?
     unsigned lrinst;            /// LR instruction
   };
 
   const valtype_map_s valtype_map[] = {
-    { MVT::i8,    SPU::ORBIr8,  true,  SPU::LRr8 },
-    { MVT::i16,   SPU::ORHIr16, true,  SPU::LRr16 },
-    { MVT::i32,   SPU::ORIr32,  true,  SPU::LRr32 },
-    { MVT::i64,   SPU::ORr64,   false, SPU::LRr64 },
-    { MVT::f32,   SPU::ORf32,   false, SPU::LRf32 },
-    { MVT::f64,   SPU::ORf64,   false, SPU::LRf64 },
+    { EVT::i8,    SPU::ORBIr8,  true,  SPU::LRr8 },
+    { EVT::i16,   SPU::ORHIr16, true,  SPU::LRr16 },
+    { EVT::i32,   SPU::ORIr32,  true,  SPU::LRr32 },
+    { EVT::i64,   SPU::ORr64,   false, SPU::LRr64 },
+    { EVT::f32,   SPU::ORf32,   false, SPU::LRf32 },
+    { EVT::f64,   SPU::ORf64,   false, SPU::LRf64 },
     // vector types... (sigh!)
-    { MVT::v16i8, 0,            false, SPU::LRv16i8 },
-    { MVT::v8i16, 0,            false, SPU::LRv8i16 },
-    { MVT::v4i32, 0,            false, SPU::LRv4i32 },
-    { MVT::v2i64, 0,            false, SPU::LRv2i64 },
-    { MVT::v4f32, 0,            false, SPU::LRv4f32 },
-    { MVT::v2f64, 0,            false, SPU::LRv2f64 }
+    { EVT::v16i8, 0,            false, SPU::LRv16i8 },
+    { EVT::v8i16, 0,            false, SPU::LRv8i16 },
+    { EVT::v4i32, 0,            false, SPU::LRv4i32 },
+    { EVT::v2i64, 0,            false, SPU::LRv2i64 },
+    { EVT::v4f32, 0,            false, SPU::LRv4f32 },
+    { EVT::v2f64, 0,            false, SPU::LRv2f64 }
   };
 
   const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
 
-  const valtype_map_s *getValueTypeMapEntry(MVT VT)
+  const valtype_map_s *getValueTypeMapEntry(EVT VT)
   {
     const valtype_map_s *retval = 0;
     for (size_t i = 0; i < n_valtype_map; ++i) {
@@ -197,7 +197,7 @@
       std::string msg;
       raw_string_ostream Msg(msg);
       Msg << "SPUISelDAGToDAG.cpp: getValueTypeMapEntry returns NULL for "
-           << VT.getMVTString();
+           << VT.getEVTString();
       llvm_report_error(Msg.str());
     }
 #endif
@@ -211,12 +211,12 @@
 
     // Create the shuffle mask for "rotating" the borrow up one register slot
     // once the borrow is generated.
-    ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
-    ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
-    ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
-    ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0x04050607, EVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0x80808080, EVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, EVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0x80808080, EVT::i32));
 
-    return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+    return DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                        &ShufBytes[0], ShufBytes.size());
   }
 
@@ -226,12 +226,12 @@
 
     // Create the shuffle mask for "rotating" the borrow up one register slot
     // once the borrow is generated.
-    ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
-    ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
-    ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
-    ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0x04050607, EVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, EVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, EVT::i32));
+    ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, EVT::i32));
 
-    return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+    return DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                        &ShufBytes[0], ShufBytes.size());
   }
 
@@ -263,13 +263,13 @@
     /// getI32Imm - Return a target constant with the specified value, of type
     /// i32.
     inline SDValue getI32Imm(uint32_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i32);
+      return CurDAG->getTargetConstant(Imm, EVT::i32);
     }
 
     /// getI64Imm - Return a target constant with the specified value, of type
     /// i64.
     inline SDValue getI64Imm(uint64_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i64);
+      return CurDAG->getTargetConstant(Imm, EVT::i64);
     }
 
     /// getSmallIPtrImm - Return a target constant of pointer type.
@@ -278,24 +278,24 @@
       }
 
     SDNode *emitBuildVector(SDValue build_vec) {
-      MVT vecVT = build_vec.getValueType();
-      MVT eltVT = vecVT.getVectorElementType();
+      EVT vecVT = build_vec.getValueType();
+      EVT eltVT = vecVT.getVectorElementType();
       SDNode *bvNode = build_vec.getNode();
       DebugLoc dl = bvNode->getDebugLoc();
 
       // Check to see if this vector can be represented as a CellSPU immediate
       // constant by invoking all of the instruction selection predicates:
-      if (((vecVT == MVT::v8i16) &&
-           (SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i16).getNode() != 0)) ||
-          ((vecVT == MVT::v4i32) &&
-           ((SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
-            (SPU::get_ILHUvec_imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
-            (SPU::get_vec_u18imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
+      if (((vecVT == EVT::v8i16) &&
+           (SPU::get_vec_i16imm(bvNode, *CurDAG, EVT::i16).getNode() != 0)) ||
+          ((vecVT == EVT::v4i32) &&
+           ((SPU::get_vec_i16imm(bvNode, *CurDAG, EVT::i32).getNode() != 0) ||
+            (SPU::get_ILHUvec_imm(bvNode, *CurDAG, EVT::i32).getNode() != 0) ||
+            (SPU::get_vec_u18imm(bvNode, *CurDAG, EVT::i32).getNode() != 0) ||
             (SPU::get_v4i32_imm(bvNode, *CurDAG).getNode() != 0))) ||
-          ((vecVT == MVT::v2i64) &&
-           ((SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i64).getNode() != 0) ||
-            (SPU::get_ILHUvec_imm(bvNode, *CurDAG, MVT::i64).getNode() != 0) ||
-            (SPU::get_vec_u18imm(bvNode, *CurDAG, MVT::i64).getNode() != 0))))
+          ((vecVT == EVT::v2i64) &&
+           ((SPU::get_vec_i16imm(bvNode, *CurDAG, EVT::i64).getNode() != 0) ||
+            (SPU::get_ILHUvec_imm(bvNode, *CurDAG, EVT::i64).getNode() != 0) ||
+            (SPU::get_vec_u18imm(bvNode, *CurDAG, EVT::i64).getNode() != 0))))
         return Select(build_vec);
 
       // No, need to emit a constant pool spill:
@@ -323,19 +323,19 @@
     SDNode *Select(SDValue Op);
 
     //! Emit the instruction sequence for i64 shl
-    SDNode *SelectSHLi64(SDValue &Op, MVT OpVT);
+    SDNode *SelectSHLi64(SDValue &Op, EVT OpVT);
 
     //! Emit the instruction sequence for i64 srl
-    SDNode *SelectSRLi64(SDValue &Op, MVT OpVT);
+    SDNode *SelectSRLi64(SDValue &Op, EVT OpVT);
 
     //! Emit the instruction sequence for i64 sra
-    SDNode *SelectSRAi64(SDValue &Op, MVT OpVT);
+    SDNode *SelectSRAi64(SDValue &Op, EVT OpVT);
 
     //! Emit the necessary sequence for loading i64 constants:
-    SDNode *SelectI64Constant(SDValue &Op, MVT OpVT, DebugLoc dl);
+    SDNode *SelectI64Constant(SDValue &Op, EVT OpVT, DebugLoc dl);
 
     //! Alternate instruction emit sequence for loading i64 constants
-    SDNode *SelectI64Constant(uint64_t i64const, MVT OpVT, DebugLoc dl);
+    SDNode *SelectI64Constant(uint64_t i64const, EVT OpVT, DebugLoc dl);
 
     //! Returns true if the address N is an A-form (local store) address
     bool SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base,
@@ -434,7 +434,7 @@
 SPUDAGToDAGISel::SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base,
                     SDValue &Index) {
   // These match the addr256k operand type:
-  MVT OffsVT = MVT::i16;
+  EVT OffsVT = EVT::i16;
   SDValue Zero = CurDAG->getTargetConstant(0, OffsVT);
 
   switch (N.getOpcode()) {
@@ -514,7 +514,7 @@
                                       SDValue &Index, int minOffset,
                                       int maxOffset) {
   unsigned Opc = N.getOpcode();
-  MVT PtrTy = SPUtli.getPointerTy();
+  EVT PtrTy = SPUtli.getPointerTy();
 
   if (Opc == ISD::FrameIndex) {
     // Stack frame index must be less than 512 (divided by 16):
@@ -692,7 +692,7 @@
   unsigned Opc = N->getOpcode();
   int n_ops = -1;
   unsigned NewOpc;
-  MVT OpVT = Op.getValueType();
+  EVT OpVT = Op.getValueType();
   SDValue Ops[8];
   DebugLoc dl = N->getDebugLoc();
 
@@ -717,45 +717,45 @@
                                              TFI, Imm0), 0);
       n_ops = 2;
     }
-  } else if (Opc == ISD::Constant && OpVT == MVT::i64) {
+  } else if (Opc == ISD::Constant && OpVT == EVT::i64) {
     // Catch the i64 constants that end up here. Note: The backend doesn't
     // attempt to legalize the constant (it's useless because DAGCombiner
     // will insert 64-bit constants and we can't stop it).
     return SelectI64Constant(Op, OpVT, Op.getDebugLoc());
   } else if ((Opc == ISD::ZERO_EXTEND || Opc == ISD::ANY_EXTEND)
-             && OpVT == MVT::i64) {
+             && OpVT == EVT::i64) {
     SDValue Op0 = Op.getOperand(0);
-    MVT Op0VT = Op0.getValueType();
-    MVT Op0VecVT = MVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits()));
-    MVT OpVecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
+    EVT Op0VT = Op0.getValueType();
+    EVT Op0VecVT = EVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits()));
+    EVT OpVecVT = EVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
     SDValue shufMask;
 
     switch (Op0VT.getSimpleVT()) {
     default:
-      llvm_report_error("CellSPU Select: Unhandled zero/any extend MVT");
+      llvm_report_error("CellSPU Select: Unhandled zero/any extend EVT");
       /*NOTREACHED*/
-    case MVT::i32:
-      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
-                                 CurDAG->getConstant(0x80808080, MVT::i32),
-                                 CurDAG->getConstant(0x00010203, MVT::i32),
-                                 CurDAG->getConstant(0x80808080, MVT::i32),
-                                 CurDAG->getConstant(0x08090a0b, MVT::i32));
+    case EVT::i32:
+      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
+                                 CurDAG->getConstant(0x80808080, EVT::i32),
+                                 CurDAG->getConstant(0x00010203, EVT::i32),
+                                 CurDAG->getConstant(0x80808080, EVT::i32),
+                                 CurDAG->getConstant(0x08090a0b, EVT::i32));
       break;
 
-    case MVT::i16:
-      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
-                                 CurDAG->getConstant(0x80808080, MVT::i32),
-                                 CurDAG->getConstant(0x80800203, MVT::i32),
-                                 CurDAG->getConstant(0x80808080, MVT::i32),
-                                 CurDAG->getConstant(0x80800a0b, MVT::i32));
+    case EVT::i16:
+      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
+                                 CurDAG->getConstant(0x80808080, EVT::i32),
+                                 CurDAG->getConstant(0x80800203, EVT::i32),
+                                 CurDAG->getConstant(0x80808080, EVT::i32),
+                                 CurDAG->getConstant(0x80800a0b, EVT::i32));
       break;
 
-    case MVT::i8:
-      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
-                                 CurDAG->getConstant(0x80808080, MVT::i32),
-                                 CurDAG->getConstant(0x80808003, MVT::i32),
-                                 CurDAG->getConstant(0x80808080, MVT::i32),
-                                 CurDAG->getConstant(0x8080800b, MVT::i32));
+    case EVT::i8:
+      shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
+                                 CurDAG->getConstant(0x80808080, EVT::i32),
+                                 CurDAG->getConstant(0x80808003, EVT::i32),
+                                 CurDAG->getConstant(0x80808080, EVT::i32),
+                                 CurDAG->getConstant(0x8080800b, EVT::i32));
       break;
     }
 
@@ -775,21 +775,21 @@
     SelectCode(CurDAG->getNode(ISD::BIT_CONVERT, dl, OpVecVT, zextShuffle));
     return SelectCode(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
                                       zextShuffle));
-  } else if (Opc == ISD::ADD && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
+  } else if (Opc == ISD::ADD && (OpVT == EVT::i64 || OpVT == EVT::v2i64)) {
     SDNode *CGLoad =
             emitBuildVector(getCarryGenerateShufMask(*CurDAG, dl));
 
     return SelectCode(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
                                       Op.getOperand(0), Op.getOperand(1),
                                       SDValue(CGLoad, 0)));
-  } else if (Opc == ISD::SUB && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
+  } else if (Opc == ISD::SUB && (OpVT == EVT::i64 || OpVT == EVT::v2i64)) {
     SDNode *CGLoad =
             emitBuildVector(getBorrowGenerateShufMask(*CurDAG, dl));
 
     return SelectCode(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
                                       Op.getOperand(0), Op.getOperand(1),
                                       SDValue(CGLoad, 0)));
-  } else if (Opc == ISD::MUL && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
+  } else if (Opc == ISD::MUL && (OpVT == EVT::i64 || OpVT == EVT::v2i64)) {
     SDNode *CGLoad =
             emitBuildVector(getCarryGenerateShufMask(*CurDAG, dl));
 
@@ -799,8 +799,8 @@
   } else if (Opc == ISD::TRUNCATE) {
     SDValue Op0 = Op.getOperand(0);
     if ((Op0.getOpcode() == ISD::SRA || Op0.getOpcode() == ISD::SRL)
-        && OpVT == MVT::i32
-        && Op0.getValueType() == MVT::i64) {
+        && OpVT == EVT::i32
+        && Op0.getValueType() == EVT::i64) {
       // Catch (truncate:i32 ([sra|srl]:i64 arg, c)), where c >= 32
       //
       // Take advantage of the fact that the upper 32 bits are in the
@@ -817,7 +817,7 @@
           shift_amt -= 32;
           if (shift_amt > 0) {
             // Take care of the additional shift, if present:
-            SDValue shift = CurDAG->getTargetConstant(shift_amt, MVT::i32);
+            SDValue shift = CurDAG->getTargetConstant(shift_amt, EVT::i32);
             unsigned Opc = SPU::ROTMAIr32_i32;
 
             if (Op0.getOpcode() == ISD::SRL)
@@ -832,19 +832,19 @@
       }
     }
   } else if (Opc == ISD::SHL) {
-    if (OpVT == MVT::i64) {
+    if (OpVT == EVT::i64) {
       return SelectSHLi64(Op, OpVT);
     }
   } else if (Opc == ISD::SRL) {
-    if (OpVT == MVT::i64) {
+    if (OpVT == EVT::i64) {
       return SelectSRLi64(Op, OpVT);
     }
   } else if (Opc == ISD::SRA) {
-    if (OpVT == MVT::i64) {
+    if (OpVT == EVT::i64) {
       return SelectSRAi64(Op, OpVT);
     }
   } else if (Opc == ISD::FNEG
-             && (OpVT == MVT::f64 || OpVT == MVT::v2f64)) {
+             && (OpVT == EVT::f64 || OpVT == EVT::v2f64)) {
     DebugLoc dl = Op.getDebugLoc();
     // Check if the pattern is a special form of DFNMS:
     // (fneg (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC))
@@ -853,7 +853,7 @@
       SDValue Op00 = Op0.getOperand(0);
       if (Op00.getOpcode() == ISD::FMUL) {
         unsigned Opc = SPU::DFNMSf64;
-        if (OpVT == MVT::v2f64)
+        if (OpVT == EVT::v2f64)
           Opc = SPU::DFNMSv2f64;
 
         return CurDAG->getTargetNode(Opc, dl, OpVT,
@@ -863,29 +863,29 @@
       }
     }
 
-    SDValue negConst = CurDAG->getConstant(0x8000000000000000ULL, MVT::i64);
+    SDValue negConst = CurDAG->getConstant(0x8000000000000000ULL, EVT::i64);
     SDNode *signMask = 0;
     unsigned Opc = SPU::XORfneg64;
 
-    if (OpVT == MVT::f64) {
-      signMask = SelectI64Constant(negConst, MVT::i64, dl);
-    } else if (OpVT == MVT::v2f64) {
+    if (OpVT == EVT::f64) {
+      signMask = SelectI64Constant(negConst, EVT::i64, dl);
+    } else if (OpVT == EVT::v2f64) {
       Opc = SPU::XORfnegvec;
       signMask = emitBuildVector(CurDAG->getNode(ISD::BUILD_VECTOR, dl,
-                                                 MVT::v2i64,
+                                                 EVT::v2i64,
                                                  negConst, negConst));
     }
 
     return CurDAG->getTargetNode(Opc, dl, OpVT,
                                  Op.getOperand(0), SDValue(signMask, 0));
   } else if (Opc == ISD::FABS) {
-    if (OpVT == MVT::f64) {
-      SDNode *signMask = SelectI64Constant(0x7fffffffffffffffULL, MVT::i64, dl);
+    if (OpVT == EVT::f64) {
+      SDNode *signMask = SelectI64Constant(0x7fffffffffffffffULL, EVT::i64, dl);
       return CurDAG->getTargetNode(SPU::ANDfabs64, dl, OpVT,
                                    Op.getOperand(0), SDValue(signMask, 0));
-    } else if (OpVT == MVT::v2f64) {
-      SDValue absConst = CurDAG->getConstant(0x7fffffffffffffffULL, MVT::i64);
-      SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
+    } else if (OpVT == EVT::v2f64) {
+      SDValue absConst = CurDAG->getConstant(0x7fffffffffffffffULL, EVT::i64);
+      SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v2i64,
                                        absConst, absConst);
       SDNode *signMask = emitBuildVector(absVec);
       return CurDAG->getTargetNode(SPU::ANDfabsvec, dl, OpVT,
@@ -893,7 +893,7 @@
     }
   } else if (Opc == SPUISD::LDRESULT) {
     // Custom select instructions for LDRESULT
-    MVT VT = N->getValueType(0);
+    EVT VT = N->getValueType(0);
     SDValue Arg = N->getOperand(0);
     SDValue Chain = N->getOperand(1);
     SDNode *Result;
@@ -903,7 +903,7 @@
       std::string msg;
       raw_string_ostream Msg(msg);
       Msg << "LDRESULT for unsupported type: "
-           << VT.getMVTString();
+           << VT.getEVTString();
       llvm_report_error(Msg.str());
     }
 
@@ -911,9 +911,9 @@
     if (vtm->ldresult_imm) {
       SDValue Zero = CurDAG->getTargetConstant(0, VT);
 
-      Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Zero, Chain);
+      Result = CurDAG->getTargetNode(Opc, dl, VT, EVT::Other, Arg, Zero, Chain);
     } else {
-      Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Arg, Chain);
+      Result = CurDAG->getTargetNode(Opc, dl, VT, EVT::Other, Arg, Arg, Chain);
     }
 
     return Result;
@@ -924,7 +924,7 @@
     // SPUInstrInfo catches the following patterns:
     // (SPUindirect (SPUhi ...), (SPUlo ...))
     // (SPUindirect $sp, imm)
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     SDValue Op0 = N->getOperand(0);
     SDValue Op1 = N->getOperand(1);
     RegisterSDNode *RN;
@@ -967,17 +967,17 @@
  * @return The SDNode with the entire instruction sequence
  */
 SDNode *
-SPUDAGToDAGISel::SelectSHLi64(SDValue &Op, MVT OpVT) {
+SPUDAGToDAGISel::SelectSHLi64(SDValue &Op, EVT OpVT) {
   SDValue Op0 = Op.getOperand(0);
-  MVT VecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
+  EVT VecVT = EVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
   SDValue ShiftAmt = Op.getOperand(1);
-  MVT ShiftAmtVT = ShiftAmt.getValueType();
+  EVT ShiftAmtVT = ShiftAmt.getValueType();
   SDNode *VecOp0, *SelMask, *ZeroFill, *Shift = 0;
   SDValue SelMaskVal;
   DebugLoc dl = Op.getDebugLoc();
 
   VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op0);
-  SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, MVT::i16);
+  SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, EVT::i16);
   SelMask = CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT, SelMaskVal);
   ZeroFill = CurDAG->getTargetNode(SPU::ILv2i64, dl, VecVT,
                                    CurDAG->getTargetConstant(0, OpVT));
@@ -1032,11 +1032,11 @@
  * @return The SDNode with the entire instruction sequence
  */
 SDNode *
-SPUDAGToDAGISel::SelectSRLi64(SDValue &Op, MVT OpVT) {
+SPUDAGToDAGISel::SelectSRLi64(SDValue &Op, EVT OpVT) {
   SDValue Op0 = Op.getOperand(0);
-  MVT VecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
+  EVT VecVT = EVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
   SDValue ShiftAmt = Op.getOperand(1);
-  MVT ShiftAmtVT = ShiftAmt.getValueType();
+  EVT ShiftAmtVT = ShiftAmt.getValueType();
   SDNode *VecOp0, *Shift = 0;
   DebugLoc dl = Op.getDebugLoc();
 
@@ -1098,11 +1098,11 @@
  * @return The SDNode with the entire instruction sequence
  */
 SDNode *
-SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, MVT OpVT) {
+SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, EVT OpVT) {
   // Promote Op0 to vector
-  MVT VecVT = MVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
+  EVT VecVT = EVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
   SDValue ShiftAmt = Op.getOperand(1);
-  MVT ShiftAmtVT = ShiftAmt.getValueType();
+  EVT ShiftAmtVT = ShiftAmt.getValueType();
   DebugLoc dl = Op.getDebugLoc();
 
   SDNode *VecOp0 =
@@ -1110,16 +1110,16 @@
 
   SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
   SDNode *SignRot =
-    CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
+    CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, dl, EVT::v2i64,
                           SDValue(VecOp0, 0), SignRotAmt);
   SDNode *UpperHalfSign =
-    CurDAG->getTargetNode(SPU::ORi32_v4i32, dl, MVT::i32, SDValue(SignRot, 0));
+    CurDAG->getTargetNode(SPU::ORi32_v4i32, dl, EVT::i32, SDValue(SignRot, 0));
 
   SDNode *UpperHalfSignMask =
     CurDAG->getTargetNode(SPU::FSM64r32, dl, VecVT, SDValue(UpperHalfSign, 0));
   SDNode *UpperLowerMask =
     CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT,
-                          CurDAG->getTargetConstant(0xff00ULL, MVT::i16));
+                          CurDAG->getTargetConstant(0xff00ULL, EVT::i16));
   SDNode *UpperLowerSelect =
     CurDAG->getTargetNode(SPU::SELBv2i64, dl, VecVT,
                           SDValue(UpperHalfSignMask, 0),
@@ -1166,15 +1166,15 @@
 /*!
  Do the magic necessary to load an i64 constant
  */
-SDNode *SPUDAGToDAGISel::SelectI64Constant(SDValue& Op, MVT OpVT,
+SDNode *SPUDAGToDAGISel::SelectI64Constant(SDValue& Op, EVT OpVT,
                                            DebugLoc dl) {
   ConstantSDNode *CN = cast<ConstantSDNode>(Op.getNode());
   return SelectI64Constant(CN->getZExtValue(), OpVT, dl);
 }
 
-SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, MVT OpVT,
+SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
                                            DebugLoc dl) {
-  MVT OpVecVT = MVT::getVectorVT(OpVT, 2);
+  EVT OpVecVT = EVT::getVectorVT(OpVT, 2);
   SDValue i64vec =
           SPU::LowerV2I64Splat(OpVecVT, *CurDAG, Value64, dl);
 
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 5cf9afe..5c80324 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -40,26 +40,26 @@
 namespace {
   std::map<unsigned, const char *> node_names;
 
-  //! MVT mapping to useful data for Cell SPU
+  //! EVT mapping to useful data for Cell SPU
   struct valtype_map_s {
-    const MVT   valtype;
+    const EVT   valtype;
     const int   prefslot_byte;
   };
 
   const valtype_map_s valtype_map[] = {
-    { MVT::i1,   3 },
-    { MVT::i8,   3 },
-    { MVT::i16,  2 },
-    { MVT::i32,  0 },
-    { MVT::f32,  0 },
-    { MVT::i64,  0 },
-    { MVT::f64,  0 },
-    { MVT::i128, 0 }
+    { EVT::i1,   3 },
+    { EVT::i8,   3 },
+    { EVT::i16,  2 },
+    { EVT::i32,  0 },
+    { EVT::f32,  0 },
+    { EVT::i64,  0 },
+    { EVT::f64,  0 },
+    { EVT::i128, 0 }
   };
 
   const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
 
-  const valtype_map_s *getValueTypeMapEntry(MVT VT) {
+  const valtype_map_s *getValueTypeMapEntry(EVT VT) {
     const valtype_map_s *retval = 0;
 
     for (size_t i = 0; i < n_valtype_map; ++i) {
@@ -74,7 +74,7 @@
       std::string msg;
       raw_string_ostream Msg(msg);
       Msg << "getValueTypeMapEntry returns NULL for "
-           << VT.getMVTString();
+           << VT.getEVTString();
       llvm_report_error(Msg.str());
     }
 #endif
@@ -100,8 +100,8 @@
     TargetLowering::ArgListTy Args;
     TargetLowering::ArgListEntry Entry;
     for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
-      MVT ArgVT = Op.getOperand(i).getValueType();
-      const Type *ArgTy = ArgVT.getTypeForMVT();
+      EVT ArgVT = Op.getOperand(i).getValueType();
+      const Type *ArgTy = ArgVT.getTypeForEVT();
       Entry.Node = Op.getOperand(i);
       Entry.Ty = ArgTy;
       Entry.isSExt = isSigned;
@@ -112,7 +112,7 @@
                                            TLI.getPointerTy());
 
     // Splice the libcall in wherever FindInputOutputChains tells us to.
-    const Type *RetTy = Op.getNode()->getValueType(0).getTypeForMVT();
+    const Type *RetTy = Op.getNode()->getValueType(0).getTypeForEVT();
     std::pair<SDValue, SDValue> CallInfo =
             TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                             0, CallingConv::C, false,
@@ -138,37 +138,37 @@
   setLibcallName(RTLIB::DIV_F64, "__fast_divdf3");
 
   // Set up the SPU's register classes:
-  addRegisterClass(MVT::i8,   SPU::R8CRegisterClass);
-  addRegisterClass(MVT::i16,  SPU::R16CRegisterClass);
-  addRegisterClass(MVT::i32,  SPU::R32CRegisterClass);
-  addRegisterClass(MVT::i64,  SPU::R64CRegisterClass);
-  addRegisterClass(MVT::f32,  SPU::R32FPRegisterClass);
-  addRegisterClass(MVT::f64,  SPU::R64FPRegisterClass);
-  addRegisterClass(MVT::i128, SPU::GPRCRegisterClass);
+  addRegisterClass(EVT::i8,   SPU::R8CRegisterClass);
+  addRegisterClass(EVT::i16,  SPU::R16CRegisterClass);
+  addRegisterClass(EVT::i32,  SPU::R32CRegisterClass);
+  addRegisterClass(EVT::i64,  SPU::R64CRegisterClass);
+  addRegisterClass(EVT::f32,  SPU::R32FPRegisterClass);
+  addRegisterClass(EVT::f64,  SPU::R64FPRegisterClass);
+  addRegisterClass(EVT::i128, SPU::GPRCRegisterClass);
 
   // SPU has no sign or zero extended loads for i1, i8, i16:
-  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
 
-  setLoadExtAction(ISD::EXTLOAD,  MVT::f32, Expand);
-  setLoadExtAction(ISD::EXTLOAD,  MVT::f64, Expand);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::f64, Expand);
 
-  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
-  setTruncStoreAction(MVT::i128, MVT::i32, Expand);
-  setTruncStoreAction(MVT::i128, MVT::i16, Expand);
-  setTruncStoreAction(MVT::i128, MVT::i8, Expand);
+  setTruncStoreAction(EVT::i128, EVT::i64, Expand);
+  setTruncStoreAction(EVT::i128, EVT::i32, Expand);
+  setTruncStoreAction(EVT::i128, EVT::i16, Expand);
+  setTruncStoreAction(EVT::i128, EVT::i8, Expand);
 
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 
   // SPU constant load actions are custom lowered:
-  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
-  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
+  setOperationAction(ISD::ConstantFP, EVT::f32, Legal);
+  setOperationAction(ISD::ConstantFP, EVT::f64, Custom);
 
   // SPU's loads and stores have to be custom lowered:
-  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::i128;
+  for (unsigned sctype = (unsigned) EVT::i8; sctype < (unsigned) EVT::i128;
        ++sctype) {
-    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
+    EVT::SimpleValueType VT = (EVT::SimpleValueType)sctype;
 
     setOperationAction(ISD::LOAD,   VT, Custom);
     setOperationAction(ISD::STORE,  VT, Custom);
@@ -176,222 +176,222 @@
     setLoadExtAction(ISD::ZEXTLOAD, VT, Custom);
     setLoadExtAction(ISD::SEXTLOAD, VT, Custom);
 
-    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::i8; --stype) {
-      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
+    for (unsigned stype = sctype - 1; stype >= (unsigned) EVT::i8; --stype) {
+      EVT::SimpleValueType StoreVT = (EVT::SimpleValueType) stype;
       setTruncStoreAction(VT, StoreVT, Expand);
     }
   }
 
-  for (unsigned sctype = (unsigned) MVT::f32; sctype < (unsigned) MVT::f64;
+  for (unsigned sctype = (unsigned) EVT::f32; sctype < (unsigned) EVT::f64;
        ++sctype) {
-    MVT::SimpleValueType VT = (MVT::SimpleValueType) sctype;
+    EVT::SimpleValueType VT = (EVT::SimpleValueType) sctype;
 
     setOperationAction(ISD::LOAD,   VT, Custom);
     setOperationAction(ISD::STORE,  VT, Custom);
 
-    for (unsigned stype = sctype - 1; stype >= (unsigned) MVT::f32; --stype) {
-      MVT::SimpleValueType StoreVT = (MVT::SimpleValueType) stype;
+    for (unsigned stype = sctype - 1; stype >= (unsigned) EVT::f32; --stype) {
+      EVT::SimpleValueType StoreVT = (EVT::SimpleValueType) stype;
       setTruncStoreAction(VT, StoreVT, Expand);
     }
   }
 
   // Expand the jumptable branches
-  setOperationAction(ISD::BR_JT,        MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,        MVT::Other, Expand);
+  setOperationAction(ISD::BR_JT,        EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,        EVT::Other, Expand);
 
   // Custom lower SELECT_CC for most cases, but expand by default
-  setOperationAction(ISD::SELECT_CC,    MVT::Other, Expand);
-  setOperationAction(ISD::SELECT_CC,    MVT::i8,    Custom);
-  setOperationAction(ISD::SELECT_CC,    MVT::i16,   Custom);
-  setOperationAction(ISD::SELECT_CC,    MVT::i32,   Custom);
-  setOperationAction(ISD::SELECT_CC,    MVT::i64,   Custom);
+  setOperationAction(ISD::SELECT_CC,    EVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC,    EVT::i8,    Custom);
+  setOperationAction(ISD::SELECT_CC,    EVT::i16,   Custom);
+  setOperationAction(ISD::SELECT_CC,    EVT::i32,   Custom);
+  setOperationAction(ISD::SELECT_CC,    EVT::i64,   Custom);
 
   // SPU has no intrinsics for these particular operations:
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
+  setOperationAction(ISD::MEMBARRIER, EVT::Other, Expand);
 
   // SPU has no division/remainder instructions
-  setOperationAction(ISD::SREM,    MVT::i8,   Expand);
-  setOperationAction(ISD::UREM,    MVT::i8,   Expand);
-  setOperationAction(ISD::SDIV,    MVT::i8,   Expand);
-  setOperationAction(ISD::UDIV,    MVT::i8,   Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i8,   Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i8,   Expand);
-  setOperationAction(ISD::SREM,    MVT::i16,  Expand);
-  setOperationAction(ISD::UREM,    MVT::i16,  Expand);
-  setOperationAction(ISD::SDIV,    MVT::i16,  Expand);
-  setOperationAction(ISD::UDIV,    MVT::i16,  Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i16,  Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i16,  Expand);
-  setOperationAction(ISD::SREM,    MVT::i32,  Expand);
-  setOperationAction(ISD::UREM,    MVT::i32,  Expand);
-  setOperationAction(ISD::SDIV,    MVT::i32,  Expand);
-  setOperationAction(ISD::UDIV,    MVT::i32,  Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i32,  Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i32,  Expand);
-  setOperationAction(ISD::SREM,    MVT::i64,  Expand);
-  setOperationAction(ISD::UREM,    MVT::i64,  Expand);
-  setOperationAction(ISD::SDIV,    MVT::i64,  Expand);
-  setOperationAction(ISD::UDIV,    MVT::i64,  Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i64,  Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i64,  Expand);
-  setOperationAction(ISD::SREM,    MVT::i128, Expand);
-  setOperationAction(ISD::UREM,    MVT::i128, Expand);
-  setOperationAction(ISD::SDIV,    MVT::i128, Expand);
-  setOperationAction(ISD::UDIV,    MVT::i128, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i128, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i128, Expand);
+  setOperationAction(ISD::SREM,    EVT::i8,   Expand);
+  setOperationAction(ISD::UREM,    EVT::i8,   Expand);
+  setOperationAction(ISD::SDIV,    EVT::i8,   Expand);
+  setOperationAction(ISD::UDIV,    EVT::i8,   Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i8,   Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i8,   Expand);
+  setOperationAction(ISD::SREM,    EVT::i16,  Expand);
+  setOperationAction(ISD::UREM,    EVT::i16,  Expand);
+  setOperationAction(ISD::SDIV,    EVT::i16,  Expand);
+  setOperationAction(ISD::UDIV,    EVT::i16,  Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i16,  Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i16,  Expand);
+  setOperationAction(ISD::SREM,    EVT::i32,  Expand);
+  setOperationAction(ISD::UREM,    EVT::i32,  Expand);
+  setOperationAction(ISD::SDIV,    EVT::i32,  Expand);
+  setOperationAction(ISD::UDIV,    EVT::i32,  Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i32,  Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i32,  Expand);
+  setOperationAction(ISD::SREM,    EVT::i64,  Expand);
+  setOperationAction(ISD::UREM,    EVT::i64,  Expand);
+  setOperationAction(ISD::SDIV,    EVT::i64,  Expand);
+  setOperationAction(ISD::UDIV,    EVT::i64,  Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i64,  Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i64,  Expand);
+  setOperationAction(ISD::SREM,    EVT::i128, Expand);
+  setOperationAction(ISD::UREM,    EVT::i128, Expand);
+  setOperationAction(ISD::SDIV,    EVT::i128, Expand);
+  setOperationAction(ISD::UDIV,    EVT::i128, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i128, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i128, Expand);
 
   // We don't support sin/cos/sqrt/fmod
-  setOperationAction(ISD::FSIN , MVT::f64, Expand);
-  setOperationAction(ISD::FCOS , MVT::f64, Expand);
-  setOperationAction(ISD::FREM , MVT::f64, Expand);
-  setOperationAction(ISD::FSIN , MVT::f32, Expand);
-  setOperationAction(ISD::FCOS , MVT::f32, Expand);
-  setOperationAction(ISD::FREM , MVT::f32, Expand);
+  setOperationAction(ISD::FSIN , EVT::f64, Expand);
+  setOperationAction(ISD::FCOS , EVT::f64, Expand);
+  setOperationAction(ISD::FREM , EVT::f64, Expand);
+  setOperationAction(ISD::FSIN , EVT::f32, Expand);
+  setOperationAction(ISD::FCOS , EVT::f32, Expand);
+  setOperationAction(ISD::FREM , EVT::f32, Expand);
 
   // Expand fsqrt to the appropriate libcall (NOTE: should use h/w fsqrt
   // for f32!)
-  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
-  setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+  setOperationAction(ISD::FSQRT, EVT::f64, Expand);
+  setOperationAction(ISD::FSQRT, EVT::f32, Expand);
 
-  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+  setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
+  setOperationAction(ISD::FCOPYSIGN, EVT::f32, Expand);
 
   // SPU can do rotate right and left, so legalize it... but customize for i8
   // because instructions don't exist.
 
   // FIXME: Change from "expand" to appropriate type once ROTR is supported in
   //        .td files.
-  setOperationAction(ISD::ROTR, MVT::i32,    Expand /*Legal*/);
-  setOperationAction(ISD::ROTR, MVT::i16,    Expand /*Legal*/);
-  setOperationAction(ISD::ROTR, MVT::i8,     Expand /*Custom*/);
+  setOperationAction(ISD::ROTR, EVT::i32,    Expand /*Legal*/);
+  setOperationAction(ISD::ROTR, EVT::i16,    Expand /*Legal*/);
+  setOperationAction(ISD::ROTR, EVT::i8,     Expand /*Custom*/);
 
-  setOperationAction(ISD::ROTL, MVT::i32,    Legal);
-  setOperationAction(ISD::ROTL, MVT::i16,    Legal);
-  setOperationAction(ISD::ROTL, MVT::i8,     Custom);
+  setOperationAction(ISD::ROTL, EVT::i32,    Legal);
+  setOperationAction(ISD::ROTL, EVT::i16,    Legal);
+  setOperationAction(ISD::ROTL, EVT::i8,     Custom);
 
   // SPU has no native version of shift left/right for i8
-  setOperationAction(ISD::SHL,  MVT::i8,     Custom);
-  setOperationAction(ISD::SRL,  MVT::i8,     Custom);
-  setOperationAction(ISD::SRA,  MVT::i8,     Custom);
+  setOperationAction(ISD::SHL,  EVT::i8,     Custom);
+  setOperationAction(ISD::SRL,  EVT::i8,     Custom);
+  setOperationAction(ISD::SRA,  EVT::i8,     Custom);
 
   // Make these operations legal and handle them during instruction selection:
-  setOperationAction(ISD::SHL,  MVT::i64,    Legal);
-  setOperationAction(ISD::SRL,  MVT::i64,    Legal);
-  setOperationAction(ISD::SRA,  MVT::i64,    Legal);
+  setOperationAction(ISD::SHL,  EVT::i64,    Legal);
+  setOperationAction(ISD::SRL,  EVT::i64,    Legal);
+  setOperationAction(ISD::SRA,  EVT::i64,    Legal);
 
   // Custom lower i8, i32 and i64 multiplications
-  setOperationAction(ISD::MUL,  MVT::i8,     Custom);
-  setOperationAction(ISD::MUL,  MVT::i32,    Legal);
-  setOperationAction(ISD::MUL,  MVT::i64,    Legal);
+  setOperationAction(ISD::MUL,  EVT::i8,     Custom);
+  setOperationAction(ISD::MUL,  EVT::i32,    Legal);
+  setOperationAction(ISD::MUL,  EVT::i64,    Legal);
 
   // Expand double-width multiplication
   // FIXME: It would probably be reasonable to support some of these operations
-  setOperationAction(ISD::UMUL_LOHI, MVT::i8,  Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i8,  Expand);
-  setOperationAction(ISD::MULHU,     MVT::i8,  Expand);
-  setOperationAction(ISD::MULHS,     MVT::i8,  Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
-  setOperationAction(ISD::MULHU,     MVT::i16, Expand);
-  setOperationAction(ISD::MULHS,     MVT::i16, Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::MULHU,     MVT::i32, Expand);
-  setOperationAction(ISD::MULHS,     MVT::i32, Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
-  setOperationAction(ISD::MULHU,     MVT::i64, Expand);
-  setOperationAction(ISD::MULHS,     MVT::i64, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i8,  Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i8,  Expand);
+  setOperationAction(ISD::MULHU,     EVT::i8,  Expand);
+  setOperationAction(ISD::MULHS,     EVT::i8,  Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i16, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i16, Expand);
+  setOperationAction(ISD::MULHU,     EVT::i16, Expand);
+  setOperationAction(ISD::MULHS,     EVT::i16, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::MULHU,     EVT::i32, Expand);
+  setOperationAction(ISD::MULHS,     EVT::i32, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i64, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i64, Expand);
+  setOperationAction(ISD::MULHU,     EVT::i64, Expand);
+  setOperationAction(ISD::MULHS,     EVT::i64, Expand);
 
   // Need to custom handle (some) common i8, i64 math ops
-  setOperationAction(ISD::ADD,  MVT::i8,     Custom);
-  setOperationAction(ISD::ADD,  MVT::i64,    Legal);
-  setOperationAction(ISD::SUB,  MVT::i8,     Custom);
-  setOperationAction(ISD::SUB,  MVT::i64,    Legal);
+  setOperationAction(ISD::ADD,  EVT::i8,     Custom);
+  setOperationAction(ISD::ADD,  EVT::i64,    Legal);
+  setOperationAction(ISD::SUB,  EVT::i8,     Custom);
+  setOperationAction(ISD::SUB,  EVT::i64,    Legal);
 
   // SPU does not have BSWAP. It does have i32 support for CTLZ.
   // CTPOP has to be custom lowered.
-  setOperationAction(ISD::BSWAP, MVT::i32,   Expand);
-  setOperationAction(ISD::BSWAP, MVT::i64,   Expand);
+  setOperationAction(ISD::BSWAP, EVT::i32,   Expand);
+  setOperationAction(ISD::BSWAP, EVT::i64,   Expand);
 
-  setOperationAction(ISD::CTPOP, MVT::i8,    Custom);
-  setOperationAction(ISD::CTPOP, MVT::i16,   Custom);
-  setOperationAction(ISD::CTPOP, MVT::i32,   Custom);
-  setOperationAction(ISD::CTPOP, MVT::i64,   Custom);
-  setOperationAction(ISD::CTPOP, MVT::i128,  Expand);
+  setOperationAction(ISD::CTPOP, EVT::i8,    Custom);
+  setOperationAction(ISD::CTPOP, EVT::i16,   Custom);
+  setOperationAction(ISD::CTPOP, EVT::i32,   Custom);
+  setOperationAction(ISD::CTPOP, EVT::i64,   Custom);
+  setOperationAction(ISD::CTPOP, EVT::i128,  Expand);
 
-  setOperationAction(ISD::CTTZ , MVT::i8,    Expand);
-  setOperationAction(ISD::CTTZ , MVT::i16,   Expand);
-  setOperationAction(ISD::CTTZ , MVT::i32,   Expand);
-  setOperationAction(ISD::CTTZ , MVT::i64,   Expand);
-  setOperationAction(ISD::CTTZ , MVT::i128,  Expand);
+  setOperationAction(ISD::CTTZ , EVT::i8,    Expand);
+  setOperationAction(ISD::CTTZ , EVT::i16,   Expand);
+  setOperationAction(ISD::CTTZ , EVT::i32,   Expand);
+  setOperationAction(ISD::CTTZ , EVT::i64,   Expand);
+  setOperationAction(ISD::CTTZ , EVT::i128,  Expand);
 
-  setOperationAction(ISD::CTLZ , MVT::i8,    Promote);
-  setOperationAction(ISD::CTLZ , MVT::i16,   Promote);
-  setOperationAction(ISD::CTLZ , MVT::i32,   Legal);
-  setOperationAction(ISD::CTLZ , MVT::i64,   Expand);
-  setOperationAction(ISD::CTLZ , MVT::i128,  Expand);
+  setOperationAction(ISD::CTLZ , EVT::i8,    Promote);
+  setOperationAction(ISD::CTLZ , EVT::i16,   Promote);
+  setOperationAction(ISD::CTLZ , EVT::i32,   Legal);
+  setOperationAction(ISD::CTLZ , EVT::i64,   Expand);
+  setOperationAction(ISD::CTLZ , EVT::i128,  Expand);
 
   // SPU has a version of select that implements (a&~c)|(b&c), just like
   // select ought to work:
-  setOperationAction(ISD::SELECT, MVT::i8,   Legal);
-  setOperationAction(ISD::SELECT, MVT::i16,  Legal);
-  setOperationAction(ISD::SELECT, MVT::i32,  Legal);
-  setOperationAction(ISD::SELECT, MVT::i64,  Legal);
+  setOperationAction(ISD::SELECT, EVT::i8,   Legal);
+  setOperationAction(ISD::SELECT, EVT::i16,  Legal);
+  setOperationAction(ISD::SELECT, EVT::i32,  Legal);
+  setOperationAction(ISD::SELECT, EVT::i64,  Legal);
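As an aside for readers unfamiliar with the (a&~c)|(b&c) form mentioned in the comment above, the following standalone C++ sketch (not part of this patch; the helper name and values are purely illustrative) shows why that identity is a branch-free select: with a per-bit mask c, the result takes bits of b where c is 1 and bits of a where c is 0.

#include <cassert>
#include <cstdint>

// Branch-free bitwise select: picks bits of b where the mask c is 1 and bits
// of a where c is 0 -- the same (a & ~c) | (b & c) form the SPU select uses.
static uint32_t bitwise_select(uint32_t a, uint32_t b, uint32_t c) {
  return (a & ~c) | (b & c);
}

int main() {
  // An all-ones mask yields b, an all-zeros mask yields a.
  assert(bitwise_select(0x12345678u, 0x9abcdef0u, 0xffffffffu) == 0x9abcdef0u);
  assert(bitwise_select(0x12345678u, 0x9abcdef0u, 0x00000000u) == 0x12345678u);
  // A mixed mask merges the two operands bit by bit.
  assert(bitwise_select(0x0000ffffu, 0xffff0000u, 0xff00ff00u) == 0xff0000ffu);
  return 0;
}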
 
-  setOperationAction(ISD::SETCC, MVT::i8,    Legal);
-  setOperationAction(ISD::SETCC, MVT::i16,   Legal);
-  setOperationAction(ISD::SETCC, MVT::i32,   Legal);
-  setOperationAction(ISD::SETCC, MVT::i64,   Legal);
-  setOperationAction(ISD::SETCC, MVT::f64,   Custom);
+  setOperationAction(ISD::SETCC, EVT::i8,    Legal);
+  setOperationAction(ISD::SETCC, EVT::i16,   Legal);
+  setOperationAction(ISD::SETCC, EVT::i32,   Legal);
+  setOperationAction(ISD::SETCC, EVT::i64,   Legal);
+  setOperationAction(ISD::SETCC, EVT::f64,   Custom);
 
   // Custom lower i128 -> i64 truncates
-  setOperationAction(ISD::TRUNCATE, MVT::i64, Custom);
+  setOperationAction(ISD::TRUNCATE, EVT::i64, Custom);
 
-  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
-  setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i8, Promote);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i8, Promote);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i16, Promote);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i16, Promote);
   // SPU has a legal FP -> signed INT instruction for f32, but f64 needs to be
   // expanded to a libcall, hence the custom lowering:
-  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
-  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
-  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Expand);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Expand);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i32, Custom);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i32, Custom);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i64, Expand);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i64, Expand);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i128, Expand);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i128, Expand);
 
   // FDIV on SPU requires custom lowering
-  setOperationAction(ISD::FDIV, MVT::f64, Expand);      // to libcall
+  setOperationAction(ISD::FDIV, EVT::f64, Expand);      // to libcall
 
   // SPU has [U|S]INT_TO_FP for f32->i32, but not for f64->i32, f64->i64:
-  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i8,  Promote);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i8,  Promote);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i32, Custom);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i16, Promote);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i8,  Promote);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i32, Custom);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i16, Promote);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i8,  Promote);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i64, Custom);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i64, Custom);
 
-  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
-  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
-  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
-  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);
+  setOperationAction(ISD::BIT_CONVERT, EVT::i32, Legal);
+  setOperationAction(ISD::BIT_CONVERT, EVT::f32, Legal);
+  setOperationAction(ISD::BIT_CONVERT, EVT::i64, Legal);
+  setOperationAction(ISD::BIT_CONVERT, EVT::f64, Legal);
 
   // We cannot sextinreg(i1).  Expand to shifts.
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
   // Support label based line numbers.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
 
   // We want to legalize GlobalAddress and ConstantPool nodes into the
   // appropriate instructions to materialize the address.
-  for (unsigned sctype = (unsigned) MVT::i8; sctype < (unsigned) MVT::f128;
+  for (unsigned sctype = (unsigned) EVT::i8; sctype < (unsigned) EVT::f128;
        ++sctype) {
-    MVT::SimpleValueType VT = (MVT::SimpleValueType)sctype;
+    EVT::SimpleValueType VT = (EVT::SimpleValueType)sctype;
 
     setOperationAction(ISD::GlobalAddress,  VT, Custom);
     setOperationAction(ISD::ConstantPool,   VT, Custom);
@@ -399,42 +399,42 @@
   }
 
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
-  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
+  setOperationAction(ISD::VASTART           , EVT::Other, Custom);
 
   // Use the default implementation.
-  setOperationAction(ISD::VAARG             , MVT::Other, Expand);
-  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
-  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
-  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Expand);
+  setOperationAction(ISD::VAARG             , EVT::Other, Expand);
+  setOperationAction(ISD::VACOPY            , EVT::Other, Expand);
+  setOperationAction(ISD::VAEND             , EVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE         , EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE      , EVT::Other, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32  , Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64  , Expand);
 
   // Cell SPU has instructions for converting between i64 and fp.
-  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i64, Custom);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i64, Custom);
 
   // To take advantage of the above i64 FP_TO_SINT, promote i32 FP_TO_UINT
-  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i32, Promote);
 
   // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
-  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
+  setOperationAction(ISD::BUILD_PAIR, EVT::i64, Expand);
 
   // First set operation action for all vector types to expand. Then we
   // will selectively turn on ones that can be effectively codegen'd.
-  addRegisterClass(MVT::v16i8, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v8i16, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v4i32, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v2i64, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v4f32, SPU::VECREGRegisterClass);
-  addRegisterClass(MVT::v2f64, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v16i8, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v8i16, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v4i32, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v2i64, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v4f32, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v2f64, SPU::VECREGRegisterClass);
 
   // "Odd size" vector classes that we're willing to support:
-  addRegisterClass(MVT::v2i32, SPU::VECREGRegisterClass);
+  addRegisterClass(EVT::v2i32, SPU::VECREGRegisterClass);
 
-  for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
-       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
-    MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
+  for (unsigned i = (unsigned)EVT::FIRST_VECTOR_VALUETYPE;
+       i <= (unsigned)EVT::LAST_VECTOR_VALUETYPE; ++i) {
+    EVT::SimpleValueType VT = (EVT::SimpleValueType)i;
 
     // add/sub are legal for all supported vector VT's.
     setOperationAction(ISD::ADD,     VT, Legal);
@@ -465,14 +465,14 @@
     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
   }
 
-  setOperationAction(ISD::AND, MVT::v16i8, Custom);
-  setOperationAction(ISD::OR,  MVT::v16i8, Custom);
-  setOperationAction(ISD::XOR, MVT::v16i8, Custom);
-  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
+  setOperationAction(ISD::AND, EVT::v16i8, Custom);
+  setOperationAction(ISD::OR,  EVT::v16i8, Custom);
+  setOperationAction(ISD::XOR, EVT::v16i8, Custom);
+  setOperationAction(ISD::SCALAR_TO_VECTOR, EVT::v4f32, Custom);
 
-  setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
+  setOperationAction(ISD::FDIV, EVT::v4f32, Legal);
 
-  setShiftAmountType(MVT::i32);
+  setShiftAmountType(EVT::i32);
   setBooleanContents(ZeroOrNegativeOneBooleanContent);
 
   setStackPointerRegisterToSaveRestore(SPU::R1);
@@ -539,11 +539,11 @@
 // Return the Cell SPU's SETCC result type
 //===----------------------------------------------------------------------===//
 
-MVT::SimpleValueType SPUTargetLowering::getSetCCResultType(MVT VT) const {
+EVT::SimpleValueType SPUTargetLowering::getSetCCResultType(EVT VT) const {
   // i16 and i32 are valid SETCC result types
-  return ((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) ?
+  return ((VT == EVT::i8 || VT == EVT::i16 || VT == EVT::i32) ?
     VT.getSimpleVT() :
-    MVT::i32);
+    EVT::i32);
 }
 
 //===----------------------------------------------------------------------===//
@@ -562,7 +562,7 @@
  within a 16-byte block, we have to rotate to extract the requested element.
 
  For extending loads, we also want to ensure that the following sequence is
- emitted, e.g. for MVT::f32 extending load to MVT::f64:
+ emitted, e.g. for EVT::f32 extending load to EVT::f64:
 
 \verbatim
 %1  v16i8,ch = load
@@ -576,9 +576,9 @@
 LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
   LoadSDNode *LN = cast<LoadSDNode>(Op);
   SDValue the_chain = LN->getChain();
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  MVT InVT = LN->getMemoryVT();
-  MVT OutVT = Op.getValueType();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT InVT = LN->getMemoryVT();
+  EVT OutVT = Op.getValueType();
   ISD::LoadExtType ExtType = LN->getExtensionType();
   unsigned alignment = LN->getAlignment();
   const valtype_map_s *vtm = getValueTypeMapEntry(InVT);
@@ -604,7 +604,7 @@
         if (rotamt < 0)
           rotamt += 16;
 
-        rotate = DAG.getConstant(rotamt, MVT::i16);
+        rotate = DAG.getConstant(rotamt, EVT::i16);
 
         // Simplify the base pointer for this case:
         basePtr = basePtr.getOperand(0);
@@ -622,7 +622,7 @@
         int64_t rotamt = -vtm->prefslot_byte;
         if (rotamt < 0)
           rotamt += 16;
-        rotate = DAG.getConstant(rotamt, MVT::i16);
+        rotate = DAG.getConstant(rotamt, EVT::i16);
       } else {
         // Offset the rotate amount by the basePtr and the preferred slot
         // byte offset
@@ -670,7 +670,7 @@
     }
 
     // Re-emit as a v16i8 vector load
-    result = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
+    result = DAG.getLoad(EVT::v16i8, dl, the_chain, basePtr,
                          LN->getSrcValue(), LN->getSrcValueOffset(),
                          LN->isVolatile(), 16);
 
@@ -678,12 +678,12 @@
     the_chain = result.getValue(1);
 
     // Rotate into the preferred slot:
-    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, MVT::v16i8,
+    result = DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, EVT::v16i8,
                          result.getValue(0), rotate);
 
     // Convert the loaded v16i8 vector to the appropriate vector type
     // specified by the operand:
-    MVT vecVT = MVT::getVectorVT(InVT, (128 / InVT.getSizeInBits()));
+    EVT vecVT = EVT::getVectorVT(InVT, (128 / InVT.getSizeInBits()));
     result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
                          DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));
 
@@ -701,7 +701,7 @@
       result = DAG.getNode(NewOpc, dl, OutVT, result);
     }
 
-    SDVTList retvts = DAG.getVTList(OutVT, MVT::Other);
+    SDVTList retvts = DAG.getVTList(OutVT, EVT::Other);
     SDValue retops[2] = {
       result,
       the_chain
@@ -740,17 +740,17 @@
 LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
   StoreSDNode *SN = cast<StoreSDNode>(Op);
   SDValue Value = SN->getValue();
-  MVT VT = Value.getValueType();
-  MVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT VT = Value.getValueType();
+  EVT StVT = (!SN->isTruncatingStore() ? VT : SN->getMemoryVT());
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   DebugLoc dl = Op.getDebugLoc();
   unsigned alignment = SN->getAlignment();
 
   switch (SN->getAddressingMode()) {
   case ISD::UNINDEXED: {
     // The vector type we really want to load from the 16-byte chunk.
-    MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())),
-        stVecVT = MVT::getVectorVT(StVT, (128 / StVT.getSizeInBits()));
+    EVT vecVT = EVT::getVectorVT(VT, (128 / VT.getSizeInBits())),
+        stVecVT = EVT::getVectorVT(StVT, (128 / StVT.getSizeInBits()));
 
     SDValue alignLoadVec;
     SDValue basePtr = SN->getBasePtr();
@@ -820,7 +820,7 @@
     }
 
     // Re-emit as a v16i8 vector load
-    alignLoadVec = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
+    alignLoadVec = DAG.getLoad(EVT::v16i8, dl, the_chain, basePtr,
                                SN->getSrcValue(), SN->getSrcValueOffset(),
                                SN->isVolatile(), 16);
 
@@ -859,7 +859,7 @@
     result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                          vectorizeOp, alignLoadVec,
                          DAG.getNode(ISD::BIT_CONVERT, dl,
-                                     MVT::v4i32, insertEltOp));
+                                     EVT::v4i32, insertEltOp));
 
     result = DAG.getStore(the_chain, dl, result, basePtr,
                           LN->getSrcValue(), LN->getSrcValueOffset(),
@@ -902,7 +902,7 @@
 //! Generate the address of a constant pool entry.
 static SDValue
 LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
   Constant *C = CP->getConstVal();
   SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
@@ -935,7 +935,7 @@
 
 static SDValue
 LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
   SDValue Zero = DAG.getConstant(0, PtrVT);
@@ -960,7 +960,7 @@
 
 static SDValue
 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
   GlobalValue *GV = GSDN->getGlobal();
   SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
@@ -989,21 +989,21 @@
 //! Custom lower double precision floating point constants
 static SDValue
 LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   // FIXME there is no actual debug info here
   DebugLoc dl = Op.getDebugLoc();
 
-  if (VT == MVT::f64) {
+  if (VT == EVT::f64) {
     ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.getNode());
 
     assert((FP != 0) &&
            "LowerConstantFP: Node is not ConstantFPSDNode");
 
     uint64_t dbits = DoubleToBits(FP->getValueAPF().convertToDouble());
-    SDValue T = DAG.getConstant(dbits, MVT::i64);
-    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
+    SDValue T = DAG.getConstant(dbits, EVT::i64);
+    SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i64, T, T);
     return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
-                       DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
+                       DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2f64, Tvec));
   }
 
   return SDValue();
@@ -1028,11 +1028,11 @@
   unsigned ArgRegIdx = 0;
   unsigned StackSlotSize = SPUFrameInfo::stackSlotSize();
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
   // Add DAG nodes to load the arguments or copy them out of registers.
   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
-    MVT ObjectVT = Ins[ArgNo].VT;
+    EVT ObjectVT = Ins[ArgNo].VT;
     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
     SDValue ArgVal;
 
@@ -1044,36 +1044,36 @@
         std::string msg;
         raw_string_ostream Msg(msg);
         Msg << "LowerFormalArguments Unhandled argument type: "
-             << ObjectVT.getMVTString();
+             << ObjectVT.getEVTString();
         llvm_report_error(Msg.str());
       }
-      case MVT::i8:
+      case EVT::i8:
         ArgRegClass = &SPU::R8CRegClass;
         break;
-      case MVT::i16:
+      case EVT::i16:
         ArgRegClass = &SPU::R16CRegClass;
         break;
-      case MVT::i32:
+      case EVT::i32:
         ArgRegClass = &SPU::R32CRegClass;
         break;
-      case MVT::i64:
+      case EVT::i64:
         ArgRegClass = &SPU::R64CRegClass;
         break;
-      case MVT::i128:
+      case EVT::i128:
         ArgRegClass = &SPU::GPRCRegClass;
         break;
-      case MVT::f32:
+      case EVT::f32:
         ArgRegClass = &SPU::R32FPRegClass;
         break;
-      case MVT::f64:
+      case EVT::f64:
         ArgRegClass = &SPU::R64FPRegClass;
         break;
-      case MVT::v2f64:
-      case MVT::v4f32:
-      case MVT::v2i64:
-      case MVT::v4i32:
-      case MVT::v8i16:
-      case MVT::v16i8:
+      case EVT::v2f64:
+      case EVT::v4f32:
+      case EVT::v2i64:
+      case EVT::v4i32:
+      case EVT::v8i16:
+      case EVT::v16i8:
         ArgRegClass = &SPU::VECREGRegClass;
         break;
       }
@@ -1108,7 +1108,7 @@
     for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) {
       VarArgsFrameIndex = MFI->CreateFixedObject(StackSlotSize, ArgOffset);
       SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
-      SDValue ArgVal = DAG.getRegister(ArgRegs[ArgRegIdx], MVT::v16i8);
+      SDValue ArgVal = DAG.getRegister(ArgRegs[ArgRegIdx], EVT::v16i8);
       SDValue Store = DAG.getStore(Chain, dl, ArgVal, FIN, NULL, 0);
       Chain = Store.getOperand(0);
       MemOps.push_back(Store);
@@ -1117,7 +1117,7 @@
       ArgOffset += StackSlotSize;
     }
     if (!MemOps.empty())
-      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+      Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                           &MemOps[0], MemOps.size());
   }
 
@@ -1135,7 +1135,7 @@
       (Addr << 14 >> 14) != Addr)
     return 0;  // Top 14 bits have to be sext of immediate.
 
-  return DAG.getConstant((int)C->getZExtValue() >> 2, MVT::i32).getNode();
+  return DAG.getConstant((int)C->getZExtValue() >> 2, EVT::i32).getNode();
 }
 
 SDValue
@@ -1154,7 +1154,7 @@
   const unsigned NumArgRegs = SPURegisterInfo::getNumArgRegs();
 
   // Handy pointer type
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
   // Accumulate how many bytes are to be pushed on the stack, including the
   // linkage area, and parameter passing area.  According to the SPU ABI,
@@ -1164,7 +1164,7 @@
   // Set up a copy of the stack pointer for use loading and storing any
   // arguments that may not fit in the registers available for argument
   // passing.
-  SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32);
+  SDValue StackPtr = DAG.getRegister(SPU::R1, EVT::i32);
 
   // Figure out which arguments are going to go in registers, and which in
   // memory.
@@ -1186,11 +1186,11 @@
 
     switch (Arg.getValueType().getSimpleVT()) {
     default: llvm_unreachable("Unexpected ValueType for argument!");
-    case MVT::i8:
-    case MVT::i16:
-    case MVT::i32:
-    case MVT::i64:
-    case MVT::i128:
+    case EVT::i8:
+    case EVT::i16:
+    case EVT::i32:
+    case EVT::i64:
+    case EVT::i128:
       if (ArgRegIdx != NumArgRegs) {
         RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
       } else {
@@ -1198,8 +1198,8 @@
         ArgOffset += StackSlotSize;
       }
       break;
-    case MVT::f32:
-    case MVT::f64:
+    case EVT::f32:
+    case EVT::f64:
       if (ArgRegIdx != NumArgRegs) {
         RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
       } else {
@@ -1207,12 +1207,12 @@
         ArgOffset += StackSlotSize;
       }
       break;
-    case MVT::v2i64:
-    case MVT::v2f64:
-    case MVT::v4f32:
-    case MVT::v4i32:
-    case MVT::v8i16:
-    case MVT::v16i8:
+    case EVT::v2i64:
+    case EVT::v2f64:
+    case EVT::v4f32:
+    case EVT::v4i32:
+    case EVT::v8i16:
+    case EVT::v16i8:
       if (ArgRegIdx != NumArgRegs) {
         RegsToPass.push_back(std::make_pair(ArgRegs[ArgRegIdx++], Arg));
       } else {
@@ -1230,7 +1230,7 @@
 
   if (!MemOpChains.empty()) {
     // Adjust the stack pointer for the stack arguments.
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
   }
 
@@ -1251,7 +1251,7 @@
   // node so that legalize doesn't hack it.
   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
     GlobalValue *GV = G->getGlobal();
-    MVT CalleeVT = Callee.getValueType();
+    EVT CalleeVT = Callee.getValueType();
     SDValue Zero = DAG.getConstant(0, PtrVT);
     SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT);
 
@@ -1275,7 +1275,7 @@
       Callee = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, GA, Zero);
     }
   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
-    MVT CalleeVT = Callee.getValueType();
+    EVT CalleeVT = Callee.getValueType();
     SDValue Zero = DAG.getConstant(0, PtrVT);
     SDValue ExtSym = DAG.getTargetExternalSymbol(S->getSymbol(),
         Callee.getValueType());
@@ -1303,7 +1303,7 @@
   if (InFlag.getNode())
     Ops.push_back(InFlag);
   // Returns a chain and a flag for retval copy to use.
-  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(MVT::Other, MVT::Flag),
+  Chain = DAG.getNode(CallOpc, dl, DAG.getVTList(EVT::Other, EVT::Flag),
                       &Ops[0], Ops.size());
   InFlag = Chain.getValue(1);
 
@@ -1319,43 +1319,43 @@
   // If the call has results, copy the values out of the ret val registers.
   switch (Ins[0].VT.getSimpleVT()) {
   default: llvm_unreachable("Unexpected ret value!");
-  case MVT::Other: break;
-  case MVT::i32:
-    if (Ins.size() > 1 && Ins[1].VT == MVT::i32) {
+  case EVT::Other: break;
+  case EVT::i32:
+    if (Ins.size() > 1 && Ins[1].VT == EVT::i32) {
       Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
-                                 MVT::i32, InFlag).getValue(1);
+                                 EVT::i32, InFlag).getValue(1);
       InVals.push_back(Chain.getValue(0));
-      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
+      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, EVT::i32,
                                  Chain.getValue(2)).getValue(1);
       InVals.push_back(Chain.getValue(0));
     } else {
-      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
+      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, EVT::i32,
                                  InFlag).getValue(1);
       InVals.push_back(Chain.getValue(0));
     }
     break;
-  case MVT::i64:
-    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64,
+  case EVT::i64:
+    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, EVT::i64,
                                InFlag).getValue(1);
     InVals.push_back(Chain.getValue(0));
     break;
-  case MVT::i128:
-    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128,
+  case EVT::i128:
+    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, EVT::i128,
                                InFlag).getValue(1);
     InVals.push_back(Chain.getValue(0));
     break;
-  case MVT::f32:
-  case MVT::f64:
+  case EVT::f32:
+  case EVT::f64:
     Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
                                InFlag).getValue(1);
     InVals.push_back(Chain.getValue(0));
     break;
-  case MVT::v2f64:
-  case MVT::v2i64:
-  case MVT::v4f32:
-  case MVT::v4i32:
-  case MVT::v8i16:
-  case MVT::v16i8:
+  case EVT::v2f64:
+  case EVT::v2i64:
+  case EVT::v4f32:
+  case EVT::v4i32:
+  case EVT::v8i16:
+  case EVT::v16i8:
     Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, Ins[0].VT,
                                    InFlag).getValue(1);
     InVals.push_back(Chain.getValue(0));
@@ -1395,9 +1395,9 @@
   }
 
   if (Flag.getNode())
-    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+    return DAG.getNode(SPUISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
   else
-    return DAG.getNode(SPUISD::RET_FLAG, dl, MVT::Other, Chain);
+    return DAG.getNode(SPUISD::RET_FLAG, dl, EVT::Other, Chain);
 }
 
 
@@ -1431,10 +1431,10 @@
 /// and the value fits into an unsigned 18-bit constant, and if so, return the
 /// constant
 SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
-                              MVT ValueType) {
+                              EVT ValueType) {
   if (ConstantSDNode *CN = getVecImm(N)) {
     uint64_t Value = CN->getZExtValue();
-    if (ValueType == MVT::i64) {
+    if (ValueType == EVT::i64) {
       uint64_t UValue = CN->getZExtValue();
       uint32_t upper = uint32_t(UValue >> 32);
       uint32_t lower = uint32_t(UValue);
@@ -1453,10 +1453,10 @@
 /// and the value fits into a signed 16-bit constant, and if so, return the
 /// constant
 SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
-                              MVT ValueType) {
+                              EVT ValueType) {
   if (ConstantSDNode *CN = getVecImm(N)) {
     int64_t Value = CN->getSExtValue();
-    if (ValueType == MVT::i64) {
+    if (ValueType == EVT::i64) {
       uint64_t UValue = CN->getZExtValue();
       uint32_t upper = uint32_t(UValue >> 32);
       uint32_t lower = uint32_t(UValue);
@@ -1476,10 +1476,10 @@
 /// and the value fits into a signed 10-bit constant, and if so, return the
 /// constant
 SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
-                              MVT ValueType) {
+                              EVT ValueType) {
   if (ConstantSDNode *CN = getVecImm(N)) {
     int64_t Value = CN->getSExtValue();
-    if (ValueType == MVT::i64) {
+    if (ValueType == EVT::i64) {
       uint64_t UValue = CN->getZExtValue();
       uint32_t upper = uint32_t(UValue >> 32);
       uint32_t lower = uint32_t(UValue);
@@ -1502,14 +1502,14 @@
 /// constant vectors. Thus, we test to see if the upper and lower bytes are the
 /// same value.
 SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
-                             MVT ValueType) {
+                             EVT ValueType) {
   if (ConstantSDNode *CN = getVecImm(N)) {
     int Value = (int) CN->getZExtValue();
-    if (ValueType == MVT::i16
+    if (ValueType == EVT::i16
         && Value <= 0xffff                 /* truncated from uint64_t */
         && ((short) Value >> 8) == ((short) Value & 0xff))
       return DAG.getTargetConstant(Value & 0xff, ValueType);
-    else if (ValueType == MVT::i8
+    else if (ValueType == EVT::i8
              && (Value & 0xff) == Value)
       return DAG.getTargetConstant(Value, ValueType);
   }
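To make the byte-splat test above concrete, here is a standalone C++ sketch (not part of the patch; the helper name is invented) of the same check: an i16 vector constant can stand in for an i8 splat when its upper and lower bytes agree, and only the low byte is then emitted as the immediate.

#include <cassert>
#include <cstdint>

// Mirrors the test in get_vec_i8imm: the value must fit in 16 bits and its
// upper and lower bytes must be equal; the low byte is the usable immediate.
static bool isI8SplatImm(int value, uint8_t &imm) {
  if (value > 0xffff)
    return false;
  short v = static_cast<short>(value);
  if ((v >> 8) != (v & 0xff))
    return false;
  imm = static_cast<uint8_t>(value & 0xff);
  return true;
}

int main() {
  uint8_t imm = 0;
  assert(isI8SplatImm(0x4a4a, imm) && imm == 0x4a);  // bytes agree -> splat
  assert(!isI8SplatImm(0x1234, imm));                // bytes differ -> reject
  return 0;
}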
@@ -1521,12 +1521,12 @@
 /// and the value fits into a signed 16-bit constant, and if so, return the
 /// constant
 SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
-                               MVT ValueType) {
+                               EVT ValueType) {
   if (ConstantSDNode *CN = getVecImm(N)) {
     uint64_t Value = CN->getZExtValue();
-    if ((ValueType == MVT::i32
+    if ((ValueType == EVT::i32
           && ((unsigned) Value & 0xffff0000) == (unsigned) Value)
-        || (ValueType == MVT::i64 && (Value & 0xffff0000) == Value))
+        || (ValueType == EVT::i64 && (Value & 0xffff0000) == Value))
       return DAG.getTargetConstant(Value >> 16, ValueType);
   }
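For reference, a standalone C++ sketch (not part of the patch; the function name is invented) of the ILHU-style test performed above: a constant qualifies when all of its set bits sit in the upper halfword, in which case the instruction encodes Value >> 16.

#include <cassert>
#include <cstdint>

// Mirrors get_ILHUvec_imm's check: every set bit must lie in bits 16..31, and
// the encoded immediate is the value shifted down by 16.
static bool isILHUImm(uint64_t value, uint32_t &encoded) {
  if ((value & 0xffff0000ull) != value)
    return false;
  encoded = static_cast<uint32_t>(value >> 16);
  return true;
}

int main() {
  uint32_t enc = 0;
  assert(isILHUImm(0x12340000ull, enc) && enc == 0x1234u);
  assert(!isILHUImm(0x12340001ull, enc));  // low-halfword bits disqualify it
  return 0;
}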
 
@@ -1536,7 +1536,7 @@
 /// get_v4i32_imm - Catch-all for general 32-bit constant vectors
 SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) {
   if (ConstantSDNode *CN = getVecImm(N)) {
-    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i32);
+    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), EVT::i32);
   }
 
   return SDValue();
@@ -1545,7 +1545,7 @@
 /// get_v2i64_imm - Catch-all for general 64-bit constant vectors
 SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) {
   if (ConstantSDNode *CN = getVecImm(N)) {
-    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), MVT::i64);
+    return DAG.getTargetConstant((unsigned) CN->getZExtValue(), EVT::i64);
   }
 
   return SDValue();
@@ -1554,8 +1554,8 @@
 //! Lower a BUILD_VECTOR instruction creatively:
 static SDValue
 LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
-  MVT EltVT = VT.getVectorElementType();
+  EVT VT = Op.getValueType();
+  EVT EltVT = VT.getVectorElementType();
   DebugLoc dl = Op.getDebugLoc();
   BuildVectorSDNode *BCN = dyn_cast<BuildVectorSDNode>(Op.getNode());
   assert(BCN != 0 && "Expected BuildVectorSDNode in SPU LowerBUILD_VECTOR");
@@ -1580,40 +1580,40 @@
     std::string msg;
     raw_string_ostream Msg(msg);
     Msg << "CellSPU: Unhandled VT in LowerBUILD_VECTOR, VT = "
-         << VT.getMVTString();
+         << VT.getEVTString();
     llvm_report_error(Msg.str());
     /*NOTREACHED*/
   }
-  case MVT::v4f32: {
+  case EVT::v4f32: {
     uint32_t Value32 = uint32_t(SplatBits);
     assert(SplatBitSize == 32
            && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
     // NOTE: pretend the constant is an integer. LLVM won't load FP constants
-    SDValue T = DAG.getConstant(Value32, MVT::i32);
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
-                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
+    SDValue T = DAG.getConstant(Value32, EVT::i32);
+    return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v4f32,
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32, T,T,T,T));
     break;
   }
-  case MVT::v2f64: {
+  case EVT::v2f64: {
     uint64_t f64val = uint64_t(SplatBits);
     assert(SplatBitSize == 64
            && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
     // NOTE: pretend the constant is an integer. LLVM won't load FP constants
-    SDValue T = DAG.getConstant(f64val, MVT::i64);
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
-                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
+    SDValue T = DAG.getConstant(f64val, EVT::i64);
+    return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2f64,
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i64, T, T));
     break;
   }
-  case MVT::v16i8: {
+  case EVT::v16i8: {
    // 8-bit constants have to be expanded to 16-bits
    unsigned short Value16 = SplatBits /* | (SplatBits << 8) */;
    SmallVector<SDValue, 8> Ops;
 
-   Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
+   Ops.assign(8, DAG.getConstant(Value16, EVT::i16));
    return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
-                      DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
+                      DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v8i16, &Ops[0], Ops.size()));
   }
-  case MVT::v8i16: {
+  case EVT::v8i16: {
     unsigned short Value16 = SplatBits;
     SDValue T = DAG.getConstant(Value16, EltVT);
     SmallVector<SDValue, 8> Ops;
@@ -1621,15 +1621,15 @@
     Ops.assign(8, T);
     return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
   }
-  case MVT::v4i32: {
+  case EVT::v4i32: {
     SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
     return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T, T, T);
   }
-  case MVT::v2i32: {
+  case EVT::v2i32: {
     SDValue T = DAG.getConstant(unsigned(SplatBits), VT.getVectorElementType());
     return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, T, T);
   }
-  case MVT::v2i64: {
+  case EVT::v2i64: {
     return SPU::LowerV2I64Splat(VT, DAG, SplatBits, dl);
   }
   }
@@ -1640,16 +1640,16 @@
 /*!
  */
 SDValue
-SPU::LowerV2I64Splat(MVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
+SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
                      DebugLoc dl) {
   uint32_t upper = uint32_t(SplatVal >> 32);
   uint32_t lower = uint32_t(SplatVal);
 
   if (upper == lower) {
     // Magic constant that can be matched by IL, ILA, et al.
-    SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
+    SDValue Val = DAG.getTargetConstant(upper, EVT::i32);
     return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
-                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                                    Val, Val, Val, Val));
   } else {
     bool upper_special, lower_special;
@@ -1664,8 +1664,8 @@
 
     // Both upper and lower are special, lower to a constant pool load:
     if (lower_special && upper_special) {
-      SDValue SplatValCN = DAG.getConstant(SplatVal, MVT::i64);
-      return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
+      SDValue SplatValCN = DAG.getConstant(SplatVal, EVT::i64);
+      return DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i64,
                          SplatValCN, SplatValCN);
     }
 
@@ -1676,17 +1676,17 @@
 
     // Create lower vector if not a special pattern
     if (!lower_special) {
-      SDValue LO32C = DAG.getConstant(lower, MVT::i32);
+      SDValue LO32C = DAG.getConstant(lower, EVT::i32);
       LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
-                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+                         DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                                      LO32C, LO32C, LO32C, LO32C));
     }
 
     // Create upper vector if not a special pattern
     if (!upper_special) {
-      SDValue HI32C = DAG.getConstant(upper, MVT::i32);
+      SDValue HI32C = DAG.getConstant(upper, EVT::i32);
       HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
-                         DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+                         DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                                      HI32C, HI32C, HI32C, HI32C));
     }
 
@@ -1720,11 +1720,11 @@
           val |= i * 4 + j + ((i & 1) * 16);
       }
 
-      ShufBytes.push_back(DAG.getConstant(val, MVT::i32));
+      ShufBytes.push_back(DAG.getConstant(val, EVT::i32));
     }
 
     return DAG.getNode(SPUISD::SHUFB, dl, OpVT, HI32, LO32,
-                       DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+                       DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                                    &ShufBytes[0], ShufBytes.size()));
   }
 }
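As an illustration of the fast path at the top of LowerV2I64Splat, this standalone C++ sketch (not part of the patch; the helper is hypothetical) shows the first check it performs: when the two 32-bit halves of the 64-bit splat value are equal, the value can be rebuilt as a v4i32 splat of that half and bitcast, avoiding the shuffle and constant-pool paths handled further down.

#include <cassert>
#include <cstdint>

// Returns true when the 64-bit splat value can be expressed as a v4i32 splat,
// i.e. its upper and lower 32-bit halves are identical.
static bool canSplatAsV4I32(uint64_t splat, uint32_t &half) {
  uint32_t upper = static_cast<uint32_t>(splat >> 32);
  uint32_t lower = static_cast<uint32_t>(splat);
  if (upper != lower)
    return false;
  half = lower;
  return true;
}

int main() {
  uint32_t h = 0;
  assert(canSplatAsV4I32(0x0102030401020304ull, h) && h == 0x01020304u);
  assert(!canSplatAsV4I32(0x00000000ffffffffull, h));  // halves differ
  return 0;
}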
@@ -1753,8 +1753,8 @@
   // If we have a single element being moved from V1 to V2, this can be handled
   // using the C*[DX] compute mask instructions, but the vector elements have
   // to be monotonically increasing with one exception element.
-  MVT VecVT = V1.getValueType();
-  MVT EltVT = VecVT.getVectorElementType();
+  EVT VecVT = V1.getValueType();
+  EVT EltVT = VecVT.getVectorElementType();
   unsigned EltsFromV2 = 0;
   unsigned V2Elt = 0;
   unsigned V2EltIdx0 = 0;
@@ -1765,13 +1765,13 @@
   bool monotonic = true;
   bool rotate = true;
 
-  if (EltVT == MVT::i8) {
+  if (EltVT == EVT::i8) {
     V2EltIdx0 = 16;
-  } else if (EltVT == MVT::i16) {
+  } else if (EltVT == EVT::i16) {
     V2EltIdx0 = 8;
-  } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
+  } else if (EltVT == EVT::i32 || EltVT == EVT::f32) {
     V2EltIdx0 = 4;
-  } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
+  } else if (EltVT == EVT::i64 || EltVT == EVT::f64) {
     V2EltIdx0 = 2;
   } else
     llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
@@ -1819,14 +1819,14 @@
     MachineFunction &MF = DAG.getMachineFunction();
     MachineRegisterInfo &RegInfo = MF.getRegInfo();
     unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
-    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
     // Initialize temporary register to 0
     SDValue InitTempReg =
       DAG.getCopyToReg(DAG.getEntryNode(), dl, VReg, DAG.getConstant(0, PtrVT));
     // Copy register's contents as index in SHUFFLE_MASK:
     SDValue ShufMaskOp =
-      DAG.getNode(SPUISD::SHUFFLE_MASK, dl, MVT::v4i32,
-                  DAG.getTargetConstant(V2Elt, MVT::i32),
+      DAG.getNode(SPUISD::SHUFFLE_MASK, dl, EVT::v4i32,
+                  DAG.getTargetConstant(V2Elt, EVT::i32),
                   DAG.getCopyFromReg(InitTempReg, dl, VReg, PtrVT));
     // Use shuffle mask in SHUFB synthetic instruction:
     return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
@@ -1835,7 +1835,7 @@
     int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;
 
     return DAG.getNode(SPUISD::ROTBYTES_LEFT, dl, V1.getValueType(),
-                       V1, DAG.getConstant(rotamt, MVT::i16));
+                       V1, DAG.getConstant(rotamt, EVT::i16));
   } else {
    // Convert the SHUFFLE_VECTOR mask's input element units to the
    // actual bytes.
@@ -1846,10 +1846,10 @@
       unsigned SrcElt = SVN->getMaskElt(i) < 0 ? 0 : SVN->getMaskElt(i);
 
       for (unsigned j = 0; j < BytesPerElement; ++j)
-        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,MVT::i8));
+        ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,EVT::i8));
     }
 
-    SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
+    SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v16i8,
                                     &ResultMask[0], ResultMask.size());
     return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V1, V2, VPermMask);
   }
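The element-to-byte mask expansion in the branch above can be sketched in isolation as follows (standalone C++, not part of the patch; names are illustrative): each element index in the shuffle mask is widened into bytesPerElement consecutive byte indices so that SHUFB can consume a v16i8 permutation mask.

#include <cassert>
#include <cstdint>
#include <vector>

// Expands a shuffle mask given in element units into a byte-level mask, the
// way the loop above builds ResultMask; undefined (negative) entries map to 0.
static std::vector<uint8_t> expandMaskToBytes(const std::vector<int> &eltMask,
                                              unsigned bytesPerElement) {
  std::vector<uint8_t> byteMask;
  for (int elt : eltMask) {
    unsigned srcElt = elt < 0 ? 0 : static_cast<unsigned>(elt);
    for (unsigned j = 0; j < bytesPerElement; ++j)
      byteMask.push_back(static_cast<uint8_t>(srcElt * bytesPerElement + j));
  }
  return byteMask;
}

int main() {
  // A v4i32 mask <2, 0, 3, 1> becomes a 16-entry byte mask.
  std::vector<uint8_t> bytes = expandMaskToBytes({2, 0, 3, 1}, 4);
  assert(bytes.size() == 16);
  assert(bytes[0] == 8 && bytes[3] == 11);  // element 2 covers bytes 8..11
  assert(bytes[4] == 0 && bytes[7] == 3);   // element 0 covers bytes 0..3
  return 0;
}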
@@ -1865,19 +1865,19 @@
 
     ConstantSDNode *CN = cast<ConstantSDNode>(Op0.getNode());
     SmallVector<SDValue, 16> ConstVecValues;
-    MVT VT;
+    EVT VT;
     size_t n_copies;
 
     // Create a constant vector:
     switch (Op.getValueType().getSimpleVT()) {
     default: llvm_unreachable("Unexpected constant value type in "
                               "LowerSCALAR_TO_VECTOR");
-    case MVT::v16i8: n_copies = 16; VT = MVT::i8; break;
-    case MVT::v8i16: n_copies = 8; VT = MVT::i16; break;
-    case MVT::v4i32: n_copies = 4; VT = MVT::i32; break;
-    case MVT::v4f32: n_copies = 4; VT = MVT::f32; break;
-    case MVT::v2i64: n_copies = 2; VT = MVT::i64; break;
-    case MVT::v2f64: n_copies = 2; VT = MVT::f64; break;
+    case EVT::v16i8: n_copies = 16; VT = EVT::i8; break;
+    case EVT::v8i16: n_copies = 8; VT = EVT::i16; break;
+    case EVT::v4i32: n_copies = 4; VT = EVT::i32; break;
+    case EVT::v4f32: n_copies = 4; VT = EVT::f32; break;
+    case EVT::v2i64: n_copies = 2; VT = EVT::i64; break;
+    case EVT::v2f64: n_copies = 2; VT = EVT::f64; break;
     }
 
     SDValue CValue = DAG.getConstant(CN->getZExtValue(), VT);
@@ -1890,12 +1890,12 @@
     // Otherwise, copy the value from one register to another:
     switch (Op0.getValueType().getSimpleVT()) {
     default: llvm_unreachable("Unexpected value type in LowerSCALAR_TO_VECTOR");
-    case MVT::i8:
-    case MVT::i16:
-    case MVT::i32:
-    case MVT::i64:
-    case MVT::f32:
-    case MVT::f64:
+    case EVT::i8:
+    case EVT::i16:
+    case EVT::i32:
+    case EVT::i64:
+    case EVT::f32:
+    case EVT::f64:
       return DAG.getNode(SPUISD::PREFSLOT2VEC, dl, Op.getValueType(), Op0, Op0);
     }
   }
@@ -1904,7 +1904,7 @@
 }
 
 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   SDValue N = Op.getOperand(0);
   SDValue Elt = Op.getOperand(1);
   DebugLoc dl = Op.getDebugLoc();
@@ -1915,16 +1915,16 @@
     int EltNo = (int) C->getZExtValue();
 
     // sanity checks:
-    if (VT == MVT::i8 && EltNo >= 16)
+    if (VT == EVT::i8 && EltNo >= 16)
       llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i8 extraction slot > 15");
-    else if (VT == MVT::i16 && EltNo >= 8)
+    else if (VT == EVT::i16 && EltNo >= 8)
       llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i16 extraction slot > 7");
-    else if (VT == MVT::i32 && EltNo >= 4)
+    else if (VT == EVT::i32 && EltNo >= 4)
       llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i32 extraction slot > 4");
-    else if (VT == MVT::i64 && EltNo >= 2)
+    else if (VT == EVT::i64 && EltNo >= 2)
       llvm_unreachable("SPU LowerEXTRACT_VECTOR_ELT: i64 extraction slot > 2");
 
-    if (EltNo == 0 && (VT == MVT::i32 || VT == MVT::i64)) {
+    if (EltNo == 0 && (VT == EVT::i32 || VT == EVT::i64)) {
       // i32 and i64: Element 0 is the preferred slot
       return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, N);
     }
@@ -1936,21 +1936,21 @@
     switch (VT.getSimpleVT()) {
     default:
       assert(false && "Invalid value type!");
-    case MVT::i8: {
+    case EVT::i8: {
       prefslot_begin = prefslot_end = 3;
       break;
     }
-    case MVT::i16: {
+    case EVT::i16: {
       prefslot_begin = 2; prefslot_end = 3;
       break;
     }
-    case MVT::i32:
-    case MVT::f32: {
+    case EVT::i32:
+    case EVT::f32: {
       prefslot_begin = 0; prefslot_end = 3;
       break;
     }
-    case MVT::i64:
-    case MVT::f64: {
+    case EVT::i64:
+    case EVT::f64: {
       prefslot_begin = 0; prefslot_end = 7;
       break;
     }
@@ -1982,11 +1982,11 @@
                            (ShufBytes[bidx+1] << 16) |
                            (ShufBytes[bidx+2] << 8) |
                            ShufBytes[bidx+3]);
-      ShufMask[i] = DAG.getConstant(bits, MVT::i32);
+      ShufMask[i] = DAG.getConstant(bits, EVT::i32);
     }
 
     SDValue ShufMaskVec =
-      DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+      DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                   &ShufMask[0], sizeof(ShufMask)/sizeof(ShufMask[0]));
 
     retval = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
@@ -1995,15 +1995,15 @@
   } else {
     // Variable index: Rotate the requested element into slot 0, then replicate
     // slot 0 across the vector
-    MVT VecVT = N.getValueType();
+    EVT VecVT = N.getValueType();
     if (!VecVT.isSimple() || !VecVT.isVector() || !VecVT.is128BitVector()) {
       llvm_report_error("LowerEXTRACT_VECTOR_ELT: Must have a simple, 128-bit"
                         "vector type!");
     }
 
     // Make life easier by making sure the index is zero-extended to i32
-    if (Elt.getValueType() != MVT::i32)
-      Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Elt);
+    if (Elt.getValueType() != EVT::i32)
+      Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i32, Elt);
 
     // Scale the index to a bit/byte shift quantity
     APInt scaleFactor =
@@ -2013,8 +2013,8 @@
 
     if (scaleShift > 0) {
       // Scale the shift factor:
-      Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
-                        DAG.getConstant(scaleShift, MVT::i32));
+      Elt = DAG.getNode(ISD::SHL, dl, EVT::i32, Elt,
+                        DAG.getConstant(scaleShift, EVT::i32));
     }
 
     vecShift = DAG.getNode(SPUISD::SHLQUAD_L_BYTES, dl, VecVT, N, Elt);
@@ -2028,30 +2028,30 @@
       llvm_report_error("LowerEXTRACT_VECTOR_ELT(varable): Unhandled vector"
                         "type");
       /*NOTREACHED*/
-    case MVT::i8: {
-      SDValue factor = DAG.getConstant(0x00000000, MVT::i32);
-      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+    case EVT::i8: {
+      SDValue factor = DAG.getConstant(0x00000000, EVT::i32);
+      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                               factor, factor, factor, factor);
       break;
     }
-    case MVT::i16: {
-      SDValue factor = DAG.getConstant(0x00010001, MVT::i32);
-      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+    case EVT::i16: {
+      SDValue factor = DAG.getConstant(0x00010001, EVT::i32);
+      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                               factor, factor, factor, factor);
       break;
     }
-    case MVT::i32:
-    case MVT::f32: {
-      SDValue factor = DAG.getConstant(0x00010203, MVT::i32);
-      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+    case EVT::i32:
+    case EVT::f32: {
+      SDValue factor = DAG.getConstant(0x00010203, EVT::i32);
+      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                               factor, factor, factor, factor);
       break;
     }
-    case MVT::i64:
-    case MVT::f64: {
-      SDValue loFactor = DAG.getConstant(0x00010203, MVT::i32);
-      SDValue hiFactor = DAG.getConstant(0x04050607, MVT::i32);
-      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
+    case EVT::i64:
+    case EVT::f64: {
+      SDValue loFactor = DAG.getConstant(0x00010203, EVT::i32);
+      SDValue hiFactor = DAG.getConstant(0x04050607, EVT::i32);
+      replicate = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
                               loFactor, hiFactor, loFactor, hiFactor);
       break;
     }
@@ -2070,12 +2070,12 @@
   SDValue ValOp = Op.getOperand(1);
   SDValue IdxOp = Op.getOperand(2);
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
 
   ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp);
   assert(CN != 0 && "LowerINSERT_VECTOR_ELT: Index is not constant!");
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   // Use $sp ($1) because it's always 16-byte aligned and it's available:
   SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                 DAG.getRegister(SPU::R1, PtrVT),
@@ -2086,7 +2086,7 @@
     DAG.getNode(SPUISD::SHUFB, dl, VT,
                 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
                 VecOp,
-                DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
+                DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v4i32, ShufMask));
 
   return result;
 }
@@ -2096,9 +2096,9 @@
 {
   SDValue N0 = Op.getOperand(0);      // Everything has at least one operand
   DebugLoc dl = Op.getDebugLoc();
-  MVT ShiftVT = TLI.getShiftAmountTy();
+  EVT ShiftVT = TLI.getShiftAmountTy();
 
-  assert(Op.getValueType() == MVT::i8);
+  assert(Op.getValueType() == EVT::i8);
   switch (Opc) {
   default:
     llvm_unreachable("Unhandled i8 math operator");
@@ -2108,10 +2108,10 @@
     // 8-bit addition: Promote the arguments up to 16-bits and truncate
     // the result:
     SDValue N1 = Op.getOperand(1);
-    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
-    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
-    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
-                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
+    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N0);
+    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N1);
+    return DAG.getNode(ISD::TRUNCATE, dl, EVT::i8,
+                       DAG.getNode(Opc, dl, EVT::i16, N0, N1));
 
   }
 
@@ -2119,17 +2119,17 @@
     // 8-bit subtraction: Promote the arguments up to 16-bits and truncate
     // the result:
     SDValue N1 = Op.getOperand(1);
-    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
-    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
-    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
-                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
+    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N0);
+    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N1);
+    return DAG.getNode(ISD::TRUNCATE, dl, EVT::i8,
+                       DAG.getNode(Opc, dl, EVT::i16, N0, N1));
   }
   case ISD::ROTR:
   case ISD::ROTL: {
     SDValue N1 = Op.getOperand(1);
-    MVT N1VT = N1.getValueType();
+    EVT N1VT = N1.getValueType();
 
-    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
+    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i16, N0);
     if (!N1VT.bitsEq(ShiftVT)) {
       unsigned N1Opc = N1.getValueType().bitsLT(ShiftVT)
                        ? ISD::ZERO_EXTEND
@@ -2139,20 +2139,20 @@
 
     // Replicate lower 8-bits into upper 8:
     SDValue ExpandArg =
-      DAG.getNode(ISD::OR, dl, MVT::i16, N0,
-                  DAG.getNode(ISD::SHL, dl, MVT::i16,
-                              N0, DAG.getConstant(8, MVT::i32)));
+      DAG.getNode(ISD::OR, dl, EVT::i16, N0,
+                  DAG.getNode(ISD::SHL, dl, EVT::i16,
+                              N0, DAG.getConstant(8, EVT::i32)));
 
     // Truncate back down to i8
-    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
-                       DAG.getNode(Opc, dl, MVT::i16, ExpandArg, N1));
+    return DAG.getNode(ISD::TRUNCATE, dl, EVT::i8,
+                       DAG.getNode(Opc, dl, EVT::i16, ExpandArg, N1));
   }
   case ISD::SRL:
   case ISD::SHL: {
     SDValue N1 = Op.getOperand(1);
-    MVT N1VT = N1.getValueType();
+    EVT N1VT = N1.getValueType();
 
-    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, N0);
+    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i16, N0);
     if (!N1VT.bitsEq(ShiftVT)) {
       unsigned N1Opc = ISD::ZERO_EXTEND;
 
@@ -2162,14 +2162,14 @@
       N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
     }
 
-    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
-                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
+    return DAG.getNode(ISD::TRUNCATE, dl, EVT::i8,
+                       DAG.getNode(Opc, dl, EVT::i16, N0, N1));
   }
   case ISD::SRA: {
     SDValue N1 = Op.getOperand(1);
-    MVT N1VT = N1.getValueType();
+    EVT N1VT = N1.getValueType();
 
-    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
+    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N0);
     if (!N1VT.bitsEq(ShiftVT)) {
       unsigned N1Opc = ISD::SIGN_EXTEND;
 
@@ -2178,16 +2178,16 @@
       N1 = DAG.getNode(N1Opc, dl, ShiftVT, N1);
     }
 
-    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
-                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
+    return DAG.getNode(ISD::TRUNCATE, dl, EVT::i8,
+                       DAG.getNode(Opc, dl, EVT::i16, N0, N1));
   }
   case ISD::MUL: {
     SDValue N1 = Op.getOperand(1);
 
-    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N0);
-    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i16, N1);
-    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
-                       DAG.getNode(Opc, dl, MVT::i16, N0, N1));
+    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N0);
+    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i16, N1);
+    return DAG.getNode(ISD::TRUNCATE, dl, EVT::i8,
+                       DAG.getNode(Opc, dl, EVT::i16, N0, N1));
     break;
   }
   }
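A scalar analogue of the promote-and-truncate pattern used throughout LowerI8Math above, as a standalone C++ sketch (not part of the patch; the function name is invented): the i8 operands are sign-extended to 16 bits, the operation runs at 16 bits, and the result is truncated back to 8 bits.

#include <cassert>
#include <cstdint>

// 8-bit addition done the way the lowering above does it: SIGN_EXTEND both
// operands to 16 bits, add at 16 bits, then TRUNCATE the result back to 8.
static int8_t add_i8_via_i16(int8_t a, int8_t b) {
  int16_t wa = static_cast<int16_t>(a);
  int16_t wb = static_cast<int16_t>(b);
  int16_t wide = static_cast<int16_t>(wa + wb);
  return static_cast<int8_t>(wide);
}

int main() {
  assert(add_i8_via_i16(100, 27) == 127);
  assert(add_i8_via_i16(-5, -6) == -11);
  return 0;
}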
@@ -2200,7 +2200,7 @@
 LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
   SDValue ConstVec;
   SDValue Arg;
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
 
   ConstVec = Op.getOperand(0);
@@ -2230,7 +2230,7 @@
                               HasAnyUndefs, minSplatBits)
         && minSplatBits <= SplatBitSize) {
       uint64_t SplatBits = APSplatBits.getZExtValue();
-      SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8);
+      SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, EVT::i8);
 
       SmallVector<SDValue, 16> tcVec;
       tcVec.assign(16, tc);
@@ -2251,33 +2251,33 @@
   ones per byte, which then have to be accumulated.
 */
 static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
-  MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
+  EVT VT = Op.getValueType();
+  EVT vecVT = EVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
   DebugLoc dl = Op.getDebugLoc();
 
   switch (VT.getSimpleVT()) {
   default:
     assert(false && "Invalid value type!");
-  case MVT::i8: {
+  case EVT::i8: {
     SDValue N = Op.getOperand(0);
-    SDValue Elt0 = DAG.getConstant(0, MVT::i32);
+    SDValue Elt0 = DAG.getConstant(0, EVT::i32);
 
     SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
     SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
 
-    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i8, CNTB, Elt0);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i8, CNTB, Elt0);
   }
 
-  case MVT::i16: {
+  case EVT::i16: {
     MachineFunction &MF = DAG.getMachineFunction();
     MachineRegisterInfo &RegInfo = MF.getRegInfo();
 
     unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass);
 
     SDValue N = Op.getOperand(0);
-    SDValue Elt0 = DAG.getConstant(0, MVT::i16);
-    SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16);
-    SDValue Shift1 = DAG.getConstant(8, MVT::i32);
+    SDValue Elt0 = DAG.getConstant(0, EVT::i16);
+    SDValue Mask0 = DAG.getConstant(0x0f, EVT::i16);
+    SDValue Shift1 = DAG.getConstant(8, EVT::i32);
 
     SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
     SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
@@ -2285,22 +2285,22 @@
     // CNTB_result becomes the chain to which all of the virtual registers
     // CNTB_reg, SUM1_reg become associated:
     SDValue CNTB_result =
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, CNTB, Elt0);
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i16, CNTB, Elt0);
 
     SDValue CNTB_rescopy =
       DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
 
-    SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i16);
+    SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, EVT::i16);
 
-    return DAG.getNode(ISD::AND, dl, MVT::i16,
-                       DAG.getNode(ISD::ADD, dl, MVT::i16,
-                                   DAG.getNode(ISD::SRL, dl, MVT::i16,
+    return DAG.getNode(ISD::AND, dl, EVT::i16,
+                       DAG.getNode(ISD::ADD, dl, EVT::i16,
+                                   DAG.getNode(ISD::SRL, dl, EVT::i16,
                                                Tmp1, Shift1),
                                    Tmp1),
                        Mask0);
   }
 
-  case MVT::i32: {
+  case EVT::i32: {
     MachineFunction &MF = DAG.getMachineFunction();
     MachineRegisterInfo &RegInfo = MF.getRegInfo();
 
@@ -2308,10 +2308,10 @@
     unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
 
     SDValue N = Op.getOperand(0);
-    SDValue Elt0 = DAG.getConstant(0, MVT::i32);
-    SDValue Mask0 = DAG.getConstant(0xff, MVT::i32);
-    SDValue Shift1 = DAG.getConstant(16, MVT::i32);
-    SDValue Shift2 = DAG.getConstant(8, MVT::i32);
+    SDValue Elt0 = DAG.getConstant(0, EVT::i32);
+    SDValue Mask0 = DAG.getConstant(0xff, EVT::i32);
+    SDValue Shift1 = DAG.getConstant(16, EVT::i32);
+    SDValue Shift2 = DAG.getConstant(8, EVT::i32);
 
     SDValue Promote = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, N, N);
     SDValue CNTB = DAG.getNode(SPUISD::CNTB, dl, vecVT, Promote);
@@ -2319,35 +2319,35 @@
     // CNTB_result becomes the chain to which all of the virtual registers
     // CNTB_reg, SUM1_reg become associated:
     SDValue CNTB_result =
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, CNTB, Elt0);
+      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i32, CNTB, Elt0);
 
     SDValue CNTB_rescopy =
       DAG.getCopyToReg(CNTB_result, dl, CNTB_reg, CNTB_result);
 
     SDValue Comp1 =
-      DAG.getNode(ISD::SRL, dl, MVT::i32,
-                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
+      DAG.getNode(ISD::SRL, dl, EVT::i32,
+                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, EVT::i32),
                   Shift1);
 
     SDValue Sum1 =
-      DAG.getNode(ISD::ADD, dl, MVT::i32, Comp1,
-                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32));
+      DAG.getNode(ISD::ADD, dl, EVT::i32, Comp1,
+                  DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, EVT::i32));
 
     SDValue Sum1_rescopy =
       DAG.getCopyToReg(CNTB_result, dl, SUM1_reg, Sum1);
 
     SDValue Comp2 =
-      DAG.getNode(ISD::SRL, dl, MVT::i32,
-                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32),
+      DAG.getNode(ISD::SRL, dl, EVT::i32,
+                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, EVT::i32),
                   Shift2);
     SDValue Sum2 =
-      DAG.getNode(ISD::ADD, dl, MVT::i32, Comp2,
-                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, MVT::i32));
+      DAG.getNode(ISD::ADD, dl, EVT::i32, Comp2,
+                  DAG.getCopyFromReg(Sum1_rescopy, dl, SUM1_reg, EVT::i32));
 
-    return DAG.getNode(ISD::AND, dl, MVT::i32, Sum2, Mask0);
+    return DAG.getNode(ISD::AND, dl, EVT::i32, Sum2, Mask0);
   }
 
-  case MVT::i64:
+  case EVT::i64:
     break;
   }
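The i16 and i32 cases above only have to accumulate the per-byte counts that SPUISD::CNTB leaves in each byte of the preferred slot. A scalar sketch of the i32 accumulation (the Comp1/Sum1/Comp2/Sum2 chain above), illustrative only:

    #include <cstdint>

    // Cnt holds one per-byte population count in each of its four bytes.
    static inline uint32_t SumCntbBytesSketch(uint32_t Cnt) {
      uint32_t S = Cnt + (Cnt >> 16);  // fold the upper halfword onto the lower
      S += S >> 8;                     // fold the remaining byte
      return S & 0xff;                 // the low byte now holds the total
    }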
 
@@ -2361,12 +2361,12 @@
  */
 static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                               SPUTargetLowering &TLI) {
-  MVT OpVT = Op.getValueType();
+  EVT OpVT = Op.getValueType();
   SDValue Op0 = Op.getOperand(0);
-  MVT Op0VT = Op0.getValueType();
+  EVT Op0VT = Op0.getValueType();
 
-  if ((OpVT == MVT::i32 && Op0VT == MVT::f64)
-      || OpVT == MVT::i64) {
+  if ((OpVT == EVT::i32 && Op0VT == EVT::f64)
+      || OpVT == EVT::i64) {
     // Convert f32 / f64 to i32 / i64 via libcall.
     RTLIB::Libcall LC =
             (Op.getOpcode() == ISD::FP_TO_SINT)
@@ -2387,12 +2387,12 @@
  */
 static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                               SPUTargetLowering &TLI) {
-  MVT OpVT = Op.getValueType();
+  EVT OpVT = Op.getValueType();
   SDValue Op0 = Op.getOperand(0);
-  MVT Op0VT = Op0.getValueType();
+  EVT Op0VT = Op0.getValueType();
 
-  if ((OpVT == MVT::f64 && Op0VT == MVT::i32)
-      || Op0VT == MVT::i64) {
+  if ((OpVT == EVT::f64 && Op0VT == EVT::i32)
+      || Op0VT == EVT::i64) {
     // Convert i32, i64 to f64 via libcall:
     RTLIB::Libcall LC =
             (Op.getOpcode() == ISD::SINT_TO_FP)
@@ -2408,7 +2408,7 @@
 
 //! Lower ISD::SETCC
 /*!
- This handles MVT::f64 (double floating point) condition lowering
+ This handles EVT::f64 (double floating point) condition lowering
  */
 static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
                           const TargetLowering &TLI) {
@@ -2418,25 +2418,25 @@
 
   SDValue lhs = Op.getOperand(0);
   SDValue rhs = Op.getOperand(1);
-  MVT lhsVT = lhs.getValueType();
-  assert(lhsVT == MVT::f64 && "LowerSETCC: type other than MVT::64\n");
+  EVT lhsVT = lhs.getValueType();
+  assert(lhsVT == EVT::f64 && "LowerSETCC: type other than EVT::f64\n");
 
-  MVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
+  EVT ccResultVT = TLI.getSetCCResultType(lhs.getValueType());
   APInt ccResultOnes = APInt::getAllOnesValue(ccResultVT.getSizeInBits());
-  MVT IntVT(MVT::i64);
+  EVT IntVT(EVT::i64);
 
   // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
   // selected to a NOP:
   SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
   SDValue lhsHi32 =
-          DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
+          DAG.getNode(ISD::TRUNCATE, dl, EVT::i32,
                       DAG.getNode(ISD::SRL, dl, IntVT,
-                                  i64lhs, DAG.getConstant(32, MVT::i32)));
+                                  i64lhs, DAG.getConstant(32, EVT::i32)));
   SDValue lhsHi32abs =
-          DAG.getNode(ISD::AND, dl, MVT::i32,
-                      lhsHi32, DAG.getConstant(0x7fffffff, MVT::i32));
+          DAG.getNode(ISD::AND, dl, EVT::i32,
+                      lhsHi32, DAG.getConstant(0x7fffffff, EVT::i32));
   SDValue lhsLo32 =
-          DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, i64lhs);
+          DAG.getNode(ISD::TRUNCATE, dl, EVT::i32, i64lhs);
 
   // SETO and SETUO only use the lhs operand:
   if (CC->get() == ISD::SETO) {
@@ -2453,33 +2453,33 @@
     return DAG.getNode(ISD::AND, dl, ccResultVT,
                        DAG.getSetCC(dl, ccResultVT,
                                     lhsHi32abs,
-                                    DAG.getConstant(0x7ff00000, MVT::i32),
+                                    DAG.getConstant(0x7ff00000, EVT::i32),
                                     ISD::SETGE),
                        DAG.getSetCC(dl, ccResultVT,
                                     lhsLo32,
-                                    DAG.getConstant(0, MVT::i32),
+                                    DAG.getConstant(0, EVT::i32),
                                     ISD::SETGT));
   }
 
   SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
   SDValue rhsHi32 =
-          DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
+          DAG.getNode(ISD::TRUNCATE, dl, EVT::i32,
                       DAG.getNode(ISD::SRL, dl, IntVT,
-                                  i64rhs, DAG.getConstant(32, MVT::i32)));
+                                  i64rhs, DAG.getConstant(32, EVT::i32)));
 
   // If a value is negative, subtract from the sign magnitude constant:
   SDValue signMag2TC = DAG.getConstant(0x8000000000000000ULL, IntVT);
 
   // Convert the sign-magnitude representation into 2's complement:
   SDValue lhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
-                                      lhsHi32, DAG.getConstant(31, MVT::i32));
+                                      lhsHi32, DAG.getConstant(31, EVT::i32));
   SDValue lhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64lhs);
   SDValue lhsSelect =
           DAG.getNode(ISD::SELECT, dl, IntVT,
                       lhsSelectMask, lhsSignMag2TC, i64lhs);
 
   SDValue rhsSelectMask = DAG.getNode(ISD::SRA, dl, ccResultVT,
-                                      rhsHi32, DAG.getConstant(31, MVT::i32));
+                                      rhsHi32, DAG.getConstant(31, EVT::i32));
   SDValue rhsSignMag2TC = DAG.getNode(ISD::SUB, dl, IntVT, signMag2TC, i64rhs);
   SDValue rhsSelect =
           DAG.getNode(ISD::SELECT, dl, IntVT,
@@ -2517,10 +2517,10 @@
   if ((CC->get() & 0x8) == 0) {
     // Ordered comparison:
     SDValue lhsNaN = DAG.getSetCC(dl, ccResultVT,
-                                  lhs, DAG.getConstantFP(0.0, MVT::f64),
+                                  lhs, DAG.getConstantFP(0.0, EVT::f64),
                                   ISD::SETO);
     SDValue rhsNaN = DAG.getSetCC(dl, ccResultVT,
-                                  rhs, DAG.getConstantFP(0.0, MVT::f64),
+                                  rhs, DAG.getConstantFP(0.0, EVT::f64),
                                   ISD::SETO);
     SDValue ordered = DAG.getNode(ISD::AND, dl, ccResultVT, lhsNaN, rhsNaN);
 
@@ -2544,7 +2544,7 @@
 
 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
                               const TargetLowering &TLI) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   SDValue lhs = Op.getOperand(0);
   SDValue rhs = Op.getOperand(1);
   SDValue trueval = Op.getOperand(2);
@@ -2573,25 +2573,25 @@
 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
 {
   // Type to truncate to
-  MVT VT = Op.getValueType();
-  MVT::SimpleValueType simpleVT = VT.getSimpleVT();
-  MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
+  EVT VT = Op.getValueType();
+  EVT::SimpleValueType simpleVT = VT.getSimpleVT();
+  EVT VecVT = EVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
   DebugLoc dl = Op.getDebugLoc();
 
   // Type to truncate from
   SDValue Op0 = Op.getOperand(0);
-  MVT Op0VT = Op0.getValueType();
+  EVT Op0VT = Op0.getValueType();
 
-  if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
+  if (Op0VT.getSimpleVT() == EVT::i128 && simpleVT == EVT::i64) {
     // Create shuffle mask, least significant doubleword of quadword
     unsigned maskHigh = 0x08090a0b;
     unsigned maskLow = 0x0c0d0e0f;
     // Use a shuffle to perform the truncation
-    SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
-                                   DAG.getConstant(maskHigh, MVT::i32),
-                                   DAG.getConstant(maskLow, MVT::i32),
-                                   DAG.getConstant(maskHigh, MVT::i32),
-                                   DAG.getConstant(maskLow, MVT::i32));
+    SDValue shufMask = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
+                                   DAG.getConstant(maskHigh, EVT::i32),
+                                   DAG.getConstant(maskLow, EVT::i32),
+                                   DAG.getConstant(maskHigh, EVT::i32),
+                                   DAG.getConstant(maskLow, EVT::i32));
 
     SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
                                        Op0, Op0, shufMask);
@@ -2611,7 +2611,7 @@
 SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
 {
   unsigned Opc = (unsigned) Op.getOpcode();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
 
   switch (Opc) {
   default: {
@@ -2647,7 +2647,7 @@
   case ISD::SRL:
   case ISD::SHL:
   case ISD::SRA: {
-    if (VT == MVT::i8)
+    if (VT == EVT::i8)
       return LowerI8Math(Op, DAG, Opc, *this);
     break;
   }
@@ -2680,7 +2680,7 @@
 
   // Vector and i8 multiply:
   case ISD::MUL:
-    if (VT == MVT::i8)
+    if (VT == EVT::i8)
       return LowerI8Math(Op, DAG, Opc, *this);
 
   case ISD::CTPOP:
@@ -2705,7 +2705,7 @@
 {
 #if 0
   unsigned Opc = (unsigned) N->getOpcode();
-  MVT OpVT = N->getValueType(0);
+  EVT OpVT = N->getValueType(0);
 
   switch (Opc) {
   default: {
@@ -2735,8 +2735,8 @@
   const SPUSubtarget *ST = SPUTM.getSubtargetImpl();
   SelectionDAG &DAG = DCI.DAG;
   SDValue Op0 = N->getOperand(0);       // everything has at least one operand
-  MVT NodeVT = N->getValueType(0);      // The node's value type
-  MVT Op0VT = Op0.getValueType();       // The first operand's result
+  EVT NodeVT = N->getValueType(0);      // The node's value type
+  EVT Op0VT = Op0.getValueType();       // The first operand's result
   SDValue Result;                       // Initially, empty result
   DebugLoc dl = N->getDebugLoc();
 
@@ -2938,20 +2938,20 @@
 
 std::pair<unsigned, const TargetRegisterClass*>
 SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
-                                                MVT VT) const
+                                                EVT VT) const
 {
   if (Constraint.size() == 1) {
     // GCC RS6000 Constraint Letters
     switch (Constraint[0]) {
     case 'b':   // R1-R31
     case 'r':   // R0-R31
-      if (VT == MVT::i64)
+      if (VT == EVT::i64)
         return std::make_pair(0U, SPU::R64CRegisterClass);
       return std::make_pair(0U, SPU::R32CRegisterClass);
     case 'f':
-      if (VT == MVT::f32)
+      if (VT == EVT::f32)
         return std::make_pair(0U, SPU::R32FPRegisterClass);
-      else if (VT == MVT::f64)
+      else if (VT == EVT::f64)
         return std::make_pair(0U, SPU::R64FPRegisterClass);
       break;
     case 'v':
@@ -3006,10 +3006,10 @@
     return 1;
 
   case ISD::SETCC: {
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
 
-    if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32) {
-      VT = MVT::i32;
+    if (VT != EVT::i8 && VT != EVT::i16 && VT != EVT::i32) {
+      VT = EVT::i32;
     }
     return VT.getSizeInBits();
   }
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index a042e7d..07811e4 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -64,22 +64,22 @@
   //! Utility functions specific to CellSPU:
   namespace SPU {
     SDValue get_vec_u18imm(SDNode *N, SelectionDAG &DAG,
-                             MVT ValueType);
+                             EVT ValueType);
     SDValue get_vec_i16imm(SDNode *N, SelectionDAG &DAG,
-                             MVT ValueType);
+                             EVT ValueType);
     SDValue get_vec_i10imm(SDNode *N, SelectionDAG &DAG,
-                             MVT ValueType);
+                             EVT ValueType);
     SDValue get_vec_i8imm(SDNode *N, SelectionDAG &DAG,
-                            MVT ValueType);
+                            EVT ValueType);
     SDValue get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG,
-                              MVT ValueType);
+                              EVT ValueType);
     SDValue get_v4i32_imm(SDNode *N, SelectionDAG &DAG);
     SDValue get_v2i64_imm(SDNode *N, SelectionDAG &DAG);
 
     SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG,
                               const SPUTargetMachine &TM);
-    //! Simplify a MVT::v2i64 constant splat to CellSPU-ready form
-    SDValue LowerV2I64Splat(MVT OpVT, SelectionDAG &DAG, uint64_t splat,
+    //! Simplify an EVT::v2i64 constant splat to CellSPU-ready form
+    SDValue LowerV2I64Splat(EVT OpVT, SelectionDAG &DAG, uint64_t splat,
                              DebugLoc dl);
   }
 
@@ -109,7 +109,7 @@
     virtual const char *getTargetNodeName(unsigned Opcode) const;
 
     /// getSetCCResultType - Return the ValueType for ISD::SETCC
-    virtual MVT::SimpleValueType getSetCCResultType(MVT VT) const;
+    virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
     //! Custom lowering hooks
     virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
@@ -134,7 +134,7 @@
 
     std::pair<unsigned, const TargetRegisterClass*>
       getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const;
+                                   EVT VT) const;
 
     void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
                                       bool hasMemory,
diff --git a/lib/Target/CellSPU/SPUOperands.td b/lib/Target/CellSPU/SPUOperands.td
index 802628f..4db8e86 100644
--- a/lib/Target/CellSPU/SPUOperands.td
+++ b/lib/Target/CellSPU/SPUOperands.td
@@ -144,7 +144,7 @@
 def lo16 : PatLeaf<(imm), [{
   // lo16 predicate - returns true if the immediate has all zeros in the
   // high order bits (i.e., fits in the low 16 bits) and is a 32-bit constant:
-  if (N->getValueType(0) == MVT::i32) {
+  if (N->getValueType(0) == EVT::i32) {
     uint32_t val = N->getZExtValue();
     return ((val & 0x0000ffff) == val);
   }
@@ -155,10 +155,10 @@
 def hi16 : PatLeaf<(imm), [{
   // hi16 predicate - returns true if the immediate has all zeros in the
   // low order bits and is a 32-bit constant:
-  if (N->getValueType(0) == MVT::i32) {
+  if (N->getValueType(0) == EVT::i32) {
     uint32_t val = uint32_t(N->getZExtValue());
     return ((val & 0xffff0000) == val);
-  } else if (N->getValueType(0) == MVT::i64) {
+  } else if (N->getValueType(0) == EVT::i64) {
     uint64_t val = N->getZExtValue();
     return ((val & 0xffff0000ULL) == val);
   }
@@ -208,7 +208,7 @@
 
 // Does the SFP constant only have the upper 16 bits set?
 def hi16_f32 : PatLeaf<(fpimm), [{
-  if (N->getValueType(0) == MVT::f32) {
+  if (N->getValueType(0) == EVT::f32) {
     uint32_t val = FloatToBits(N->getValueAPF().convertToFloat());
     return ((val & 0xffff0000) == val);
   }
@@ -218,7 +218,7 @@
 
 // Does the SFP constant fit into 18 bits?
 def fpimm18  : PatLeaf<(fpimm), [{
-  if (N->getValueType(0) == MVT::f32) {
+  if (N->getValueType(0) == EVT::f32) {
     uint32_t Value = FloatToBits(N->getValueAPF().convertToFloat());
     return ((Value & ((1 << 19) - 1)) == Value);
   }
@@ -238,7 +238,7 @@
 // immediate constant load for v16i8 vectors. N.B.: The incoming constant has
 // to be a 16-bit quantity with the upper and lower bytes equal (e.g., 0x2a2a).
 def v16i8SExt8Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8);
+  return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8);
 }]>;
 
 // v16i8SExt8Imm: Predicate test for 8-bit sign extended immediate constant
@@ -246,14 +246,14 @@
 // incoming constant being a 16-bit quantity, where the upper and lower bytes
 // are EXACTLY the same (e.g., 0x2a2a)
 def v16i8SExt8Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8).getNode() != 0;
+  return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8).getNode() != 0;
 }], v16i8SExt8Imm_xform>;
 
 // v16i8U8Imm_xform function: convert build_vector to unsigned 8-bit
 // immediate constant load for v16i8 vectors. N.B.: The incoming constant has
 // to be a 16-bit quantity with the upper and lower bytes equal (e.g., 0x2a2a).
 def v16i8U8Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8);
+  return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8);
 }]>;
 
 // v16i8U8Imm: Predicate test for unsigned 8-bit immediate constant
@@ -261,114 +261,114 @@
 // incoming constant being a 16-bit quantity, where the upper and lower bytes
 // are EXACTLY the same (e.g., 0x2a2a)
 def v16i8U8Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8).getNode() != 0;
+  return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8).getNode() != 0;
 }], v16i8U8Imm_xform>;
 
 // v8i16SExt8Imm_xform function: convert build_vector to 8-bit sign extended
 // immediate constant load for v8i16 vectors.
 def v8i16SExt8Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i8imm(N, *CurDAG, MVT::i16);
+  return SPU::get_vec_i8imm(N, *CurDAG, EVT::i16);
 }]>;
 
 // v8i16SExt8Imm: Predicate test for 8-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v8i16SExt8Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i8imm(N, *CurDAG, MVT::i16).getNode() != 0;
+  return SPU::get_vec_i8imm(N, *CurDAG, EVT::i16).getNode() != 0;
 }], v8i16SExt8Imm_xform>;
 
 // v8i16SExt10Imm_xform function: convert build_vector to 16-bit sign extended
 // immediate constant load for v8i16 vectors.
 def v8i16SExt10Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16);
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16);
 }]>;
 
 // v8i16SExt10Imm: Predicate test for 16-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v8i16SExt10Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16).getNode() != 0;
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16).getNode() != 0;
 }], v8i16SExt10Imm_xform>;
 
 // v8i16Uns10Imm_xform function: convert build_vector to 16-bit unsigned
 // immediate constant load for v8i16 vectors.
 def v8i16Uns10Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16);
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16);
 }]>;
 
 // v8i16Uns10Imm: Predicate test for 16-bit unsigned immediate constant
 // load, works in conjunction with its transform function.
 def v8i16Uns10Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16).getNode() != 0;
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16).getNode() != 0;
 }], v8i16Uns10Imm_xform>;
 
 // v8i16SExt16Imm_xform function: convert build_vector to 16-bit sign extended
 // immediate constant load for v8i16 vectors.
 def v8i16Uns16Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i16imm(N, *CurDAG, MVT::i16);
+  return SPU::get_vec_i16imm(N, *CurDAG, EVT::i16);
 }]>;
 
 // v8i16SExt16Imm: Predicate test for 16-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v8i16SExt16Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i16imm(N, *CurDAG, MVT::i16).getNode() != 0;
+  return SPU::get_vec_i16imm(N, *CurDAG, EVT::i16).getNode() != 0;
 }], v8i16Uns16Imm_xform>;
 
 // v4i32SExt10Imm_xform function: convert build_vector to 10-bit sign extended
 // immediate constant load for v4i32 vectors.
 def v4i32SExt10Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32);
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32);
 }]>;
 
 // v4i32SExt10Imm: Predicate test for 10-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v4i32SExt10Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32).getNode() != 0;
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32).getNode() != 0;
 }], v4i32SExt10Imm_xform>;
 
 // v4i32Uns10Imm_xform function: convert build_vector to 10-bit unsigned
 // immediate constant load for v4i32 vectors.
 def v4i32Uns10Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32);
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32);
 }]>;
 
 // v4i32Uns10Imm: Predicate test for 10-bit unsigned immediate constant
 // load, works in conjunction with its transform function.
 def v4i32Uns10Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32).getNode() != 0;
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32).getNode() != 0;
 }], v4i32Uns10Imm_xform>;
 
 // v4i32SExt16Imm_xform function: convert build_vector to 16-bit sign extended
 // immediate constant load for v4i32 vectors.
 def v4i32SExt16Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i16imm(N, *CurDAG, MVT::i32);
+  return SPU::get_vec_i16imm(N, *CurDAG, EVT::i32);
 }]>;
 
 // v4i32SExt16Imm: Predicate test for 16-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v4i32SExt16Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i16imm(N, *CurDAG, MVT::i32).getNode() != 0;
+  return SPU::get_vec_i16imm(N, *CurDAG, EVT::i32).getNode() != 0;
 }], v4i32SExt16Imm_xform>;
 
 // v4i32Uns18Imm_xform function: convert build_vector to 18-bit unsigned
 // immediate constant load for v4i32 vectors.
 def v4i32Uns18Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_u18imm(N, *CurDAG, MVT::i32);
+  return SPU::get_vec_u18imm(N, *CurDAG, EVT::i32);
 }]>;
 
 // v4i32Uns18Imm: Predicate test for 18-bit unsigned immediate constant load,
 // works in conjunction with its transform function.
 def v4i32Uns18Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_u18imm(N, *CurDAG, MVT::i32).getNode() != 0;
+  return SPU::get_vec_u18imm(N, *CurDAG, EVT::i32).getNode() != 0;
 }], v4i32Uns18Imm_xform>;
 
 // ILHUvec_get_imm xform function: convert build_vector to ILHUvec imm constant
 // load.
 def ILHUvec_get_imm: SDNodeXForm<build_vector, [{
-  return SPU::get_ILHUvec_imm(N, *CurDAG, MVT::i32);
+  return SPU::get_ILHUvec_imm(N, *CurDAG, EVT::i32);
 }]>;
 
 /// immILHUvec: Predicate test for a ILHU constant vector.
 def immILHUvec: PatLeaf<(build_vector), [{
-  return SPU::get_ILHUvec_imm(N, *CurDAG, MVT::i32).getNode() != 0;
+  return SPU::get_ILHUvec_imm(N, *CurDAG, EVT::i32).getNode() != 0;
 }], ILHUvec_get_imm>;
 
 // Catch-all for any other i32 vector constants
@@ -383,42 +383,42 @@
 // v2i64SExt10Imm_xform function: convert build_vector to 10-bit sign extended
 // immediate constant load for v2i64 vectors.
 def v2i64SExt10Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i64);
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i64);
 }]>;
 
 // v2i64SExt10Imm: Predicate test for 10-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v2i64SExt10Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i10imm(N, *CurDAG, MVT::i64).getNode() != 0;
+  return SPU::get_vec_i10imm(N, *CurDAG, EVT::i64).getNode() != 0;
 }], v2i64SExt10Imm_xform>;
 
 // v2i64SExt16Imm_xform function: convert build_vector to 16-bit sign extended
 // immediate constant load for v2i64 vectors.
 def v2i64SExt16Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_i16imm(N, *CurDAG, MVT::i64);
+  return SPU::get_vec_i16imm(N, *CurDAG, EVT::i64);
 }]>;
 
 // v2i64SExt16Imm: Predicate test for 16-bit sign extended immediate constant
 // load, works in conjunction with its transform function.
 def v2i64SExt16Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_i16imm(N, *CurDAG, MVT::i64).getNode() != 0;
+  return SPU::get_vec_i16imm(N, *CurDAG, EVT::i64).getNode() != 0;
 }], v2i64SExt16Imm_xform>;
 
 // v2i64Uns18Imm_xform function: convert build_vector to 18-bit unsigned
 // immediate constant load for v2i64 vectors.
 def v2i64Uns18Imm_xform: SDNodeXForm<build_vector, [{
-  return SPU::get_vec_u18imm(N, *CurDAG, MVT::i64);
+  return SPU::get_vec_u18imm(N, *CurDAG, EVT::i64);
 }]>;
 
 // v2i64Uns18Imm: Predicate test for 18-bit unsigned immediate constant load,
 // works in conjunction with its transform function.
 def v2i64Uns18Imm: PatLeaf<(build_vector), [{
-  return SPU::get_vec_u18imm(N, *CurDAG, MVT::i64).getNode() != 0;
+  return SPU::get_vec_u18imm(N, *CurDAG, EVT::i64).getNode() != 0;
 }], v2i64Uns18Imm_xform>;
 
 /// immILHUvec: Predicate test for a ILHU constant vector.
 def immILHUvec_i64: PatLeaf<(build_vector), [{
-  return SPU::get_ILHUvec_imm(N, *CurDAG, MVT::i64).getNode() != 0;
+  return SPU::get_ILHUvec_imm(N, *CurDAG, EVT::i64).getNode() != 0;
 }], ILHUvec_get_imm>;
 
 // Catch-all for any other i32 vector constants
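The i10 predicates above all delegate to SPU::get_vec_i10imm, declared in SPUISelLowering.h earlier in this patch. For reference only (this is not the helper itself), "fits in a 10-bit sign-extended immediate" amounts to a range check such as:

    #include <cstdint>

    static inline bool FitsSExt10(int64_t Val) {
      return Val >= -512 && Val <= 511;  // representable in a signed 10-bit field
    }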
diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
index 6dd3b2a..bb3439b 100644
--- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
+++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp
@@ -78,8 +78,8 @@
                                     SDValue &Base, SDValue &Disp) {
   // Try to match frame address first.
   if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i16);
-    Disp = CurDAG->getTargetConstant(0, MVT::i16);
+    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i16);
+    Disp = CurDAG->getTargetConstant(0, EVT::i16);
     return true;
   }
 
@@ -92,11 +92,11 @@
       if (((CVal << 48) >> 48) == CVal) {
         SDValue N0 = Addr.getOperand(0);
         if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N0))
-          Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i16);
+          Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i16);
         else
           Base = N0;
 
-        Disp = CurDAG->getTargetConstant(CVal, MVT::i16);
+        Disp = CurDAG->getTargetConstant(CVal, EVT::i16);
         return true;
       }
     }
@@ -105,18 +105,18 @@
     SDValue N0 = Addr.getOperand(0);
     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
       Base = CurDAG->getTargetGlobalAddress(G->getGlobal(),
-                                            MVT::i16, G->getOffset());
-      Disp = CurDAG->getTargetConstant(0, MVT::i16);
+                                            EVT::i16, G->getOffset());
+      Disp = CurDAG->getTargetConstant(0, EVT::i16);
       return true;
     } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(N0)) {
-      Base = CurDAG->getTargetExternalSymbol(E->getSymbol(), MVT::i16);
-      Disp = CurDAG->getTargetConstant(0, MVT::i16);
+      Base = CurDAG->getTargetExternalSymbol(E->getSymbol(), EVT::i16);
+      Disp = CurDAG->getTargetConstant(0, EVT::i16);
     }
     break;
   };
 
   Base = Addr;
-  Disp = CurDAG->getTargetConstant(0, MVT::i16);
+  Disp = CurDAG->getTargetConstant(0, EVT::i16);
 
   return true;
 }
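The ((CVal << 48) >> 48) == CVal test above is the usual shift idiom for "this constant survives truncation to a signed 16-bit displacement". An equivalent standalone check, for reference only:

    #include <cstdint>

    static inline bool FitsSExt16(int64_t CVal) {
      // Same meaning as ((CVal << 48) >> 48) == CVal, without relying on
      // shifts of negative values.
      return CVal >= INT16_MIN && CVal <= INT16_MAX;
    }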
@@ -168,14 +168,14 @@
   switch (Node->getOpcode()) {
   default: break;
   case ISD::FrameIndex: {
-    assert(Op.getValueType() == MVT::i16);
+    assert(Op.getValueType() == EVT::i16);
     int FI = cast<FrameIndexSDNode>(Node)->getIndex();
-    SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i16);
+    SDValue TFI = CurDAG->getTargetFrameIndex(FI, EVT::i16);
     if (Node->hasOneUse())
-      return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, MVT::i16,
-                                  TFI, CurDAG->getTargetConstant(0, MVT::i16));
-    return CurDAG->getTargetNode(MSP430::ADD16ri, dl, MVT::i16,
-                                 TFI, CurDAG->getTargetConstant(0, MVT::i16));
+      return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, EVT::i16,
+                                  TFI, CurDAG->getTargetConstant(0, EVT::i16));
+    return CurDAG->getTargetNode(MSP430::ADD16ri, dl, EVT::i16,
+                                 TFI, CurDAG->getTargetConstant(0, EVT::i16));
   }
   }
 
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index d1a504b..a5c4e9f 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -42,8 +42,8 @@
   Subtarget(*tm.getSubtargetImpl()), TM(tm) {
 
   // Set up the register classes.
-  addRegisterClass(MVT::i8,  MSP430::GR8RegisterClass);
-  addRegisterClass(MVT::i16, MSP430::GR16RegisterClass);
+  addRegisterClass(EVT::i8,  MSP430::GR8RegisterClass);
+  addRegisterClass(EVT::i16, MSP430::GR16RegisterClass);
 
   // Compute derived properties from the register classes
   computeRegisterProperties();
@@ -55,75 +55,75 @@
 
   // Even if we have only 1 bit shift here, we can perform
   // shifts of the whole bitwidth 1 bit per step.
-  setShiftAmountType(MVT::i8);
+  setShiftAmountType(EVT::i8);
 
   setStackPointerRegisterToSaveRestore(MSP430::SPW);
   setBooleanContents(ZeroOrOneBooleanContent);
   setSchedulingPreference(SchedulingForLatency);
 
-  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i8, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i16, Expand);
 
   // We don't have any truncstores
-  setTruncStoreAction(MVT::i16, MVT::i8, Expand);
+  setTruncStoreAction(EVT::i16, EVT::i8, Expand);
 
-  setOperationAction(ISD::SRA,              MVT::i8,    Custom);
-  setOperationAction(ISD::SHL,              MVT::i8,    Custom);
-  setOperationAction(ISD::SRL,              MVT::i8,    Custom);
-  setOperationAction(ISD::SRA,              MVT::i16,   Custom);
-  setOperationAction(ISD::SHL,              MVT::i16,   Custom);
-  setOperationAction(ISD::SRL,              MVT::i16,   Custom);
-  setOperationAction(ISD::ROTL,             MVT::i8,    Expand);
-  setOperationAction(ISD::ROTR,             MVT::i8,    Expand);
-  setOperationAction(ISD::ROTL,             MVT::i16,   Expand);
-  setOperationAction(ISD::ROTR,             MVT::i16,   Expand);
-  setOperationAction(ISD::GlobalAddress,    MVT::i16,   Custom);
-  setOperationAction(ISD::ExternalSymbol,   MVT::i16,   Custom);
-  setOperationAction(ISD::BR_JT,            MVT::Other, Expand);
-  setOperationAction(ISD::BRIND,            MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,            MVT::i8,    Custom);
-  setOperationAction(ISD::BR_CC,            MVT::i16,   Custom);
-  setOperationAction(ISD::BRCOND,           MVT::Other, Expand);
-  setOperationAction(ISD::SETCC,            MVT::i8,    Expand);
-  setOperationAction(ISD::SETCC,            MVT::i16,   Expand);
-  setOperationAction(ISD::SELECT,           MVT::i8,    Expand);
-  setOperationAction(ISD::SELECT,           MVT::i16,   Expand);
-  setOperationAction(ISD::SELECT_CC,        MVT::i8,    Custom);
-  setOperationAction(ISD::SELECT_CC,        MVT::i16,   Custom);
-  setOperationAction(ISD::SIGN_EXTEND,      MVT::i16,   Custom);
+  setOperationAction(ISD::SRA,              EVT::i8,    Custom);
+  setOperationAction(ISD::SHL,              EVT::i8,    Custom);
+  setOperationAction(ISD::SRL,              EVT::i8,    Custom);
+  setOperationAction(ISD::SRA,              EVT::i16,   Custom);
+  setOperationAction(ISD::SHL,              EVT::i16,   Custom);
+  setOperationAction(ISD::SRL,              EVT::i16,   Custom);
+  setOperationAction(ISD::ROTL,             EVT::i8,    Expand);
+  setOperationAction(ISD::ROTR,             EVT::i8,    Expand);
+  setOperationAction(ISD::ROTL,             EVT::i16,   Expand);
+  setOperationAction(ISD::ROTR,             EVT::i16,   Expand);
+  setOperationAction(ISD::GlobalAddress,    EVT::i16,   Custom);
+  setOperationAction(ISD::ExternalSymbol,   EVT::i16,   Custom);
+  setOperationAction(ISD::BR_JT,            EVT::Other, Expand);
+  setOperationAction(ISD::BRIND,            EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,            EVT::i8,    Custom);
+  setOperationAction(ISD::BR_CC,            EVT::i16,   Custom);
+  setOperationAction(ISD::BRCOND,           EVT::Other, Expand);
+  setOperationAction(ISD::SETCC,            EVT::i8,    Expand);
+  setOperationAction(ISD::SETCC,            EVT::i16,   Expand);
+  setOperationAction(ISD::SELECT,           EVT::i8,    Expand);
+  setOperationAction(ISD::SELECT,           EVT::i16,   Expand);
+  setOperationAction(ISD::SELECT_CC,        EVT::i8,    Custom);
+  setOperationAction(ISD::SELECT_CC,        EVT::i16,   Custom);
+  setOperationAction(ISD::SIGN_EXTEND,      EVT::i16,   Custom);
 
-  setOperationAction(ISD::CTTZ,             MVT::i8,    Expand);
-  setOperationAction(ISD::CTTZ,             MVT::i16,   Expand);
-  setOperationAction(ISD::CTLZ,             MVT::i8,    Expand);
-  setOperationAction(ISD::CTLZ,             MVT::i16,   Expand);
-  setOperationAction(ISD::CTPOP,            MVT::i8,    Expand);
-  setOperationAction(ISD::CTPOP,            MVT::i16,   Expand);
+  setOperationAction(ISD::CTTZ,             EVT::i8,    Expand);
+  setOperationAction(ISD::CTTZ,             EVT::i16,   Expand);
+  setOperationAction(ISD::CTLZ,             EVT::i8,    Expand);
+  setOperationAction(ISD::CTLZ,             EVT::i16,   Expand);
+  setOperationAction(ISD::CTPOP,            EVT::i8,    Expand);
+  setOperationAction(ISD::CTPOP,            EVT::i16,   Expand);
 
-  setOperationAction(ISD::SHL_PARTS,        MVT::i8,    Expand);
-  setOperationAction(ISD::SHL_PARTS,        MVT::i16,   Expand);
-  setOperationAction(ISD::SRL_PARTS,        MVT::i8,    Expand);
-  setOperationAction(ISD::SRL_PARTS,        MVT::i16,   Expand);
-  setOperationAction(ISD::SRA_PARTS,        MVT::i8,    Expand);
-  setOperationAction(ISD::SRA_PARTS,        MVT::i16,   Expand);
+  setOperationAction(ISD::SHL_PARTS,        EVT::i8,    Expand);
+  setOperationAction(ISD::SHL_PARTS,        EVT::i16,   Expand);
+  setOperationAction(ISD::SRL_PARTS,        EVT::i8,    Expand);
+  setOperationAction(ISD::SRL_PARTS,        EVT::i16,   Expand);
+  setOperationAction(ISD::SRA_PARTS,        EVT::i8,    Expand);
+  setOperationAction(ISD::SRA_PARTS,        EVT::i16,   Expand);
 
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,   Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1,   Expand);
 
   // FIXME: Implement efficiently multiplication by a constant
-  setOperationAction(ISD::MUL,              MVT::i16,   Expand);
-  setOperationAction(ISD::MULHS,            MVT::i16,   Expand);
-  setOperationAction(ISD::MULHU,            MVT::i16,   Expand);
-  setOperationAction(ISD::SMUL_LOHI,        MVT::i16,   Expand);
-  setOperationAction(ISD::UMUL_LOHI,        MVT::i16,   Expand);
+  setOperationAction(ISD::MUL,              EVT::i16,   Expand);
+  setOperationAction(ISD::MULHS,            EVT::i16,   Expand);
+  setOperationAction(ISD::MULHU,            EVT::i16,   Expand);
+  setOperationAction(ISD::SMUL_LOHI,        EVT::i16,   Expand);
+  setOperationAction(ISD::UMUL_LOHI,        EVT::i16,   Expand);
 
-  setOperationAction(ISD::UDIV,             MVT::i16,   Expand);
-  setOperationAction(ISD::UDIVREM,          MVT::i16,   Expand);
-  setOperationAction(ISD::UREM,             MVT::i16,   Expand);
-  setOperationAction(ISD::SDIV,             MVT::i16,   Expand);
-  setOperationAction(ISD::SDIVREM,          MVT::i16,   Expand);
-  setOperationAction(ISD::SREM,             MVT::i16,   Expand);
+  setOperationAction(ISD::UDIV,             EVT::i16,   Expand);
+  setOperationAction(ISD::UDIVREM,          EVT::i16,   Expand);
+  setOperationAction(ISD::UREM,             EVT::i16,   Expand);
+  setOperationAction(ISD::SDIV,             EVT::i16,   Expand);
+  setOperationAction(ISD::SDIVREM,          EVT::i16,   Expand);
+  setOperationAction(ISD::SREM,             EVT::i16,   Expand);
 }
 
 SDValue MSP430TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
@@ -220,7 +220,7 @@
     CCValAssign &VA = ArgLocs[i];
     if (VA.isRegLoc()) {
       // Arguments passed in registers
-      MVT RegVT = VA.getLocVT();
+      EVT RegVT = VA.getLocVT();
       switch (RegVT.getSimpleVT()) {
       default: 
         {
@@ -230,7 +230,7 @@
 #endif
           llvm_unreachable(0);
         }
-      case MVT::i16:
+      case EVT::i16:
         unsigned VReg =
           RegInfo.createVirtualRegister(MSP430::GR16RegisterClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
@@ -266,7 +266,7 @@
 
       // Create the SelectionDAG nodes corresponding to a load
       // from this parameter
-      SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
+      SDValue FIN = DAG.getFrameIndex(FI, EVT::i16);
       InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                    PseudoSourceValue::getFixedStack(FI), 0));
     }
@@ -315,10 +315,10 @@
   }
 
   if (Flag.getNode())
-    return DAG.getNode(MSP430ISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+    return DAG.getNode(MSP430ISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
 
   // Return Void
-  return DAG.getNode(MSP430ISD::RET_FLAG, dl, MVT::Other, Chain);
+  return DAG.getNode(MSP430ISD::RET_FLAG, dl, EVT::Other, Chain);
 }
 
 /// LowerCCCCallTo - functions arguments are copied from virtual regs to
@@ -395,7 +395,7 @@
   // Transform all store nodes into one single node because all store nodes are
   // independent of each other.
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token chain and
@@ -412,12 +412,12 @@
   // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
   // Likewise ExternalSymbol -> TargetExternalSymbol.
   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
-    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i16);
+    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i16);
   else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
-    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i16);
+    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i16);
 
   // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(Callee);
@@ -479,7 +479,7 @@
                                           SelectionDAG &DAG) {
   unsigned Opc = Op.getOpcode();
   SDNode* N = Op.getNode();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = N->getDebugLoc();
 
   // We currently only lower shifts of a constant argument.
@@ -564,7 +564,7 @@
     break;
   }
 
-  return DAG.getNode(MSP430ISD::CMP, dl, MVT::Flag, LHS, RHS);
+  return DAG.getNode(MSP430ISD::CMP, dl, EVT::Flag, LHS, RHS);
 }
 
 
@@ -581,7 +581,7 @@
 
   return DAG.getNode(MSP430ISD::BR_CC, dl, Op.getValueType(),
                      Chain,
-                     Dest, DAG.getConstant(TargetCC, MVT::i8),
+                     Dest, DAG.getConstant(TargetCC, EVT::i8),
                      Flag);
 }
 
@@ -596,11 +596,11 @@
   unsigned TargetCC = MSP430::COND_INVALID;
   SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
 
-  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+  SDVTList VTs = DAG.getVTList(Op.getValueType(), EVT::Flag);
   SmallVector<SDValue, 4> Ops;
   Ops.push_back(TrueV);
   Ops.push_back(FalseV);
-  Ops.push_back(DAG.getConstant(TargetCC, MVT::i8));
+  Ops.push_back(DAG.getConstant(TargetCC, EVT::i8));
   Ops.push_back(Flag);
 
   return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, &Ops[0], Ops.size());
@@ -609,10 +609,10 @@
 SDValue MSP430TargetLowering::LowerSIGN_EXTEND(SDValue Op,
                                                SelectionDAG &DAG) {
   SDValue Val = Op.getOperand(0);
-  MVT VT      = Op.getValueType();
+  EVT VT      = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
 
-  assert(VT == MVT::i16 && "Only support i16 for now!");
+  assert(VT == EVT::i16 && "Only support i16 for now!");
 
   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT,
                      DAG.getNode(ISD::ANY_EXTEND, dl, VT, Val),
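LowerSIGN_EXTEND above rebuilds the extension as an any-extend followed by SIGN_EXTEND_INREG. Assuming the source value is i8, the in-register step is equivalent to this scalar sketch (illustrative only):

    #include <cstdint>

    static inline int16_t SignExtendInReg8Sketch(uint16_t AnyExt) {
      uint16_t Lo = AnyExt & 0xff;           // only the low 8 bits are meaningful
      return (int16_t)((Lo ^ 0x80) - 0x80);  // propagate bit 7 into bits 8..15
    }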
diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp
index 33819b2..b03f785 100644
--- a/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -95,7 +95,7 @@
   // getI32Imm - Return a target constant with the specified
   // value, of type i32.
   inline SDValue getI32Imm(unsigned Imm) {
-    return CurDAG->getTargetConstant(Imm, MVT::i32);
+    return CurDAG->getTargetConstant(Imm, EVT::i32);
   }
 
 
@@ -142,8 +142,8 @@
 {
   // if Address is FI, get the TargetFrameIndex.
   if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-    Base   = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    Base   = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
+    Offset = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
     
@@ -151,7 +151,7 @@
   if (TM.getRelocationModel() == Reloc::PIC_) {
     if ((Addr.getOpcode() == ISD::TargetGlobalAddress) || 
         (Addr.getOpcode() == ISD::TargetJumpTable)){
-      Base   = CurDAG->getRegister(Mips::GP, MVT::i32);
+      Base   = CurDAG->getRegister(Mips::GP, EVT::i32);
       Offset = Addr;
       return true;
     }
@@ -169,19 +169,19 @@
         // If the first operand is a FI, get the TargetFI Node
         if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
                                     (Addr.getOperand(0))) {
-          Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+          Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
         } else {
           Base = Addr.getOperand(0);
         }
 
-        Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
+        Offset = CurDAG->getTargetConstant(CN->getZExtValue(), EVT::i32);
         return true;
       }
     }
   }
 
   Base   = Addr;
-  Offset = CurDAG->getTargetConstant(0, MVT::i32);
+  Offset = CurDAG->getTargetConstant(0, EVT::i32);
   return true;
 }
 
@@ -243,12 +243,12 @@
       SDValue LHS = Node->getOperand(0);
       SDValue RHS = Node->getOperand(1);
 
-      MVT VT = LHS.getValueType();
+      EVT VT = LHS.getValueType();
       SDNode *Carry = CurDAG->getTargetNode(Mips::SLTu, dl, VT, Ops, 2);
       SDNode *AddCarry = CurDAG->getTargetNode(Mips::ADDu, dl, VT, 
                                                SDValue(Carry,0), RHS);
 
-      return CurDAG->SelectNodeTo(N.getNode(), MOp, VT, MVT::Flag,
+      return CurDAG->SelectNodeTo(N.getNode(), MOp, VT, EVT::Flag,
                                   LHS, SDValue(AddCarry,0));
     }
 
@@ -266,13 +266,13 @@
       else
         Op = (Opcode == ISD::UDIVREM ? Mips::DIVu : Mips::DIV);
 
-      SDNode *Node = CurDAG->getTargetNode(Op, dl, MVT::Flag, Op1, Op2);
+      SDNode *Node = CurDAG->getTargetNode(Op, dl, EVT::Flag, Op1, Op2);
 
       SDValue InFlag = SDValue(Node, 0);
-      SDNode *Lo = CurDAG->getTargetNode(Mips::MFLO, dl, MVT::i32, 
-                                         MVT::Flag, InFlag);
+      SDNode *Lo = CurDAG->getTargetNode(Mips::MFLO, dl, EVT::i32, 
+                                         EVT::Flag, InFlag);
       InFlag = SDValue(Lo,1);
-      SDNode *Hi = CurDAG->getTargetNode(Mips::MFHI, dl, MVT::i32, InFlag);
+      SDNode *Hi = CurDAG->getTargetNode(Mips::MFHI, dl, EVT::i32, InFlag);
 
       if (!N.getValue(0).use_empty()) 
         ReplaceUses(N.getValue(0), SDValue(Lo,0));
@@ -292,14 +292,14 @@
 
       unsigned MulOp  = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
       SDNode *MulNode = CurDAG->getTargetNode(MulOp, dl, 
-                                              MVT::Flag, MulOp1, MulOp2);
+                                              EVT::Flag, MulOp1, MulOp2);
 
       SDValue InFlag = SDValue(MulNode, 0);
 
       if (MulOp == ISD::MUL)
-        return CurDAG->getTargetNode(Mips::MFLO, dl, MVT::i32, InFlag);
+        return CurDAG->getTargetNode(Mips::MFLO, dl, EVT::i32, InFlag);
       else
-        return CurDAG->getTargetNode(Mips::MFHI, dl, MVT::i32, InFlag);
+        return CurDAG->getTargetNode(Mips::MFHI, dl, EVT::i32, InFlag);
     }
 
     /// Div/Rem operations
@@ -318,10 +318,10 @@
         Op  = (Opcode == ISD::SREM ? Mips::DIV : Mips::DIVu);
         MOp = Mips::MFHI;
       }
-      SDNode *Node = CurDAG->getTargetNode(Op, dl, MVT::Flag, Op1, Op2);
+      SDNode *Node = CurDAG->getTargetNode(Op, dl, EVT::Flag, Op1, Op2);
 
       SDValue InFlag = SDValue(Node, 0);
-      return CurDAG->getTargetNode(MOp, dl, MVT::i32, InFlag);
+      return CurDAG->getTargetNode(MOp, dl, EVT::i32, InFlag);
     }
 
     // Get target GOT address.
@@ -337,19 +337,19 @@
         //bool isCodeLarge = (TM.getCodeModel() == CodeModel::Large);
         SDValue Chain  = Node->getOperand(0);
         SDValue Callee = Node->getOperand(1);
-        SDValue T9Reg = CurDAG->getRegister(Mips::T9, MVT::i32);
+        SDValue T9Reg = CurDAG->getRegister(Mips::T9, EVT::i32);
         SDValue InFlag(0, 0);
 
         if ( (isa<GlobalAddressSDNode>(Callee)) ||
              (isa<ExternalSymbolSDNode>(Callee)) )
         {
           /// Direct call for global addresses and external symbols
-          SDValue GPReg = CurDAG->getRegister(Mips::GP, MVT::i32);
+          SDValue GPReg = CurDAG->getRegister(Mips::GP, EVT::i32);
 
           // Use load to get GOT target
           SDValue Ops[] = { Callee, GPReg, Chain };
-          SDValue Load = SDValue(CurDAG->getTargetNode(Mips::LW, dl, MVT::i32, 
-                                     MVT::Other, Ops, 3), 0);
+          SDValue Load = SDValue(CurDAG->getTargetNode(Mips::LW, dl, EVT::i32, 
+                                     EVT::Other, Ops, 3), 0);
           Chain = Load.getValue(1);
 
           // Call target must be on T9
@@ -359,8 +359,8 @@
           Chain = CurDAG->getCopyToReg(Chain, dl, T9Reg, Callee, InFlag);
 
         // Emit Jump and Link Register
-        SDNode *ResNode = CurDAG->getTargetNode(Mips::JALR, dl, MVT::Other,
-                                  MVT::Flag, T9Reg, Chain);
+        SDNode *ResNode = CurDAG->getTargetNode(Mips::JALR, dl, EVT::Other,
+                                  EVT::Flag, T9Reg, Chain);
         Chain  = SDValue(ResNode, 0);
         InFlag = SDValue(ResNode, 1);
         ReplaceUses(SDValue(Node, 0), Chain);
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 54d260e..0058272 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -65,108 +65,108 @@
   setUsesGlobalOffsetTable(true);
 
   // Set up the register classes
-  addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass);
-  addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
+  addRegisterClass(EVT::i32, Mips::CPURegsRegisterClass);
+  addRegisterClass(EVT::f32, Mips::FGR32RegisterClass);
 
   // When dealing with single precision only, use libcalls
   if (!Subtarget->isSingleFloat())
     if (!Subtarget->isFP64bit())
-      addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
+      addRegisterClass(EVT::f64, Mips::AFGR64RegisterClass);
 
   // Legal fp constants
   addLegalFPImmediate(APFloat(+0.0f));
 
   // Load extended operations for i1 types must be promoted
-  setLoadExtAction(ISD::EXTLOAD,  MVT::i1,  Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1,  Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1,  Promote);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::i1,  Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1,  Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1,  Promote);
 
   // MIPS doesn't have extending float->double load/store
-  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
+  setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 
   // Used by legalize types to correctly generate the setcc result. 
   // Without this, every float setcc comes with an AND/OR with the result,
   // which we don't want, since the fpcmp result goes to a flag register
   // that is used implicitly by brcond and select operations.
-  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
+  AddPromotedToType(ISD::SETCC, EVT::i1, EVT::i32);
 
   // Mips Custom Operations
-  setOperationAction(ISD::GlobalAddress,      MVT::i32,   Custom);
-  setOperationAction(ISD::GlobalTLSAddress,   MVT::i32,   Custom);
-  setOperationAction(ISD::JumpTable,          MVT::i32,   Custom);
-  setOperationAction(ISD::ConstantPool,       MVT::i32,   Custom);
-  setOperationAction(ISD::SELECT,             MVT::f32,   Custom);
-  setOperationAction(ISD::SELECT,             MVT::f64,   Custom);
-  setOperationAction(ISD::SELECT,             MVT::i32,   Custom);
-  setOperationAction(ISD::SETCC,              MVT::f32,   Custom);
-  setOperationAction(ISD::SETCC,              MVT::f64,   Custom);
-  setOperationAction(ISD::BRCOND,             MVT::Other, Custom);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32,   Custom);
-  setOperationAction(ISD::FP_TO_SINT,         MVT::i32,   Custom);
+  setOperationAction(ISD::GlobalAddress,      EVT::i32,   Custom);
+  setOperationAction(ISD::GlobalTLSAddress,   EVT::i32,   Custom);
+  setOperationAction(ISD::JumpTable,          EVT::i32,   Custom);
+  setOperationAction(ISD::ConstantPool,       EVT::i32,   Custom);
+  setOperationAction(ISD::SELECT,             EVT::f32,   Custom);
+  setOperationAction(ISD::SELECT,             EVT::f64,   Custom);
+  setOperationAction(ISD::SELECT,             EVT::i32,   Custom);
+  setOperationAction(ISD::SETCC,              EVT::f32,   Custom);
+  setOperationAction(ISD::SETCC,              EVT::f64,   Custom);
+  setOperationAction(ISD::BRCOND,             EVT::Other, Custom);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32,   Custom);
+  setOperationAction(ISD::FP_TO_SINT,         EVT::i32,   Custom);
 
   // We custom lower AND/OR to handle the case where the DAG contains 'ands/ors'
   // with operands coming from setcc fp comparisons. This is necessary since
   // the results from these setcc nodes are in a flag register (FCR31).
-  setOperationAction(ISD::AND,              MVT::i32,   Custom);
-  setOperationAction(ISD::OR,               MVT::i32,   Custom);
+  setOperationAction(ISD::AND,              EVT::i32,   Custom);
+  setOperationAction(ISD::OR,               EVT::i32,   Custom);
 
   // Operations not directly supported by Mips.
-  setOperationAction(ISD::BR_JT,             MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,             MVT::Other, Expand);
-  setOperationAction(ISD::SELECT_CC,         MVT::Other, Expand);
-  setOperationAction(ISD::UINT_TO_FP,        MVT::i32,   Expand);
-  setOperationAction(ISD::FP_TO_UINT,        MVT::i32,   Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1,    Expand);
-  setOperationAction(ISD::CTPOP,             MVT::i32,   Expand);
-  setOperationAction(ISD::CTTZ,              MVT::i32,   Expand);
-  setOperationAction(ISD::ROTL,              MVT::i32,   Expand);
-  setOperationAction(ISD::ROTR,              MVT::i32,   Expand);
-  setOperationAction(ISD::SHL_PARTS,         MVT::i32,   Expand);
-  setOperationAction(ISD::SRA_PARTS,         MVT::i32,   Expand);
-  setOperationAction(ISD::SRL_PARTS,         MVT::i32,   Expand);
-  setOperationAction(ISD::FCOPYSIGN,         MVT::f32,   Expand);
-  setOperationAction(ISD::FCOPYSIGN,         MVT::f64,   Expand);
-  setOperationAction(ISD::FSIN,              MVT::f32,   Expand);
-  setOperationAction(ISD::FCOS,              MVT::f32,   Expand);
-  setOperationAction(ISD::FPOWI,             MVT::f32,   Expand);
-  setOperationAction(ISD::FPOW,              MVT::f32,   Expand);
-  setOperationAction(ISD::FLOG,              MVT::f32,   Expand);
-  setOperationAction(ISD::FLOG2,             MVT::f32,   Expand);
-  setOperationAction(ISD::FLOG10,            MVT::f32,   Expand);
-  setOperationAction(ISD::FEXP,              MVT::f32,   Expand);
+  setOperationAction(ISD::BR_JT,             EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,             EVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC,         EVT::Other, Expand);
+  setOperationAction(ISD::UINT_TO_FP,        EVT::i32,   Expand);
+  setOperationAction(ISD::FP_TO_UINT,        EVT::i32,   Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1,    Expand);
+  setOperationAction(ISD::CTPOP,             EVT::i32,   Expand);
+  setOperationAction(ISD::CTTZ,              EVT::i32,   Expand);
+  setOperationAction(ISD::ROTL,              EVT::i32,   Expand);
+  setOperationAction(ISD::ROTR,              EVT::i32,   Expand);
+  setOperationAction(ISD::SHL_PARTS,         EVT::i32,   Expand);
+  setOperationAction(ISD::SRA_PARTS,         EVT::i32,   Expand);
+  setOperationAction(ISD::SRL_PARTS,         EVT::i32,   Expand);
+  setOperationAction(ISD::FCOPYSIGN,         EVT::f32,   Expand);
+  setOperationAction(ISD::FCOPYSIGN,         EVT::f64,   Expand);
+  setOperationAction(ISD::FSIN,              EVT::f32,   Expand);
+  setOperationAction(ISD::FCOS,              EVT::f32,   Expand);
+  setOperationAction(ISD::FPOWI,             EVT::f32,   Expand);
+  setOperationAction(ISD::FPOW,              EVT::f32,   Expand);
+  setOperationAction(ISD::FLOG,              EVT::f32,   Expand);
+  setOperationAction(ISD::FLOG2,             EVT::f32,   Expand);
+  setOperationAction(ISD::FLOG10,            EVT::f32,   Expand);
+  setOperationAction(ISD::FEXP,              EVT::f32,   Expand);
 
   // We don't have line number support yet.
-  setOperationAction(ISD::DBG_STOPPOINT,     MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC,         MVT::Other, Expand);
-  setOperationAction(ISD::DBG_LABEL,         MVT::Other, Expand);
-  setOperationAction(ISD::EH_LABEL,          MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT,     EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC,         EVT::Other, Expand);
+  setOperationAction(ISD::DBG_LABEL,         EVT::Other, Expand);
+  setOperationAction(ISD::EH_LABEL,          EVT::Other, Expand);
 
   // Use the default for now
-  setOperationAction(ISD::STACKSAVE,         MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE,      MVT::Other, Expand);
-  setOperationAction(ISD::MEMBARRIER,        MVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE,         EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE,      EVT::Other, Expand);
+  setOperationAction(ISD::MEMBARRIER,        EVT::Other, Expand);
 
   if (Subtarget->isSingleFloat())
-    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+    setOperationAction(ISD::SELECT_CC, EVT::f64, Expand);
 
   if (!Subtarget->hasSEInReg()) {
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i8,  Expand);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i16, Expand);
   }
 
   if (!Subtarget->hasBitCount())
-    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+    setOperationAction(ISD::CTLZ, EVT::i32, Expand);
 
   if (!Subtarget->hasSwap())
-    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+    setOperationAction(ISD::BSWAP, EVT::i32, Expand);
 
   setStackPointerRegisterToSaveRestore(Mips::SP);
   computeRegisterProperties();
 }
 
-MVT::SimpleValueType MipsTargetLowering::getSetCCResultType(MVT VT) const {
-  return MVT::i32;
+EVT::SimpleValueType MipsTargetLowering::getSetCCResultType(EVT VT) const {
+  return EVT::i32;
 }
 
 /// getFunctionAlignment - Return the Log2 alignment of this function.
@@ -358,22 +358,22 @@
   SDValue Src = Op.getOperand(0);
 
   // Set the condition register
-  SDValue CondReg = DAG.getCopyFromReg(Chain, dl, CCReg, MVT::i32);
+  SDValue CondReg = DAG.getCopyFromReg(Chain, dl, CCReg, EVT::i32);
   CondReg = DAG.getCopyToReg(Chain, dl, Mips::AT, CondReg);
-  CondReg = DAG.getCopyFromReg(CondReg, dl, Mips::AT, MVT::i32);
+  CondReg = DAG.getCopyFromReg(CondReg, dl, Mips::AT, EVT::i32);
 
-  SDValue Cst = DAG.getConstant(3, MVT::i32);
-  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i32, CondReg, Cst);
-  Cst = DAG.getConstant(2, MVT::i32);
-  SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i32, Or, Cst);
+  SDValue Cst = DAG.getConstant(3, EVT::i32);
+  SDValue Or = DAG.getNode(ISD::OR, dl, EVT::i32, CondReg, Cst);
+  Cst = DAG.getConstant(2, EVT::i32);
+  SDValue Xor = DAG.getNode(ISD::XOR, dl, EVT::i32, Or, Cst);
 
   SDValue InFlag(0, 0);
   CondReg = DAG.getCopyToReg(Chain, dl, Mips::FCR31, Xor, InFlag);
 
   // Emit the round instruction and bit convert to integer
-  SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32,
+  SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, EVT::f32,
                               Src, CondReg.getValue(1));
-  SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Trunc);
+  SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Trunc);
   return BitCvt;
 }
 
@@ -385,11 +385,11 @@
   DebugLoc dl = Op.getDebugLoc();
 
   // Get a reference from Mips stack pointer
-  SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, MVT::i32);
+  SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, EVT::i32);
 
   // Subtract the dynamic size from the actual stack size to
   // obtain the new stack size.
-  SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
+  SDValue Sub = DAG.getNode(ISD::SUB, dl, EVT::i32, StackPointer, Size);
 
   // The Sub result contains the new stack start address, so it 
   // must be placed in the stack pointer register.
@@ -411,15 +411,15 @@
   if (LHS.getOpcode() != MipsISD::FPCmp || RHS.getOpcode() != MipsISD::FPCmp)
     return Op;
 
-  SDValue True  = DAG.getConstant(1, MVT::i32);
-  SDValue False = DAG.getConstant(0, MVT::i32);
+  SDValue True  = DAG.getConstant(1, EVT::i32);
+  SDValue False = DAG.getConstant(0, EVT::i32);
 
   SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(), 
                              LHS, True, False, LHS.getOperand(2));
   SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(), 
                              RHS, True, False, RHS.getOperand(2));
 
-  return DAG.getNode(Op.getOpcode(), dl, MVT::i32, LSEL, RSEL);
+  return DAG.getNode(Op.getOpcode(), dl, EVT::i32, LSEL, RSEL);
 }
 
 SDValue MipsTargetLowering::
@@ -438,7 +438,7 @@
   SDValue CCNode  = CondRes.getOperand(2);
   Mips::CondCode CC =
     (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
-  SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32); 
+  SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), EVT::i32); 
 
   return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode, 
              Dest, CondRes);
@@ -457,7 +457,7 @@
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
   
   return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS, 
-                 DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
+                 DAG.getConstant(FPCondCCodeToFCC(CC), EVT::i32));
 }
 
 SDValue MipsTargetLowering::
@@ -491,23 +491,23 @@
   // FIXME there isn't actually debug info here
   DebugLoc dl = Op.getDebugLoc();
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+  SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
 
   if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
     // %hi/%lo relocation
-    SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, GA);
-    SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GA);
-    return DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
+    SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, EVT::i32, GA);
+    SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, GA);
+    return DAG.getNode(ISD::ADD, dl, EVT::i32, HiPart, Lo);
 
   } else { // Abicall relocations, TODO: make this cleaner.
-    SDValue ResNode = DAG.getLoad(MVT::i32, dl, 
+    SDValue ResNode = DAG.getLoad(EVT::i32, dl, 
                                   DAG.getEntryNode(), GA, NULL, 0);
     // For functions and for globals that are not internally linked, only
     // a load from the GOT/GP is necessary for PIC to work.
     if (!GV->hasLocalLinkage() || isa<Function>(GV))
       return ResNode;
-    SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GA);
-    return DAG.getNode(ISD::ADD, dl, MVT::i32, ResNode, Lo);
+    SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, GA);
+    return DAG.getNode(ISD::ADD, dl, EVT::i32, ResNode, Lo);
   }
 
   llvm_unreachable("Dont know how to handle GlobalAddress");
@@ -529,19 +529,19 @@
   // FIXME there isn't actually debug info here
   DebugLoc dl = Op.getDebugLoc();
 
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   JumpTableSDNode *JT  = cast<JumpTableSDNode>(Op);
   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
 
   if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
-    SDVTList VTs = DAG.getVTList(MVT::i32);
+    SDVTList VTs = DAG.getVTList(EVT::i32);
     SDValue Ops[] = { JTI };
     HiPart = DAG.getNode(MipsISD::Hi, dl, VTs, Ops, 1);
   } else // Emit Load from Global Pointer
-    HiPart = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), JTI, NULL, 0);
+    HiPart = DAG.getLoad(EVT::i32, dl, DAG.getEntryNode(), JTI, NULL, 0);
 
-  SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, JTI);
-  ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
+  SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, JTI);
+  ResNode = DAG.getNode(ISD::ADD, dl, EVT::i32, HiPart, Lo);
 
   return ResNode;
 }
@@ -552,7 +552,7 @@
   SDValue ResNode;
   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
   Constant *C = N->getConstVal();
-  SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
+  SDValue CP = DAG.getTargetConstantPool(C, EVT::i32, N->getAlignment());
   // FIXME there isn't actually debug info here
   DebugLoc dl = Op.getDebugLoc();
 
@@ -562,13 +562,13 @@
   // hacking it. This feature should come soon so we can uncomment the 
   // stuff below.
   //if (IsInSmallSection(C->getType())) {
-  //  SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
-  //  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
-  //  ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode); 
+  //  SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, EVT::i32, CP);
+  //  SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(EVT::i32);
+  //  ResNode = DAG.getNode(ISD::ADD, EVT::i32, GOT, GPRelNode); 
   //} else { // %hi/%lo relocation
-    SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CP);
-    SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
-    ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
+    SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, EVT::i32, CP);
+    SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, CP);
+    ResNode = DAG.getNode(ISD::ADD, dl, EVT::i32, HiPart, Lo);
   //}
 
   return ResNode;
@@ -593,8 +593,8 @@
 //       go to stack.
 //===----------------------------------------------------------------------===//
 
-static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
-                       MVT LocVT, CCValAssign::LocInfo LocInfo,
+static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
+                       EVT LocVT, CCValAssign::LocInfo LocInfo,
                        ISD::ArgFlagsTy ArgFlags, CCState &State) {
 
   static const unsigned IntRegsSize=4, FloatRegsSize=2; 
@@ -614,8 +614,8 @@
   bool IntRegUsed = (IntRegs[UnallocIntReg] != (unsigned (Mips::A0)));
 
   // Promote i8 and i16
-  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
-    LocVT = MVT::i32;
+  if (LocVT == EVT::i8 || LocVT == EVT::i16) {
+    LocVT = EVT::i32;
     if (ArgFlags.isSExt())
       LocInfo = CCValAssign::SExt;
     else if (ArgFlags.isZExt())
@@ -624,20 +624,20 @@
       LocInfo = CCValAssign::AExt;
   }
 
-  if (ValVT == MVT::i32 || (ValVT == MVT::f32 && IntRegUsed)) {
+  if (ValVT == EVT::i32 || (ValVT == EVT::f32 && IntRegUsed)) {
     Reg = State.AllocateReg(IntRegs, IntRegsSize);
     IntRegUsed = true;
-    LocVT = MVT::i32;
+    LocVT = EVT::i32;
   }
 
   if (ValVT.isFloatingPoint() && !IntRegUsed) {
-    if (ValVT == MVT::f32)
+    if (ValVT == EVT::f32)
       Reg = State.AllocateReg(F32Regs, FloatRegsSize);
     else
       Reg = State.AllocateReg(F64Regs, FloatRegsSize);
   }
 
-  if (ValVT == MVT::f64 && IntRegUsed) {
+  if (ValVT == EVT::f64 && IntRegUsed) {
     if (UnallocIntReg != IntRegsSize) {
       // If we hit register A3 as the first unallocated register, we must
       // mark it as allocated (shadow) and use the stack instead.
@@ -646,7 +646,7 @@
       for (;UnallocIntReg < IntRegsSize; ++UnallocIntReg)
         State.AllocateReg(UnallocIntReg);
     } 
-    LocVT = MVT::i32;
+    LocVT = EVT::i32;
   }
 
   if (!Reg) {
@@ -686,7 +686,7 @@
   // To meet O32 ABI, Mips must always allocate 16 bytes on
   // the stack (even if less than 4 are used as arguments)
   if (Subtarget->isABI_O32()) {
-    int VTsize = MVT(MVT::i32).getSizeInBits()/8;
+    int VTsize = EVT(EVT::i32).getSizeInBits()/8;
     MFI->CreateFixedObject(VTsize, (VTsize*3));
     CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
   } else
@@ -715,13 +715,13 @@
     default: llvm_unreachable("Unknown loc info!");
     case CCValAssign::Full: 
       if (Subtarget->isABI_O32() && VA.isRegLoc()) {
-        if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32)
-          Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg);
-        if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) {
-          Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
-          SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
+        if (VA.getValVT() == EVT::f32 && VA.getLocVT() == EVT::i32)
+          Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Arg);
+        if (VA.getValVT() == EVT::f64 && VA.getLocVT() == EVT::i32) {
+          Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i64, Arg);
+          SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Arg,
                                    DAG.getConstant(0, getPointerTy()));
-          SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
+          SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Arg,
                                    DAG.getConstant(1, getPointerTy()));
           RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
           RegsToPass.push_back(std::make_pair(VA.getLocReg()+1, Hi));
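
The O32 hunk above passes an f64 argument in two i32 registers by bit-converting it to i64 and pulling out the two halves with EXTRACT_ELEMENT 0 and 1. A minimal standalone sketch of that bit-level split (plain C++, independent of the SelectionDAG API; the helper name splitF64 is illustrative only):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Sketch of the f64 -> (i32, i32) split used for O32 register passing:
    // element 0 is the low half of the bit pattern, element 1 the high half.
    static void splitF64(double D, uint32_t &Lo, uint32_t &Hi) {
      uint64_t Bits;
      std::memcpy(&Bits, &D, sizeof Bits);     // the BIT_CONVERT f64 -> i64
      Lo = static_cast<uint32_t>(Bits);        // EXTRACT_ELEMENT 0
      Hi = static_cast<uint32_t>(Bits >> 32);  // EXTRACT_ELEMENT 1
    }

    int main() {
      uint32_t Lo, Hi;
      splitF64(1.0, Lo, Hi);                   // 1.0 is 0x3FF0000000000000
      assert(Lo == 0x00000000u && Hi == 0x3FF00000u);
      return 0;
    }

The receiving side rebuilds the f64 with BUILD_PAIR from the two halves, as the LowerFormalArguments hunk further down shows.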
@@ -768,7 +768,7 @@
   // Transform all store nodes into one single node because all store
   // nodes are independent of each other.
   if (!MemOpChains.empty())     
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, 
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token 
@@ -794,7 +794,7 @@
   //             = Chain, Callee, Reg#1, Reg#2, ...  
   //
   // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(Callee);
@@ -838,9 +838,9 @@
       // Reload GP value.
       FI = MipsFI->getGPFI();
       SDValue FIN = DAG.getFrameIndex(FI,getPointerTy());
-      SDValue GPLoad = DAG.getLoad(MVT::i32, dl, Chain, FIN, NULL, 0);
+      SDValue GPLoad = DAG.getLoad(EVT::i32, dl, Chain, FIN, NULL, 0);
       Chain = GPLoad.getValue(1);
-      Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32), 
+      Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, EVT::i32), 
                                GPLoad, SDValue(0,0));
       InFlag = Chain.getValue(1);
   }      
@@ -919,14 +919,14 @@
 
     // Arguments stored on registers
     if (VA.isRegLoc()) {
-      MVT RegVT = VA.getLocVT();
+      EVT RegVT = VA.getLocVT();
       TargetRegisterClass *RC = 0;
 
-      if (RegVT == MVT::i32)
+      if (RegVT == EVT::i32)
         RC = Mips::CPURegsRegisterClass; 
-      else if (RegVT == MVT::f32) 
+      else if (RegVT == EVT::f32) 
         RC = Mips::FGR32RegisterClass;
-      else if (RegVT == MVT::f64) {
+      else if (RegVT == EVT::f64) {
         if (!Subtarget->isSingleFloat()) 
           RC = Mips::AFGR64RegisterClass;
       } else  
@@ -954,15 +954,15 @@
 
       // Handle O32 ABI cases: i32->f32 and (i32,i32)->f64 
       if (Subtarget->isABI_O32()) {
-        if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32) 
-          ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
-        if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
+        if (RegVT == EVT::i32 && VA.getValVT() == EVT::f32) 
+          ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, ArgValue);
+        if (RegVT == EVT::i32 && VA.getValVT() == EVT::f64) {
           unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(), 
                                     VA.getLocReg()+1, RC);
           SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
-          SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
-          SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2);
-          ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi);
+          SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, ArgValue);
+          SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, ArgValue2);
+          ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, EVT::f64, Lo, Hi);
         }
       }
 
@@ -1021,11 +1021,11 @@
   if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
     unsigned Reg = MipsFI->getSRetReturnReg();
     if (!Reg) {
-      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
+      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(EVT::i32));
       MipsFI->setSRetReturnReg(Reg);
     }
     SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Copy, Chain);
   }
 
   return Chain;
@@ -1094,11 +1094,11 @@
 
   // Return on Mips is always a "jr $ra"
   if (Flag.getNode())
-    return DAG.getNode(MipsISD::Ret, dl, MVT::Other, 
-                       Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag);
+    return DAG.getNode(MipsISD::Ret, dl, EVT::Other, 
+                       Chain, DAG.getRegister(Mips::RA, EVT::i32), Flag);
   else // Return Void
-    return DAG.getNode(MipsISD::Ret, dl, MVT::Other, 
-                       Chain, DAG.getRegister(Mips::RA, MVT::i32));
+    return DAG.getNode(MipsISD::Ret, dl, EVT::Other, 
+                       Chain, DAG.getRegister(Mips::RA, EVT::i32));
 }
 
 //===----------------------------------------------------------------------===//
@@ -1135,16 +1135,16 @@
 /// return a list of registers that can be used to satisfy the constraint.
 /// This should only be used for C_RegisterClass constraints.
 std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
-getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
+getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
 {
   if (Constraint.size() == 1) {
     switch (Constraint[0]) {
     case 'r':
       return std::make_pair(0U, Mips::CPURegsRegisterClass);
     case 'f':
-      if (VT == MVT::f32)
+      if (VT == EVT::f32)
         return std::make_pair(0U, Mips::FGR32RegisterClass);
-      if (VT == MVT::f64)    
+      if (VT == EVT::f64)    
         if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
           return std::make_pair(0U, Mips::AFGR64RegisterClass);
     }
@@ -1157,7 +1157,7 @@
 /// pointer.
 std::vector<unsigned> MipsTargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT VT) const
+                                  EVT VT) const
 {
   if (Constraint.size() != 1)
     return std::vector<unsigned>();
@@ -1174,7 +1174,7 @@
              Mips::T8, 0);
 
     case 'f':
-      if (VT == MVT::f32) {
+      if (VT == EVT::f32) {
         if (Subtarget->isSingleFloat())
           return make_vector<unsigned>(Mips::F2, Mips::F3, Mips::F4, Mips::F5,
                  Mips::F6, Mips::F7, Mips::F8, Mips::F9, Mips::F10, Mips::F11,
@@ -1187,7 +1187,7 @@
                  Mips::F28, Mips::F30, 0);
       }
 
-      if (VT == MVT::f64)    
+      if (VT == EVT::f64)    
         if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
           return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4, 
                  Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13, 
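
In every hunk above the substitution is textual: the enumerator names (i32, f64, Other, ...) are unchanged and only the MVT:: prefix becomes EVT::. An abridged, hypothetical fragment of what an affected target hook looks like after the patch (MyTarget and MyTargetLowering are placeholders, not classes in this tree; includes and declarations are omitted, so this is a sketch rather than compilable code):

    // Inside a target's TargetLowering constructor (fragment):
    addRegisterClass(EVT::i32, MyTarget::GPRRegisterClass);   // was MVT::i32
    setOperationAction(ISD::SELECT, EVT::i32,   Custom);      // was MVT::i32
    setOperationAction(ISD::BR_CC,  EVT::Other, Expand);      // was MVT::Other

    // The overridden hook keeps its shape; only the spelling changes:
    EVT::SimpleValueType MyTargetLowering::getSetCCResultType(EVT VT) const {
      return EVT::i32;                                        // was MVT::i32
    }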
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 5e5aacd..667356a 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -80,7 +80,7 @@
     virtual const char *getTargetNodeName(unsigned Opcode) const;
 
     /// getSetCCResultType - get the ISD::SETCC result ValueType
-    MVT::SimpleValueType getSetCCResultType(MVT VT) const;
+    EVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
     /// getFunctionAlignment - Return the Log2 alignment of this function.
     virtual unsigned getFunctionAlignment(const Function *F) const;
@@ -137,11 +137,11 @@
 
     std::pair<unsigned, const TargetRegisterClass*> 
               getRegForInlineAsmConstraint(const std::string &Constraint,
-              MVT VT) const;
+              EVT VT) const;
 
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
-              MVT VT) const;
+              EVT VT) const;
 
     virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
   };
diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td
index b9276fe..53d6d61 100644
--- a/lib/Target/Mips/MipsInstrInfo.td
+++ b/lib/Target/Mips/MipsInstrInfo.td
@@ -96,7 +96,7 @@
 // Node immediate fits as 16-bit sign extended on target immediate.
 // e.g. addi, andi
 def immSExt16  : PatLeaf<(imm), [{
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return (int32_t)N->getZExtValue() == (short)N->getZExtValue();
   else
     return (int64_t)N->getZExtValue() == (short)N->getZExtValue();
@@ -107,7 +107,7 @@
 // immediate are caught.
 // e.g. addiu, sltiu
 def immZExt16  : PatLeaf<(imm), [{
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return (uint32_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
   else
     return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
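
The two PatLeaf predicates above reduce to a round-trip test: truncate the immediate to 16 bits, extend it back (signed for addi/andi, unsigned for addiu/sltiu), and check that the value is unchanged. A standalone sketch of that check against plain 64-bit integers (illustrative only; the real fragments also branch on whether the node's type is EVT::i32 or i64):

    #include <cstdint>
    #include <cassert>

    // immSExt16: the value survives a sign-extended 16-bit round trip.
    static bool fitsSExt16(int64_t Imm) {
      return Imm == static_cast<int64_t>(static_cast<int16_t>(Imm));
    }

    // immZExt16: the value survives a zero-extended 16-bit round trip.
    static bool fitsZExt16(uint64_t Imm) {
      return Imm == static_cast<uint64_t>(static_cast<uint16_t>(Imm));
    }

    int main() {
      assert(fitsSExt16(-32768) && !fitsSExt16(40000));
      assert(fitsZExt16(65535)  && !fitsZExt16(65536));
      return 0;
    }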
diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp
index a98604b..72cc1e2 100644
--- a/lib/Target/PIC16/PIC16ISelLowering.cpp
+++ b/lib/Target/PIC16/PIC16ISelLowering.cpp
@@ -147,9 +147,9 @@
  
   Subtarget = &TM.getSubtarget<PIC16Subtarget>();
 
-  addRegisterClass(MVT::i8, PIC16::GPRRegisterClass);
+  addRegisterClass(EVT::i8, PIC16::GPRRegisterClass);
 
-  setShiftAmountType(MVT::i8);
+  setShiftAmountType(EVT::i8);
   
   // Std lib call names
   setLibcallName(RTLIB::COS_F32, getStdLibCallName(RTLIB::COS_F32));
@@ -243,65 +243,65 @@
   setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
   setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
 
-  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
-  setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i16, Custom);
+  setOperationAction(ISD::ExternalSymbol, EVT::i16, Custom);
 
-  setOperationAction(ISD::LOAD,   MVT::i8,  Legal);
-  setOperationAction(ISD::LOAD,   MVT::i16, Custom);
-  setOperationAction(ISD::LOAD,   MVT::i32, Custom);
+  setOperationAction(ISD::LOAD,   EVT::i8,  Legal);
+  setOperationAction(ISD::LOAD,   EVT::i16, Custom);
+  setOperationAction(ISD::LOAD,   EVT::i32, Custom);
 
-  setOperationAction(ISD::STORE,  MVT::i8,  Legal);
-  setOperationAction(ISD::STORE,  MVT::i16, Custom);
-  setOperationAction(ISD::STORE,  MVT::i32, Custom);
-  setOperationAction(ISD::STORE,  MVT::i64, Custom);
+  setOperationAction(ISD::STORE,  EVT::i8,  Legal);
+  setOperationAction(ISD::STORE,  EVT::i16, Custom);
+  setOperationAction(ISD::STORE,  EVT::i32, Custom);
+  setOperationAction(ISD::STORE,  EVT::i64, Custom);
 
-  setOperationAction(ISD::ADDE,    MVT::i8,  Custom);
-  setOperationAction(ISD::ADDC,    MVT::i8,  Custom);
-  setOperationAction(ISD::SUBE,    MVT::i8,  Custom);
-  setOperationAction(ISD::SUBC,    MVT::i8,  Custom);
-  setOperationAction(ISD::SUB,    MVT::i8,  Custom);
-  setOperationAction(ISD::ADD,    MVT::i8,  Custom);
-  setOperationAction(ISD::ADD,    MVT::i16, Custom);
+  setOperationAction(ISD::ADDE,    EVT::i8,  Custom);
+  setOperationAction(ISD::ADDC,    EVT::i8,  Custom);
+  setOperationAction(ISD::SUBE,    EVT::i8,  Custom);
+  setOperationAction(ISD::SUBC,    EVT::i8,  Custom);
+  setOperationAction(ISD::SUB,    EVT::i8,  Custom);
+  setOperationAction(ISD::ADD,    EVT::i8,  Custom);
+  setOperationAction(ISD::ADD,    EVT::i16, Custom);
 
-  setOperationAction(ISD::OR,     MVT::i8,  Custom);
-  setOperationAction(ISD::AND,    MVT::i8,  Custom);
-  setOperationAction(ISD::XOR,    MVT::i8,  Custom);
+  setOperationAction(ISD::OR,     EVT::i8,  Custom);
+  setOperationAction(ISD::AND,    EVT::i8,  Custom);
+  setOperationAction(ISD::XOR,    EVT::i8,  Custom);
 
-  setOperationAction(ISD::FrameIndex, MVT::i16, Custom);
+  setOperationAction(ISD::FrameIndex, EVT::i16, Custom);
 
-  setOperationAction(ISD::MUL,    MVT::i8,  Custom);
+  setOperationAction(ISD::MUL,    EVT::i8,  Custom);
 
-  setOperationAction(ISD::SMUL_LOHI,    MVT::i8,  Expand);
-  setOperationAction(ISD::UMUL_LOHI,    MVT::i8,  Expand);
-  setOperationAction(ISD::MULHU,        MVT::i8, Expand);
-  setOperationAction(ISD::MULHS,        MVT::i8, Expand);
+  setOperationAction(ISD::SMUL_LOHI,    EVT::i8,  Expand);
+  setOperationAction(ISD::UMUL_LOHI,    EVT::i8,  Expand);
+  setOperationAction(ISD::MULHU,        EVT::i8, Expand);
+  setOperationAction(ISD::MULHS,        EVT::i8, Expand);
 
-  setOperationAction(ISD::SRA,    MVT::i8,  Custom);
-  setOperationAction(ISD::SHL,    MVT::i8,  Custom);
-  setOperationAction(ISD::SRL,    MVT::i8,  Custom);
+  setOperationAction(ISD::SRA,    EVT::i8,  Custom);
+  setOperationAction(ISD::SHL,    EVT::i8,  Custom);
+  setOperationAction(ISD::SRL,    EVT::i8,  Custom);
 
-  setOperationAction(ISD::ROTL,    MVT::i8,  Expand);
-  setOperationAction(ISD::ROTR,    MVT::i8,  Expand);
+  setOperationAction(ISD::ROTL,    EVT::i8,  Expand);
+  setOperationAction(ISD::ROTR,    EVT::i8,  Expand);
 
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
   // PIC16 does not support shift parts
-  setOperationAction(ISD::SRA_PARTS,    MVT::i8, Expand);
-  setOperationAction(ISD::SHL_PARTS,    MVT::i8, Expand);
-  setOperationAction(ISD::SRL_PARTS,    MVT::i8, Expand);
+  setOperationAction(ISD::SRA_PARTS,    EVT::i8, Expand);
+  setOperationAction(ISD::SHL_PARTS,    EVT::i8, Expand);
+  setOperationAction(ISD::SRL_PARTS,    EVT::i8, Expand);
 
 
   // PIC16 does not have a SETCC, expand it to SELECT_CC.
-  setOperationAction(ISD::SETCC,  MVT::i8, Expand);
-  setOperationAction(ISD::SELECT,  MVT::i8, Expand);
-  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
-  setOperationAction(ISD::BRIND, MVT::Other, Expand);
+  setOperationAction(ISD::SETCC,  EVT::i8, Expand);
+  setOperationAction(ISD::SELECT,  EVT::i8, Expand);
+  setOperationAction(ISD::BRCOND, EVT::Other, Expand);
+  setOperationAction(ISD::BRIND, EVT::Other, Expand);
 
-  setOperationAction(ISD::SELECT_CC,  MVT::i8, Custom);
-  setOperationAction(ISD::BR_CC,  MVT::i8, Custom);
+  setOperationAction(ISD::SELECT_CC,  EVT::i8, Custom);
+  setOperationAction(ISD::BR_CC,  EVT::i8, Custom);
 
-  //setOperationAction(ISD::TRUNCATE, MVT::i16, Custom);
-  setTruncStoreAction(MVT::i16,   MVT::i8,  Custom);
+  //setOperationAction(ISD::TRUNCATE, EVT::i16, Custom);
+  setTruncStoreAction(EVT::i16,   EVT::i8,  Custom);
 
   // Now deduce the information based on the above-mentioned actions.
@@ -313,7 +313,7 @@
   // Flag is the last value of the node.
   SDValue Flag = Op.getValue(Op.getNode()->getNumValues() - 1);
 
-  assert (Flag.getValueType() == MVT::Flag 
+  assert (Flag.getValueType() == EVT::Flag 
           && "Node does not have an out Flag");
 
   return Flag;
@@ -340,12 +340,12 @@
 
   // If the last value returned is a Flag, then the chain is the
   // second-to-last value returned.
-  if (Chain.getValueType() == MVT::Flag)
+  if (Chain.getValueType() == EVT::Flag)
     Chain = Op.getValue(Op.getNode()->getNumValues() - 2);
   
   // Not all nodes produce a chain. Therefore the following assert
   // verifies that the node is indeed returning a chain.
-  assert (Chain.getValueType() == MVT::Other 
+  assert (Chain.getValueType() == EVT::Other 
           && "Node does not have a chain");
 
   return Chain;
@@ -365,9 +365,9 @@
     Results.push_back(N);
 }
 
-MVT::SimpleValueType
-PIC16TargetLowering::getSetCCResultType(MVT ValType) const {
-  return MVT::i8;
+EVT::SimpleValueType
+PIC16TargetLowering::getSetCCResultType(EVT ValType) const {
+  return EVT::i8;
 }
 
 /// The type legalizer framework of generating legalizer can generate libcalls
@@ -389,7 +389,7 @@
 
 SDValue
 PIC16TargetLowering::MakePIC16Libcall(PIC16ISD::PIC16Libcall Call,
-                                      MVT RetVT, const SDValue *Ops,
+                                      EVT RetVT, const SDValue *Ops,
                                       unsigned NumOps, bool isSigned,
                                       SelectionDAG &DAG, DebugLoc dl) {
 
@@ -399,14 +399,14 @@
   TargetLowering::ArgListEntry Entry;
   for (unsigned i = 0; i != NumOps; ++i) {
     Entry.Node = Ops[i];
-    Entry.Ty = Entry.Node.getValueType().getTypeForMVT();
+    Entry.Ty = Entry.Node.getValueType().getTypeForEVT();
     Entry.isSExt = isSigned;
     Entry.isZExt = !isSigned;
     Args.push_back(Entry);
   }
-  SDValue Callee = DAG.getExternalSymbol(getPIC16LibcallName(Call), MVT::i16);
+  SDValue Callee = DAG.getExternalSymbol(getPIC16LibcallName(Call), EVT::i16);
 
-   const Type *RetTy = RetVT.getTypeForMVT();
+   const Type *RetTy = RetVT.getTypeForEVT();
    std::pair<SDValue,SDValue> CallInfo = 
      LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
                  false, 0, CallingConv::C, false,
@@ -477,11 +477,11 @@
 
 SDValue PIC16TargetLowering::ExpandFrameIndex(SDNode *N, SelectionDAG &DAG) {
 
-  // Currently handling FrameIndex of size MVT::i16 only
+  // Currently handling FrameIndex of size EVT::i16 only
   // One example of this scenario is when the return value is written
   // to FrameIndex #0.
 
-  if (N->getValueType(0) != MVT::i16)
+  if (N->getValueType(0) != EVT::i16)
     return SDValue();
 
   // Expand the FrameIndex into ExternalSymbol and a Constant node
@@ -503,9 +503,9 @@
   int FrameOffset;
   SDValue FI = SDValue(N,0);
   LegalizeFrameIndex(FI, DAG, ES, FrameOffset);
-  SDValue Offset = DAG.getConstant(FrameOffset, MVT::i8);
-  SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, ES, Offset);
-  SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, ES, Offset);
+  SDValue Offset = DAG.getConstant(FrameOffset, EVT::i8);
+  SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, EVT::i8, ES, Offset);
+  SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, EVT::i8, ES, Offset);
   return DAG.getNode(ISD::BUILD_PAIR, dl, N->getValueType(0), Lo, Hi);
 }
 
@@ -515,19 +515,19 @@
   SDValue Chain = St->getChain();
   SDValue Src = St->getValue();
   SDValue Ptr = St->getBasePtr();
-  MVT ValueType = Src.getValueType();
+  EVT ValueType = Src.getValueType();
   unsigned StoreOffset = 0;
   DebugLoc dl = N->getDebugLoc();
 
   SDValue PtrLo, PtrHi;
   LegalizeAddress(Ptr, DAG, PtrLo, PtrHi, StoreOffset, dl);
  
-  if (ValueType == MVT::i8) {
-    return DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, Src,
+  if (ValueType == EVT::i8) {
+    return DAG.getNode (PIC16ISD::PIC16Store, dl, EVT::Other, Chain, Src,
                         PtrLo, PtrHi, 
-                        DAG.getConstant (0 + StoreOffset, MVT::i8));
+                        DAG.getConstant (0 + StoreOffset, EVT::i8));
   }
-  else if (ValueType == MVT::i16) {
+  else if (ValueType == EVT::i16) {
     // Get the Lo and Hi parts from MERGE_VALUE or BUILD_PAIR.
     SDValue SrcLo, SrcHi;
     GetExpandedParts(Src, DAG, SrcLo, SrcHi);
@@ -536,19 +536,19 @@
       ChainLo = Chain.getOperand(0);
       ChainHi = Chain.getOperand(1);
     }
-    SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other,
+    SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other,
                                  ChainLo,
                                  SrcLo, PtrLo, PtrHi,
-                                 DAG.getConstant (0 + StoreOffset, MVT::i8));
+                                 DAG.getConstant (0 + StoreOffset, EVT::i8));
 
-    SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi, 
+    SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainHi, 
                                  SrcHi, PtrLo, PtrHi,
-                                 DAG.getConstant (1 + StoreOffset, MVT::i8));
+                                 DAG.getConstant (1 + StoreOffset, EVT::i8));
 
-    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, getChain(Store1),
+    return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, getChain(Store1),
                        getChain(Store2));
   }
-  else if (ValueType == MVT::i32) {
+  else if (ValueType == EVT::i32) {
     // Get the Lo and Hi parts from MERGE_VALUE or BUILD_PAIR.
     SDValue SrcLo, SrcHi;
     GetExpandedParts(Src, DAG, SrcLo, SrcHi);
@@ -573,30 +573,30 @@
       ChainHi1 = ChainHi.getOperand(0);
       ChainHi2 = ChainHi.getOperand(1);
     }
-    SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other,
+    SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other,
                                  ChainLo1,
                                  SrcLo1, PtrLo, PtrHi,
-                                 DAG.getConstant (0 + StoreOffset, MVT::i8));
+                                 DAG.getConstant (0 + StoreOffset, EVT::i8));
 
-    SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainLo2,
+    SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainLo2,
                                  SrcLo2, PtrLo, PtrHi,
-                                 DAG.getConstant (1 + StoreOffset, MVT::i8));
+                                 DAG.getConstant (1 + StoreOffset, EVT::i8));
 
-    SDValue Store3 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi1,
+    SDValue Store3 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainHi1,
                                  SrcHi1, PtrLo, PtrHi,
-                                 DAG.getConstant (2 + StoreOffset, MVT::i8));
+                                 DAG.getConstant (2 + StoreOffset, EVT::i8));
 
-    SDValue Store4 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi2,
+    SDValue Store4 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainHi2,
                                  SrcHi2, PtrLo, PtrHi,
-                                 DAG.getConstant (3 + StoreOffset, MVT::i8));
+                                 DAG.getConstant (3 + StoreOffset, EVT::i8));
 
-    SDValue RetLo =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+    SDValue RetLo =  DAG.getNode(ISD::TokenFactor, dl, EVT::Other, 
                                  getChain(Store1), getChain(Store2));
-    SDValue RetHi =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+    SDValue RetHi =  DAG.getNode(ISD::TokenFactor, dl, EVT::Other, 
                                  getChain(Store3), getChain(Store4));
-    return  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, RetLo, RetHi);
+    return  DAG.getNode(ISD::TokenFactor, dl, EVT::Other, RetLo, RetHi);
 
-  } else if (ValueType == MVT::i64) {
+  } else if (ValueType == EVT::i64) {
     SDValue SrcLo, SrcHi;
     GetExpandedParts(Src, DAG, SrcLo, SrcHi);
     SDValue ChainLo = Chain, ChainHi = Chain;
@@ -612,7 +612,7 @@
     SDValue Store2 = DAG.getStore(ChainHi, dl, SrcHi, Ptr, NULL,
                                   1 + StoreOffset);
 
-    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1,
+    return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Store1,
                        Store2);
   } else {
     assert (0 && "value type not supported");
@@ -626,12 +626,12 @@
   // FIXME there isn't really debug info here
   DebugLoc dl = ES->getDebugLoc();
 
-  SDValue TES = DAG.getTargetExternalSymbol(ES->getSymbol(), MVT::i8);
-  SDValue Offset = DAG.getConstant(0, MVT::i8);
-  SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, TES, Offset);
-  SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, TES, Offset);
+  SDValue TES = DAG.getTargetExternalSymbol(ES->getSymbol(), EVT::i8);
+  SDValue Offset = DAG.getConstant(0, EVT::i8);
+  SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, EVT::i8, TES, Offset);
+  SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, EVT::i8, TES, Offset);
 
-  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, Lo, Hi);
+  return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16, Lo, Hi);
 }
 
 // ExpandGlobalAddress - 
@@ -640,14 +640,14 @@
   // FIXME there isn't really debug info here
   DebugLoc dl = G->getDebugLoc();
   
-  SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i8,
+  SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i8,
                                            G->getOffset());
 
-  SDValue Offset = DAG.getConstant(0, MVT::i8);
-  SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, TGA, Offset);
-  SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, TGA, Offset);
+  SDValue Offset = DAG.getConstant(0, EVT::i8);
+  SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, EVT::i8, TGA, Offset);
+  SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, EVT::i8, TGA, Offset);
 
-  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, Lo, Hi);
+  return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16, Lo, Hi);
 }
 
 bool PIC16TargetLowering::isDirectAddress(const SDValue &Op) {
@@ -690,15 +690,15 @@
                                            SDValue &Lo, SDValue &Hi) {  
   SDNode *N = Op.getNode();
   DebugLoc dl = N->getDebugLoc();
-  MVT NewVT = getTypeToTransformTo(N->getValueType(0));
+  EVT NewVT = getTypeToTransformTo(N->getValueType(0));
 
   // Extract the lo component.
   Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NewVT, Op,
-                   DAG.getConstant(0, MVT::i8));
+                   DAG.getConstant(0, EVT::i8));
 
   // extract the hi component
   Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NewVT, Op,
-                   DAG.getConstant(1, MVT::i8));
+                   DAG.getConstant(1, EVT::i8));
 }
 
 // Legalize FrameIndex into ExternalSymbol and offset.
@@ -722,7 +722,7 @@
   const char *tmpName;
   if (FIndex < ReservedFrameCount) {
     tmpName = createESName(PAN::getFrameLabel(Name));
-    ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
+    ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
     Offset = 0;
     for (unsigned i=0; i<FIndex ; ++i) {
       Offset += MFI->getObjectSize(i);
@@ -730,7 +730,7 @@
   } else {
    // FrameIndex has been made for some temporary storage 
     tmpName = createESName(PAN::getTempdataLabel(Name));
-    ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
+    ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
     Offset = GetTmpOffsetForFI(FIndex, MFI->getObjectSize(FIndex));
   }
 
@@ -776,11 +776,11 @@
   // then treat it as direct address.
   // One example of such a case is storing to and loading
   // from the function frame during a call.
-  if (Ptr.getValueType() == MVT::i8) {
+  if (Ptr.getValueType() == EVT::i8) {
     switch (Ptr.getOpcode()) {
     case ISD::TargetExternalSymbol:
       Lo = Ptr;
-      Hi = DAG.getConstant(1, MVT::i8);
+      Hi = DAG.getConstant(1, EVT::i8);
       return;
     }
   }
@@ -791,14 +791,14 @@
       int FrameOffset;
       if (TFI.getOpcode() == ISD::TargetFrameIndex) {
         LegalizeFrameIndex(TFI, DAG, Lo, FrameOffset);
-        Hi = DAG.getConstant(1, MVT::i8);
+        Hi = DAG.getConstant(1, EVT::i8);
         Offset += FrameOffset; 
         return;
       } else if (TFI.getOpcode() == ISD::TargetExternalSymbol) {
         // FrameIndex has already been expanded.
         // Now just make use of its expansion
         Lo = TFI;
-        Hi = DAG.getConstant(1, MVT::i8);
+        Hi = DAG.getConstant(1, EVT::i8);
         SDValue FOffset = Ptr.getOperand(0).getOperand(1);
         assert (FOffset.getOpcode() == ISD::Constant && 
                           "Invalid operand of PIC16ISD::Lo");
@@ -816,7 +816,7 @@
     // signifies that banksel needs to be generated for it. Value 0 for
     // the constant signifies that banksel does not need to be generated 
     // for it. Mark it as 1 now and optimize later. 
-    Hi = DAG.getConstant(1, MVT::i8);
+    Hi = DAG.getConstant(1, EVT::i8);
     return; 
   }
 
@@ -824,8 +824,8 @@
   GetExpandedParts(Ptr, DAG, Lo, Hi);
 
   // Put the hi and lo parts into FSR.
-  Lo = DAG.getNode(PIC16ISD::MTLO, dl, MVT::i8, Lo);
-  Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Hi);
+  Lo = DAG.getNode(PIC16ISD::MTLO, dl, EVT::i8, Lo);
+  Hi = DAG.getNode(PIC16ISD::MTHI, dl, EVT::i8, Hi);
 
   return;
 }
@@ -838,7 +838,7 @@
 
   SDValue Load, Offset;
   SDVTList Tys; 
-  MVT VT, NewVT;
+  EVT VT, NewVT;
   SDValue PtrLo, PtrHi;
   unsigned LoadOffset;
 
@@ -851,12 +851,12 @@
   unsigned NumLoads = VT.getSizeInBits() / 8; 
   std::vector<SDValue> PICLoads;
   unsigned iter;
-  MVT MemVT = LD->getMemoryVT();
+  EVT MemVT = LD->getMemoryVT();
   if(ISD::isNON_EXTLoad(N)) {
     for (iter=0; iter<NumLoads ; ++iter) {
       // Add the pointer offset if any
-      Offset = DAG.getConstant(iter + LoadOffset, MVT::i8);
-      Tys = DAG.getVTList(MVT::i8, MVT::Other); 
+      Offset = DAG.getConstant(iter + LoadOffset, EVT::i8);
+      Tys = DAG.getVTList(EVT::i8, EVT::Other); 
       Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Chain, PtrLo, PtrHi,
                          Offset); 
       PICLoads.push_back(Load);
@@ -869,20 +869,20 @@
     
     // For extended loads this is the memory value type
     // i.e. without any extension
-    MVT MemVT = LD->getMemoryVT();
+    EVT MemVT = LD->getMemoryVT();
     unsigned MemBytes = MemVT.getSizeInBits() / 8;
-    // if MVT::i1 is extended to MVT::i8 then MemBytes will be zero
+    // if EVT::i1 is extended to EVT::i8 then MemBytes will be zero
     // So set it to one
     if (MemBytes == 0) MemBytes = 1;
     
     unsigned ExtdBytes = VT.getSizeInBits() / 8;
-    Offset = DAG.getConstant(LoadOffset, MVT::i8);
+    Offset = DAG.getConstant(LoadOffset, EVT::i8);
 
-    Tys = DAG.getVTList(MVT::i8, MVT::Other); 
+    Tys = DAG.getVTList(EVT::i8, EVT::Other); 
     // For MemBytes generate PIC16Load with proper offset
     for (iter=0; iter < MemBytes; ++iter) {
       // Add the pointer offset if any
-      Offset = DAG.getConstant(iter + LoadOffset, MVT::i8);
+      Offset = DAG.getConstant(iter + LoadOffset, EVT::i8);
       Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Chain, PtrLo, PtrHi,
                          Offset); 
       PICLoads.push_back(Load);
@@ -892,15 +892,15 @@
     if (ISD::isSEXTLoad(N)) {
       // For all ExtdBytes use the Right Shifted(Arithmetic) Value of the 
       // highest MemByte
-      SDValue SRA = DAG.getNode(ISD::SRA, dl, MVT::i8, Load, 
-                                DAG.getConstant(7, MVT::i8));
+      SDValue SRA = DAG.getNode(ISD::SRA, dl, EVT::i8, Load, 
+                                DAG.getConstant(7, EVT::i8));
       for (iter=MemBytes; iter<ExtdBytes; ++iter) { 
         PICLoads.push_back(SRA);
       }
     } else if (ISD::isZEXTLoad(N) || ISD::isEXTLoad(N)) {
     //} else if (ISD::isZEXTLoad(N)) {
       // ZeroExtendedLoad -- For all ExtdBytes use constant 0
-      SDValue ConstZero = DAG.getConstant(0, MVT::i8);
+      SDValue ConstZero = DAG.getConstant(0, EVT::i8);
       for (iter=MemBytes; iter<ExtdBytes; ++iter) { 
         PICLoads.push_back(ConstZero);
       }
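
In the extending-load hunk above, a sign-extending load is widened byte by byte: each extension byte is the highest loaded byte shifted right arithmetically by 7, which replicates its sign bit, while a zero-extending load uses constant 0 instead. A standalone sketch of the i8 -> i16 case (plain C++; the arithmetic shift of a negative value is assumed to behave as on the usual two's-complement targets):

    #include <cstdint>
    #include <cassert>

    // SEXTLOAD fill byte: SRA of the top loaded byte by 7 gives 0x00 or 0xFF.
    static int16_t sextLoadI8ToI16(int8_t Loaded) {
      uint8_t Lo = static_cast<uint8_t>(Loaded);
      uint8_t Hi = static_cast<uint8_t>(Loaded >> 7);   // the SRA by 7
      return static_cast<int16_t>(static_cast<uint16_t>(Hi) << 8 | Lo);
    }

    int main() {
      assert(sextLoadI8ToI16(-1)  == -1);    // fill byte 0xFF
      assert(sextLoadI8ToI16(127) == 127);   // fill byte 0x00
      return 0;
    }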
@@ -908,46 +908,46 @@
   }
   SDValue BP;
 
-  if (VT == MVT::i8) {
+  if (VT == EVT::i8) {
     // Operand of Load is illegal -- Load itself is legal
     return PICLoads[0];
   }
-  else if (VT == MVT::i16) {
+  else if (VT == EVT::i16) {
     BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, PICLoads[0], PICLoads[1]);
-    if (MemVT == MVT::i8)
+    if (MemVT == EVT::i8)
       Chain = getChain(PICLoads[0]);
     else
-      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+      Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, 
                           getChain(PICLoads[0]), getChain(PICLoads[1]));
-  } else if (VT == MVT::i32) {
+  } else if (VT == EVT::i32) {
     SDValue BPs[2];
-    BPs[0] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, 
+    BPs[0] = DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16, 
                          PICLoads[0], PICLoads[1]);
-    BPs[1] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16,
+    BPs[1] = DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16,
                          PICLoads[2], PICLoads[3]);
     BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, BPs[0], BPs[1]);
-    if (MemVT == MVT::i8)
+    if (MemVT == EVT::i8)
       Chain = getChain(PICLoads[0]);
-    else if (MemVT == MVT::i16)
-      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+    else if (MemVT == EVT::i16)
+      Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, 
                           getChain(PICLoads[0]), getChain(PICLoads[1]));
     else {
       SDValue Chains[2];
-      Chains[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+      Chains[0] = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                               getChain(PICLoads[0]), getChain(PICLoads[1]));
-      Chains[1] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+      Chains[1] = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                               getChain(PICLoads[2]), getChain(PICLoads[3]));
-      Chain =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+      Chain =  DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                            Chains[0], Chains[1]);
     }
   }
-  Tys = DAG.getVTList(VT, MVT::Other); 
+  Tys = DAG.getVTList(VT, EVT::Other); 
   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, BP, Chain);
 }
 
 SDValue PIC16TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
   // We should have handled larger operands in type legalizer itself.
-  assert (Op.getValueType() == MVT::i8 && "illegal shift to lower");
+  assert (Op.getValueType() == EVT::i8 && "illegal shift to lower");
  
   SDNode *N = Op.getNode();
   SDValue Value = N->getOperand(0);
@@ -977,7 +977,7 @@
 
 SDValue PIC16TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
   // We should have handled larger operands in type legalizer itself.
-  assert (Op.getValueType() == MVT::i8 && "illegal multiply to lower");
+  assert (Op.getValueType() == EVT::i8 && "illegal multiply to lower");
 
   SDNode *N = Op.getNode();
   SmallVector<SDValue, 2> Ops(2);
@@ -1050,7 +1050,7 @@
 SDValue PIC16TargetLowering::ConvertToMemOperand(SDValue Op,
                                                  SelectionDAG &DAG,
                                                  DebugLoc dl) {
-  assert (Op.getValueType() == MVT::i8 
+  assert (Op.getValueType() == EVT::i8 
           && "illegal value type to store on stack.");
 
   MachineFunction &MF = DAG.getMachineFunction();
@@ -1062,22 +1062,22 @@
   // Get a stack slot index and convert to es.
   int FI = MF.getFrameInfo()->CreateStackObject(1, 1);
   const char *tmpName = createESName(PAN::getTempdataLabel(FuncName));
-  SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
+  SDValue ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
 
   // Store the value to ES.
-  SDValue Store = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other,
+  SDValue Store = DAG.getNode (PIC16ISD::PIC16Store, dl, EVT::Other,
                                DAG.getEntryNode(),
                                Op, ES, 
-                               DAG.getConstant (1, MVT::i8), // Banksel.
+                               DAG.getConstant (1, EVT::i8), // Banksel.
                                DAG.getConstant (GetTmpOffsetForFI(FI, 1), 
-                                                MVT::i8));
+                                                EVT::i8));
 
   // Load the value from ES.
-  SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other);
+  SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other);
   SDValue Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Store,
-                             ES, DAG.getConstant (1, MVT::i8),
+                             ES, DAG.getConstant (1, EVT::i8),
                              DAG.getConstant (GetTmpOffsetForFI(FI, 1), 
-                             MVT::i8));
+                             EVT::i8));
     
   return Load.getValue(0);
 }
@@ -1094,7 +1094,7 @@
     return Chain;
 
   std::vector<SDValue> Ops;
-  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
   SDValue Arg, StoreRet;
 
   // For PIC16 ABI the arguments come after the return value. 
@@ -1108,7 +1108,7 @@
     Ops.push_back(Arg);
     Ops.push_back(DataAddr_Lo);
     Ops.push_back(DataAddr_Hi);
-    Ops.push_back(DAG.getConstant(ArgOffset, MVT::i8));
+    Ops.push_back(DAG.getConstant(ArgOffset, EVT::i8));
     Ops.push_back(InFlag);
 
     StoreRet = DAG.getNode (PIC16ISD::PIC16StWF, dl, Tys, &Ops[0], Ops.size());
@@ -1127,7 +1127,7 @@
   unsigned NumOps = Outs.size();
   std::string Name;
   SDValue Arg, StoreAt;
-  MVT ArgVT;
+  EVT ArgVT;
   unsigned Size=0;
 
   // If call has no arguments then do nothing and return.
@@ -1145,7 +1145,7 @@
   SDValue StoreRet;
 
   std::vector<SDValue> Ops;
-  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
   for (unsigned i=0, Offset = 0; i<NumOps; i++) {
     // Get the argument
     Arg = Outs[i].Val;
@@ -1158,7 +1158,7 @@
     Ops.push_back(Arg);
     Ops.push_back(PtrLo);
     Ops.push_back(PtrHi);
-    Ops.push_back(DAG.getConstant(StoreOffset, MVT::i8));
+    Ops.push_back(DAG.getConstant(StoreOffset, EVT::i8));
     Ops.push_back(InFlag);
 
     StoreRet = DAG.getNode (PIC16ISD::PIC16StWF, dl, Tys, &Ops[0], Ops.size());
@@ -1191,10 +1191,10 @@
   // Call has something to return
   SDValue LoadRet;
 
-  SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other, EVT::Flag);
   for(unsigned i=0;i<RetVals;i++) {
     LoadRet = DAG.getNode(PIC16ISD::PIC16LdWF, dl, Tys, Chain, DataAddr_Lo,
-                          DataAddr_Hi, DAG.getConstant(i, MVT::i8),
+                          DataAddr_Hi, DAG.getConstant(i, EVT::i8),
                           InFlag);
     InFlag = getOutFlag(LoadRet);
     Chain = getChain(LoadRet);
@@ -1224,13 +1224,13 @@
   unsigned LdOffset;
   LegalizeAddress(RetLabel, DAG, LdLo, LdHi, LdOffset, dl);
 
-  SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other, EVT::Flag);
   SDValue LoadRet;
  
   for(unsigned i=0, Offset=0;i<RetVals;i++) {
 
     LoadRet = DAG.getNode(PIC16ISD::PIC16LdWF, dl, Tys, Chain, LdLo, LdHi,
-                          DAG.getConstant(LdOffset + Offset, MVT::i8),
+                          DAG.getConstant(LdOffset + Offset, EVT::i8),
                           InFlag);
 
     InFlag = getOutFlag(LoadRet);
@@ -1259,18 +1259,18 @@
   std::string FuncName = F->getName();
 
   const char *tmpName = createESName(PAN::getFrameLabel(FuncName));
-  SDVTList VTs  = DAG.getVTList (MVT::i8, MVT::Other);
-  SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
-  SDValue BS = DAG.getConstant(1, MVT::i8);
+  SDVTList VTs  = DAG.getVTList (EVT::i8, EVT::Other);
+  SDValue ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
+  SDValue BS = DAG.getConstant(1, EVT::i8);
   SDValue RetVal;
   for(unsigned i=0;i<NumRet; ++i) {
     RetVal = Outs[i].Val;
-    Chain =  DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, RetVal,
+    Chain =  DAG.getNode (PIC16ISD::PIC16Store, dl, EVT::Other, Chain, RetVal,
                         ES, BS,
-                        DAG.getConstant (i, MVT::i8));
+                        DAG.getConstant (i, EVT::i8));
       
   }
-  return DAG.getNode(PIC16ISD::RET, dl, MVT::Other, Chain);
+  return DAG.getNode(PIC16ISD::RET, dl, EVT::Other, Chain);
 }
 
 void PIC16TargetLowering::
@@ -1279,7 +1279,7 @@
                SelectionDAG &DAG) {
    assert (Callee.getOpcode() == PIC16ISD::PIC16Connect
            && "Don't know what to do of such callee!!");
-   SDValue ZeroOperand = DAG.getConstant(0, MVT::i8);
+   SDValue ZeroOperand = DAG.getConstant(0, EVT::i8);
    SDValue SeqStart  = DAG.getCALLSEQ_START(Chain, ZeroOperand);
    Chain = getChain(SeqStart);
    SDValue OperFlag = getOutFlag(SeqStart); // To manage the data dependency
@@ -1289,15 +1289,15 @@
    SDValue Hi = Callee.getOperand(1);
 
    SDValue Data_Lo, Data_Hi;
-   SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
+   SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other, EVT::Flag);
    // Subtract 2 from Address to get the Lower part of DataAddress.
-   SDVTList VTList = DAG.getVTList(MVT::i8, MVT::Flag);
+   SDVTList VTList = DAG.getVTList(EVT::i8, EVT::Flag);
    Data_Lo = DAG.getNode(ISD::SUBC, dl, VTList, Lo, 
-                         DAG.getConstant(2, MVT::i8));
-   SDValue Ops[3] = { Hi, DAG.getConstant(0, MVT::i8), Data_Lo.getValue(1)};
+                         DAG.getConstant(2, EVT::i8));
+   SDValue Ops[3] = { Hi, DAG.getConstant(0, EVT::i8), Data_Lo.getValue(1)};
    Data_Hi = DAG.getNode(ISD::SUBE, dl, VTList, Ops, 3);
-   SDValue PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, Data_Hi);
-   Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Data_Lo, PCLATH);
+   SDValue PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, EVT::i8, Data_Hi);
+   Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, Data_Lo, PCLATH);
    SDValue Call = DAG.getNode(PIC16ISD::CALLW, dl, Tys, Chain, Callee,
                               OperFlag);
    Chain = getChain(Call);
@@ -1308,7 +1308,7 @@
    OperFlag = getOutFlag(SeqEnd);
 
    // Low part of Data Address 
-   DataAddr_Lo = DAG.getNode(PIC16ISD::MTLO, dl, MVT::i8, Call, OperFlag);
+   DataAddr_Lo = DAG.getNode(PIC16ISD::MTLO, dl, EVT::i8, Call, OperFlag);
 
    // Make the second call.
    SeqStart  = DAG.getCALLSEQ_START(Chain, ZeroOperand);
@@ -1317,13 +1317,13 @@
 
    // Subtract 1 from Address to get high part of data address.
    Data_Lo = DAG.getNode(ISD::SUBC, dl, VTList, Lo, 
-                         DAG.getConstant(1, MVT::i8));
-   SDValue HiOps[3] = { Hi, DAG.getConstant(0, MVT::i8), Data_Lo.getValue(1)};
+                         DAG.getConstant(1, EVT::i8));
+   SDValue HiOps[3] = { Hi, DAG.getConstant(0, EVT::i8), Data_Lo.getValue(1)};
    Data_Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps, 3);
-   PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, Data_Hi);
+   PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, EVT::i8, Data_Hi);
 
    // Use new Lo to make another CALLW
-   Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Data_Lo, PCLATH);
+   Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, Data_Lo, PCLATH);
    Call = DAG.getNode(PIC16ISD::CALLW, dl, Tys, Chain, Callee, OperFlag);
    Chain = getChain(Call);
    OperFlag = getOutFlag(Call);
@@ -1332,7 +1332,7 @@
    Chain = getChain(SeqEnd);
    OperFlag = getOutFlag(SeqEnd);
    // Hi part of Data Address
-   DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Call, OperFlag);
+   DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, EVT::i8, Call, OperFlag);
 }
 
 SDValue
@@ -1344,7 +1344,7 @@
                                DebugLoc dl, SelectionDAG &DAG,
                                SmallVectorImpl<SDValue> &InVals) {
 
-    assert(Callee.getValueType() == MVT::i16 &&
+    assert(Callee.getValueType() == EVT::i16 &&
            "Don't know how to legalize this call node!!!");
 
     // The flag to track if this is a direct or indirect call.
@@ -1367,7 +1367,7 @@
        // Indirect addresses. Get the hi and lo parts of ptr.
        GetExpandedParts(Callee, DAG, Lo, Hi);
        // Connect Lo and Hi parts of the callee with the PIC16Connect
-       Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Lo, Hi);
+       Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, Lo, Hi);
 
        // Read DataAddress only if we have to pass arguments or 
        // read return value. 
@@ -1375,7 +1375,7 @@
          GetDataAddress(dl, Callee, Chain, DataAddr_Lo, DataAddr_Hi, DAG);
     }
 
-    SDValue ZeroOperand = DAG.getConstant(0, MVT::i8);
+    SDValue ZeroOperand = DAG.getConstant(0, EVT::i8);
 
     // Start the call sequence.
     // Carrying the Constant 0 along the CALLSEQSTART
@@ -1392,32 +1392,32 @@
        // Considering the GlobalAddressNode case here.
        if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
           GlobalValue *GV = G->getGlobal();
-          Callee = DAG.getTargetGlobalAddress(GV, MVT::i8);
+          Callee = DAG.getTargetGlobalAddress(GV, EVT::i8);
           Name = G->getGlobal()->getName();
        } else {// Considering the ExternalSymbol case here
           ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Callee);
-          Callee = DAG.getTargetExternalSymbol(ES->getSymbol(), MVT::i8); 
+          Callee = DAG.getTargetExternalSymbol(ES->getSymbol(), EVT::i8); 
           Name = ES->getSymbol();
        }
 
        // Label for argument passing
        const char *argFrame = createESName(PAN::getArgsLabel(Name));
-       ArgLabel = DAG.getTargetExternalSymbol(argFrame, MVT::i8);
+       ArgLabel = DAG.getTargetExternalSymbol(argFrame, EVT::i8);
 
        // Label for reading return value
        const char *retName = createESName(PAN::getRetvalLabel(Name));
-       RetLabel = DAG.getTargetExternalSymbol(retName, MVT::i8);
+       RetLabel = DAG.getTargetExternalSymbol(retName, EVT::i8);
     } else {
        // if indirect call
        SDValue CodeAddr_Lo = Callee.getOperand(0);
        SDValue CodeAddr_Hi = Callee.getOperand(1);
 
-       /*CodeAddr_Lo = DAG.getNode(ISD::ADD, dl, MVT::i8, CodeAddr_Lo,
-                                 DAG.getConstant(2, MVT::i8));*/
+       /*CodeAddr_Lo = DAG.getNode(ISD::ADD, dl, EVT::i8, CodeAddr_Lo,
+                                 DAG.getConstant(2, EVT::i8));*/
 
        // move Hi part in PCLATH
-       CodeAddr_Hi = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, CodeAddr_Hi);
-       Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, CodeAddr_Lo,
+       CodeAddr_Hi = DAG.getNode(PIC16ISD::MTPCLATH, dl, EVT::i8, CodeAddr_Hi);
+       Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, CodeAddr_Lo,
                             CodeAddr_Hi);
     } 
 
@@ -1435,7 +1435,7 @@
       OperFlag = getOutFlag(CallArgs);
     }
 
-    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+    SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
     SDValue PICCall = DAG.getNode(PIC16ISD::CALL, dl, Tys, Chain, Callee,
                                   OperFlag);
     Chain = getChain(PICCall);
@@ -1500,14 +1500,14 @@
   DebugLoc dl = Op.getDebugLoc();
 
   // We should have handled larger operands in type legalizer itself.
-  assert (Op.getValueType() == MVT::i8 && "illegal Op to lower");
+  assert (Op.getValueType() == EVT::i8 && "illegal Op to lower");
 
   unsigned MemOp = 1;
   if (NeedToConvertToMemOp(Op, MemOp)) {
     // Put one value on stack.
     SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl);
 
-    return DAG.getNode(Op.getOpcode(), dl, MVT::i8, Op.getOperand(MemOp ^ 1),
+    return DAG.getNode(Op.getOpcode(), dl, EVT::i8, Op.getOperand(MemOp ^ 1),
     NewVal);
   }
   else {
@@ -1519,7 +1519,7 @@
 // that affects carry.
 SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) {
   // We should have handled larger operands in type legalizer itself.
-  assert (Op.getValueType() == MVT::i8 && "illegal add to lower");
+  assert (Op.getValueType() == EVT::i8 && "illegal add to lower");
   DebugLoc dl = Op.getDebugLoc();
   unsigned MemOp = 1;
   if (NeedToConvertToMemOp(Op, MemOp)) {
@@ -1527,7 +1527,7 @@
     SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl);
     
     // ADDC and ADDE produce two results.
-    SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag);
+    SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Flag);
 
     // ADDE has three operands, the last one is the carry bit.
     if (Op.getOpcode() == ISD::ADDE)
@@ -1539,7 +1539,7 @@
                          NewVal);
     // ADD it is. It produces only one result.
     else
-      return DAG.getNode(Op.getOpcode(), dl, MVT::i8, Op.getOperand(MemOp ^ 1),
+      return DAG.getNode(Op.getOpcode(), dl, EVT::i8, Op.getOperand(MemOp ^ 1),
                          NewVal);
   }
   else
@@ -1549,7 +1549,7 @@
 SDValue PIC16TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
   // We should have handled larger operands in type legalizer itself.
-  assert (Op.getValueType() == MVT::i8 && "illegal sub to lower");
+  assert (Op.getValueType() == EVT::i8 && "illegal sub to lower");
 
   // Nothing to do if the first operand is already a direct load and it has
   // only one use.
@@ -1559,7 +1559,7 @@
   // Put first operand on stack.
   SDValue NewVal = ConvertToMemOperand (Op.getOperand(0), DAG, dl);
 
-  SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Flag);
   switch (Op.getOpcode()) {
     default:
       assert (0 && "Opcode unknown."); 
@@ -1571,7 +1571,7 @@
       return DAG.getNode(Op.getOpcode(), dl, Tys, NewVal, Op.getOperand(1));
       break;
     case ISD::SUB:
-      return DAG.getNode(Op.getOpcode(), dl, MVT::i8, NewVal, Op.getOperand(1));
+      return DAG.getNode(Op.getOpcode(), dl, EVT::i8, NewVal, Op.getOperand(1));
       break;
   }
 }
@@ -1613,13 +1613,13 @@
 
   // Create the <fname>.args external symbol.
   const char *tmpName = createESName(PAN::getArgsLabel(FuncName));
-  SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
+  SDValue ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
 
   // Load arg values from the label + offset.
-  SDVTList VTs  = DAG.getVTList (MVT::i8, MVT::Other);
-  SDValue BS = DAG.getConstant(1, MVT::i8);
+  SDVTList VTs  = DAG.getVTList (EVT::i8, EVT::Other);
+  SDValue BS = DAG.getConstant(1, EVT::i8);
   for (unsigned i = 0; i < NumArgVals ; ++i) {
-    SDValue Offset = DAG.getConstant(i, MVT::i8);
+    SDValue Offset = DAG.getConstant(i, EVT::i8);
     SDValue PICLoad = DAG.getNode(PIC16ISD::PIC16LdArg, dl, VTs, Chain, ES, BS,
                                   Offset);
     Chain = getChain(PICLoad);
@@ -1751,16 +1751,16 @@
     }
   }
 
-  PIC16CC = DAG.getConstant(CondCode, MVT::i8);
+  PIC16CC = DAG.getConstant(CondCode, EVT::i8);
 
   // These are signed comparisons. 
-  SDValue Mask = DAG.getConstant(128, MVT::i8);
+  SDValue Mask = DAG.getConstant(128, EVT::i8);
   if (isSignedComparison(CondCode)) {
-    LHS = DAG.getNode (ISD::XOR, dl, MVT::i8, LHS, Mask);
-    RHS = DAG.getNode (ISD::XOR, dl, MVT::i8, RHS, Mask); 
+    LHS = DAG.getNode (ISD::XOR, dl, EVT::i8, LHS, Mask);
+    RHS = DAG.getNode (ISD::XOR, dl, EVT::i8, RHS, Mask); 
   }
 
-  SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Flag);
+  SDVTList VTs = DAG.getVTList (EVT::i8, EVT::Flag);
   // We can use a subtract operation to set the condition codes. But
   // we need to put one operand in memory if required.
   // Nothing to do if the first operand is already a valid type (direct load 
@@ -1875,7 +1875,7 @@
   SDValue PIC16CC;
   SDValue Cmp = getPIC16Cmp(LHS, RHS, ORIGCC, PIC16CC, DAG, dl);
 
-  return DAG.getNode(PIC16ISD::BRCOND, dl, MVT::Other, Chain, Dest, PIC16CC, 
+  return DAG.getNode(PIC16ISD::BRCOND, dl, EVT::Other, Chain, Dest, PIC16CC, 
                      Cmp.getValue(1));
 }
 
diff --git a/lib/Target/PIC16/PIC16ISelLowering.h b/lib/Target/PIC16/PIC16ISelLowering.h
index 968374c..5069153 100644
--- a/lib/Target/PIC16/PIC16ISelLowering.h
+++ b/lib/Target/PIC16/PIC16ISelLowering.h
@@ -82,7 +82,7 @@
     /// DAG node.
     virtual const char *getTargetNodeName(unsigned Opcode) const;
     /// getSetCCResultType - Return the ISD::SETCC ValueType
-    virtual MVT::SimpleValueType getSetCCResultType(MVT ValType) const;
+    virtual EVT::SimpleValueType getSetCCResultType(EVT ValType) const;
     SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
     SDValue LowerMUL(SDValue Op, SelectionDAG &DAG);
     SDValue LowerADD(SDValue Op, SelectionDAG &DAG);
@@ -230,7 +230,7 @@
     const char *getPIC16LibcallName(PIC16ISD::PIC16Libcall Call);
 
     // Make PIC16 Libcall.
-    SDValue MakePIC16Libcall(PIC16ISD::PIC16Libcall Call, MVT RetVT, 
+    SDValue MakePIC16Libcall(PIC16ISD::PIC16Libcall Call, EVT RetVT, 
                              const SDValue *Ops, unsigned NumOps, bool isSigned,
                              SelectionDAG &DAG, DebugLoc dl);
 
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index d77ce57..77f1b12 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -64,13 +64,13 @@
     /// getI32Imm - Return a target constant with the specified value, of type
     /// i32.
     inline SDValue getI32Imm(unsigned Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i32);
+      return CurDAG->getTargetConstant(Imm, EVT::i32);
     }
 
     /// getI64Imm - Return a target constant with the specified value, of type
     /// i64.
     inline SDValue getI64Imm(uint64_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i64);
+      return CurDAG->getTargetConstant(Imm, EVT::i64);
     }
     
     /// getSmallIPtrImm - Return a target constant of pointer type.
@@ -286,7 +286,7 @@
     MachineBasicBlock::iterator MBBI = FirstMBB.begin();
     DebugLoc dl = DebugLoc::getUnknownLoc();
 
-    if (PPCLowering.getPointerTy() == MVT::i32) {
+    if (PPCLowering.getPointerTy() == EVT::i32) {
       GlobalBaseReg = RegInfo->createVirtualRegister(PPC::GPRCRegisterClass);
       BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR), PPC::LR);
       BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
@@ -309,7 +309,7 @@
     return false;
 
   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
   else
     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
@@ -323,7 +323,7 @@
 /// isInt32Immediate - This method tests to see if the node is a 32-bit constant
 /// operand. If so Imm will receive the 32-bit value.
 static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
-  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
+  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == EVT::i32) {
     Imm = cast<ConstantSDNode>(N)->getZExtValue();
     return true;
   }
@@ -333,7 +333,7 @@
 /// isInt64Immediate - This method tests to see if the node is a 64-bit constant
 /// operand.  If so Imm will receive the 64-bit value.
 static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
-  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
+  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == EVT::i64) {
     Imm = cast<ConstantSDNode>(N)->getZExtValue();
     return true;
   }
@@ -381,7 +381,7 @@
                                       unsigned &MB, unsigned &ME) {
   // Don't even go down this path for i64, since different logic will be
   // necessary for rldicl/rldicr/rldimi.
-  if (N->getValueType(0) != MVT::i32)
+  if (N->getValueType(0) != EVT::i32)
     return false;
 
   unsigned Shift  = 32;
@@ -485,7 +485,7 @@
       SH &= 31;
       SDValue Ops[] = { Tmp3, Op1, getI32Imm(SH), getI32Imm(MB),
                           getI32Imm(ME) };
-      return CurDAG->getTargetNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5);
+      return CurDAG->getTargetNode(PPC::RLWIMI, dl, EVT::i32, Ops, 5);
     }
   }
   return 0;
@@ -498,17 +498,17 @@
   // Always select the LHS.
   unsigned Opc;
   
-  if (LHS.getValueType() == MVT::i32) {
+  if (LHS.getValueType() == EVT::i32) {
     unsigned Imm;
     if (CC == ISD::SETEQ || CC == ISD::SETNE) {
       if (isInt32Immediate(RHS, Imm)) {
         // SETEQ/SETNE comparison with 16-bit immediate, fold it.
         if (isUInt16(Imm))
-          return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, MVT::i32, LHS,
+          return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, EVT::i32, LHS,
                                                  getI32Imm(Imm & 0xFFFF)), 0);
         // If this is a 16-bit signed immediate, fold it.
         if (isInt16((int)Imm))
-          return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, MVT::i32, LHS,
+          return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, EVT::i32, LHS,
                                                  getI32Imm(Imm & 0xFFFF)), 0);
         
         // For non-equality comparisons, the default code would materialize the
@@ -520,36 +520,36 @@
         //   xoris r0,r3,0x1234
         //   cmplwi cr0,r0,0x5678
         //   beq cr0,L6
-        SDValue Xor(CurDAG->getTargetNode(PPC::XORIS, dl, MVT::i32, LHS,
+        SDValue Xor(CurDAG->getTargetNode(PPC::XORIS, dl, EVT::i32, LHS,
                                             getI32Imm(Imm >> 16)), 0);
-        return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, MVT::i32, Xor,
+        return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, EVT::i32, Xor,
                                                getI32Imm(Imm & 0xFFFF)), 0);
       }
       Opc = PPC::CMPLW;
     } else if (ISD::isUnsignedIntSetCC(CC)) {
       if (isInt32Immediate(RHS, Imm) && isUInt16(Imm))
-        return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, MVT::i32, LHS,
+        return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, EVT::i32, LHS,
                                                getI32Imm(Imm & 0xFFFF)), 0);
       Opc = PPC::CMPLW;
     } else {
       short SImm;
       if (isIntS16Immediate(RHS, SImm))
-        return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, MVT::i32, LHS,
+        return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, EVT::i32, LHS,
                                                getI32Imm((int)SImm & 0xFFFF)),
                          0);
       Opc = PPC::CMPW;
     }
-  } else if (LHS.getValueType() == MVT::i64) {
+  } else if (LHS.getValueType() == EVT::i64) {
     uint64_t Imm;
     if (CC == ISD::SETEQ || CC == ISD::SETNE) {
       if (isInt64Immediate(RHS.getNode(), Imm)) {
         // SETEQ/SETNE comparison with 16-bit immediate, fold it.
         if (isUInt16(Imm))
-          return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, MVT::i64, LHS,
+          return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, EVT::i64, LHS,
                                                  getI32Imm(Imm & 0xFFFF)), 0);
         // If this is a 16-bit signed immediate, fold it.
         if (isInt16(Imm))
-          return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, MVT::i64, LHS,
+          return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, EVT::i64, LHS,
                                                  getI32Imm(Imm & 0xFFFF)), 0);
         
         // For non-equality comparisons, the default code would materialize the
@@ -562,33 +562,33 @@
         //   cmpldi cr0,r0,0x5678
         //   beq cr0,L6
         if (isUInt32(Imm)) {
-          SDValue Xor(CurDAG->getTargetNode(PPC::XORIS8, dl, MVT::i64, LHS,
+          SDValue Xor(CurDAG->getTargetNode(PPC::XORIS8, dl, EVT::i64, LHS,
                                               getI64Imm(Imm >> 16)), 0);
-          return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, MVT::i64, Xor,
+          return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, EVT::i64, Xor,
                                                  getI64Imm(Imm & 0xFFFF)), 0);
         }
       }
       Opc = PPC::CMPLD;
     } else if (ISD::isUnsignedIntSetCC(CC)) {
       if (isInt64Immediate(RHS.getNode(), Imm) && isUInt16(Imm))
-        return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, MVT::i64, LHS,
+        return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, EVT::i64, LHS,
                                                getI64Imm(Imm & 0xFFFF)), 0);
       Opc = PPC::CMPLD;
     } else {
       short SImm;
       if (isIntS16Immediate(RHS, SImm))
-        return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, MVT::i64, LHS,
+        return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, EVT::i64, LHS,
                                                getI64Imm(SImm & 0xFFFF)),
                          0);
       Opc = PPC::CMPD;
     }
-  } else if (LHS.getValueType() == MVT::f32) {
+  } else if (LHS.getValueType() == EVT::f32) {
     Opc = PPC::FCMPUS;
   } else {
-    assert(LHS.getValueType() == MVT::f64 && "Unknown vt!");
+    assert(LHS.getValueType() == EVT::f64 && "Unknown vt!");
     Opc = PPC::FCMPUD;
   }
-  return SDValue(CurDAG->getTargetNode(Opc, dl, MVT::i32, LHS, RHS), 0);
+  return SDValue(CurDAG->getTargetNode(Opc, dl, EVT::i32, LHS, RHS), 0);
 }
 
 static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC) {
@@ -670,27 +670,27 @@
       switch (CC) {
       default: break;
       case ISD::SETEQ: {
-        Op = SDValue(CurDAG->getTargetNode(PPC::CNTLZW, dl, MVT::i32, Op), 0);
+        Op = SDValue(CurDAG->getTargetNode(PPC::CNTLZW, dl, EVT::i32, Op), 0);
         SDValue Ops[] = { Op, getI32Imm(27), getI32Imm(5), getI32Imm(31) };
-        return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+        return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
       }
       case ISD::SETNE: {
         SDValue AD =
-          SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+          SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
                                           Op, getI32Imm(~0U)), 0);
-        return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op, 
+        return CurDAG->SelectNodeTo(N, PPC::SUBFE, EVT::i32, AD, Op, 
                                     AD.getValue(1));
       }
       case ISD::SETLT: {
         SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
-        return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+        return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
       }
       case ISD::SETGT: {
         SDValue T =
-          SDValue(CurDAG->getTargetNode(PPC::NEG, dl, MVT::i32, Op), 0);
-        T = SDValue(CurDAG->getTargetNode(PPC::ANDC, dl, MVT::i32, T, Op), 0);
+          SDValue(CurDAG->getTargetNode(PPC::NEG, dl, EVT::i32, Op), 0);
+        T = SDValue(CurDAG->getTargetNode(PPC::ANDC, dl, EVT::i32, T, Op), 0);
         SDValue Ops[] = { T, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
-        return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+        return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
       }
       }
     } else if (Imm == ~0U) {        // setcc op, -1
@@ -698,33 +698,33 @@
       switch (CC) {
       default: break;
       case ISD::SETEQ:
-        Op = SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+        Op = SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
                                              Op, getI32Imm(1)), 0);
-        return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, 
+        return CurDAG->SelectNodeTo(N, PPC::ADDZE, EVT::i32, 
                               SDValue(CurDAG->getTargetNode(PPC::LI, dl, 
-                                                            MVT::i32,
+                                                            EVT::i32,
                                                             getI32Imm(0)), 0),
                                       Op.getValue(1));
       case ISD::SETNE: {
-        Op = SDValue(CurDAG->getTargetNode(PPC::NOR, dl, MVT::i32, Op, Op), 0);
-        SDNode *AD = CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+        Op = SDValue(CurDAG->getTargetNode(PPC::NOR, dl, EVT::i32, Op, Op), 0);
+        SDNode *AD = CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
                                            Op, getI32Imm(~0U));
-        return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0),
+        return CurDAG->SelectNodeTo(N, PPC::SUBFE, EVT::i32, SDValue(AD, 0),
                                     Op, SDValue(AD, 1));
       }
       case ISD::SETLT: {
-        SDValue AD = SDValue(CurDAG->getTargetNode(PPC::ADDI, dl, MVT::i32, Op,
+        SDValue AD = SDValue(CurDAG->getTargetNode(PPC::ADDI, dl, EVT::i32, Op,
                                                        getI32Imm(1)), 0);
-        SDValue AN = SDValue(CurDAG->getTargetNode(PPC::AND, dl, MVT::i32, AD,
+        SDValue AN = SDValue(CurDAG->getTargetNode(PPC::AND, dl, EVT::i32, AD,
                                                        Op), 0);
         SDValue Ops[] = { AN, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
-        return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+        return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
       }
       case ISD::SETGT: {
         SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
-        Op = SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 
+        Op = SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, EVT::i32, Ops, 4), 
                      0);
-        return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, 
+        return CurDAG->SelectNodeTo(N, PPC::XORI, EVT::i32, Op, 
                                     getI32Imm(1));
       }
       }
@@ -738,29 +738,29 @@
   SDValue IntCR;
   
   // Force the ccreg into CR7.
-  SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
+  SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, EVT::i32);
   
   SDValue InFlag(0, 0);  // Null incoming flag value.
   CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg, 
                                InFlag).getValue(1);
   
   if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1)
-    IntCR = SDValue(CurDAG->getTargetNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
+    IntCR = SDValue(CurDAG->getTargetNode(PPC::MFOCRF, dl, EVT::i32, CR7Reg,
                                             CCReg), 0);
   else
-    IntCR = SDValue(CurDAG->getTargetNode(PPC::MFCR, dl, MVT::i32, CCReg), 0);
+    IntCR = SDValue(CurDAG->getTargetNode(PPC::MFCR, dl, EVT::i32, CCReg), 0);
   
   SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
                       getI32Imm(31), getI32Imm(31) };
   if (OtherCondIdx == -1 && !Inv)
-    return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+    return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
 
   // Get the specified bit.
   SDValue Tmp =
-    SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0);
+    SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, EVT::i32, Ops, 4), 0);
   if (Inv) {
     assert(OtherCondIdx == -1 && "Can't have split plus negation");
-    return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1));
+    return CurDAG->SelectNodeTo(N, PPC::XORI, EVT::i32, Tmp, getI32Imm(1));
   }
 
   // Otherwise, we have to turn an operation like SETONE -> SETOLT | SETOGT.
@@ -769,9 +769,9 @@
   // Get the other bit of the comparison.
   Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31);
   SDValue OtherCond = 
-    SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0);
+    SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, EVT::i32, Ops, 4), 0);
 
-  return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond);
+  return CurDAG->SelectNodeTo(N, PPC::OR, EVT::i32, Tmp, OtherCond);
 }
 
 
@@ -787,7 +787,7 @@
   default: break;
   
   case ISD::Constant: {
-    if (N->getValueType(0) == MVT::i64) {
+    if (N->getValueType(0) == EVT::i64) {
       // Get 64 bit value.
       int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
       // Assume no remaining bits.
@@ -822,17 +822,17 @@
       // Simple value.
       if (isInt16(Imm)) {
        // Just the Lo bits.
-        Result = CurDAG->getTargetNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo));
+        Result = CurDAG->getTargetNode(PPC::LI8, dl, EVT::i64, getI32Imm(Lo));
       } else if (Lo) {
         // Handle the Hi bits.
         unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8;
-        Result = CurDAG->getTargetNode(OpC, dl, MVT::i64, getI32Imm(Hi));
+        Result = CurDAG->getTargetNode(OpC, dl, EVT::i64, getI32Imm(Hi));
         // And Lo bits.
-        Result = CurDAG->getTargetNode(PPC::ORI8, dl, MVT::i64,
+        Result = CurDAG->getTargetNode(PPC::ORI8, dl, EVT::i64,
                                        SDValue(Result, 0), getI32Imm(Lo));
       } else {
        // Just the Hi bits.
-        Result = CurDAG->getTargetNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
+        Result = CurDAG->getTargetNode(PPC::LIS8, dl, EVT::i64, getI32Imm(Hi));
       }
       
       // If no shift, we're done.
@@ -840,18 +840,18 @@
 
       // Shift for next step if the upper 32-bits were not zero.
       if (Imm) {
-        Result = CurDAG->getTargetNode(PPC::RLDICR, dl, MVT::i64,
+        Result = CurDAG->getTargetNode(PPC::RLDICR, dl, EVT::i64,
                                        SDValue(Result, 0),
                                        getI32Imm(Shift), getI32Imm(63 - Shift));
       }
 
       // Add in the last bits as required.
       if ((Hi = (Remainder >> 16) & 0xFFFF)) {
-        Result = CurDAG->getTargetNode(PPC::ORIS8, dl, MVT::i64,
+        Result = CurDAG->getTargetNode(PPC::ORIS8, dl, EVT::i64,
                                        SDValue(Result, 0), getI32Imm(Hi));
       } 
       if ((Lo = Remainder & 0xFFFF)) {
-        Result = CurDAG->getTargetNode(PPC::ORI8, dl, MVT::i64,
+        Result = CurDAG->getTargetNode(PPC::ORI8, dl, EVT::i64,
                                        SDValue(Result, 0), getI32Imm(Lo));
       }
       
@@ -868,7 +868,7 @@
   case ISD::FrameIndex: {
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
     SDValue TFI = CurDAG->getTargetFrameIndex(FI, Op.getValueType());
-    unsigned Opc = Op.getValueType() == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
+    unsigned Opc = Op.getValueType() == EVT::i32 ? PPC::ADDI : PPC::ADDI8;
     if (N->hasOneUse())
       return CurDAG->SelectNodeTo(N, Opc, Op.getValueType(), TFI,
                                   getSmallIPtrImm(0));
@@ -880,10 +880,10 @@
     SDValue InFlag = N->getOperand(1);
     // Use MFOCRF if supported.
     if (PPCSubTarget.isGigaProcessor())
-      return CurDAG->getTargetNode(PPC::MFOCRF, dl, MVT::i32,
+      return CurDAG->getTargetNode(PPC::MFOCRF, dl, EVT::i32,
                                    N->getOperand(0), InFlag);
     else
-      return CurDAG->getTargetNode(PPC::MFCR, dl, MVT::i32, InFlag);
+      return CurDAG->getTargetNode(PPC::MFCR, dl, EVT::i32, InFlag);
   }
     
   case ISD::SDIV: {
@@ -897,19 +897,19 @@
       SDValue N0 = N->getOperand(0);
       if ((signed)Imm > 0 && isPowerOf2_32(Imm)) {
         SDNode *Op =
-          CurDAG->getTargetNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag,
+          CurDAG->getTargetNode(PPC::SRAWI, dl, EVT::i32, EVT::Flag,
                                 N0, getI32Imm(Log2_32(Imm)));
-        return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, 
+        return CurDAG->SelectNodeTo(N, PPC::ADDZE, EVT::i32, 
                                     SDValue(Op, 0), SDValue(Op, 1));
       } else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) {
         SDNode *Op =
-          CurDAG->getTargetNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag,
+          CurDAG->getTargetNode(PPC::SRAWI, dl, EVT::i32, EVT::Flag,
                                 N0, getI32Imm(Log2_32(-Imm)));
         SDValue PT =
-          SDValue(CurDAG->getTargetNode(PPC::ADDZE, dl, MVT::i32,
+          SDValue(CurDAG->getTargetNode(PPC::ADDZE, dl, EVT::i32,
                                           SDValue(Op, 0), SDValue(Op, 1)),
                     0);
-        return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT);
+        return CurDAG->SelectNodeTo(N, PPC::NEG, EVT::i32, PT);
       }
     }
     
@@ -920,7 +920,7 @@
   case ISD::LOAD: {
     // Handle preincrement loads.
     LoadSDNode *LD = cast<LoadSDNode>(Op);
-    MVT LoadedVT = LD->getMemoryVT();
+    EVT LoadedVT = LD->getMemoryVT();
     
     // Normal loads are handled by code generated from the .td file.
     if (LD->getAddressingMode() != ISD::PRE_INC)
@@ -932,28 +932,28 @@
       
       unsigned Opcode;
       bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
-      if (LD->getValueType(0) != MVT::i64) {
+      if (LD->getValueType(0) != EVT::i64) {
         // Handle PPC32 integer and normal FP loads.
-        assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
+        assert((!isSExt || LoadedVT == EVT::i16) && "Invalid sext update load");
         switch (LoadedVT.getSimpleVT()) {
           default: llvm_unreachable("Invalid PPC load type!");
-          case MVT::f64: Opcode = PPC::LFDU; break;
-          case MVT::f32: Opcode = PPC::LFSU; break;
-          case MVT::i32: Opcode = PPC::LWZU; break;
-          case MVT::i16: Opcode = isSExt ? PPC::LHAU : PPC::LHZU; break;
-          case MVT::i1:
-          case MVT::i8:  Opcode = PPC::LBZU; break;
+          case EVT::f64: Opcode = PPC::LFDU; break;
+          case EVT::f32: Opcode = PPC::LFSU; break;
+          case EVT::i32: Opcode = PPC::LWZU; break;
+          case EVT::i16: Opcode = isSExt ? PPC::LHAU : PPC::LHZU; break;
+          case EVT::i1:
+          case EVT::i8:  Opcode = PPC::LBZU; break;
         }
       } else {
-        assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!");
-        assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
+        assert(LD->getValueType(0) == EVT::i64 && "Unknown load result type!");
+        assert((!isSExt || LoadedVT == EVT::i16) && "Invalid sext update load");
         switch (LoadedVT.getSimpleVT()) {
           default: llvm_unreachable("Invalid PPC load type!");
-          case MVT::i64: Opcode = PPC::LDU; break;
-          case MVT::i32: Opcode = PPC::LWZU8; break;
-          case MVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break;
-          case MVT::i1:
-          case MVT::i8:  Opcode = PPC::LBZU8; break;
+          case EVT::i64: Opcode = PPC::LDU; break;
+          case EVT::i32: Opcode = PPC::LWZU8; break;
+          case EVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break;
+          case EVT::i1:
+          case EVT::i8:  Opcode = PPC::LBZU8; break;
         }
       }
       
@@ -963,7 +963,7 @@
       // FIXME: PPC64
       return CurDAG->getTargetNode(Opcode, dl, LD->getValueType(0),
                                    PPCLowering.getPointerTy(),
-                                   MVT::Other, Ops, 3);
+                                   EVT::Other, Ops, 3);
     } else {
       llvm_unreachable("R+R preindex loads not supported yet!");
     }
@@ -978,7 +978,7 @@
         isRotateAndMask(N->getOperand(0).getNode(), Imm, false, SH, MB, ME)) {
       SDValue Val = N->getOperand(0).getOperand(0);
       SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
-      return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+      return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
     }
     // If this is just a masked value where the input is not handled above, and
     // is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
@@ -987,7 +987,7 @@
         N->getOperand(0).getOpcode() != ISD::ROTL) {
       SDValue Val = N->getOperand(0);
       SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
-      return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+      return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
     }
     // AND X, 0 -> 0, not "rlwinm 32".
     if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) {
@@ -1005,7 +1005,7 @@
         SDValue Ops[] = { N->getOperand(0).getOperand(0),
                             N->getOperand(0).getOperand(1),
                             getI32Imm(0), getI32Imm(MB),getI32Imm(ME) };
-        return CurDAG->getTargetNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5);
+        return CurDAG->getTargetNode(PPC::RLWIMI, dl, EVT::i32, Ops, 5);
       }
     }
     
@@ -1013,7 +1013,7 @@
     break;
   }
   case ISD::OR:
-    if (N->getValueType(0) == MVT::i32)
+    if (N->getValueType(0) == EVT::i32)
       if (SDNode *I = SelectBitfieldInsert(N))
         return I;
       
@@ -1025,7 +1025,7 @@
         isRotateAndMask(N, Imm, true, SH, MB, ME)) {
       SDValue Ops[] = { N->getOperand(0).getOperand(0),
                           getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
-      return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+      return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
     }
     
     // Other cases are autogenerated.
@@ -1037,7 +1037,7 @@
         isRotateAndMask(N, Imm, true, SH, MB, ME)) { 
       SDValue Ops[] = { N->getOperand(0).getOperand(0),
                           getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
-      return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
+      return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
     }
     
     // Other cases are autogenerated.
@@ -1053,11 +1053,11 @@
           if (N1C->isNullValue() && N3C->isNullValue() &&
               N2C->getZExtValue() == 1ULL && CC == ISD::SETNE &&
               // FIXME: Implement this optzn for PPC64.
-              N->getValueType(0) == MVT::i32) {
+              N->getValueType(0) == EVT::i32) {
             SDNode *Tmp =
-              CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
+              CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
                                     N->getOperand(0), getI32Imm(~0U));
-            return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32,
+            return CurDAG->SelectNodeTo(N, PPC::SUBFE, EVT::i32,
                                         SDValue(Tmp, 0), N->getOperand(0),
                                         SDValue(Tmp, 1));
           }
@@ -1066,13 +1066,13 @@
     unsigned BROpc = getPredicateForSetCC(CC);
 
     unsigned SelectCCOp;
-    if (N->getValueType(0) == MVT::i32)
+    if (N->getValueType(0) == EVT::i32)
       SelectCCOp = PPC::SELECT_CC_I4;
-    else if (N->getValueType(0) == MVT::i64)
+    else if (N->getValueType(0) == EVT::i64)
       SelectCCOp = PPC::SELECT_CC_I8;
-    else if (N->getValueType(0) == MVT::f32)
+    else if (N->getValueType(0) == EVT::f32)
       SelectCCOp = PPC::SELECT_CC_F4;
-    else if (N->getValueType(0) == MVT::f64)
+    else if (N->getValueType(0) == EVT::f64)
       SelectCCOp = PPC::SELECT_CC_F8;
     else
       SelectCCOp = PPC::SELECT_CC_VRRC;
@@ -1092,23 +1092,23 @@
       getI32Imm(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
     SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3),
       N->getOperand(0), N->getOperand(4) };
-    return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 5);
+    return CurDAG->SelectNodeTo(N, PPC::BCC, EVT::Other, Ops, 5);
   }
   case ISD::BR_CC: {
     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
     SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
     SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode, 
                         N->getOperand(4), N->getOperand(0) };
-    return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 4);
+    return CurDAG->SelectNodeTo(N, PPC::BCC, EVT::Other, Ops, 4);
   }
   case ISD::BRIND: {
     // FIXME: Should custom lower this.
     SDValue Chain = N->getOperand(0);
     SDValue Target = N->getOperand(1);
-    unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8;
-    Chain = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Target,
+    unsigned Opc = Target.getValueType() == EVT::i32 ? PPC::MTCTR : PPC::MTCTR8;
+    Chain = SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Other, Target,
                                             Chain), 0);
-    return CurDAG->SelectNodeTo(N, PPC::BCTR, MVT::Other, Chain);
+    return CurDAG->SelectNodeTo(N, PPC::BCTR, EVT::Other, Chain);
   }
   case ISD::DECLARE: {
     SDValue Chain = N->getOperand(0);
@@ -1149,7 +1149,7 @@
     SDValue Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
     SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
     return CurDAG->SelectNodeTo(N, TargetInstrInfo::DECLARE,
-                                MVT::Other, Tmp1, Tmp2, Chain);
+                                EVT::Other, Tmp1, Tmp2, Chain);
   }
   }
   
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index a63bacd..0debf67 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -38,17 +38,17 @@
 #include "llvm/DerivedTypes.h"
 using namespace llvm;
 
-static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State);
-static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
-                                            MVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
+                                            EVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State);
-static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
-                                              MVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
+                                              EVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State);
@@ -74,216 +74,216 @@
   setUseUnderscoreLongJmp(true);
 
   // Set up the register classes.
-  addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
-  addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
-  addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
+  addRegisterClass(EVT::i32, PPC::GPRCRegisterClass);
+  addRegisterClass(EVT::f32, PPC::F4RCRegisterClass);
+  addRegisterClass(EVT::f64, PPC::F8RCRegisterClass);
 
   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i8, Expand);
 
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 
   // PowerPC has pre-inc load and store's.
-  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
-  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
-  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
-  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
-  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
-  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
-  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
-  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
-  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
-  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
+  setIndexedLoadAction(ISD::PRE_INC, EVT::i1, Legal);
+  setIndexedLoadAction(ISD::PRE_INC, EVT::i8, Legal);
+  setIndexedLoadAction(ISD::PRE_INC, EVT::i16, Legal);
+  setIndexedLoadAction(ISD::PRE_INC, EVT::i32, Legal);
+  setIndexedLoadAction(ISD::PRE_INC, EVT::i64, Legal);
+  setIndexedStoreAction(ISD::PRE_INC, EVT::i1, Legal);
+  setIndexedStoreAction(ISD::PRE_INC, EVT::i8, Legal);
+  setIndexedStoreAction(ISD::PRE_INC, EVT::i16, Legal);
+  setIndexedStoreAction(ISD::PRE_INC, EVT::i32, Legal);
+  setIndexedStoreAction(ISD::PRE_INC, EVT::i64, Legal);
 
   // This is used in the ppcf128->int sequence.  Note it has different semantics
   // from FP_ROUND:  that rounds to nearest, this rounds to zero.
-  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);
+  setOperationAction(ISD::FP_ROUND_INREG, EVT::ppcf128, Custom);
 
   // PowerPC has no SREM/UREM instructions
-  setOperationAction(ISD::SREM, MVT::i32, Expand);
-  setOperationAction(ISD::UREM, MVT::i32, Expand);
-  setOperationAction(ISD::SREM, MVT::i64, Expand);
-  setOperationAction(ISD::UREM, MVT::i64, Expand);
+  setOperationAction(ISD::SREM, EVT::i32, Expand);
+  setOperationAction(ISD::UREM, EVT::i32, Expand);
+  setOperationAction(ISD::SREM, EVT::i64, Expand);
+  setOperationAction(ISD::UREM, EVT::i64, Expand);
 
   // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
-  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i64, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i64, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i32, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i32, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i64, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i64, Expand);
 
   // We don't support sin/cos/sqrt/fmod/pow
-  setOperationAction(ISD::FSIN , MVT::f64, Expand);
-  setOperationAction(ISD::FCOS , MVT::f64, Expand);
-  setOperationAction(ISD::FREM , MVT::f64, Expand);
-  setOperationAction(ISD::FPOW , MVT::f64, Expand);
-  setOperationAction(ISD::FSIN , MVT::f32, Expand);
-  setOperationAction(ISD::FCOS , MVT::f32, Expand);
-  setOperationAction(ISD::FREM , MVT::f32, Expand);
-  setOperationAction(ISD::FPOW , MVT::f32, Expand);
+  setOperationAction(ISD::FSIN , EVT::f64, Expand);
+  setOperationAction(ISD::FCOS , EVT::f64, Expand);
+  setOperationAction(ISD::FREM , EVT::f64, Expand);
+  setOperationAction(ISD::FPOW , EVT::f64, Expand);
+  setOperationAction(ISD::FSIN , EVT::f32, Expand);
+  setOperationAction(ISD::FCOS , EVT::f32, Expand);
+  setOperationAction(ISD::FREM , EVT::f32, Expand);
+  setOperationAction(ISD::FPOW , EVT::f32, Expand);
 
-  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
+  setOperationAction(ISD::FLT_ROUNDS_, EVT::i32, Custom);
 
   // If we're enabling GP optimizations, use hardware square root
   if (!TM.getSubtarget<PPCSubtarget>().hasFSQRT()) {
-    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
-    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
+    setOperationAction(ISD::FSQRT, EVT::f64, Expand);
+    setOperationAction(ISD::FSQRT, EVT::f32, Expand);
   }
 
-  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+  setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
+  setOperationAction(ISD::FCOPYSIGN, EVT::f32, Expand);
 
   // PowerPC does not have BSWAP, CTPOP or CTTZ
-  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
-  setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
-  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
-  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
-  setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
-  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
+  setOperationAction(ISD::BSWAP, EVT::i32  , Expand);
+  setOperationAction(ISD::CTPOP, EVT::i32  , Expand);
+  setOperationAction(ISD::CTTZ , EVT::i32  , Expand);
+  setOperationAction(ISD::BSWAP, EVT::i64  , Expand);
+  setOperationAction(ISD::CTPOP, EVT::i64  , Expand);
+  setOperationAction(ISD::CTTZ , EVT::i64  , Expand);
 
   // PowerPC does not have ROTR
-  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
-  setOperationAction(ISD::ROTR, MVT::i64   , Expand);
+  setOperationAction(ISD::ROTR, EVT::i32   , Expand);
+  setOperationAction(ISD::ROTR, EVT::i64   , Expand);
 
   // PowerPC does not have Select
-  setOperationAction(ISD::SELECT, MVT::i32, Expand);
-  setOperationAction(ISD::SELECT, MVT::i64, Expand);
-  setOperationAction(ISD::SELECT, MVT::f32, Expand);
-  setOperationAction(ISD::SELECT, MVT::f64, Expand);
+  setOperationAction(ISD::SELECT, EVT::i32, Expand);
+  setOperationAction(ISD::SELECT, EVT::i64, Expand);
+  setOperationAction(ISD::SELECT, EVT::f32, Expand);
+  setOperationAction(ISD::SELECT, EVT::f64, Expand);
 
   // PowerPC wants to turn select_cc of FP into fsel when possible.
-  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::f32, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::f64, Custom);
 
   // PowerPC wants to optimize integer setcc a bit
-  setOperationAction(ISD::SETCC, MVT::i32, Custom);
+  setOperationAction(ISD::SETCC, EVT::i32, Custom);
 
   // PowerPC does not have BRCOND which requires SetCC
-  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+  setOperationAction(ISD::BRCOND, EVT::Other, Expand);
 
-  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);
+  setOperationAction(ISD::BR_JT,  EVT::Other, Expand);
 
   // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
-  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i32, Custom);
 
   // PowerPC does not have [U|S]INT_TO_FP
-  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i32, Expand);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i32, Expand);
 
-  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
-  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
-  setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
-  setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
+  setOperationAction(ISD::BIT_CONVERT, EVT::f32, Expand);
+  setOperationAction(ISD::BIT_CONVERT, EVT::i32, Expand);
+  setOperationAction(ISD::BIT_CONVERT, EVT::i64, Expand);
+  setOperationAction(ISD::BIT_CONVERT, EVT::f64, Expand);
 
   // We cannot sextinreg(i1).  Expand to shifts.
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
   // Support label based line numbers.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
 
-  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
-  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
-  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
-  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
+  setOperationAction(ISD::EXCEPTIONADDR, EVT::i64, Expand);
+  setOperationAction(ISD::EHSELECTION,   EVT::i64, Expand);
+  setOperationAction(ISD::EXCEPTIONADDR, EVT::i32, Expand);
+  setOperationAction(ISD::EHSELECTION,   EVT::i32, Expand);
 
 
   // We want to legalize GlobalAddress and ConstantPool nodes into the
   // appropriate instructions to materialize the address.
-  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
-  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
-  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
-  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
-  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
-  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
+  setOperationAction(ISD::ConstantPool,  EVT::i32, Custom);
+  setOperationAction(ISD::JumpTable,     EVT::i32, Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i64, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i64, Custom);
+  setOperationAction(ISD::ConstantPool,  EVT::i64, Custom);
+  setOperationAction(ISD::JumpTable,     EVT::i64, Custom);
 
   // TRAP is legal.
-  setOperationAction(ISD::TRAP, MVT::Other, Legal);
+  setOperationAction(ISD::TRAP, EVT::Other, Legal);
 
   // TRAMPOLINE is custom lowered.
-  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+  setOperationAction(ISD::TRAMPOLINE, EVT::Other, Custom);
 
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
-  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
+  setOperationAction(ISD::VASTART           , EVT::Other, Custom);
 
   // VAARG is custom lowered with the SVR4 ABI
   if (TM.getSubtarget<PPCSubtarget>().isSVR4ABI())
-    setOperationAction(ISD::VAARG, MVT::Other, Custom);
+    setOperationAction(ISD::VAARG, EVT::Other, Custom);
   else
-    setOperationAction(ISD::VAARG, MVT::Other, Expand);
+    setOperationAction(ISD::VAARG, EVT::Other, Expand);
 
   // Use the default implementation.
-  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
-  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
-  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
+  setOperationAction(ISD::VACOPY            , EVT::Other, Expand);
+  setOperationAction(ISD::VAEND             , EVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE         , EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE      , EVT::Other, Custom);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32  , Custom);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64  , Custom);
 
   // We want to custom lower some of our intrinsics.
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, EVT::Other, Custom);
 
   // Comparisons that require checking two conditions.
-  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
+  setCondCodeAction(ISD::SETULT, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETULT, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETUGT, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETUGT, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETUEQ, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETUEQ, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETOGE, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETOGE, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETOLE, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETOLE, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETONE, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETONE, EVT::f64, Expand);
 
   if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
     // They also have instructions for converting between i64 and fp.
-    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
-    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
-    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
-    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+    setOperationAction(ISD::FP_TO_SINT, EVT::i64, Custom);
+    setOperationAction(ISD::FP_TO_UINT, EVT::i64, Expand);
+    setOperationAction(ISD::SINT_TO_FP, EVT::i64, Custom);
+    setOperationAction(ISD::UINT_TO_FP, EVT::i64, Expand);
     // This is just the low 32 bits of a (signed) fp->i64 conversion.
     // We cannot do this with Promote because i64 is not a legal type.
-    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
+    setOperationAction(ISD::FP_TO_UINT, EVT::i32, Custom);
 
     // FIXME: disable this lowered code.  This generates 64-bit register values,
     // and we don't model the fact that the top part is clobbered by calls.  We
     // need to flag these together so that the value isn't live across a call.
-    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
+    //setOperationAction(ISD::SINT_TO_FP, EVT::i32, Custom);
   } else {
     // PowerPC does not have FP_TO_UINT on 32-bit implementations.
-    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+    setOperationAction(ISD::FP_TO_UINT, EVT::i32, Expand);
   }
 
   if (TM.getSubtarget<PPCSubtarget>().use64BitRegs()) {
     // 64-bit PowerPC implementations can support i64 types directly
-    addRegisterClass(MVT::i64, PPC::G8RCRegisterClass);
+    addRegisterClass(EVT::i64, PPC::G8RCRegisterClass);
     // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
-    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
+    setOperationAction(ISD::BUILD_PAIR, EVT::i64, Expand);
     // 64-bit PowerPC wants to expand i128 shifts itself.
-    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
-    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
-    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
+    setOperationAction(ISD::SHL_PARTS, EVT::i64, Custom);
+    setOperationAction(ISD::SRA_PARTS, EVT::i64, Custom);
+    setOperationAction(ISD::SRL_PARTS, EVT::i64, Custom);
   } else {
     // 32-bit PowerPC wants to expand i64 shifts itself.
-    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
-    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
-    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
+    setOperationAction(ISD::SHL_PARTS, EVT::i32, Custom);
+    setOperationAction(ISD::SRA_PARTS, EVT::i32, Custom);
+    setOperationAction(ISD::SRL_PARTS, EVT::i32, Custom);
   }
 
   if (TM.getSubtarget<PPCSubtarget>().hasAltivec()) {
     // First set operation action for all vector types to expand. Then we
     // will selectively turn on ones that can be effectively codegen'd.
-    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
-         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
-      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;
+    for (unsigned i = (unsigned)EVT::FIRST_VECTOR_VALUETYPE;
+         i <= (unsigned)EVT::LAST_VECTOR_VALUETYPE; ++i) {
+      EVT::SimpleValueType VT = (EVT::SimpleValueType)i;
 
       // add/sub are legal for all supported vector VT's.
       setOperationAction(ISD::ADD , VT, Legal);
@@ -291,21 +291,21 @@
 
       // We promote all shuffles to v16i8.
       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
-      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
+      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, EVT::v16i8);
 
       // We promote all non-typed operations to v4i32.
       setOperationAction(ISD::AND   , VT, Promote);
-      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
+      AddPromotedToType (ISD::AND   , VT, EVT::v4i32);
       setOperationAction(ISD::OR    , VT, Promote);
-      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
+      AddPromotedToType (ISD::OR    , VT, EVT::v4i32);
       setOperationAction(ISD::XOR   , VT, Promote);
-      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
+      AddPromotedToType (ISD::XOR   , VT, EVT::v4i32);
       setOperationAction(ISD::LOAD  , VT, Promote);
-      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
+      AddPromotedToType (ISD::LOAD  , VT, EVT::v4i32);
       setOperationAction(ISD::SELECT, VT, Promote);
-      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
+      AddPromotedToType (ISD::SELECT, VT, EVT::v4i32);
       setOperationAction(ISD::STORE, VT, Promote);
-      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
+      AddPromotedToType (ISD::STORE, VT, EVT::v4i32);
 
       // No other operations are legal.
       setOperationAction(ISD::MUL , VT, Expand);
@@ -331,35 +331,35 @@
 
     // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
     // with merges, splats, etc.
-    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE, EVT::v16i8, Custom);
 
-    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
-    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
-    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
-    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
-    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
-    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
+    setOperationAction(ISD::AND   , EVT::v4i32, Legal);
+    setOperationAction(ISD::OR    , EVT::v4i32, Legal);
+    setOperationAction(ISD::XOR   , EVT::v4i32, Legal);
+    setOperationAction(ISD::LOAD  , EVT::v4i32, Legal);
+    setOperationAction(ISD::SELECT, EVT::v4i32, Expand);
+    setOperationAction(ISD::STORE , EVT::v4i32, Legal);
 
-    addRegisterClass(MVT::v4f32, PPC::VRRCRegisterClass);
-    addRegisterClass(MVT::v4i32, PPC::VRRCRegisterClass);
-    addRegisterClass(MVT::v8i16, PPC::VRRCRegisterClass);
-    addRegisterClass(MVT::v16i8, PPC::VRRCRegisterClass);
+    addRegisterClass(EVT::v4f32, PPC::VRRCRegisterClass);
+    addRegisterClass(EVT::v4i32, PPC::VRRCRegisterClass);
+    addRegisterClass(EVT::v8i16, PPC::VRRCRegisterClass);
+    addRegisterClass(EVT::v16i8, PPC::VRRCRegisterClass);
 
-    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
-    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
-    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
-    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
+    setOperationAction(ISD::MUL, EVT::v4f32, Legal);
+    setOperationAction(ISD::MUL, EVT::v4i32, Custom);
+    setOperationAction(ISD::MUL, EVT::v8i16, Custom);
+    setOperationAction(ISD::MUL, EVT::v16i8, Custom);
 
-    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
-    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR, EVT::v4f32, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR, EVT::v4i32, Custom);
 
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
-    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, EVT::v16i8, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, EVT::v8i16, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, EVT::v4i32, Custom);
+    setOperationAction(ISD::BUILD_VECTOR, EVT::v4f32, Custom);
   }
 
-  setShiftAmountType(MVT::i32);
+  setShiftAmountType(EVT::i32);
   setBooleanContents(ZeroOrOneBooleanContent);
 
   if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
@@ -449,8 +449,8 @@
   }
 }
 
-MVT::SimpleValueType PPCTargetLowering::getSetCCResultType(MVT VT) const {
-  return MVT::i32;
+EVT::SimpleValueType PPCTargetLowering::getSetCCResultType(EVT VT) const {
+  return EVT::i32;
 }
 
 /// getFunctionAlignment - Return the Log2 alignment of this function.
@@ -523,7 +523,7 @@
 ///
 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                      unsigned LHSStart, unsigned RHSStart) {
-  assert(N->getValueType(0) == MVT::v16i8 &&
+  assert(N->getValueType(0) == EVT::v16i8 &&
          "PPC only supports shuffles by bytes!");
   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
          "Unsupported merge size!");
@@ -561,7 +561,7 @@
 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
 /// amount, otherwise return -1.
 int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
-  assert(N->getValueType(0) == MVT::v16i8 &&
+  assert(N->getValueType(0) == EVT::v16i8 &&
          "PPC only supports shuffles by bytes!");
 
   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
@@ -597,7 +597,7 @@
 /// specifies a splat of a single element that is suitable for input to
 /// VSPLTB/VSPLTH/VSPLTW.
 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
-  assert(N->getValueType(0) == MVT::v16i8 &&
+  assert(N->getValueType(0) == EVT::v16i8 &&
          (EltSize == 1 || EltSize == 2 || EltSize == 4));
 
   // This is a splat operation if each element of the permute is the same, and
@@ -694,17 +694,17 @@
     // Finally, check the least significant entry.
     if (LeadingZero) {
       if (UniquedVals[Multiple-1].getNode() == 0)
-        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
+        return DAG.getTargetConstant(0, EVT::i32);  // 0,0,0,undef
       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
       if (Val < 16)
-        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
+        return DAG.getTargetConstant(Val, EVT::i32);  // 0,0,0,4 -> vspltisw(4)
     }
     if (LeadingOnes) {
       if (UniquedVals[Multiple-1].getNode() == 0)
-        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
+        return DAG.getTargetConstant(~0U, EVT::i32);  // -1,-1,-1,undef
       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
-        return DAG.getTargetConstant(Val, MVT::i32);
+        return DAG.getTargetConstant(Val, EVT::i32);
     }
 
     return SDValue();
@@ -726,7 +726,7 @@
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
     Value = CN->getZExtValue();
   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
-    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
+    assert(CN->getValueType(0) == EVT::f32 && "Only one legal FP vector type!");
     Value = FloatToBits(CN->getValueAPF().convertToFloat());
   }
 
@@ -756,7 +756,7 @@
 
   // Finally, if this value fits in a 5 bit sext field, return it
   if (((MaskVal << (32-5)) >> (32-5)) == MaskVal)
-    return DAG.getTargetConstant(MaskVal, MVT::i32);
+    return DAG.getTargetConstant(MaskVal, EVT::i32);
   return SDValue();
 }
 
@@ -773,7 +773,7 @@
     return false;
 
   Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
   else
     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
@@ -846,7 +846,7 @@
   if (N.getOpcode() == ISD::ADD) {
     short imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm)) {
-      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
+      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, EVT::i32);
       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
       } else {
@@ -880,7 +880,7 @@
         // If all of the bits are known zero on the LHS or RHS, the add won't
         // carry.
         Base = N.getOperand(0);
-        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
+        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, EVT::i32);
         return true;
       }
     }
@@ -897,15 +897,15 @@
     }
 
     // Handle 32-bit sext immediates with LIS + addr mode.
-    if (CN->getValueType(0) == MVT::i32 ||
+    if (CN->getValueType(0) == EVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
       int Addr = (int)CN->getZExtValue();
 
       // Otherwise, break this down into an LIS + disp.
-      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);
+      Disp = DAG.getTargetConstant((short)Addr, EVT::i32);
 
-      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
-      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
+      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, EVT::i32);
+      unsigned Opc = CN->getValueType(0) == EVT::i32 ? PPC::LIS : PPC::LIS8;
       Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base), 0);
       return true;
     }
@@ -960,7 +960,7 @@
   if (N.getOpcode() == ISD::ADD) {
     short imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
-      Disp =  DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
+      Disp =  DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, EVT::i32);
       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
       } else {
@@ -993,7 +993,7 @@
         // If all of the bits are known zero on the LHS or RHS, the add won't
         // carry.
         Base = N.getOperand(0);
-        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
+        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, EVT::i32);
         return true;
       }
     }
@@ -1010,14 +1010,14 @@
       }
 
       // Fold the low-part of 32-bit absolute addresses into addr mode.
-      if (CN->getValueType(0) == MVT::i32 ||
+      if (CN->getValueType(0) == EVT::i32 ||
           (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
         int Addr = (int)CN->getZExtValue();
 
         // Otherwise, break this down into an LIS + disp.
-        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
-        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
-        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
+        Disp = DAG.getTargetConstant((short)Addr >> 2, EVT::i32);
+        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, EVT::i32);
+        unsigned Opc = CN->getValueType(0) == EVT::i32 ? PPC::LIS : PPC::LIS8;
         Base = SDValue(DAG.getTargetNode(Opc, dl, CN->getValueType(0), Base),0);
         return true;
       }
@@ -1044,7 +1044,7 @@
   if (!EnablePPCPreinc) return false;
 
   SDValue Ptr;
-  MVT VT;
+  EVT VT;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
@@ -1063,7 +1063,7 @@
   // TODO: Check reg+reg first.
 
   // LDU/STU use reg+imm*4, others use reg+imm.
-  if (VT != MVT::i64) {
+  if (VT != EVT::i64) {
     // reg + imm
     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
       return false;
@@ -1076,7 +1076,7 @@
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
     // sext i32 to i64 when addr mode is r+i.
-    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
+    if (LD->getValueType(0) == EVT::i64 && LD->getMemoryVT() == EVT::i32 &&
         LD->getExtensionType() == ISD::SEXTLOAD &&
         isa<ConstantSDNode>(Offset))
       return false;
@@ -1092,7 +1092,7 @@
 
 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                              SelectionDAG &DAG) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
   Constant *C = CP->getConstVal();
   SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
@@ -1126,7 +1126,7 @@
 }
 
 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
   SDValue Zero = DAG.getConstant(0, PtrVT);
@@ -1166,7 +1166,7 @@
 
 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                               SelectionDAG &DAG) {
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
   GlobalValue *GV = GSDN->getGlobal();
   SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
@@ -1214,17 +1214,17 @@
   // fold the new nodes.
   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
     if (C->isNullValue() && CC == ISD::SETEQ) {
-      MVT VT = Op.getOperand(0).getValueType();
+      EVT VT = Op.getOperand(0).getValueType();
       SDValue Zext = Op.getOperand(0);
-      if (VT.bitsLT(MVT::i32)) {
-        VT = MVT::i32;
+      if (VT.bitsLT(EVT::i32)) {
+        VT = EVT::i32;
         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
       }
       unsigned Log2b = Log2_32(VT.getSizeInBits());
       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
-                                DAG.getConstant(Log2b, MVT::i32));
-      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
+                                DAG.getConstant(Log2b, EVT::i32));
+      return DAG.getNode(ISD::TRUNCATE, dl, EVT::i32, Scc);
     }
     // Leave comparisons against 0 and -1 alone for now, since they're usually
     // optimized.  FIXME: revisit this when we can custom lower all setcc
@@ -1238,9 +1238,9 @@
   // condition register, reading it back out, and masking the correct bit.  The
   // normal approach here uses sub to do this instead of xor.  Using xor exposes
   // the result to other bit-twiddling opportunities.
-  MVT LHSVT = Op.getOperand(0).getValueType();
+  EVT LHSVT = Op.getOperand(0).getValueType();
   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                                 Op.getOperand(1));
     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
@@ -1266,8 +1266,8 @@
   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
   DebugLoc dl = Op.getDebugLoc();
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  bool isPPC64 = (PtrVT == MVT::i64);
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  bool isPPC64 = (PtrVT == EVT::i64);
   const Type *IntPtrTy =
     DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType();
 
@@ -1279,7 +1279,7 @@
 
   // TrampSize == (isPPC64 ? 48 : 40);
   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
-                               isPPC64 ? MVT::i64 : MVT::i32);
+                               isPPC64 ? EVT::i64 : EVT::i32);
   Args.push_back(Entry);
 
   Entry.Node = FPtr; Args.push_back(Entry);
@@ -1287,7 +1287,7 @@
 
   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
   std::pair<SDValue, SDValue> CallResult =
-    LowerCallTo(Chain, Op.getValueType().getTypeForMVT(), 
+    LowerCallTo(Chain, Op.getValueType().getTypeForEVT(), 
                 false, false, false, false, 0, CallingConv::C, false,
                 /*isReturnValueUsed=*/true,
                 DAG.getExternalSymbol("__trampoline_setup", PtrVT),
@@ -1310,7 +1310,7 @@
   if (Subtarget.isDarwinABI()) {
     // vastart just stores the address of the VarArgsFrameIndex slot into the
     // memory location argument.
-    MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+    EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
     SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
@@ -1341,11 +1341,11 @@
   // } va_list[1];
 
 
-  SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i32);
-  SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i32);
+  SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, EVT::i32);
+  SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, EVT::i32);
 
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
   SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT);
   SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
@@ -1363,14 +1363,14 @@
 
   // Store first byte: number of int regs
   SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
-                                         Op.getOperand(1), SV, 0, MVT::i8);
+                                         Op.getOperand(1), SV, 0, EVT::i8);
   uint64_t nextOffset = FPROffset;
   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
                                   ConstFPROffset);
 
   // Store second byte: number of float regs
   SDValue secondStore =
-    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset, MVT::i8);
+    DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, SV, nextOffset, EVT::i8);
   nextOffset += StackOffset;
   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
 
@@ -1387,15 +1387,15 @@
 
 #include "PPCGenCallingConv.inc"
 
-static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
   return true;
 }
 
-static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
-                                            MVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, EVT &ValVT,
+                                            EVT &LocVT,
                                             CCValAssign::LocInfo &LocInfo,
                                             ISD::ArgFlagsTy &ArgFlags,
                                             CCState &State) {
@@ -1421,8 +1421,8 @@
   return false;
 }
 
-static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
-                                              MVT &LocVT,
+static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, EVT &ValVT,
+                                              EVT &LocVT,
                                               CCValAssign::LocInfo &LocInfo,
                                               ISD::ArgFlagsTy &ArgFlags,
                                               CCState &State) {
@@ -1469,7 +1469,7 @@
 
 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
 /// the stack.
-static unsigned CalculateStackSlotSize(MVT ArgVT, ISD::ArgFlagsTy Flags,
+static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                        unsigned PtrByteSize) {
   unsigned ArgSize = ArgVT.getSizeInBits()/8;
   if (Flags.isByVal())
@@ -1536,7 +1536,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   // Potential tail calls could cause overwriting of argument stack slots.
   bool isImmutable = !(PerformTailCallOpt && (CallConv==CallingConv::Fast));
   unsigned PtrByteSize = 4;
@@ -1557,24 +1557,24 @@
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
       TargetRegisterClass *RC;
-      MVT ValVT = VA.getValVT();
+      EVT ValVT = VA.getValVT();
       
       switch (ValVT.getSimpleVT()) {
         default:
           llvm_unreachable("ValVT not supported by formal arguments Lowering");
-        case MVT::i32:
+        case EVT::i32:
           RC = PPC::GPRCRegisterClass;
           break;
-        case MVT::f32:
+        case EVT::f32:
           RC = PPC::F4RCRegisterClass;
           break;
-        case MVT::f64:
+        case EVT::f64:
           RC = PPC::F8RCRegisterClass;
           break;
-        case MVT::v16i8:
-        case MVT::v8i16:
-        case MVT::v4i32:
-        case MVT::v4f32:
+        case EVT::v16i8:
+        case EVT::v8i16:
+        case EVT::v4i32:
+        case EVT::v4f32:
           RC = PPC::VRRCRegisterClass;
           break;
       }
@@ -1652,7 +1652,7 @@
 
     // Make room for NumGPArgRegs and NumFPArgRegs.
     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
-                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
+                NumFPArgRegs * EVT(EVT::f64).getSizeInBits()/8;
 
     VarArgsStackOffset = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
                                                 CCInfo.getNextStackOffset());
@@ -1693,11 +1693,11 @@
     // on the stack.
     unsigned FPRIndex = 0;
     for (FPRIndex = 0; FPRIndex != VarArgsNumFPR; ++FPRIndex) {
-      SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], MVT::f64);
+      SDValue Val = DAG.getRegister(FPArgRegs[FPRIndex], EVT::f64);
       SDValue Store = DAG.getStore(Chain, dl, Val, FIN, NULL, 0);
       MemOps.push_back(Store);
       // Increment the address by eight for the next argument to store
-      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
+      SDValue PtrOff = DAG.getConstant(EVT(EVT::f64).getSizeInBits()/8,
                                          PtrVT);
       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
     }
@@ -1705,11 +1705,11 @@
     for (; FPRIndex != NumFPArgRegs; ++FPRIndex) {
       unsigned VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
 
-      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
+      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, EVT::f64);
       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
       MemOps.push_back(Store);
       // Increment the address by eight for the next argument to store
-      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8,
+      SDValue PtrOff = DAG.getConstant(EVT(EVT::f64).getSizeInBits()/8,
                                          PtrVT);
       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
     }
@@ -1717,7 +1717,7 @@
 
   if (!MemOps.empty())
     Chain = DAG.getNode(ISD::TokenFactor, dl,
-                        MVT::Other, &MemOps[0], MemOps.size());
+                        EVT::Other, &MemOps[0], MemOps.size());
 
   return Chain;
 }
@@ -1736,8 +1736,8 @@
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  bool isPPC64 = PtrVT == MVT::i64;
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  bool isPPC64 = PtrVT == EVT::i64;
   // Potential tail calls could cause overwriting of argument stack slots.
   bool isImmutable = !(PerformTailCallOpt && (CallConv==CallingConv::Fast));
   unsigned PtrByteSize = isPPC64 ? 8 : 4;
@@ -1781,7 +1781,7 @@
   if (!isVarArg && !isPPC64) {
     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
          ++ArgNo) {
-      MVT ObjectVT = Ins[ArgNo].VT;
+      EVT ObjectVT = Ins[ArgNo].VT;
       unsigned ObjSize = ObjectVT.getSizeInBits()/8;
       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
 
@@ -1796,18 +1796,18 @@
 
       switch(ObjectVT.getSimpleVT()) {
       default: llvm_unreachable("Unhandled argument type!");
-      case MVT::i32:
-      case MVT::f32:
+      case EVT::i32:
+      case EVT::f32:
         VecArgOffset += isPPC64 ? 8 : 4;
         break;
-      case MVT::i64:  // PPC64
-      case MVT::f64:
+      case EVT::i64:  // PPC64
+      case EVT::f64:
         VecArgOffset += 8;
         break;
-      case MVT::v4f32:
-      case MVT::v4i32:
-      case MVT::v8i16:
-      case MVT::v16i8:
+      case EVT::v4f32:
+      case EVT::v4i32:
+      case EVT::v8i16:
+      case EVT::v16i8:
         // Nothing to do; we're only looking at non-vector args here.
         break;
       }
@@ -1827,7 +1827,7 @@
   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
     SDValue ArgVal;
     bool needsLoad = false;
-    MVT ObjectVT = Ins[ArgNo].VT;
+    EVT ObjectVT = Ins[ArgNo].VT;
     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
@@ -1835,8 +1835,8 @@
     unsigned CurArgOffset = ArgOffset;
 
     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
-    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
-        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
+    if (ObjectVT==EVT::v4f32 || ObjectVT==EVT::v4i32 ||
+        ObjectVT==EVT::v8i16 || ObjectVT==EVT::v16i8) {
       if (isVarArg || isPPC64) {
         MinReservedArea = ((MinReservedArea+15)/16)*16;
         MinReservedArea += CalculateStackSlotSize(ObjectVT,
@@ -1869,7 +1869,7 @@
           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
           SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
-                               NULL, 0, ObjSize==1 ? MVT::i8 : MVT::i16 );
+                               NULL, 0, ObjSize==1 ? EVT::i8 : EVT::i16 );
           MemOps.push_back(Store);
           ++GPR_idx;
         }
@@ -1901,11 +1901,11 @@
 
     switch (ObjectVT.getSimpleVT()) {
     default: llvm_unreachable("Unhandled argument type!");
-    case MVT::i32:
+    case EVT::i32:
       if (!isPPC64) {
         if (GPR_idx != Num_GPR_Regs) {
           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
-          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
           ++GPR_idx;
         } else {
           needsLoad = true;
@@ -1916,22 +1916,22 @@
         break;
       }
       // FALLTHROUGH
-    case MVT::i64:  // PPC64
+    case EVT::i64:  // PPC64
       if (GPR_idx != Num_GPR_Regs) {
         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
-        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
+        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i64);
 
-        if (ObjectVT == MVT::i32) {
+        if (ObjectVT == EVT::i32) {
           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
-          // value to MVT::i64 and then truncate to the correct register size.
+          // value to EVT::i64 and then truncate to the correct register size.
           if (Flags.isSExt())
-            ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
+            ArgVal = DAG.getNode(ISD::AssertSext, dl, EVT::i64, ArgVal,
                                  DAG.getValueType(ObjectVT));
           else if (Flags.isZExt())
-            ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
+            ArgVal = DAG.getNode(ISD::AssertZext, dl, EVT::i64, ArgVal,
                                  DAG.getValueType(ObjectVT));
 
-          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
+          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, EVT::i32, ArgVal);
         }
 
         ++GPR_idx;
@@ -1943,8 +1943,8 @@
       ArgOffset += 8;
       break;
 
-    case MVT::f32:
-    case MVT::f64:
+    case EVT::f32:
+    case EVT::f64:
       // Every 4 bytes of argument space consumes one of the GPRs available for
       // argument passing.
       if (GPR_idx != Num_GPR_Regs) {
@@ -1955,7 +1955,7 @@
       if (FPR_idx != Num_FPR_Regs) {
         unsigned VReg;
 
-        if (ObjectVT == MVT::f32)
+        if (ObjectVT == EVT::f32)
           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
         else
           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
@@ -1969,10 +1969,10 @@
       // All FP arguments reserve stack space in the Darwin ABI.
       ArgOffset += isPPC64 ? 8 : ObjSize;
       break;
-    case MVT::v4f32:
-    case MVT::v4i32:
-    case MVT::v8i16:
-    case MVT::v16i8:
+    case EVT::v4f32:
+    case EVT::v4i32:
+    case EVT::v8i16:
+    case EVT::v16i8:
       // Note that vector arguments in registers don't reserve stack space,
       // except in varargs functions.
       if (VR_idx != Num_VR_Regs) {
@@ -2067,7 +2067,7 @@
 
   if (!MemOps.empty())
     Chain = DAG.getNode(ISD::TokenFactor, dl,
-                        MVT::Other, &MemOps[0], MemOps.size());
+                        EVT::Other, &MemOps[0], MemOps.size());
 
   return Chain;
 }
@@ -2099,10 +2099,10 @@
   for (unsigned i = 0; i != NumOps; ++i) {
     SDValue Arg = Outs[i].Val;
     ISD::ArgFlagsTy Flags = Outs[i].Flags;
-    MVT ArgVT = Arg.getValueType();
+    EVT ArgVT = Arg.getValueType();
     // Varargs Altivec parameters are padded to a 16 byte boundary.
-    if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
-        ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
+    if (ArgVT==EVT::v4f32 || ArgVT==EVT::v4i32 ||
+        ArgVT==EVT::v8i16 || ArgVT==EVT::v16i8) {
       if (!isVarArg && !isPPC64) {
         // Non-varargs Altivec parameters go after all the non-Altivec
         // parameters; handle those later so we know how much padding we need.
@@ -2256,7 +2256,7 @@
                                                                    isDarwinABI);
     int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                           NewRetAddrLoc);
-    MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
+    EVT VT = isPPC64 ? EVT::i64 : EVT::i32;
     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                          PseudoSourceValue::getFixedStack(NewRetAddr), 0);
@@ -2284,7 +2284,7 @@
   int Offset = ArgOffset + SPDiff;
   uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
   int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
-  MVT VT = isPPC64 ? MVT::i64 : MVT::i32;
+  EVT VT = isPPC64 ? EVT::i64 : EVT::i32;
   SDValue FIN = DAG.getFrameIndex(FI, VT);
   TailCallArgumentInfo Info;
   Info.Arg = Arg;
@@ -2305,7 +2305,7 @@
                                                         DebugLoc dl) {
   if (SPDiff) {
     // Load the LR and FP stack slot for later adjusting.
-    MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
+    EVT VT = PPCSubTarget.isPPC64() ? EVT::i64 : EVT::i32;
     LROpOut = getReturnAddrFrameIndex(DAG);
     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, NULL, 0);
     Chain = SDValue(LROpOut.getNode(), 1);
@@ -2331,7 +2331,7 @@
 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                           DebugLoc dl) {
-  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), EVT::i32);
   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                        false, NULL, 0, NULL, 0);
 }
@@ -2345,14 +2345,14 @@
                  bool isVector, SmallVector<SDValue, 8> &MemOpChains,
                  SmallVector<TailCallArgumentInfo, 8>& TailCallArguments,
                  DebugLoc dl) {
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   if (!isTailCall) {
     if (isVector) {
       SDValue StackPtr;
       if (isPPC64)
-        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
+        StackPtr = DAG.getRegister(PPC::X1, EVT::i64);
       else
-        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
+        StackPtr = DAG.getRegister(PPC::R1, EVT::i32);
       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                            DAG.getConstant(ArgOffset, PtrVT));
     }
@@ -2377,7 +2377,7 @@
   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                     MemOpChains2, dl);
   if (!MemOpChains2.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains2[0], MemOpChains2.size());
 
   // Store the return address to the appropriate stack slot.
@@ -2394,11 +2394,11 @@
 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
                      SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall,
                      SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
-                     SmallVector<SDValue, 8> &Ops, std::vector<MVT> &NodeTys,
+                     SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
                      bool isSVR4ABI) {
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  NodeTys.push_back(MVT::Other);   // Returns a chain
-  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  NodeTys.push_back(EVT::Other);   // Returns a chain
+  NodeTys.push_back(EVT::Flag);    // Returns a flag for retval copy to use.
 
   unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin;
 
@@ -2421,8 +2421,8 @@
     InFlag = Chain.getValue(1);
 
     NodeTys.clear();
-    NodeTys.push_back(MVT::Other);
-    NodeTys.push_back(MVT::Flag);
+    NodeTys.push_back(EVT::Other);
+    NodeTys.push_back(EVT::Flag);
     Ops.push_back(Chain);
     CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin;
     Callee.setNode(0);
@@ -2438,7 +2438,7 @@
   }
   // If this is a tail call, add the stack pointer delta.
   if (isTailCall)
-    Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
+    Ops.push_back(DAG.getConstant(SPDiff, EVT::i32));
 
   // Add argument registers to the end of the list so that they are known live
   // into the call.
@@ -2464,7 +2464,7 @@
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
     CCValAssign &VA = RVLocs[i];
-    MVT VT = VA.getValVT();
+    EVT VT = VA.getValVT();
     assert(VA.isRegLoc() && "Can only return in registers!");
     Chain = DAG.getCopyFromReg(Chain, dl,
                                VA.getLocReg(), VT, InFlag).getValue(1);
@@ -2487,7 +2487,7 @@
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<SDValue> &InVals) {
 
-  std::vector<MVT> NodeTys;
+  std::vector<EVT> NodeTys;
   SmallVector<SDValue, 8> Ops;
   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
                                  isTailCall, RegsToPass, Ops, NodeTys,
@@ -2522,7 +2522,7 @@
             isa<ConstantSDNode>(Callee)) &&
     "Expecting an global address, external symbol, absolute value or register");
 
-    return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
+    return DAG.getNode(PPCISD::TC_RETURN, dl, EVT::Other, &Ops[0], Ops.size());
   }
 
   Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
@@ -2575,7 +2575,7 @@
   assert((CallConv == CallingConv::C ||
           CallConv == CallingConv::Fast) && "Unknown calling convention!");
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   unsigned PtrByteSize = 4;
 
   MachineFunction &MF = DAG.getMachineFunction();
@@ -2607,7 +2607,7 @@
     unsigned NumArgs = Outs.size();
     
     for (unsigned i = 0; i != NumArgs; ++i) {
-      MVT ArgVT = Outs[i].Val.getValueType();
+      EVT ArgVT = Outs[i].Val.getValueType();
       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
       bool Result;
       
@@ -2622,7 +2622,7 @@
       if (Result) {
 #ifndef NDEBUG
         cerr << "Call operand #" << i << " has unhandled type "
-             << ArgVT.getMVTString() << "\n";
+             << ArgVT.getEVTString() << "\n";
 #endif
         llvm_unreachable(0);
       }
@@ -2665,7 +2665,7 @@
   // Set up a copy of the stack pointer for use loading and storing any
   // arguments that may not fit in the registers available for argument
   // passing.
-  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
+  SDValue StackPtr = DAG.getRegister(PPC::R1, EVT::i32);
   
   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
@@ -2737,7 +2737,7 @@
   }
   
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
   
   // Build a sequence of copy-to-reg nodes chained together with token chain
@@ -2751,7 +2751,7 @@
   
   // Set CR6 to true if this is a vararg call.
   if (isVarArg) {
-    SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, MVT::i32), 0);
+    SDValue SetCR(DAG.getTargetNode(PPC::CRSET, dl, EVT::i32), 0);
     Chain = DAG.getCopyToReg(Chain, dl, PPC::CR1EQ, SetCR, InFlag);
     InFlag = Chain.getValue(1);
   }
@@ -2777,8 +2777,8 @@
 
   unsigned NumOps  = Outs.size();
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  bool isPPC64 = PtrVT == MVT::i64;
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  bool isPPC64 = PtrVT == EVT::i64;
   unsigned PtrByteSize = isPPC64 ? 8 : 4;
 
   MachineFunction &MF = DAG.getMachineFunction();
@@ -2826,9 +2826,9 @@
   // passing.
   SDValue StackPtr;
   if (isPPC64)
-    StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
+    StackPtr = DAG.getRegister(PPC::X1, EVT::i64);
   else
-    StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
+    StackPtr = DAG.getRegister(PPC::R1, EVT::i32);
 
   // Figure out which arguments are going to go in registers, and which in
   // memory.  Also, if this is a vararg function, floating point operations
@@ -2875,10 +2875,10 @@
     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
 
     // On PPC64, promote integers to 64-bit values.
-    if (isPPC64 && Arg.getValueType() == MVT::i32) {
+    if (isPPC64 && Arg.getValueType() == EVT::i32) {
       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
-      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
+      Arg = DAG.getNode(ExtOp, dl, EVT::i64, Arg);
     }
 
     // FIXME memcpy is used way more than necessary.  Correctness first.
@@ -2887,7 +2887,7 @@
       if (Size==1 || Size==2) {
         // Very small objects are passed right-justified.
         // Everything else is passed left-justified.
-        MVT VT = (Size==1) ? MVT::i8 : MVT::i16;
+        EVT VT = (Size==1) ? EVT::i8 : EVT::i16;
         if (GPR_idx != NumGPRs) {
           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
                                           NULL, 0, VT);
@@ -2941,8 +2941,8 @@
 
     switch (Arg.getValueType().getSimpleVT()) {
     default: llvm_unreachable("Unexpected ValueType for argument!");
-    case MVT::i32:
-    case MVT::i64:
+    case EVT::i32:
+    case EVT::i64:
       if (GPR_idx != NumGPRs) {
         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
       } else {
@@ -2953,8 +2953,8 @@
       }
       ArgOffset += PtrByteSize;
       break;
-    case MVT::f32:
-    case MVT::f64:
+    case EVT::f32:
+    case EVT::f64:
       if (FPR_idx != NumFPRs) {
         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
 
@@ -2968,7 +2968,7 @@
             MemOpChains.push_back(Load.getValue(1));
             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
           }
-          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
+          if (GPR_idx != NumGPRs && Arg.getValueType() == EVT::f64 && !isPPC64){
             SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, NULL, 0);
@@ -2981,7 +2981,7 @@
           // GPRs.
           if (GPR_idx != NumGPRs)
             ++GPR_idx;
-          if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
+          if (GPR_idx != NumGPRs && Arg.getValueType() == EVT::f64 &&
               !isPPC64)  // PPC64 has 64-bit GPRs obviously :)
             ++GPR_idx;
         }
@@ -2994,12 +2994,12 @@
       if (isPPC64)
         ArgOffset += 8;
       else
-        ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
+        ArgOffset += Arg.getValueType() == EVT::f32 ? 4 : 8;
       break;
-    case MVT::v4f32:
-    case MVT::v4i32:
-    case MVT::v8i16:
-    case MVT::v16i8:
+    case EVT::v4f32:
+    case EVT::v4i32:
+    case EVT::v8i16:
+    case EVT::v16i8:
       if (isVarArg) {
         // These go aligned on the stack, or in the corresponding R registers
         // when within range.  The Darwin PPC ABI doc claims they also go in
@@ -3018,7 +3018,7 @@
         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0);
         MemOpChains.push_back(Store);
         if (VR_idx != NumVRs) {
-          SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, NULL, 0);
+          SDValue Load = DAG.getLoad(EVT::v4f32, dl, Store, PtrOff, NULL, 0);
           MemOpChains.push_back(Load.getValue(1));
           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
         }
@@ -3062,9 +3062,9 @@
     ArgOffset += 12*16;
     for (unsigned i = 0; i != NumOps; ++i) {
       SDValue Arg = Outs[i].Val;
-      MVT ArgType = Arg.getValueType();
-      if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
-          ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
+      EVT ArgType = Arg.getValueType();
+      if (ArgType==EVT::v4f32 || ArgType==EVT::v4i32 ||
+          ArgType==EVT::v8i16 || ArgType==EVT::v16i8) {
         if (++j > NumVRs) {
           SDValue PtrOff;
           // We are emitting Altivec params in order.
@@ -3078,7 +3078,7 @@
   }
 
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token chain
@@ -3130,9 +3130,9 @@
   }
 
   if (Flag.getNode())
-    return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+    return DAG.getNode(PPCISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
   else
-    return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain);
+    return DAG.getNode(PPCISD::RET_FLAG, dl, EVT::Other, Chain);
 }
 
 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
@@ -3141,7 +3141,7 @@
   DebugLoc dl = Op.getDebugLoc();
 
   // Get the correct type for pointers.
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
   // Construct the stack pointer operand.
   bool IsPPC64 = Subtarget.isPPC64();
@@ -3169,7 +3169,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   bool IsPPC64 = PPCSubTarget.isPPC64();
   bool isDarwinABI = PPCSubTarget.isDarwinABI();
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
   // Get current frame pointer save index.  The users of this index will be
   // primarily DYNALLOC instructions.
@@ -3193,7 +3193,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   bool IsPPC64 = PPCSubTarget.isPPC64();
   bool isDarwinABI = PPCSubTarget.isDarwinABI();
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
 
   // Get current frame pointer save index.  The users of this index will be
   // primarily DYNALLOC instructions.
@@ -3223,7 +3223,7 @@
   DebugLoc dl = Op.getDebugLoc();
 
   // Get the correct type for pointers.
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   // Negate the size.
   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                   DAG.getConstant(0, PtrVT), Size);
@@ -3231,7 +3231,7 @@
   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
   // Build a DYNALLOC node.
   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
-  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
+  SDVTList VTs = DAG.getVTList(PtrVT, EVT::Other);
   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
 }
 
@@ -3248,8 +3248,8 @@
   // Cannot handle SETEQ/SETNE.
   if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op;
 
-  MVT ResVT = Op.getValueType();
-  MVT CmpVT = Op.getOperand(0).getValueType();
+  EVT ResVT = Op.getValueType();
+  EVT CmpVT = Op.getOperand(0).getValueType();
   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
   DebugLoc dl = Op.getDebugLoc();
@@ -3264,18 +3264,18 @@
       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
     case ISD::SETOGE:
     case ISD::SETGE:
-      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
-        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
+      if (LHS.getValueType() == EVT::f32)   // Comparison is always 64-bits
+        LHS = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, LHS);
       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
     case ISD::SETUGT:
     case ISD::SETGT:
       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
     case ISD::SETOLE:
     case ISD::SETLE:
-      if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
-        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
+      if (LHS.getValueType() == EVT::f32)   // Comparison is always 64-bits
+        LHS = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, LHS);
       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
-                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
+                         DAG.getNode(ISD::FNEG, dl, EVT::f64, LHS), TV, FV);
     }
 
   SDValue Cmp;
@@ -3284,26 +3284,26 @@
   case ISD::SETULT:
   case ISD::SETLT:
     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
-    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
-      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+    if (Cmp.getValueType() == EVT::f32)   // Comparison is always 64-bits
+      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Cmp);
       return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
   case ISD::SETOGE:
   case ISD::SETGE:
     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
-    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
-      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+    if (Cmp.getValueType() == EVT::f32)   // Comparison is always 64-bits
+      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Cmp);
       return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
   case ISD::SETUGT:
   case ISD::SETGT:
     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
-    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
-      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+    if (Cmp.getValueType() == EVT::f32)   // Comparison is always 64-bits
+      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Cmp);
       return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
   case ISD::SETOLE:
   case ISD::SETLE:
     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
-    if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
-      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
+    if (Cmp.getValueType() == EVT::f32)   // Comparison is always 64-bits
+      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Cmp);
       return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
   }
   return Op;
@@ -3314,31 +3314,31 @@
                                            DebugLoc dl) {
   assert(Op.getOperand(0).getValueType().isFloatingPoint());
   SDValue Src = Op.getOperand(0);
-  if (Src.getValueType() == MVT::f32)
-    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
+  if (Src.getValueType() == EVT::f32)
+    Src = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Src);
 
   SDValue Tmp;
   switch (Op.getValueType().getSimpleVT()) {
   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
-  case MVT::i32:
+  case EVT::i32:
     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
                                                          PPCISD::FCTIDZ, 
-                      dl, MVT::f64, Src);
+                      dl, EVT::f64, Src);
     break;
-  case MVT::i64:
-    Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src);
+  case EVT::i64:
+    Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, EVT::f64, Src);
     break;
   }
 
   // Convert the FP value to an int value through memory.
-  SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
+  SDValue FIPtr = DAG.CreateStackTemporary(EVT::f64);
 
   // Emit a store to the stack slot.
   SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, NULL, 0);
 
   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
   // add in a bias.
-  if (Op.getValueType() == MVT::i32)
+  if (Op.getValueType() == EVT::i32)
     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
                         DAG.getConstant(4, FIPtr.getValueType()));
   return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, NULL, 0);
@@ -3347,20 +3347,20 @@
 SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
-  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
+  if (Op.getValueType() != EVT::f32 && Op.getValueType() != EVT::f64)
     return SDValue();
 
-  if (Op.getOperand(0).getValueType() == MVT::i64) {
+  if (Op.getOperand(0).getValueType() == EVT::i64) {
     SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl,
-                               MVT::f64, Op.getOperand(0));
-    SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
-    if (Op.getValueType() == MVT::f32)
+                               EVT::f64, Op.getOperand(0));
+    SDValue FP = DAG.getNode(PPCISD::FCFID, dl, EVT::f64, Bits);
+    if (Op.getValueType() == EVT::f32)
       FP = DAG.getNode(ISD::FP_ROUND, dl,
-                       MVT::f32, FP, DAG.getIntPtrConstant(0));
+                       EVT::f32, FP, DAG.getIntPtrConstant(0));
     return FP;
   }
 
-  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
+  assert(Op.getOperand(0).getValueType() == EVT::i32 &&
          "Unhandled SINT_TO_FP type in custom expander!");
   // Since we only generate this in 64-bit mode, we can take advantage of
   // 64-bit registers.  In particular, sign extend the input value into the
@@ -3368,25 +3368,25 @@
   // then lfd it and fcfid it.
   MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
   int FrameIdx = FrameInfo->CreateStackObject(8, 8);
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
 
-  SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
+  SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, EVT::i32,
                                 Op.getOperand(0));
 
   // STD the extended value into the stack slot.
   MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx),
                        MachineMemOperand::MOStore, 0, 8, 8);
-  SDValue Store = DAG.getNode(PPCISD::STD_32, dl, MVT::Other,
+  SDValue Store = DAG.getNode(PPCISD::STD_32, dl, EVT::Other,
                                 DAG.getEntryNode(), Ext64, FIdx,
                                 DAG.getMemOperand(MO));
   // Load the value as a double.
-  SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, NULL, 0);
+  SDValue Ld = DAG.getLoad(EVT::f64, dl, Store, FIdx, NULL, 0);
 
   // FCFID it and return it.
-  SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
-  if (Op.getValueType() == MVT::f32)
-    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
+  SDValue FP = DAG.getNode(PPCISD::FCFID, dl, EVT::f64, Ld);
+  if (Op.getValueType() == EVT::f32)
+    FP = DAG.getNode(ISD::FP_ROUND, dl, EVT::f32, FP, DAG.getIntPtrConstant(0));
   return FP;
 }
 
@@ -3412,14 +3412,14 @@
   */
 
   MachineFunction &MF = DAG.getMachineFunction();
-  MVT VT = Op.getValueType();
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  std::vector<MVT> NodeTys;
+  EVT VT = Op.getValueType();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  std::vector<EVT> NodeTys;
   SDValue MFFSreg, InFlag;
 
   // Save FP Control Word to register
-  NodeTys.push_back(MVT::f64);    // return register
-  NodeTys.push_back(MVT::Flag);   // unused in this context
+  NodeTys.push_back(EVT::f64);    // return register
+  NodeTys.push_back(EVT::Flag);   // unused in this context
   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
 
   // Save FP register to stack slot
@@ -3431,29 +3431,29 @@
   // Load FP Control Word from low 32 bits of stack slot.
   SDValue Four = DAG.getConstant(4, PtrVT);
   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
-  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, NULL, 0);
+  SDValue CWD = DAG.getLoad(EVT::i32, dl, Store, Addr, NULL, 0);
 
   // Transform as necessary
   SDValue CWD1 =
-    DAG.getNode(ISD::AND, dl, MVT::i32,
-                CWD, DAG.getConstant(3, MVT::i32));
+    DAG.getNode(ISD::AND, dl, EVT::i32,
+                CWD, DAG.getConstant(3, EVT::i32));
   SDValue CWD2 =
-    DAG.getNode(ISD::SRL, dl, MVT::i32,
-                DAG.getNode(ISD::AND, dl, MVT::i32,
-                            DAG.getNode(ISD::XOR, dl, MVT::i32,
-                                        CWD, DAG.getConstant(3, MVT::i32)),
-                            DAG.getConstant(3, MVT::i32)),
-                DAG.getConstant(1, MVT::i32));
+    DAG.getNode(ISD::SRL, dl, EVT::i32,
+                DAG.getNode(ISD::AND, dl, EVT::i32,
+                            DAG.getNode(ISD::XOR, dl, EVT::i32,
+                                        CWD, DAG.getConstant(3, EVT::i32)),
+                            DAG.getConstant(3, EVT::i32)),
+                DAG.getConstant(1, EVT::i32));
 
   SDValue RetVal =
-    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
+    DAG.getNode(ISD::XOR, dl, EVT::i32, CWD1, CWD2);
 
   return DAG.getNode((VT.getSizeInBits() < 16 ?
                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
 }
 
 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   unsigned BitWidth = VT.getSizeInBits();
   DebugLoc dl = Op.getDebugLoc();
   assert(Op.getNumOperands() == 3 &&
@@ -3465,7 +3465,7 @@
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
-  MVT AmtVT = Amt.getValueType();
+  EVT AmtVT = Amt.getValueType();
 
   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                              DAG.getConstant(BitWidth, AmtVT), Amt);
@@ -3482,7 +3482,7 @@
 }
 
 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
   unsigned BitWidth = VT.getSizeInBits();
   assert(Op.getNumOperands() == 3 &&
@@ -3494,7 +3494,7 @@
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
-  MVT AmtVT = Amt.getValueType();
+  EVT AmtVT = Amt.getValueType();
 
   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                              DAG.getConstant(BitWidth, AmtVT), Amt);
@@ -3512,7 +3512,7 @@
 
 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   unsigned BitWidth = VT.getSizeInBits();
   assert(Op.getNumOperands() == 3 &&
          VT == Op.getOperand(1).getValueType() &&
@@ -3522,7 +3522,7 @@
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
   SDValue Amt = Op.getOperand(2);
-  MVT AmtVT = Amt.getValueType();
+  EVT AmtVT = Amt.getValueType();
 
   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                              DAG.getConstant(BitWidth, AmtVT), Amt);
@@ -3545,24 +3545,24 @@
 
 /// BuildSplatI - Build a canonical splati of Val with an element size of
 /// SplatSize.  Cast the result to VT.
-static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT,
+static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
                              SelectionDAG &DAG, DebugLoc dl) {
   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
 
-  static const MVT VTys[] = { // canonical VT to use for each size.
-    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
+  static const EVT VTys[] = { // canonical VT to use for each size.
+    EVT::v16i8, EVT::v8i16, EVT::Other, EVT::v4i32
   };
 
-  MVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
+  EVT ReqVT = VT != EVT::Other ? VT : VTys[SplatSize-1];
 
   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
   if (Val == -1)
     SplatSize = 1;
 
-  MVT CanonicalVT = VTys[SplatSize-1];
+  EVT CanonicalVT = VTys[SplatSize-1];
 
   // Build a canonical splat for this value.
-  SDValue Elt = DAG.getConstant(Val, MVT::i32);
+  SDValue Elt = DAG.getConstant(Val, EVT::i32);
   SmallVector<SDValue, 8> Ops;
   Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
@@ -3574,35 +3574,35 @@
 /// specified intrinsic ID.
 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                 SelectionDAG &DAG, DebugLoc dl,
-                                MVT DestVT = MVT::Other) {
-  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
+                                EVT DestVT = EVT::Other) {
+  if (DestVT == EVT::Other) DestVT = LHS.getValueType();
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
-                     DAG.getConstant(IID, MVT::i32), LHS, RHS);
+                     DAG.getConstant(IID, EVT::i32), LHS, RHS);
 }
 
 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
 /// specified intrinsic ID.
 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                 SDValue Op2, SelectionDAG &DAG,
-                                DebugLoc dl, MVT DestVT = MVT::Other) {
-  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
+                                DebugLoc dl, EVT DestVT = EVT::Other) {
+  if (DestVT == EVT::Other) DestVT = Op0.getValueType();
   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
-                     DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
+                     DAG.getConstant(IID, EVT::i32), Op0, Op1, Op2);
 }
 
 
 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
 /// amount.  The result has the specified value type.
 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
-                             MVT VT, SelectionDAG &DAG, DebugLoc dl) {
+                             EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   // Force LHS/RHS to be the right type.
-  LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS);
-  RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS);
+  LHS = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, LHS);
+  RHS = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, RHS);
 
   int Ops[16];
   for (unsigned i = 0; i != 16; ++i)
     Ops[i] = i + Amt;
-  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
+  SDValue T = DAG.getVectorShuffle(EVT::v16i8, dl, LHS, RHS, Ops);
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
 }
 
@@ -3633,9 +3633,9 @@
   // All zeros?
   if (SplatBits == 0) {
     // Canonicalize all zero vectors to be v4i32.
-    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
-      SDValue Z = DAG.getConstant(0, MVT::i32);
-      Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
+    if (Op.getValueType() != EVT::v4i32 || HasAnyUndefs) {
+      SDValue Z = DAG.getConstant(0, EVT::i32);
+      Z = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32, Z, Z, Z, Z);
       Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
     }
     return Op;
@@ -3653,7 +3653,7 @@
   // If this value is in the range [-32,30] and is even, use:
   //    tmp = VSPLTI[bhw], result = add tmp, tmp
   if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
-    SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
+    SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, EVT::Other, DAG, dl);
     Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
     return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
   }
@@ -3663,14 +3663,14 @@
   // for fneg/fabs.
   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
     // Make -1 and vspltisw -1:
-    SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
+    SDValue OnesV = BuildSplatI(-1, 4, EVT::v4i32, DAG, dl);
 
     // Make the VSLW intrinsic, computing 0x8000_0000.
     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                    OnesV, DAG, dl);
 
     // xor by OnesV to invert it.
-    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
+    Res = DAG.getNode(ISD::XOR, dl, EVT::v4i32, Res, OnesV);
     return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
   }
 
@@ -3691,7 +3691,7 @@
 
     // vsplti + shl self.
     if (SextVal == (i << (int)TypeShiftAmt)) {
-      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
+      SDValue Res = BuildSplatI(i, SplatSize, EVT::Other, DAG, dl);
       static const unsigned IIDs[] = { // Intrinsic to use for each size.
         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
         Intrinsic::ppc_altivec_vslw
@@ -3702,7 +3702,7 @@
 
     // vsplti + srl self.
     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
-      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
+      SDValue Res = BuildSplatI(i, SplatSize, EVT::Other, DAG, dl);
       static const unsigned IIDs[] = { // Intrinsic to use for each size.
         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
         Intrinsic::ppc_altivec_vsrw
@@ -3713,7 +3713,7 @@
 
     // vsplti + sra self.
     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
-      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
+      SDValue Res = BuildSplatI(i, SplatSize, EVT::Other, DAG, dl);
       static const unsigned IIDs[] = { // Intrinsic to use for each size.
         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
         Intrinsic::ppc_altivec_vsraw
@@ -3725,7 +3725,7 @@
     // vsplti + rol self.
     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
-      SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
+      SDValue Res = BuildSplatI(i, SplatSize, EVT::Other, DAG, dl);
       static const unsigned IIDs[] = { // Intrinsic to use for each size.
         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
         Intrinsic::ppc_altivec_vrlw
@@ -3736,17 +3736,17 @@
 
     // t = vsplti c, result = vsldoi t, t, 1
     if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) {
-      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+      SDValue T = BuildSplatI(i, SplatSize, EVT::v16i8, DAG, dl);
       return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
     }
     // t = vsplti c, result = vsldoi t, t, 2
     if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) {
-      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+      SDValue T = BuildSplatI(i, SplatSize, EVT::v16i8, DAG, dl);
       return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
     }
     // t = vsplti c, result = vsldoi t, t, 3
     if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) {
-      SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
+      SDValue T = BuildSplatI(i, SplatSize, EVT::v16i8, DAG, dl);
       return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
     }
   }
@@ -3755,15 +3755,15 @@
 
   // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
   if (SextVal >= 0 && SextVal <= 31) {
-    SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
-    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
+    SDValue LHS = BuildSplatI(SextVal-16, SplatSize, EVT::Other, DAG, dl);
+    SDValue RHS = BuildSplatI(-16, SplatSize, EVT::Other, DAG, dl);
     LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
     return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
   }
   // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
   if (SextVal >= -31 && SextVal <= 0) {
-    SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
-    SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
+    SDValue LHS = BuildSplatI(SextVal+16, SplatSize, EVT::Other, DAG, dl);
+    SDValue RHS = BuildSplatI(-16, SplatSize, EVT::Other, DAG, dl);
     LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
     return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
   }
@@ -3841,10 +3841,10 @@
   case OP_VSLDOI12:
     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
   }
-  MVT VT = OpLHS.getValueType();
-  OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS);
-  OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS);
-  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
+  EVT VT = OpLHS.getValueType();
+  OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, OpLHS);
+  OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, OpRHS);
+  SDValue T = DAG.getVectorShuffle(EVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
 }
 
@@ -3858,7 +3858,7 @@
   SDValue V1 = Op.getOperand(0);
   SDValue V2 = Op.getOperand(1);
   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
 
   // Cases that are handled by instructions that take permute immediates
   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
@@ -3955,7 +3955,7 @@
 
   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
   // that it is in input element units, not in bytes.  Convert now.
-  MVT EltVT = V1.getValueType().getVectorElementType();
+  EVT EltVT = V1.getValueType().getVectorElementType();
   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
 
   SmallVector<SDValue, 16> ResultMask;
@@ -3964,10 +3964,10 @@
 
     for (unsigned j = 0; j != BytesPerElement; ++j)
       ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
-                                           MVT::i32));
+                                           EVT::i32));
   }
 
-  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
+  SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v16i8,
                                     &ResultMask[0], ResultMask.size());
   return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
 }
@@ -4032,7 +4032,7 @@
   if (!isDot) {
     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
                                 Op.getOperand(1), Op.getOperand(2),
-                                DAG.getConstant(CompareOpc, MVT::i32));
+                                DAG.getConstant(CompareOpc, EVT::i32));
     return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
   }
 
@@ -4040,17 +4040,17 @@
   SDValue Ops[] = {
     Op.getOperand(2),  // LHS
     Op.getOperand(3),  // RHS
-    DAG.getConstant(CompareOpc, MVT::i32)
+    DAG.getConstant(CompareOpc, EVT::i32)
   };
-  std::vector<MVT> VTs;
+  std::vector<EVT> VTs;
   VTs.push_back(Op.getOperand(2).getValueType());
-  VTs.push_back(MVT::Flag);
+  VTs.push_back(EVT::Flag);
   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
 
   // Now that we have the comparison, emit a copy from the CR to a GPR.
   // This is flagged to the above dot comparison.
-  SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
-                                DAG.getRegister(PPC::CR6, MVT::i32),
+  SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, EVT::i32,
+                                DAG.getRegister(PPC::CR6, EVT::i32),
                                 CompNode.getValue(1));
 
   // Unpack the result based on how the target uses it.
@@ -4073,16 +4073,16 @@
   }
 
   // Shift the bit into the low position.
-  Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
-                      DAG.getConstant(8-(3-BitNo), MVT::i32));
+  Flags = DAG.getNode(ISD::SRL, dl, EVT::i32, Flags,
+                      DAG.getConstant(8-(3-BitNo), EVT::i32));
   // Isolate the bit.
-  Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
-                      DAG.getConstant(1, MVT::i32));
+  Flags = DAG.getNode(ISD::AND, dl, EVT::i32, Flags,
+                      DAG.getConstant(1, EVT::i32));
 
   // If we are supposed to, toggle the bit.
   if (InvertBit)
-    Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
-                        DAG.getConstant(1, MVT::i32));
+    Flags = DAG.getNode(ISD::XOR, dl, EVT::i32, Flags,
+                        DAG.getConstant(1, EVT::i32));
   return Flags;
 }
 
@@ -4092,7 +4092,7 @@
   // Create a stack slot that is 16-byte aligned.
   MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
   int FrameIdx = FrameInfo->CreateStackObject(16, 16);
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
 
   // Store the input value into Value#0 of the stack slot.
@@ -4104,50 +4104,50 @@
 
 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
-  if (Op.getValueType() == MVT::v4i32) {
+  if (Op.getValueType() == EVT::v4i32) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
 
-    SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
-    SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt.
+    SDValue Zero  = BuildSplatI(  0, 1, EVT::v4i32, DAG, dl);
+    SDValue Neg16 = BuildSplatI(-16, 4, EVT::v4i32, DAG, dl);//+16 as shift amt.
 
     SDValue RHSSwap =   // = vrlw RHS, 16
       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
 
     // Shrinkify inputs to v8i16.
-    LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
-    RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
-    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);
+    LHS = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, LHS);
+    RHS = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, RHS);
+    RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, RHSSwap);
 
     // Low parts multiplied together, generating 32-bit results (we ignore the
     // top parts).
     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
-                                        LHS, RHS, DAG, dl, MVT::v4i32);
+                                        LHS, RHS, DAG, dl, EVT::v4i32);
 
     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
-                                      LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
+                                      LHS, RHSSwap, Zero, DAG, dl, EVT::v4i32);
     // Shift the high parts up 16 bits.
     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
                               Neg16, DAG, dl);
-    return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
-  } else if (Op.getValueType() == MVT::v8i16) {
+    return DAG.getNode(ISD::ADD, dl, EVT::v4i32, LoProd, HiProd);
+  } else if (Op.getValueType() == EVT::v8i16) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
 
-    SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
+    SDValue Zero = BuildSplatI(0, 1, EVT::v8i16, DAG, dl);
 
     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
                             LHS, RHS, Zero, DAG, dl);
-  } else if (Op.getValueType() == MVT::v16i8) {
+  } else if (Op.getValueType() == EVT::v16i8) {
     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
 
     // Multiply the even 8-bit parts, producing 16-bit sums.
     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
-                                           LHS, RHS, DAG, dl, MVT::v8i16);
-    EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);
+                                           LHS, RHS, DAG, dl, EVT::v8i16);
+    EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, EvenParts);
 
     // Multiply the odd 8-bit parts, producing 16-bit sums.
     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
-                                          LHS, RHS, DAG, dl, MVT::v8i16);
-    OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
+                                          LHS, RHS, DAG, dl, EVT::v8i16);
+    OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, OddParts);
 
     // Merge the results together.
     int Ops[16];
@@ -4155,7 +4155,7 @@
       Ops[i*2  ] = 2*i+1;
       Ops[i*2+1] = 2*i+1+16;
     }
-    return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
+    return DAG.getVectorShuffle(EVT::v16i8, dl, EvenParts, OddParts, Ops);
   } else {
     llvm_unreachable("Unknown mul to lower!");
   }
@@ -4219,44 +4219,44 @@
     assert(false && "Do not know how to custom type legalize this operation!");
     return;
   case ISD::FP_ROUND_INREG: {
-    assert(N->getValueType(0) == MVT::ppcf128);
-    assert(N->getOperand(0).getValueType() == MVT::ppcf128);
+    assert(N->getValueType(0) == EVT::ppcf128);
+    assert(N->getOperand(0).getValueType() == EVT::ppcf128);
     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
-                             MVT::f64, N->getOperand(0),
+                             EVT::f64, N->getOperand(0),
                              DAG.getIntPtrConstant(0));
     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
-                             MVT::f64, N->getOperand(0),
+                             EVT::f64, N->getOperand(0),
                              DAG.getIntPtrConstant(1));
 
     // This sequence changes FPSCR to do round-to-zero, adds the two halves
     // of the long double, and puts FPSCR back the way it was.  We do not
     // actually model FPSCR.
-    std::vector<MVT> NodeTys;
+    std::vector<EVT> NodeTys;
     SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
 
-    NodeTys.push_back(MVT::f64);   // Return register
-    NodeTys.push_back(MVT::Flag);    // Returns a flag for later insns
+    NodeTys.push_back(EVT::f64);   // Return register
+    NodeTys.push_back(EVT::Flag);    // Returns a flag for later insns
     Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
     MFFSreg = Result.getValue(0);
     InFlag = Result.getValue(1);
 
     NodeTys.clear();
-    NodeTys.push_back(MVT::Flag);   // Returns a flag
-    Ops[0] = DAG.getConstant(31, MVT::i32);
+    NodeTys.push_back(EVT::Flag);   // Returns a flag
+    Ops[0] = DAG.getConstant(31, EVT::i32);
     Ops[1] = InFlag;
     Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2);
     InFlag = Result.getValue(0);
 
     NodeTys.clear();
-    NodeTys.push_back(MVT::Flag);   // Returns a flag
-    Ops[0] = DAG.getConstant(30, MVT::i32);
+    NodeTys.push_back(EVT::Flag);   // Returns a flag
+    Ops[0] = DAG.getConstant(30, EVT::i32);
     Ops[1] = InFlag;
     Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2);
     InFlag = Result.getValue(0);
 
     NodeTys.clear();
-    NodeTys.push_back(MVT::f64);    // result of add
-    NodeTys.push_back(MVT::Flag);   // Returns a flag
+    NodeTys.push_back(EVT::f64);    // result of add
+    NodeTys.push_back(EVT::Flag);   // Returns a flag
     Ops[0] = Lo;
     Ops[1] = Hi;
     Ops[2] = InFlag;
@@ -4265,8 +4265,8 @@
     InFlag = Result.getValue(1);
 
     NodeTys.clear();
-    NodeTys.push_back(MVT::f64);
-    Ops[0] = DAG.getConstant(1, MVT::i32);
+    NodeTys.push_back(EVT::f64);
+    Ops[0] = DAG.getConstant(1, EVT::i32);
     Ops[1] = MFFSreg;
     Ops[2] = FPreg;
     Ops[3] = InFlag;
@@ -4275,7 +4275,7 @@
 
     // We know the low half is about to be thrown away, so just use something
     // convenient.
-    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
+    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, EVT::ppcf128,
                                 FPreg, FPreg));
     return;
   }
@@ -4853,25 +4853,25 @@
         // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
         // We allow the src/dst to be either f32/f64, but the intermediate
         // type must be i64.
-        if (N->getOperand(0).getValueType() == MVT::i64 &&
-            N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
+        if (N->getOperand(0).getValueType() == EVT::i64 &&
+            N->getOperand(0).getOperand(0).getValueType() != EVT::ppcf128) {
           SDValue Val = N->getOperand(0).getOperand(0);
-          if (Val.getValueType() == MVT::f32) {
-            Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
+          if (Val.getValueType() == EVT::f32) {
+            Val = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Val);
             DCI.AddToWorklist(Val.getNode());
           }
 
-          Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
+          Val = DAG.getNode(PPCISD::FCTIDZ, dl, EVT::f64, Val);
           DCI.AddToWorklist(Val.getNode());
-          Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
+          Val = DAG.getNode(PPCISD::FCFID, dl, EVT::f64, Val);
           DCI.AddToWorklist(Val.getNode());
-          if (N->getValueType(0) == MVT::f32) {
-            Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
+          if (N->getValueType(0) == EVT::f32) {
+            Val = DAG.getNode(ISD::FP_ROUND, dl, EVT::f32, Val,
                               DAG.getIntPtrConstant(0));
             DCI.AddToWorklist(Val.getNode());
           }
           return Val;
-        } else if (N->getOperand(0).getValueType() == MVT::i32) {
+        } else if (N->getOperand(0).getValueType() == EVT::i32) {
           // If the intermediate type is i32, we can avoid the load/store here
           // too.
         }
@@ -4883,17 +4883,17 @@
     if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
         !cast<StoreSDNode>(N)->isTruncatingStore() &&
         N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
-        N->getOperand(1).getValueType() == MVT::i32 &&
-        N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
+        N->getOperand(1).getValueType() == EVT::i32 &&
+        N->getOperand(1).getOperand(0).getValueType() != EVT::ppcf128) {
       SDValue Val = N->getOperand(1).getOperand(0);
-      if (Val.getValueType() == MVT::f32) {
-        Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
+      if (Val.getValueType() == EVT::f32) {
+        Val = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, Val);
         DCI.AddToWorklist(Val.getNode());
       }
-      Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
+      Val = DAG.getNode(PPCISD::FCTIWZ, dl, EVT::f64, Val);
       DCI.AddToWorklist(Val.getNode());
 
-      Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val,
+      Val = DAG.getNode(PPCISD::STFIWX, dl, EVT::Other, N->getOperand(0), Val,
                         N->getOperand(2), N->getOperand(3));
       DCI.AddToWorklist(Val.getNode());
       return Val;
@@ -4902,14 +4902,14 @@
     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
     if (N->getOperand(1).getOpcode() == ISD::BSWAP &&
         N->getOperand(1).getNode()->hasOneUse() &&
-        (N->getOperand(1).getValueType() == MVT::i32 ||
-         N->getOperand(1).getValueType() == MVT::i16)) {
+        (N->getOperand(1).getValueType() == EVT::i32 ||
+         N->getOperand(1).getValueType() == EVT::i16)) {
       SDValue BSwapOp = N->getOperand(1).getOperand(0);
       // Do an any-extend to 32-bits if this is a half-word input.
-      if (BSwapOp.getValueType() == MVT::i16)
-        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
+      if (BSwapOp.getValueType() == EVT::i16)
+        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, EVT::i32, BSwapOp);
 
-      return DAG.getNode(PPCISD::STBRX, dl, MVT::Other, N->getOperand(0),
+      return DAG.getNode(PPCISD::STBRX, dl, EVT::Other, N->getOperand(0),
                          BSwapOp, N->getOperand(2), N->getOperand(3),
                          DAG.getValueType(N->getOperand(1).getValueType()));
     }
@@ -4918,13 +4918,13 @@
     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
         N->getOperand(0).hasOneUse() &&
-        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
+        (N->getValueType(0) == EVT::i32 || N->getValueType(0) == EVT::i16)) {
       SDValue Load = N->getOperand(0);
       LoadSDNode *LD = cast<LoadSDNode>(Load);
       // Create the byte-swapping load.
-      std::vector<MVT> VTs;
-      VTs.push_back(MVT::i32);
-      VTs.push_back(MVT::Other);
+      std::vector<EVT> VTs;
+      VTs.push_back(EVT::i32);
+      VTs.push_back(EVT::Other);
       SDValue MO = DAG.getMemOperand(LD->getMemOperand());
       SDValue Ops[] = {
         LD->getChain(),    // Chain
@@ -4936,8 +4936,8 @@
 
       // If this is an i16 load, insert the truncate.
       SDValue ResVal = BSLoad;
-      if (N->getValueType(0) == MVT::i16)
-        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
+      if (N->getValueType(0) == EVT::i16)
+        ResVal = DAG.getNode(ISD::TRUNCATE, dl, EVT::i16, BSLoad);
 
       // First, combine the bswap away.  This makes the value produced by the
       // load dead.
@@ -5025,21 +5025,21 @@
         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
           return N->getOperand(0);
         // Always !=, turn it into an unconditional branch.
-        return DAG.getNode(ISD::BR, dl, MVT::Other,
+        return DAG.getNode(ISD::BR, dl, EVT::Other,
                            N->getOperand(0), N->getOperand(4));
       }
 
       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
 
       // Create the PPCISD altivec 'dot' comparison node.
-      std::vector<MVT> VTs;
+      std::vector<EVT> VTs;
       SDValue Ops[] = {
         LHS.getOperand(2),  // LHS of compare
         LHS.getOperand(3),  // RHS of compare
-        DAG.getConstant(CompareOpc, MVT::i32)
+        DAG.getConstant(CompareOpc, EVT::i32)
       };
       VTs.push_back(LHS.getOperand(2).getValueType());
-      VTs.push_back(MVT::Flag);
+      VTs.push_back(EVT::Flag);
       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
 
       // Unpack the result based on how the target uses it.
@@ -5060,9 +5060,9 @@
         break;
       }
 
-      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
-                         DAG.getConstant(CompOpc, MVT::i32),
-                         DAG.getRegister(PPC::CR6, MVT::i32),
+      return DAG.getNode(PPCISD::COND_BRANCH, dl, EVT::Other, N->getOperand(0),
+                         DAG.getConstant(CompOpc, EVT::i32),
+                         DAG.getRegister(PPC::CR6, EVT::i32),
                          N->getOperand(4), CompNode.getValue(1));
     }
     break;
@@ -5087,7 +5087,7 @@
   default: break;
   case PPCISD::LBRX: {
     // lhbrx is known to have the top bits cleared out.
-    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == MVT::i16)
+    if (cast<VTSDNode>(Op.getOperand(3))->getVT() == EVT::i16)
       KnownZero = 0xFFFF0000;
     break;
   }
@@ -5135,19 +5135,19 @@
 
 std::pair<unsigned, const TargetRegisterClass*>
 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
-                                                MVT VT) const {
+                                                EVT VT) const {
   if (Constraint.size() == 1) {
     // GCC RS6000 Constraint Letters
     switch (Constraint[0]) {
     case 'b':   // R1-R31
     case 'r':   // R0-R31
-      if (VT == MVT::i64 && PPCSubTarget.isPPC64())
+      if (VT == EVT::i64 && PPCSubTarget.isPPC64())
         return std::make_pair(0U, PPC::G8RCRegisterClass);
       return std::make_pair(0U, PPC::GPRCRegisterClass);
     case 'f':
-      if (VT == MVT::f32)
+      if (VT == EVT::f32)
         return std::make_pair(0U, PPC::F4RCRegisterClass);
-      else if (VT == MVT::f64)
+      else if (VT == EVT::f64)
         return std::make_pair(0U, PPC::F8RCRegisterClass);
       break;
     case 'v':
@@ -5301,8 +5301,8 @@
   if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
     return SDValue();
 
-  MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-  bool isPPC64 = PtrVT == MVT::i64;
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  bool isPPC64 = PtrVT == EVT::i64;
 
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
@@ -5311,10 +5311,10 @@
 
   if (isPPC64)
     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::X31 : PPC::X1,
-      MVT::i64);
+      EVT::i64);
   else
     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, is31 ? PPC::R31 : PPC::R1,
-      MVT::i32);
+      EVT::i32);
 }
 
 bool
@@ -5323,12 +5323,12 @@
   return false;
 }
 
-MVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
+EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
                                            bool isSrcConst, bool isSrcStr,
                                            SelectionDAG &DAG) const {
   if (this->PPCSubTarget.isPPC64()) {
-    return MVT::i64;
+    return EVT::i64;
   } else {
-    return MVT::i32;
+    return EVT::i32;
   }
 }
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index e3f4b69..9d8687b 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -230,7 +230,7 @@
     virtual const char *getTargetNodeName(unsigned Opcode) const;
 
     /// getSetCCResultType - Return the ISD::SETCC ValueType
-    virtual MVT::SimpleValueType getSetCCResultType(MVT VT) const;
+    virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
     /// getPreIndexedAddressParts - returns true by value, base pointer and
     /// offset pointer and addressing mode by reference if the node's address
@@ -295,7 +295,7 @@
     ConstraintType getConstraintType(const std::string &Constraint) const;
     std::pair<unsigned, const TargetRegisterClass*> 
       getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const;
+                                   EVT VT) const;
 
     /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
     /// function arguments in the caller parameter area.  This is the actual
@@ -334,7 +334,7 @@
 
     virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
     
-    virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
+    virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
                                     bool isSrcConst, bool isSrcStr,
                                     SelectionDAG &DAG) const;
 
diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td
index 759cdf0..e0e8466 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/lib/Target/PowerPC/PPCInstrInfo.td
@@ -191,7 +191,7 @@
 def maskimm32 : PatLeaf<(imm), [{
   // maskImm predicate - True if immediate is a run of ones.
   unsigned mb, me;
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
   else
     return false;
@@ -200,7 +200,7 @@
 def immSExt16  : PatLeaf<(imm), [{
   // immSExt16 predicate - True if the immediate fits in a 16-bit sign extended
   // field.  Used by instructions like 'addi'.
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return (int32_t)N->getZExtValue() == (short)N->getZExtValue();
   else
     return (int64_t)N->getZExtValue() == (short)N->getZExtValue();
@@ -227,7 +227,7 @@
   // immediate are set.  Used by instructions like 'addis'.  Identical to 
   // imm16ShiftedZExt in 32-bit mode.
   if (N->getZExtValue() & 0xFFFF) return false;
-  if (N->getValueType(0) == MVT::i32)
+  if (N->getValueType(0) == EVT::i32)
     return true;
   // For 64-bit, make sure it is sext right.
   return N->getZExtValue() == (uint64_t)(int)N->getZExtValue();
diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
index e1b9b59..3aefa30 100644
--- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp
+++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -79,8 +79,8 @@
 bool SparcDAGToDAGISel::SelectADDRri(SDValue Op, SDValue Addr,
                                      SDValue &Base, SDValue &Offset) {
   if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
+    Offset = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
   if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
@@ -93,11 +93,11 @@
         if (FrameIndexSDNode *FIN =
                 dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
           // Constant offset from frame ref.
-          Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
+          Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
         } else {
           Base = Addr.getOperand(0);
         }
-        Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
+        Offset = CurDAG->getTargetConstant(CN->getZExtValue(), EVT::i32);
         return true;
       }
     }
@@ -113,7 +113,7 @@
     }
   }
   Base = Addr;
-  Offset = CurDAG->getTargetConstant(0, MVT::i32);
+  Offset = CurDAG->getTargetConstant(0, EVT::i32);
   return true;
 }
 
@@ -137,7 +137,7 @@
   }
 
   R1 = Addr;
-  R2 = CurDAG->getRegister(SP::G0, MVT::i32);
+  R2 = CurDAG->getRegister(SP::G0, EVT::i32);
   return true;
 }
 
@@ -158,17 +158,17 @@
     // Set the Y register to the high-part.
     SDValue TopPart;
     if (N->getOpcode() == ISD::SDIV) {
-      TopPart = SDValue(CurDAG->getTargetNode(SP::SRAri, dl, MVT::i32, DivLHS,
-                                   CurDAG->getTargetConstant(31, MVT::i32)), 0);
+      TopPart = SDValue(CurDAG->getTargetNode(SP::SRAri, dl, EVT::i32, DivLHS,
+                                   CurDAG->getTargetConstant(31, EVT::i32)), 0);
     } else {
-      TopPart = CurDAG->getRegister(SP::G0, MVT::i32);
+      TopPart = CurDAG->getRegister(SP::G0, EVT::i32);
     }
-    TopPart = SDValue(CurDAG->getTargetNode(SP::WRYrr, dl, MVT::Flag, TopPart,
-                                     CurDAG->getRegister(SP::G0, MVT::i32)), 0);
+    TopPart = SDValue(CurDAG->getTargetNode(SP::WRYrr, dl, EVT::Flag, TopPart,
+                                     CurDAG->getRegister(SP::G0, EVT::i32)), 0);
 
     // FIXME: Handle div by immediate.
     unsigned Opcode = N->getOpcode() == ISD::SDIV ? SP::SDIVrr : SP::UDIVrr;
-    return CurDAG->SelectNodeTo(N, Opcode, MVT::i32, DivLHS, DivRHS,
+    return CurDAG->SelectNodeTo(N, Opcode, EVT::i32, DivLHS, DivRHS,
                                 TopPart);
   }
   case ISD::MULHU:
@@ -177,10 +177,10 @@
     SDValue MulLHS = N->getOperand(0);
     SDValue MulRHS = N->getOperand(1);
     unsigned Opcode = N->getOpcode() == ISD::MULHU ? SP::UMULrr : SP::SMULrr;
-    SDNode *Mul = CurDAG->getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
+    SDNode *Mul = CurDAG->getTargetNode(Opcode, dl, EVT::i32, EVT::Flag,
                                         MulLHS, MulRHS);
     // The high part is in the Y register.
-    return CurDAG->SelectNodeTo(N, SP::RDY, MVT::i32, SDValue(Mul, 1));
+    return CurDAG->SelectNodeTo(N, SP::RDY, EVT::i32, SDValue(Mul, 1));
     return NULL;
   }
   }
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index b560b73..959d41a 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -72,8 +72,8 @@
   }
 
   if (Flag.getNode())
-    return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
-  return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain);
+    return DAG.getNode(SPISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
+  return DAG.getNode(SPISD::RET_FLAG, dl, EVT::Other, Chain);
 }
 
 /// LowerFormalArguments - V8 uses a very simple ABI, where all values are
@@ -107,41 +107,41 @@
     CCValAssign &VA = ArgLocs[i];
     // FIXME: We ignore the register assignments of AnalyzeFormalArguments
     // because it doesn't know how to split a double into two i32 registers.
-    MVT ObjectVT = VA.getValVT();
+    EVT ObjectVT = VA.getValVT();
     switch (ObjectVT.getSimpleVT()) {
     default: llvm_unreachable("Unhandled argument type!");
-    case MVT::i1:
-    case MVT::i8:
-    case MVT::i16:
-    case MVT::i32:
+    case EVT::i1:
+    case EVT::i8:
+    case EVT::i16:
+    case EVT::i32:
       if (!Ins[i].Used) {                  // Argument is dead.
         if (CurArgReg < ArgRegEnd) ++CurArgReg;
         InVals.push_back(DAG.getUNDEF(ObjectVT));
       } else if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
         unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
         MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
-        SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
-        if (ObjectVT != MVT::i32) {
+        SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
+        if (ObjectVT != EVT::i32) {
           unsigned AssertOp = ISD::AssertSext;
-          Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
+          Arg = DAG.getNode(AssertOp, dl, EVT::i32, Arg,
                             DAG.getValueType(ObjectVT));
           Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
         }
         InVals.push_back(Arg);
       } else {
         int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
-        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
+        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
         SDValue Load;
-        if (ObjectVT == MVT::i32) {
-          Load = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
+        if (ObjectVT == EVT::i32) {
+          Load = DAG.getLoad(EVT::i32, dl, Chain, FIPtr, NULL, 0);
         } else {
           ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
 
           // Sparc is big endian, so add an offset based on the ObjectVT.
           unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
-          FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
-                              DAG.getConstant(Offset, MVT::i32));
-          Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
+          FIPtr = DAG.getNode(ISD::ADD, dl, EVT::i32, FIPtr,
+                              DAG.getConstant(Offset, EVT::i32));
+          Load = DAG.getExtLoad(LoadOp, dl, EVT::i32, Chain, FIPtr,
                                 NULL, 0, ObjectVT);
           Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
         }
@@ -150,7 +150,7 @@
 
       ArgOffset += 4;
       break;
-    case MVT::f32:
+    case EVT::f32:
       if (!Ins[i].Used) {                  // Argument is dead.
         if (CurArgReg < ArgRegEnd) ++CurArgReg;
         InVals.push_back(DAG.getUNDEF(ObjectVT));
@@ -158,21 +158,21 @@
         // FP value is passed in an integer register.
         unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
         MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
-        SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+        SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
 
-        Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
+        Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, Arg);
         InVals.push_back(Arg);
       } else {
         int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
-        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
-        SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0);
+        SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
+        SDValue Load = DAG.getLoad(EVT::f32, dl, Chain, FIPtr, NULL, 0);
         InVals.push_back(Load);
       }
       ArgOffset += 4;
       break;
 
-    case MVT::i64:
-    case MVT::f64:
+    case EVT::i64:
+    case EVT::f64:
       if (!Ins[i].Used) {                // Argument is dead.
         if (CurArgReg < ArgRegEnd) ++CurArgReg;
         if (CurArgReg < ArgRegEnd) ++CurArgReg;
@@ -182,31 +182,31 @@
         if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
           unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
           MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
-          HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
+          HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, EVT::i32);
         } else {
           int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
-          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
-          HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
+          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
+          HiVal = DAG.getLoad(EVT::i32, dl, Chain, FIPtr, NULL, 0);
         }
 
         SDValue LoVal;
         if (CurArgReg < ArgRegEnd) {  // Lives in an incoming GPR
           unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
           MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
-          LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
+          LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, EVT::i32);
         } else {
           int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4);
-          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
-          LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
+          SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
+          LoVal = DAG.getLoad(EVT::i32, dl, Chain, FIPtr, NULL, 0);
         }
 
         // Compose the two halves together into an i64 unit.
         SDValue WholeValue =
-          DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
+          DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, LoVal, HiVal);
 
         // If we want a double, do a bit convert.
-        if (ObjectVT == MVT::f64)
-          WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);
+        if (ObjectVT == EVT::f64)
+          WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, WholeValue);
 
         InVals.push_back(WholeValue);
       }
@@ -225,10 +225,10 @@
     for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
       unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
       MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
-      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
+      SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, EVT::i32);
 
       int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
-      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
+      SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
 
       OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, NULL, 0));
       ArgOffset += 4;
@@ -236,7 +236,7 @@
 
     if (!OutChains.empty()) {
       OutChains.push_back(Chain);
-      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+      Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                           &OutChains[0], OutChains.size());
     }
   }
@@ -270,15 +270,15 @@
   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
     switch (Outs[i].Val.getValueType().getSimpleVT()) {
       default: llvm_unreachable("Unknown value type!");
-      case MVT::i1:
-      case MVT::i8:
-      case MVT::i16:
-      case MVT::i32:
-      case MVT::f32:
+      case EVT::i1:
+      case EVT::i8:
+      case EVT::i16:
+      case EVT::i32:
+      case EVT::f32:
         ArgsSize += 4;
         break;
-      case MVT::i64:
-      case MVT::f64:
+      case EVT::i64:
+      case EVT::f64:
         ArgsSize += 8;
         break;
     }
@@ -328,10 +328,10 @@
     assert(VA.isMemLoc());
 
     // Create a store off the stack pointer for this argument.
-    SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+    SDValue StackPtr = DAG.getRegister(SP::O6, EVT::i32);
     // FIXME: VERIFY THAT 68 IS RIGHT.
     SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+68);
-    PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
+    PtrOff = DAG.getNode(ISD::ADD, EVT::i32, StackPtr, PtrOff);
     MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
   }
 
@@ -343,12 +343,12 @@
 
   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
     SDValue Val = Outs[i].Val;
-    MVT ObjectVT = Val.getValueType();
+    EVT ObjectVT = Val.getValueType();
     SDValue ValToStore(0, 0);
     unsigned ObjSize;
     switch (ObjectVT.getSimpleVT()) {
     default: llvm_unreachable("Unhandled argument type!");
-    case MVT::i32:
+    case EVT::i32:
       ObjSize = 4;
 
       if (RegsToPass.size() >= 6) {
@@ -357,17 +357,17 @@
         RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
       }
       break;
-    case MVT::f32:
+    case EVT::f32:
       ObjSize = 4;
       if (RegsToPass.size() >= 6) {
         ValToStore = Val;
       } else {
         // Convert this to a FP value in an int reg.
-        Val = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Val);
+        Val = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Val);
         RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
       }
       break;
-    case MVT::f64: {
+    case EVT::f64: {
       ObjSize = 8;
       if (RegsToPass.size() >= 6) {
         ValToStore = Val;    // Whole thing is passed in memory.
@@ -376,16 +376,16 @@
 
       // Break into top and bottom parts by storing to the stack and loading
       // out the parts as integers.  Top part goes in a reg.
-      SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
+      SDValue StackPtr = DAG.CreateStackTemporary(EVT::f64, EVT::i32);
       SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, 
                                    Val, StackPtr, NULL, 0);
       // Sparc is big-endian, so the high part comes first.
-      SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0, 0);
+      SDValue Hi = DAG.getLoad(EVT::i32, dl, Store, StackPtr, NULL, 0, 0);
       // Increment the pointer to the other half.
       StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                              DAG.getIntPtrConstant(4));
       // Load the low part.
-      SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0, 0);
+      SDValue Lo = DAG.getLoad(EVT::i32, dl, Store, StackPtr, NULL, 0, 0);
 
       RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
 
@@ -398,7 +398,7 @@
       }
       break;
     }
-    case MVT::i64: {
+    case EVT::i64: {
       ObjSize = 8;
       if (RegsToPass.size() >= 6) {
         ValToStore = Val;    // Whole thing is passed in memory.
@@ -406,10 +406,10 @@
       }
 
       // Split the value into top and bottom part.  Top part goes in a reg.
-      SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
-                                 DAG.getConstant(1, MVT::i32));
-      SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
-                                 DAG.getConstant(0, MVT::i32));
+      SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Val,
+                                 DAG.getConstant(1, EVT::i32));
+      SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Val,
+                                 DAG.getConstant(0, EVT::i32));
       RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
 
       if (RegsToPass.size() >= 6) {
@@ -424,9 +424,9 @@
     }
 
     if (ValToStore.getNode()) {
-      SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
-      SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32);
-      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+      SDValue StackPtr = DAG.getRegister(SP::O6, EVT::i32);
+      SDValue PtrOff = DAG.getConstant(ArgOffset, EVT::i32);
+      PtrOff = DAG.getNode(ISD::ADD, dl, EVT::i32, StackPtr, PtrOff);
       MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore, 
                                          PtrOff, NULL, 0));
     }
@@ -436,7 +436,7 @@
 
   // Emit all stores, make sure they occur before any copies into physregs.
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token
@@ -458,13 +458,13 @@
   // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
   // Likewise ExternalSymbol -> TargetExternalSymbol.
   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
-    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
+    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i32);
   else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
-    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
+    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i32);
 
-  std::vector<MVT> NodeTys;
-  NodeTys.push_back(MVT::Other);   // Returns a chain
-  NodeTys.push_back(MVT::Flag);    // Returns a flag for retval copy to use.
+  std::vector<EVT> NodeTys;
+  NodeTys.push_back(EVT::Other);   // Returns a chain
+  NodeTys.push_back(EVT::Flag);    // Returns a flag for retval copy to use.
   SDValue Ops[] = { Chain, Callee, InFlag };
   Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops, InFlag.getNode() ? 3 : 2);
   InFlag = Chain.getValue(1);
@@ -553,120 +553,120 @@
   : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
 
   // Set up the register classes.
-  addRegisterClass(MVT::i32, SP::IntRegsRegisterClass);
-  addRegisterClass(MVT::f32, SP::FPRegsRegisterClass);
-  addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
+  addRegisterClass(EVT::i32, SP::IntRegsRegisterClass);
+  addRegisterClass(EVT::f32, SP::FPRegsRegisterClass);
+  addRegisterClass(EVT::f64, SP::DFPRegsRegisterClass);
 
   // Turn FP extload into load/fextend
-  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
   // Sparc doesn't have i1 sign extending load
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
   // Turn FP truncstore into trunc + store.
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 
   // Custom legalize GlobalAddress nodes into LO/HI parts.
-  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
-  setOperationAction(ISD::ConstantPool , MVT::i32, Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
+  setOperationAction(ISD::ConstantPool , EVT::i32, Custom);
 
   // Sparc doesn't have sext_inreg, replace them with shl/sra
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i16, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i8 , Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1 , Expand);
 
   // Sparc has no REM or DIVREM operations.
-  setOperationAction(ISD::UREM, MVT::i32, Expand);
-  setOperationAction(ISD::SREM, MVT::i32, Expand);
-  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
-  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+  setOperationAction(ISD::UREM, EVT::i32, Expand);
+  setOperationAction(ISD::SREM, EVT::i32, Expand);
+  setOperationAction(ISD::SDIVREM, EVT::i32, Expand);
+  setOperationAction(ISD::UDIVREM, EVT::i32, Expand);
 
   // Custom expand fp<->sint
-  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
+  setOperationAction(ISD::FP_TO_SINT, EVT::i32, Custom);
+  setOperationAction(ISD::SINT_TO_FP, EVT::i32, Custom);
 
   // Expand fp<->uint
-  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
+  setOperationAction(ISD::FP_TO_UINT, EVT::i32, Expand);
+  setOperationAction(ISD::UINT_TO_FP, EVT::i32, Expand);
 
-  setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
-  setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
+  setOperationAction(ISD::BIT_CONVERT, EVT::f32, Expand);
+  setOperationAction(ISD::BIT_CONVERT, EVT::i32, Expand);
 
   // Sparc has no select or setcc: expand to SELECT_CC.
-  setOperationAction(ISD::SELECT, MVT::i32, Expand);
-  setOperationAction(ISD::SELECT, MVT::f32, Expand);
-  setOperationAction(ISD::SELECT, MVT::f64, Expand);
-  setOperationAction(ISD::SETCC, MVT::i32, Expand);
-  setOperationAction(ISD::SETCC, MVT::f32, Expand);
-  setOperationAction(ISD::SETCC, MVT::f64, Expand);
+  setOperationAction(ISD::SELECT, EVT::i32, Expand);
+  setOperationAction(ISD::SELECT, EVT::f32, Expand);
+  setOperationAction(ISD::SELECT, EVT::f64, Expand);
+  setOperationAction(ISD::SETCC, EVT::i32, Expand);
+  setOperationAction(ISD::SETCC, EVT::f32, Expand);
+  setOperationAction(ISD::SETCC, EVT::f64, Expand);
 
   // Sparc doesn't have BRCOND either, it has BR_CC.
-  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
-  setOperationAction(ISD::BRIND, MVT::Other, Expand);
-  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
-  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
-  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
+  setOperationAction(ISD::BRCOND, EVT::Other, Expand);
+  setOperationAction(ISD::BRIND, EVT::Other, Expand);
+  setOperationAction(ISD::BR_JT, EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC, EVT::i32, Custom);
+  setOperationAction(ISD::BR_CC, EVT::f32, Custom);
+  setOperationAction(ISD::BR_CC, EVT::f64, Custom);
 
-  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::i32, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::f32, Custom);
+  setOperationAction(ISD::SELECT_CC, EVT::f64, Custom);
 
   // SPARC has no intrinsics for these particular operations.
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
+  setOperationAction(ISD::MEMBARRIER, EVT::Other, Expand);
 
-  setOperationAction(ISD::FSIN , MVT::f64, Expand);
-  setOperationAction(ISD::FCOS , MVT::f64, Expand);
-  setOperationAction(ISD::FREM , MVT::f64, Expand);
-  setOperationAction(ISD::FSIN , MVT::f32, Expand);
-  setOperationAction(ISD::FCOS , MVT::f32, Expand);
-  setOperationAction(ISD::FREM , MVT::f32, Expand);
-  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
-  setOperationAction(ISD::CTTZ , MVT::i32, Expand);
-  setOperationAction(ISD::CTLZ , MVT::i32, Expand);
-  setOperationAction(ISD::ROTL , MVT::i32, Expand);
-  setOperationAction(ISD::ROTR , MVT::i32, Expand);
-  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
-  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
-  setOperationAction(ISD::FPOW , MVT::f64, Expand);
-  setOperationAction(ISD::FPOW , MVT::f32, Expand);
+  setOperationAction(ISD::FSIN , EVT::f64, Expand);
+  setOperationAction(ISD::FCOS , EVT::f64, Expand);
+  setOperationAction(ISD::FREM , EVT::f64, Expand);
+  setOperationAction(ISD::FSIN , EVT::f32, Expand);
+  setOperationAction(ISD::FCOS , EVT::f32, Expand);
+  setOperationAction(ISD::FREM , EVT::f32, Expand);
+  setOperationAction(ISD::CTPOP, EVT::i32, Expand);
+  setOperationAction(ISD::CTTZ , EVT::i32, Expand);
+  setOperationAction(ISD::CTLZ , EVT::i32, Expand);
+  setOperationAction(ISD::ROTL , EVT::i32, Expand);
+  setOperationAction(ISD::ROTR , EVT::i32, Expand);
+  setOperationAction(ISD::BSWAP, EVT::i32, Expand);
+  setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
+  setOperationAction(ISD::FCOPYSIGN, EVT::f32, Expand);
+  setOperationAction(ISD::FPOW , EVT::f64, Expand);
+  setOperationAction(ISD::FPOW , EVT::f32, Expand);
 
-  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
 
   // FIXME: Sparc provides these multiplies, but we don't have them yet.
-  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
-  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+  setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
+  setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
 
   // We don't have line number support yet.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
-  setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
-  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
+  setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
+  setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
 
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
-  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
+  setOperationAction(ISD::VASTART           , EVT::Other, Custom);
   // VAARG needs to be lowered to not do unaligned accesses for doubles.
-  setOperationAction(ISD::VAARG             , MVT::Other, Custom);
+  setOperationAction(ISD::VAARG             , EVT::Other, Custom);
 
   // Use the default implementation.
-  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
-  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
-  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
+  setOperationAction(ISD::VACOPY            , EVT::Other, Expand);
+  setOperationAction(ISD::VAEND             , EVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE         , EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE      , EVT::Other, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32  , Custom);
 
   // No debug info support yet.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
-  setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
-  setOperationAction(ISD::DECLARE, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
+  setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
+  setOperationAction(ISD::DECLARE, EVT::Other, Expand);
 
   setStackPointerRegisterToSaveRestore(SP::O6);
 
   if (TM.getSubtarget<SparcSubtarget>().isV9())
-    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
+    setOperationAction(ISD::CTPOP, EVT::i32, Legal);
 
   computeRegisterProperties();
 }
@@ -745,10 +745,10 @@
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
   // FIXME there isn't really any debug info here
   DebugLoc dl = Op.getDebugLoc();
-  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
-  SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
-  SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
-  return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
+  SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
+  SDValue Hi = DAG.getNode(SPISD::Hi, dl, EVT::i32, GA);
+  SDValue Lo = DAG.getNode(SPISD::Lo, dl, EVT::i32, GA);
+  return DAG.getNode(ISD::ADD, dl, EVT::i32, Lo, Hi);
 }
 
 static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG) {
@@ -756,24 +756,24 @@
   // FIXME there isn't really any debug info here
   DebugLoc dl = Op.getDebugLoc();
   Constant *C = N->getConstVal();
-  SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
-  SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
-  SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
-  return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
+  SDValue CP = DAG.getTargetConstantPool(C, EVT::i32, N->getAlignment());
+  SDValue Hi = DAG.getNode(SPISD::Hi, dl, EVT::i32, CP);
+  SDValue Lo = DAG.getNode(SPISD::Lo, dl, EVT::i32, CP);
+  return DAG.getNode(ISD::ADD, dl, EVT::i32, Lo, Hi);
 }
 
 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
   // Convert the fp value to integer in an FP register.
-  assert(Op.getValueType() == MVT::i32);
-  Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
-  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
+  assert(Op.getValueType() == EVT::i32);
+  Op = DAG.getNode(SPISD::FTOI, dl, EVT::f32, Op.getOperand(0));
+  return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Op);
 }
 
 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
-  assert(Op.getOperand(0).getValueType() == MVT::i32);
-  SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
+  assert(Op.getOperand(0).getValueType() == EVT::i32);
+  SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, Op.getOperand(0));
   // Convert the int value to FP in an FP register.
   return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
 }
@@ -793,21 +793,21 @@
 
   // Get the condition flag.
   SDValue CompareFlag;
-  if (LHS.getValueType() == MVT::i32) {
-    std::vector<MVT> VTs;
-    VTs.push_back(MVT::i32);
-    VTs.push_back(MVT::Flag);
+  if (LHS.getValueType() == EVT::i32) {
+    std::vector<EVT> VTs;
+    VTs.push_back(EVT::i32);
+    VTs.push_back(EVT::Flag);
     SDValue Ops[2] = { LHS, RHS };
     CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
     if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
     Opc = SPISD::BRICC;
   } else {
-    CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
+    CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, EVT::Flag, LHS, RHS);
     if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
     Opc = SPISD::BRFCC;
   }
-  return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
-                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
+  return DAG.getNode(Opc, dl, EVT::Other, Chain, Dest,
+                     DAG.getConstant(SPCC, EVT::i32), CompareFlag);
 }
 
 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
@@ -824,21 +824,21 @@
   LookThroughSetCC(LHS, RHS, CC, SPCC);
 
   SDValue CompareFlag;
-  if (LHS.getValueType() == MVT::i32) {
-    std::vector<MVT> VTs;
+  if (LHS.getValueType() == EVT::i32) {
+    std::vector<EVT> VTs;
     VTs.push_back(LHS.getValueType());   // subcc returns a value
-    VTs.push_back(MVT::Flag);
+    VTs.push_back(EVT::Flag);
     SDValue Ops[2] = { LHS, RHS };
     CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
     Opc = SPISD::SELECT_ICC;
     if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
   } else {
-    CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
+    CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, EVT::Flag, LHS, RHS);
     Opc = SPISD::SELECT_FCC;
     if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
   }
   return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
-                     DAG.getConstant(SPCC, MVT::i32), CompareFlag);
+                     DAG.getConstant(SPCC, EVT::i32), CompareFlag);
 }
 
 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
@@ -846,40 +846,40 @@
   // vastart just stores the address of the VarArgsFrameIndex slot into the
   // memory location argument.
   DebugLoc dl = Op.getDebugLoc();
-  SDValue Offset = DAG.getNode(ISD::ADD, dl, MVT::i32,
-                                 DAG.getRegister(SP::I6, MVT::i32),
+  SDValue Offset = DAG.getNode(ISD::ADD, dl, EVT::i32,
+                                 DAG.getRegister(SP::I6, EVT::i32),
                                  DAG.getConstant(TLI.getVarArgsFrameOffset(),
-                                                 MVT::i32));
+                                                 EVT::i32));
   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
   return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1), SV, 0);
 }
 
 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
   SDNode *Node = Op.getNode();
-  MVT VT = Node->getValueType(0);
+  EVT VT = Node->getValueType(0);
   SDValue InChain = Node->getOperand(0);
   SDValue VAListPtr = Node->getOperand(1);
   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
   DebugLoc dl = Node->getDebugLoc();
-  SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr, SV, 0);
+  SDValue VAList = DAG.getLoad(EVT::i32, dl, InChain, VAListPtr, SV, 0);
   // Increment the pointer, VAList, to the next vaarg
-  SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList,
+  SDValue NextPtr = DAG.getNode(ISD::ADD, dl, EVT::i32, VAList,
                                   DAG.getConstant(VT.getSizeInBits()/8,
-                                                  MVT::i32));
+                                                  EVT::i32));
   // Store the incremented VAList to the legalized pointer
   InChain = DAG.getStore(VAList.getValue(1), dl, NextPtr,
                          VAListPtr, SV, 0);
   // Load the actual argument out of the pointer VAList, unless this is an
   // f64 load.
-  if (VT != MVT::f64)
+  if (VT != EVT::f64)
     return DAG.getLoad(VT, dl, InChain, VAList, NULL, 0);
 
   // Otherwise, load it as i64, then do a bitconvert.
-  SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, NULL, 0);
+  SDValue V = DAG.getLoad(EVT::i64, dl, InChain, VAList, NULL, 0);
 
   // Bit-Convert the value to f64.
   SDValue Ops[2] = {
-    DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, V),
+    DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, V),
     V.getValue(1)
   };
   return DAG.getMergeValues(Ops, 2, dl);
@@ -891,14 +891,14 @@
   DebugLoc dl = Op.getDebugLoc();
 
   unsigned SPReg = SP::O6;
-  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
-  SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
+  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, EVT::i32);
+  SDValue NewSP = DAG.getNode(ISD::SUB, dl, EVT::i32, SP, Size); // Value
   Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP);    // Output chain
 
   // The resultant pointer is actually 16 words from the bottom of the stack,
   // to provide a register spill area.
-  SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
-                                 DAG.getConstant(96, MVT::i32));
+  SDValue NewVal = DAG.getNode(ISD::ADD, dl, EVT::i32, NewSP,
+                                 DAG.getConstant(96, EVT::i32));
   SDValue Ops[2] = { NewVal, Chain };
   return DAG.getMergeValues(Ops, 2, dl);
 }
@@ -1016,7 +1016,7 @@
 
 std::pair<unsigned, const TargetRegisterClass*>
 SparcTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
-                                                  MVT VT) const {
+                                                  EVT VT) const {
   if (Constraint.size() == 1) {
     switch (Constraint[0]) {
     case 'r':
@@ -1029,7 +1029,7 @@
 
 std::vector<unsigned> SparcTargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT VT) const {
+                                  EVT VT) const {
   if (Constraint.size() != 1)
     return std::vector<unsigned>();
 
diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h
index dab4cc6..912f2ba 100644
--- a/lib/Target/Sparc/SparcISelLowering.h
+++ b/lib/Target/Sparc/SparcISelLowering.h
@@ -64,10 +64,10 @@
 
     ConstraintType getConstraintType(const std::string &Constraint) const;
     std::pair<unsigned, const TargetRegisterClass*>
-    getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
+    getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const;
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                      MVT VT) const;
+                                      EVT VT) const;
 
     virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
 
diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td
index 8ecc588..3f61cb3 100644
--- a/lib/Target/Sparc/SparcInstrInfo.td
+++ b/lib/Target/Sparc/SparcInstrInfo.td
@@ -57,12 +57,12 @@
 
 def LO10 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant((unsigned)N->getZExtValue() & 1023,
-                                   MVT::i32);
+                                   EVT::i32);
 }]>;
 
 def HI22 : SDNodeXForm<imm, [{
   // Transformation function: shift the immediate value down into the low bits.
-  return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, MVT::i32);
+  return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, EVT::i32);
 }]>;
 
 def SETHIimm : PatLeaf<(imm), [{
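
The LO10 and HI22 transforms above split a 32-bit constant into the 10-bit low part and the 22-bit high part used by SPARC's sethi/or pair. A minimal stand-alone check of that split and its reassembly; plain C++ with made-up helper names, not LLVM API:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Low 10 bits of the immediate, as LO10 computes it.
static uint32_t lo10(uint32_t Imm) { return Imm & 1023; }
// High 22 bits shifted down, as HI22 computes it.
static uint32_t hi22(uint32_t Imm) { return Imm >> 10; }

int main() {
  uint32_t Imm = 0xDEADBEEF;
  // sethi places hi22 in bits 31..10, or fills in the low 10 bits.
  uint32_t Rebuilt = (hi22(Imm) << 10) | lo10(Imm);
  assert(Rebuilt == Imm);
  std::printf("hi22=0x%x lo10=0x%x rebuilt=0x%x\n", hi22(Imm), lo10(Imm), Rebuilt);
  return 0;
}
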
diff --git a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index 482d934..a8357a1 100644
--- a/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -107,19 +107,19 @@
     /// getI8Imm - Return a target constant with the specified value, of type
     /// i8.
     inline SDValue getI8Imm(uint64_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i8);
+      return CurDAG->getTargetConstant(Imm, EVT::i8);
     }
 
     /// getI16Imm - Return a target constant with the specified value, of type
     /// i16.
     inline SDValue getI16Imm(uint64_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i16);
+      return CurDAG->getTargetConstant(Imm, EVT::i16);
     }
 
     /// getI32Imm - Return a target constant with the specified value, of type
     /// i32.
     inline SDValue getI32Imm(uint64_t Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i32);
+      return CurDAG->getTargetConstant(Imm, EVT::i32);
     }
 
     // Include the pieces autogenerated from the target description.
@@ -353,7 +353,7 @@
     Base = AM.Base.Reg;
   else
     Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy());
-  Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64);
+  Disp = CurDAG->getTargetConstant(AM.Disp, EVT::i64);
 }
 
 void SystemZDAGToDAGISel::getAddressOperands(const SystemZRRIAddressMode &AM,
@@ -405,7 +405,7 @@
 
   DOUT << "MatchAddress (final): "; DEBUG(AM12.dump());
 
-  MVT VT = Addr.getValueType();
+  EVT VT = Addr.getValueType();
   if (AM12.BaseType == SystemZRRIAddressMode::RegBase) {
     if (!AM12.Base.Reg.getNode())
       AM12.Base.Reg = CurDAG->getRegister(0, VT);
@@ -448,7 +448,7 @@
 
   DOUT << "MatchAddress (final): "; DEBUG(AM.dump());
 
-  MVT VT = Addr.getValueType();
+  EVT VT = Addr.getValueType();
   if (AM.BaseType == SystemZRRIAddressMode::RegBase) {
     if (!AM.Base.Reg.getNode())
       AM.Base.Reg = CurDAG->getRegister(0, VT);
@@ -496,7 +496,7 @@
 
   DOUT << "MatchAddress (final): "; DEBUG(AM12.dump());
 
-  MVT VT = Addr.getValueType();
+  EVT VT = Addr.getValueType();
   if (AM12.BaseType == SystemZRRIAddressMode::RegBase) {
     if (!AM12.Base.Reg.getNode())
       AM12.Base.Reg = CurDAG->getRegister(0, VT);
@@ -540,7 +540,7 @@
 
   DOUT << "MatchAddress (final): "; DEBUG(AM.dump());
 
-  MVT VT = Addr.getValueType();
+  EVT VT = Addr.getValueType();
   if (AM.BaseType == SystemZRRIAddressMode::RegBase) {
     if (!AM.Base.Reg.getNode())
       AM.Base.Reg = CurDAG->getRegister(0, VT);
@@ -563,7 +563,7 @@
   if (MatchAddress(Addr, AM, false))
     return false;
 
-  MVT VT = Addr.getValueType();
+  EVT VT = Addr.getValueType();
   unsigned Complexity = 0;
   if (AM.BaseType == SystemZRRIAddressMode::RegBase)
     if (AM.Base.Reg.getNode())
@@ -618,7 +618,7 @@
 
 SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
   SDNode *Node = Op.getNode();
-  MVT NVT = Node->getValueType(0);
+  EVT NVT = Node->getValueType(0);
   DebugLoc dl = Op.getDebugLoc();
   unsigned Opcode = Node->getOpcode();
 
@@ -648,18 +648,18 @@
     SDValue N0 = Node->getOperand(0);
     SDValue N1 = Node->getOperand(1);
 
-    MVT ResVT;
+    EVT ResVT;
     bool is32Bit = false;
     switch (NVT.getSimpleVT()) {
       default: assert(0 && "Unsupported VT!");
-      case MVT::i32:
+      case EVT::i32:
         Opc = SystemZ::SDIVREM32r; MOpc = SystemZ::SDIVREM32m;
-        ResVT = MVT::v2i64;
+        ResVT = EVT::v2i64;
         is32Bit = true;
         break;
-      case MVT::i64:
+      case EVT::i64:
         Opc = SystemZ::SDIVREM64r; MOpc = SystemZ::SDIVREM64m;
-        ResVT = MVT::v2i64;
+        ResVT = EVT::v2i64;
         break;
     }
 
@@ -669,7 +669,7 @@
     // Prepare the dividend
     SDNode *Dividend;
     if (is32Bit)
-      Dividend = CurDAG->getTargetNode(SystemZ::MOVSX64rr32, dl, MVT::i64, N0);
+      Dividend = CurDAG->getTargetNode(SystemZ::MOVSX64rr32, dl, EVT::i64, N0);
     else
       Dividend = N0.getNode();
 
@@ -679,7 +679,7 @@
     Dividend =
       CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, ResVT,
                             SDValue(Tmp, 0), SDValue(Dividend, 0),
-                            CurDAG->getTargetConstant(subreg_odd, MVT::i32));
+                            CurDAG->getTargetConstant(subreg_odd, EVT::i32));
 
     SDNode *Result;
     SDValue DivVal = SDValue(Dividend, 0);
@@ -699,7 +699,7 @@
                                           dl, NVT,
                                           SDValue(Result, 0),
                                           CurDAG->getTargetConstant(SubRegIdx,
-                                                                    MVT::i32));
+                                                                    EVT::i32));
 
       ReplaceUses(Op.getValue(0), SDValue(Div, 0));
       #ifndef NDEBUG
@@ -716,7 +716,7 @@
                                           dl, NVT,
                                           SDValue(Result, 0),
                                           CurDAG->getTargetConstant(SubRegIdx,
-                                                                    MVT::i32));
+                                                                    EVT::i32));
 
       ReplaceUses(Op.getValue(1), SDValue(Rem, 0));
       #ifndef NDEBUG
@@ -736,21 +736,21 @@
     unsigned Opc, MOpc, ClrOpc;
     SDValue N0 = Node->getOperand(0);
     SDValue N1 = Node->getOperand(1);
-    MVT ResVT;
+    EVT ResVT;
 
     bool is32Bit = false;
     switch (NVT.getSimpleVT()) {
       default: assert(0 && "Unsupported VT!");
-      case MVT::i32:
+      case EVT::i32:
         Opc = SystemZ::UDIVREM32r; MOpc = SystemZ::UDIVREM32m;
         ClrOpc = SystemZ::MOV64Pr0_even;
-        ResVT = MVT::v2i32;
+        ResVT = EVT::v2i32;
         is32Bit = true;
         break;
-      case MVT::i64:
+      case EVT::i64:
         Opc = SystemZ::UDIVREM64r; MOpc = SystemZ::UDIVREM64m;
         ClrOpc = SystemZ::MOV128r0_even;
-        ResVT = MVT::v2i64;
+        ResVT = EVT::v2i64;
         break;
     }
 
@@ -768,7 +768,7 @@
       Dividend =
         CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, ResVT,
                               SDValue(Tmp, 0), SDValue(Dividend, 0),
-                              CurDAG->getTargetConstant(SubRegIdx, MVT::i32));
+                              CurDAG->getTargetConstant(SubRegIdx, EVT::i32));
     }
 
     // Zero out even subreg
@@ -793,7 +793,7 @@
                                           dl, NVT,
                                           SDValue(Result, 0),
                                           CurDAG->getTargetConstant(SubRegIdx,
-                                                                    MVT::i32));
+                                                                    EVT::i32));
       ReplaceUses(Op.getValue(0), SDValue(Div, 0));
       #ifndef NDEBUG
       DOUT << std::string(Indent-2, ' ') << "=> ";
@@ -809,7 +809,7 @@
                                           dl, NVT,
                                           SDValue(Result, 0),
                                           CurDAG->getTargetConstant(SubRegIdx,
-                                                                    MVT::i32));
+                                                                    EVT::i32));
       ReplaceUses(Op.getValue(1), SDValue(Rem, 0));
       #ifndef NDEBUG
       DOUT << std::string(Indent-2, ' ') << "=> ";
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 6265a13..6ffb434 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -44,14 +44,14 @@
   RegInfo = TM.getRegisterInfo();
 
   // Set up the register classes.
-  addRegisterClass(MVT::i32,  SystemZ::GR32RegisterClass);
-  addRegisterClass(MVT::i64,  SystemZ::GR64RegisterClass);
-  addRegisterClass(MVT::v2i32,SystemZ::GR64PRegisterClass);
-  addRegisterClass(MVT::v2i64,SystemZ::GR128RegisterClass);
+  addRegisterClass(EVT::i32,  SystemZ::GR32RegisterClass);
+  addRegisterClass(EVT::i64,  SystemZ::GR64RegisterClass);
+  addRegisterClass(EVT::v2i32,SystemZ::GR64PRegisterClass);
+  addRegisterClass(EVT::v2i64,SystemZ::GR128RegisterClass);
 
   if (!UseSoftFloat) {
-    addRegisterClass(MVT::f32, SystemZ::FP32RegisterClass);
-    addRegisterClass(MVT::f64, SystemZ::FP64RegisterClass);
+    addRegisterClass(EVT::f32, SystemZ::FP32RegisterClass);
+    addRegisterClass(EVT::f64, SystemZ::FP64RegisterClass);
 
     addLegalFPImmediate(APFloat(+0.0));  // lzer
     addLegalFPImmediate(APFloat(+0.0f)); // lzdr
@@ -63,92 +63,92 @@
   computeRegisterProperties();
 
   // Set shifts properties
-  setShiftAmountType(MVT::i64);
+  setShiftAmountType(EVT::i64);
 
   // Provide all sorts of operation actions
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::EXTLOAD,  MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::i1, Promote);
 
-  setLoadExtAction(ISD::SEXTLOAD, MVT::f32, Expand);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::f32, Expand);
-  setLoadExtAction(ISD::EXTLOAD,  MVT::f32, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::f32, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::f32, Expand);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::f32, Expand);
 
-  setLoadExtAction(ISD::SEXTLOAD, MVT::f64, Expand);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::f64, Expand);
-  setLoadExtAction(ISD::EXTLOAD,  MVT::f64, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::f64, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::f64, Expand);
+  setLoadExtAction(ISD::EXTLOAD,  EVT::f64, Expand);
 
   setStackPointerRegisterToSaveRestore(SystemZ::R15D);
   setSchedulingPreference(SchedulingForLatency);
   setBooleanContents(ZeroOrOneBooleanContent);
 
-  setOperationAction(ISD::BR_JT,            MVT::Other, Expand);
-  setOperationAction(ISD::BRCOND,           MVT::Other, Expand);
-  setOperationAction(ISD::BR_CC,            MVT::i32, Custom);
-  setOperationAction(ISD::BR_CC,            MVT::i64, Custom);
-  setOperationAction(ISD::BR_CC,            MVT::f32, Custom);
-  setOperationAction(ISD::BR_CC,            MVT::f64, Custom);
-  setOperationAction(ISD::ConstantPool,     MVT::i32, Custom);
-  setOperationAction(ISD::ConstantPool,     MVT::i64, Custom);
-  setOperationAction(ISD::GlobalAddress,    MVT::i64, Custom);
-  setOperationAction(ISD::JumpTable,        MVT::i64, Custom);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
+  setOperationAction(ISD::BR_JT,            EVT::Other, Expand);
+  setOperationAction(ISD::BRCOND,           EVT::Other, Expand);
+  setOperationAction(ISD::BR_CC,            EVT::i32, Custom);
+  setOperationAction(ISD::BR_CC,            EVT::i64, Custom);
+  setOperationAction(ISD::BR_CC,            EVT::f32, Custom);
+  setOperationAction(ISD::BR_CC,            EVT::f64, Custom);
+  setOperationAction(ISD::ConstantPool,     EVT::i32, Custom);
+  setOperationAction(ISD::ConstantPool,     EVT::i64, Custom);
+  setOperationAction(ISD::GlobalAddress,    EVT::i64, Custom);
+  setOperationAction(ISD::JumpTable,        EVT::i64, Custom);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64, Expand);
 
-  setOperationAction(ISD::SDIV,             MVT::i32, Expand);
-  setOperationAction(ISD::UDIV,             MVT::i32, Expand);
-  setOperationAction(ISD::SDIV,             MVT::i64, Expand);
-  setOperationAction(ISD::UDIV,             MVT::i64, Expand);
-  setOperationAction(ISD::SREM,             MVT::i32, Expand);
-  setOperationAction(ISD::UREM,             MVT::i32, Expand);
-  setOperationAction(ISD::SREM,             MVT::i64, Expand);
-  setOperationAction(ISD::UREM,             MVT::i64, Expand);
+  setOperationAction(ISD::SDIV,             EVT::i32, Expand);
+  setOperationAction(ISD::UDIV,             EVT::i32, Expand);
+  setOperationAction(ISD::SDIV,             EVT::i64, Expand);
+  setOperationAction(ISD::UDIV,             EVT::i64, Expand);
+  setOperationAction(ISD::SREM,             EVT::i32, Expand);
+  setOperationAction(ISD::UREM,             EVT::i32, Expand);
+  setOperationAction(ISD::SREM,             EVT::i64, Expand);
+  setOperationAction(ISD::UREM,             EVT::i64, Expand);
 
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
 
-  setOperationAction(ISD::CTPOP,            MVT::i32, Expand);
-  setOperationAction(ISD::CTPOP,            MVT::i64, Expand);
-  setOperationAction(ISD::CTTZ,             MVT::i32, Expand);
-  setOperationAction(ISD::CTTZ,             MVT::i64, Expand);
-  setOperationAction(ISD::CTLZ,             MVT::i32, Promote);
-  setOperationAction(ISD::CTLZ,             MVT::i64, Legal);
+  setOperationAction(ISD::CTPOP,            EVT::i32, Expand);
+  setOperationAction(ISD::CTPOP,            EVT::i64, Expand);
+  setOperationAction(ISD::CTTZ,             EVT::i32, Expand);
+  setOperationAction(ISD::CTTZ,             EVT::i64, Expand);
+  setOperationAction(ISD::CTLZ,             EVT::i32, Promote);
+  setOperationAction(ISD::CTLZ,             EVT::i64, Legal);
 
   // FIXME: Can we lower these 2 efficiently?
-  setOperationAction(ISD::SETCC,            MVT::i32, Expand);
-  setOperationAction(ISD::SETCC,            MVT::i64, Expand);
-  setOperationAction(ISD::SETCC,            MVT::f32, Expand);
-  setOperationAction(ISD::SETCC,            MVT::f64, Expand);
-  setOperationAction(ISD::SELECT,           MVT::i32, Expand);
-  setOperationAction(ISD::SELECT,           MVT::i64, Expand);
-  setOperationAction(ISD::SELECT,           MVT::f32, Expand);
-  setOperationAction(ISD::SELECT,           MVT::f64, Expand);
-  setOperationAction(ISD::SELECT_CC,        MVT::i32, Custom);
-  setOperationAction(ISD::SELECT_CC,        MVT::i64, Custom);
-  setOperationAction(ISD::SELECT_CC,        MVT::f32, Custom);
-  setOperationAction(ISD::SELECT_CC,        MVT::f64, Custom);
+  setOperationAction(ISD::SETCC,            EVT::i32, Expand);
+  setOperationAction(ISD::SETCC,            EVT::i64, Expand);
+  setOperationAction(ISD::SETCC,            EVT::f32, Expand);
+  setOperationAction(ISD::SETCC,            EVT::f64, Expand);
+  setOperationAction(ISD::SELECT,           EVT::i32, Expand);
+  setOperationAction(ISD::SELECT,           EVT::i64, Expand);
+  setOperationAction(ISD::SELECT,           EVT::f32, Expand);
+  setOperationAction(ISD::SELECT,           EVT::f64, Expand);
+  setOperationAction(ISD::SELECT_CC,        EVT::i32, Custom);
+  setOperationAction(ISD::SELECT_CC,        EVT::i64, Custom);
+  setOperationAction(ISD::SELECT_CC,        EVT::f32, Custom);
+  setOperationAction(ISD::SELECT_CC,        EVT::f64, Custom);
 
   // Funny enough: we don't have 64-bit signed versions of these stuff, but have
   // unsigned.
-  setOperationAction(ISD::MULHS,            MVT::i64, Expand);
-  setOperationAction(ISD::SMUL_LOHI,        MVT::i64, Expand);
+  setOperationAction(ISD::MULHS,            EVT::i64, Expand);
+  setOperationAction(ISD::SMUL_LOHI,        EVT::i64, Expand);
 
   // Lower some FP stuff
-  setOperationAction(ISD::FSIN,             MVT::f32, Expand);
-  setOperationAction(ISD::FSIN,             MVT::f64, Expand);
-  setOperationAction(ISD::FCOS,             MVT::f32, Expand);
-  setOperationAction(ISD::FCOS,             MVT::f64, Expand);
-  setOperationAction(ISD::FREM,             MVT::f32, Expand);
-  setOperationAction(ISD::FREM,             MVT::f64, Expand);
+  setOperationAction(ISD::FSIN,             EVT::f32, Expand);
+  setOperationAction(ISD::FSIN,             EVT::f64, Expand);
+  setOperationAction(ISD::FCOS,             EVT::f32, Expand);
+  setOperationAction(ISD::FCOS,             EVT::f64, Expand);
+  setOperationAction(ISD::FREM,             EVT::f32, Expand);
+  setOperationAction(ISD::FREM,             EVT::f64, Expand);
 
   // We have only 64-bit bitconverts
-  setOperationAction(ISD::BIT_CONVERT,      MVT::f32, Expand);
-  setOperationAction(ISD::BIT_CONVERT,      MVT::i32, Expand);
+  setOperationAction(ISD::BIT_CONVERT,      EVT::f32, Expand);
+  setOperationAction(ISD::BIT_CONVERT,      EVT::i32, Expand);
 
-  setOperationAction(ISD::UINT_TO_FP,       MVT::i32, Expand);
-  setOperationAction(ISD::UINT_TO_FP,       MVT::i64, Expand);
-  setOperationAction(ISD::FP_TO_UINT,       MVT::i32, Expand);
-  setOperationAction(ISD::FP_TO_UINT,       MVT::i64, Expand);
+  setOperationAction(ISD::UINT_TO_FP,       EVT::i32, Expand);
+  setOperationAction(ISD::UINT_TO_FP,       EVT::i64, Expand);
+  setOperationAction(ISD::FP_TO_UINT,       EVT::i32, Expand);
+  setOperationAction(ISD::FP_TO_UINT,       EVT::i64, Expand);
 
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+  setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 }
 
 SDValue SystemZTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
@@ -238,7 +238,7 @@
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     SDValue ArgValue;
     CCValAssign &VA = ArgLocs[i];
-    MVT LocVT = VA.getLocVT();
+    EVT LocVT = VA.getLocVT();
     if (VA.isRegLoc()) {
       // Arguments passed in registers
       TargetRegisterClass *RC;
@@ -250,13 +250,13 @@
              << "\n";
 #endif
         llvm_unreachable(0);
-      case MVT::i64:
+      case EVT::i64:
         RC = SystemZ::GR64RegisterClass;
         break;
-      case MVT::f32:
+      case EVT::f32:
         RC = SystemZ::FP32RegisterClass;
         break;
-      case MVT::f64:
+      case EVT::f64:
         RC = SystemZ::FP64RegisterClass;
         break;
       }
@@ -382,7 +382,7 @@
   // Transform all store nodes into one single node because all store nodes are
   // independent of each other.
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token chain and
@@ -404,7 +404,7 @@
     Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
 
   // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(Callee);
@@ -530,10 +530,10 @@
   }
 
   if (Flag.getNode())
-    return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+    return DAG.getNode(SystemZISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
 
   // Return Void
-  return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain);
+  return DAG.getNode(SystemZISD::RET_FLAG, dl, EVT::Other, Chain);
 }
 
 SDValue SystemZTargetLowering::EmitCmp(SDValue LHS, SDValue RHS,
@@ -608,11 +608,11 @@
     break;
   }
 
-  SystemZCC = DAG.getConstant(TCC, MVT::i32);
+  SystemZCC = DAG.getConstant(TCC, EVT::i32);
 
   DebugLoc dl = LHS.getDebugLoc();
   return DAG.getNode((isUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
-                     dl, MVT::Flag, LHS, RHS);
+                     dl, EVT::Flag, LHS, RHS);
 }
 
 
@@ -641,7 +641,7 @@
   SDValue SystemZCC;
   SDValue Flag = EmitCmp(LHS, RHS, CC, SystemZCC, DAG);
 
-  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+  SDVTList VTs = DAG.getVTList(Op.getValueType(), EVT::Flag);
   SmallVector<SDValue, 4> Ops;
   Ops.push_back(TrueV);
   Ops.push_back(FalseV);
diff --git a/lib/Target/SystemZ/SystemZOperands.td b/lib/Target/SystemZ/SystemZOperands.td
index 156cace..0ee48c3 100644
--- a/lib/Target/SystemZ/SystemZOperands.td
+++ b/lib/Target/SystemZ/SystemZOperands.td
@@ -134,10 +134,10 @@
 def immSExt16 : PatLeaf<(imm), [{
   // immSExt16 predicate - true if the immediate fits in a 16-bit sign extended
   // field.
-  if (N->getValueType(0) == MVT::i64) {
+  if (N->getValueType(0) == EVT::i64) {
     uint64_t val = N->getZExtValue();
     return ((int64_t)val == (int16_t)val);
-  } else if (N->getValueType(0) == MVT::i32) {
+  } else if (N->getValueType(0) == EVT::i32) {
     uint32_t val = N->getZExtValue();
     return ((int32_t)val == (int16_t)val);
   }
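
The immSExt16 predicate above accepts an immediate only when it fits in a sign-extended 16-bit field, for either i64 or i32 nodes. A minimal stand-alone sketch of the same range check, with a mock value-type enum standing in for the renamed EVT; everything here is illustrative, not LLVM API:

#include <cassert>
#include <cstdint>

// Mock stand-in for the value-type tag carried by an immediate node.
enum class MockVT { i32, i64 };

// True when Val, interpreted at width VT, round-trips through int16_t,
// i.e. it can be encoded in a sign-extended 16-bit instruction field.
static bool fitsSExt16(MockVT VT, uint64_t Val) {
  if (VT == MockVT::i64)
    return (int64_t)Val == (int16_t)Val;
  if (VT == MockVT::i32)
    return (int32_t)(uint32_t)Val == (int16_t)Val;
  return false;
}

int main() {
  assert(fitsSExt16(MockVT::i64, 32767));             // 0x7fff fits
  assert(!fitsSExt16(MockVT::i64, 32768));            // 0x8000 does not
  assert(fitsSExt16(MockVT::i32, (uint32_t)-32768));  // -0x8000 fits
  assert(!fitsSExt16(MockVT::i32, 0x10000));          // too wide
  return 0;
}
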
diff --git a/lib/Target/TargetRegisterInfo.cpp b/lib/Target/TargetRegisterInfo.cpp
index a84fdaa..b649fca 100644
--- a/lib/Target/TargetRegisterInfo.cpp
+++ b/lib/Target/TargetRegisterInfo.cpp
@@ -40,10 +40,10 @@
 TargetRegisterInfo::~TargetRegisterInfo() {}
 
 /// getPhysicalRegisterRegClass - Returns the Register Class of a physical
-/// register of the given type. If type is MVT::Other, then just return any
+/// register of the given type. If type is EVT::Other, then just return any
 /// register class the register belongs to.
 const TargetRegisterClass *
-TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, MVT VT) const {
+TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
   assert(isPhysicalRegister(reg) && "reg must be a physical register");
 
   // Pick the most super register class of the right type that contains
@@ -51,7 +51,7 @@
   const TargetRegisterClass* BestRC = 0;
   for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
     const TargetRegisterClass* RC = *I;
-    if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
+    if ((VT == EVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
         (!BestRC || BestRC->hasSuperClass(RC)))
       BestRC = RC;
   }
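
getPhysicalRegisterRegClass scans every register class, keeps only those that contain the register and (unless the wildcard Other type is requested) support the requested type, and prefers the most inclusive such class. A small self-contained sketch of that selection policy; toy classes stand in for TargetRegisterClass, class size stands in for the super-class preference, and all names and data are invented for illustration:

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Toy register class: which registers it holds and which widths it supports.
struct ToyRegClass {
  std::string Name;
  std::vector<int> Regs;       // physical register numbers in this class
  std::vector<int> TypeBits;   // supported value widths, in bits
  bool contains(int Reg) const {
    return std::find(Regs.begin(), Regs.end(), Reg) != Regs.end();
  }
  bool hasType(int Bits) const {
    return std::find(TypeBits.begin(), TypeBits.end(), Bits) != TypeBits.end();
  }
};

constexpr int kAnyType = 0;  // wildcard, analogous to the Other value type

// Pick the largest class of the right type that contains Reg.
static const ToyRegClass *pickClass(const std::vector<ToyRegClass> &Classes,
                                    int Reg, int TypeBits) {
  const ToyRegClass *Best = nullptr;
  for (const ToyRegClass &RC : Classes)
    if ((TypeBits == kAnyType || RC.hasType(TypeBits)) && RC.contains(Reg) &&
        (!Best || RC.Regs.size() > Best->Regs.size()))
      Best = &RC;
  return Best;
}

int main() {
  std::vector<ToyRegClass> Classes = {
      {"GR32", {1, 2, 3, 4}, {32}},
      {"GR32_ABCD", {1, 2}, {32}},
      {"FP64", {10, 11}, {64}},
  };
  const ToyRegClass *RC = pickClass(Classes, 2, 32);
  std::printf("reg 2 as 32-bit -> %s\n", RC ? RC->Name.c_str() : "<none>");
  RC = pickClass(Classes, 2, kAnyType);
  std::printf("reg 2 as any type -> %s\n", RC ? RC->Name.c_str() : "<none>");
  return 0;
}
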
diff --git a/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
index 58959ed..1d8d7a7 100644
--- a/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
@@ -425,9 +425,9 @@
     O << '%';
     unsigned Reg = MO.getReg();
     if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
-      MVT VT = (strcmp(Modifier+6,"64") == 0) ?
-        MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
-                    ((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
+      EVT VT = (strcmp(Modifier+6,"64") == 0) ?
+        EVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? EVT::i32 :
+                    ((strcmp(Modifier+6,"16") == 0) ? EVT::i16 : EVT::i8));
       Reg = getX86SubSuperRegister(Reg, VT);
     }
     O << TRI->getAsmName(Reg);
@@ -573,19 +573,19 @@
   switch (Mode) {
   default: return true;  // Unknown mode.
   case 'b': // Print QImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i8);
+    Reg = getX86SubSuperRegister(Reg, EVT::i8);
     break;
   case 'h': // Print QImode high register
-    Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
+    Reg = getX86SubSuperRegister(Reg, EVT::i8, true);
     break;
   case 'w': // Print HImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i16);
+    Reg = getX86SubSuperRegister(Reg, EVT::i16);
     break;
   case 'k': // Print SImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i32);
+    Reg = getX86SubSuperRegister(Reg, EVT::i32);
     break;
   case 'q': // Print DImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i64);
+    Reg = getX86SubSuperRegister(Reg, EVT::i64);
     break;
   }
 
@@ -685,7 +685,7 @@
     unsigned Reg = MI->getOperand(i).getReg();
     if (Reg == 0) continue;
     
-    MI->getOperand(i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
+    MI->getOperand(i).setReg(getX86SubSuperRegister(Reg, EVT::i64));
   }
 }
 
diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
index 2324479..34908da 100644
--- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp
@@ -75,9 +75,9 @@
     unsigned Reg = Op.getReg();
 #if 0
     if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
-      MVT VT = (strcmp(Modifier+6,"64") == 0) ?
-      MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
-                  ((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
+      EVT VT = (strcmp(Modifier+6,"64") == 0) ?
+      EVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? EVT::i32 :
+                  ((strcmp(Modifier+6,"16") == 0) ? EVT::i16 : EVT::i8));
       Reg = getX86SubSuperRegister(Reg, VT);
     }
 #endif
diff --git a/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp b/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
index c9a761d..08f5aa4 100644
--- a/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
@@ -209,9 +209,9 @@
     if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
       unsigned Reg = MO.getReg();
       if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
-        MVT VT = (strcmp(Modifier,"subreg64") == 0) ?
-          MVT::i64 : ((strcmp(Modifier, "subreg32") == 0) ? MVT::i32 :
-                      ((strcmp(Modifier,"subreg16") == 0) ? MVT::i16 :MVT::i8));
+        EVT VT = (strcmp(Modifier,"subreg64") == 0) ?
+          EVT::i64 : ((strcmp(Modifier, "subreg32") == 0) ? EVT::i32 :
+                      ((strcmp(Modifier,"subreg16") == 0) ? EVT::i16 :EVT::i8));
         Reg = getX86SubSuperRegister(Reg, VT);
       }
       O << TRI->getName(Reg);
@@ -376,16 +376,16 @@
   switch (Mode) {
   default: return true;  // Unknown mode.
   case 'b': // Print QImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i8);
+    Reg = getX86SubSuperRegister(Reg, EVT::i8);
     break;
   case 'h': // Print QImode high register
-    Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
+    Reg = getX86SubSuperRegister(Reg, EVT::i8, true);
     break;
   case 'w': // Print HImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i16);
+    Reg = getX86SubSuperRegister(Reg, EVT::i16);
     break;
   case 'k': // Print SImode register
-    Reg = getX86SubSuperRegister(Reg, MVT::i32);
+    Reg = getX86SubSuperRegister(Reg, EVT::i32);
     break;
   }
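
In the asm printers above, a "subregNN" operand modifier selects which width of a register to print: the suffix after "subreg" is compared against "64", "32" and "16", and anything else falls back to the 8-bit form. A tiny stand-alone sketch of that suffix-to-width mapping; plain C++, no LLVM types, and the helper name is made up:

#include <cstdio>
#include <cstring>

// Map the suffix of a "subregNN" modifier to a register width in bits,
// mirroring the chained strcmp ladder in the printers: 64, 32, 16, else 8.
static unsigned subregModifierBits(const char *Modifier) {
  const char *Suffix = Modifier + strlen("subreg");
  if (strcmp(Suffix, "64") == 0) return 64;
  if (strcmp(Suffix, "32") == 0) return 32;
  if (strcmp(Suffix, "16") == 0) return 16;
  return 8;  // default, like the 8-bit value-type fallback
}

int main() {
  const char *Tests[] = {"subreg64", "subreg32", "subreg16", "subreg8"};
  for (const char *T : Tests)
    std::printf("%s -> %u bits\n", T, subregModifierBits(T));
  return 0;
}
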
 
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index a4bb1be..3046681 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -79,16 +79,16 @@
 #include "X86GenFastISel.inc"
 
 private:
-  bool X86FastEmitCompare(Value *LHS, Value *RHS, MVT VT);
+  bool X86FastEmitCompare(Value *LHS, Value *RHS, EVT VT);
   
-  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);
+  bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
 
-  bool X86FastEmitStore(MVT VT, Value *Val,
+  bool X86FastEmitStore(EVT VT, Value *Val,
                         const X86AddressMode &AM);
-  bool X86FastEmitStore(MVT VT, unsigned Val,
+  bool X86FastEmitStore(EVT VT, unsigned Val,
                         const X86AddressMode &AM);
 
-  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
+  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                          unsigned &ResultReg);
   
   bool X86SelectAddress(Value *V, X86AddressMode &AM);
@@ -133,36 +133,36 @@
 
   /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
   /// computed in an SSE register, not on the X87 floating point stack.
-  bool isScalarFPTypeInSSEReg(MVT VT) const {
-    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
-      (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
+  bool isScalarFPTypeInSSEReg(EVT VT) const {
+    return (VT == EVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
+      (VT == EVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
   }
 
-  bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
+  bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false);
 };
   
 } // end anonymous namespace.
 
-bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
+bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
   VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
-  if (VT == MVT::Other || !VT.isSimple())
+  if (VT == EVT::Other || !VT.isSimple())
     // Unhandled type. Halt "fast" selection and bail.
     return false;
   
   // For now, require SSE/SSE2 for performing floating-point operations,
   // since x87 requires additional work.
-  if (VT == MVT::f64 && !X86ScalarSSEf64)
+  if (VT == EVT::f64 && !X86ScalarSSEf64)
      return false;
-  if (VT == MVT::f32 && !X86ScalarSSEf32)
+  if (VT == EVT::f32 && !X86ScalarSSEf32)
      return false;
   // Similarly, no f80 support yet.
-  if (VT == MVT::f80)
+  if (VT == EVT::f80)
     return false;
   // We only handle legal types. For example, on x86-32 the instruction
   // selector contains all of the 64-bit instructions from x86-64,
   // under the assumption that i64 won't be used if the target doesn't
   // support it.
-  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
+  return (AllowI1 && VT == EVT::i1) || TLI.isTypeLegal(VT);
 }
 
 #include "X86GenCallingConv.inc"
@@ -188,31 +188,31 @@
 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
-bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
+bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
                                   unsigned &ResultReg) {
   // Get opcode and regclass of the output for the given load instruction.
   unsigned Opc = 0;
   const TargetRegisterClass *RC = NULL;
   switch (VT.getSimpleVT()) {
   default: return false;
-  case MVT::i8:
+  case EVT::i8:
     Opc = X86::MOV8rm;
     RC  = X86::GR8RegisterClass;
     break;
-  case MVT::i16:
+  case EVT::i16:
     Opc = X86::MOV16rm;
     RC  = X86::GR16RegisterClass;
     break;
-  case MVT::i32:
+  case EVT::i32:
     Opc = X86::MOV32rm;
     RC  = X86::GR32RegisterClass;
     break;
-  case MVT::i64:
+  case EVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
     RC  = X86::GR64RegisterClass;
     break;
-  case MVT::f32:
+  case EVT::f32:
     if (Subtarget->hasSSE1()) {
       Opc = X86::MOVSSrm;
       RC  = X86::FR32RegisterClass;
@@ -221,7 +221,7 @@
       RC  = X86::RFP32RegisterClass;
     }
     break;
-  case MVT::f64:
+  case EVT::f64:
     if (Subtarget->hasSSE2()) {
       Opc = X86::MOVSDrm;
       RC  = X86::FR64RegisterClass;
@@ -230,7 +230,7 @@
       RC  = X86::RFP64RegisterClass;
     }
     break;
-  case MVT::f80:
+  case EVT::f80:
     // No f80 support yet.
     return false;
   }
@@ -245,21 +245,21 @@
 /// and a displacement offset, or a GlobalAddress,
 /// i.e. V. Return true if it is possible.
 bool
-X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
+X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
                               const X86AddressMode &AM) {
   // Get opcode and regclass of the output for the given store instruction.
   unsigned Opc = 0;
   switch (VT.getSimpleVT()) {
-  case MVT::f80: // No f80 support yet.
+  case EVT::f80: // No f80 support yet.
   default: return false;
-  case MVT::i8:  Opc = X86::MOV8mr;  break;
-  case MVT::i16: Opc = X86::MOV16mr; break;
-  case MVT::i32: Opc = X86::MOV32mr; break;
-  case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
-  case MVT::f32:
+  case EVT::i8:  Opc = X86::MOV8mr;  break;
+  case EVT::i16: Opc = X86::MOV16mr; break;
+  case EVT::i32: Opc = X86::MOV32mr; break;
+  case EVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
+  case EVT::f32:
     Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
     break;
-  case MVT::f64:
+  case EVT::f64:
     Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
     break;
   }
@@ -268,7 +268,7 @@
   return true;
 }
 
-bool X86FastISel::X86FastEmitStore(MVT VT, Value *Val,
+bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
                                    const X86AddressMode &AM) {
   // Handle 'null' like i32/i64 0.
   if (isa<ConstantPointerNull>(Val))
@@ -279,10 +279,10 @@
     unsigned Opc = 0;
     switch (VT.getSimpleVT()) {
     default: break;
-    case MVT::i8:  Opc = X86::MOV8mi;  break;
-    case MVT::i16: Opc = X86::MOV16mi; break;
-    case MVT::i32: Opc = X86::MOV32mi; break;
-    case MVT::i64:
+    case EVT::i8:  Opc = X86::MOV8mi;  break;
+    case EVT::i16: Opc = X86::MOV16mi; break;
+    case EVT::i32: Opc = X86::MOV32mi; break;
+    case EVT::i64:
       // Must be a 32-bit sign extended value.
       if ((int)CI->getSExtValue() == CI->getSExtValue())
         Opc = X86::MOV64mi32;
@@ -306,8 +306,8 @@
 /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
 /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
 /// ISD::SIGN_EXTEND).
-bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
-                                    unsigned Src, MVT SrcVT,
+bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
+                                    unsigned Src, EVT SrcVT,
                                     unsigned &ResultReg) {
   unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
   
@@ -478,7 +478,7 @@
       StubAM.GV = GV;
       StubAM.GVOpFlags = GVFlags;
 
-      if (TLI.getPointerTy() == MVT::i64) {
+      if (TLI.getPointerTy() == EVT::i64) {
         Opc = X86::MOV64rm;
         RC  = X86::GR64RegisterClass;
         
@@ -605,7 +605,7 @@
 
 /// X86SelectStore - Select and emit code to implement store instructions.
 bool X86FastISel::X86SelectStore(Instruction* I) {
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(I->getOperand(0)->getType(), VT))
     return false;
 
@@ -619,7 +619,7 @@
 /// X86SelectLoad - Select and emit code to implement load instructions.
 ///
 bool X86FastISel::X86SelectLoad(Instruction *I)  {
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(I->getType(), VT))
     return false;
 
@@ -635,29 +635,29 @@
   return false;
 }
 
-static unsigned X86ChooseCmpOpcode(MVT VT) {
+static unsigned X86ChooseCmpOpcode(EVT VT) {
   switch (VT.getSimpleVT()) {
   default:       return 0;
-  case MVT::i8:  return X86::CMP8rr;
-  case MVT::i16: return X86::CMP16rr;
-  case MVT::i32: return X86::CMP32rr;
-  case MVT::i64: return X86::CMP64rr;
-  case MVT::f32: return X86::UCOMISSrr;
-  case MVT::f64: return X86::UCOMISDrr;
+  case EVT::i8:  return X86::CMP8rr;
+  case EVT::i16: return X86::CMP16rr;
+  case EVT::i32: return X86::CMP32rr;
+  case EVT::i64: return X86::CMP64rr;
+  case EVT::f32: return X86::UCOMISSrr;
+  case EVT::f64: return X86::UCOMISDrr;
   }
 }
 
 /// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
 /// of the comparison, return an opcode that works for the compare (e.g.
 /// CMP32ri) otherwise return 0.
-static unsigned X86ChooseCmpImmediateOpcode(MVT VT, ConstantInt *RHSC) {
+static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
   switch (VT.getSimpleVT()) {
   // Otherwise, we can't fold the immediate into this comparison.
   default: return 0;
-  case MVT::i8: return X86::CMP8ri;
-  case MVT::i16: return X86::CMP16ri;
-  case MVT::i32: return X86::CMP32ri;
-  case MVT::i64:
+  case EVT::i8: return X86::CMP8ri;
+  case EVT::i16: return X86::CMP16ri;
+  case EVT::i32: return X86::CMP32ri;
+  case EVT::i64:
     // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
     // field.
     if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
@@ -666,7 +666,7 @@
   }
 }
 
-bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {
+bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
   unsigned Op0Reg = getRegForValue(Op0);
   if (Op0Reg == 0) return false;
   
@@ -698,7 +698,7 @@
 bool X86FastISel::X86SelectCmp(Instruction *I) {
   CmpInst *CI = cast<CmpInst>(I);
 
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(I->getOperand(0)->getType(), VT))
     return false;
 
@@ -778,7 +778,7 @@
     unsigned ResultReg = getRegForValue(I->getOperand(0));
     if (ResultReg == 0) return false;
     // Set the high bits to zero.
-    ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
+    ResultReg = FastEmitZExtFromI1(EVT::i8, ResultReg);
     if (ResultReg == 0) return false;
     UpdateValueMap(I, ResultReg);
     return true;
@@ -798,7 +798,7 @@
   // Fold the common case of a conditional branch with a comparison.
   if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
     if (CI->hasOneUse()) {
-      MVT VT = TLI.getValueType(CI->getOperand(0)->getType());
+      EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
 
       // Try to take advantage of fallthrough opportunities.
       CmpInst::Predicate Predicate = CI->getPredicate();
@@ -975,8 +975,8 @@
     return false;
   }
 
-  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
-  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
+  if (VT == EVT::Other || !isTypeLegal(I->getType(), VT))
     return false;
 
   unsigned Op0Reg = getRegForValue(I->getOperand(0));
@@ -1009,19 +1009,19 @@
 }
 
 bool X86FastISel::X86SelectSelect(Instruction *I) {
-  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
-  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
+  if (VT == EVT::Other || !isTypeLegal(I->getType(), VT))
     return false;
   
   unsigned Opc = 0;
   const TargetRegisterClass *RC = NULL;
-  if (VT.getSimpleVT() == MVT::i16) {
+  if (VT.getSimpleVT() == EVT::i16) {
     Opc = X86::CMOVE16rr;
     RC = &X86::GR16RegClass;
-  } else if (VT.getSimpleVT() == MVT::i32) {
+  } else if (VT.getSimpleVT() == EVT::i32) {
     Opc = X86::CMOVE32rr;
     RC = &X86::GR32RegClass;
-  } else if (VT.getSimpleVT() == MVT::i64) {
+  } else if (VT.getSimpleVT() == EVT::i64) {
     Opc = X86::CMOVE64rr;
     RC = &X86::GR64RegClass;
   } else {
@@ -1081,14 +1081,14 @@
   if (Subtarget->is64Bit())
     // All other cases should be handled by the tblgen generated code.
     return false;
-  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
-  MVT DstVT = TLI.getValueType(I->getType());
+  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+  EVT DstVT = TLI.getValueType(I->getType());
   
   // This code only handles truncation to byte right now.
-  if (DstVT != MVT::i8 && DstVT != MVT::i1)
+  if (DstVT != EVT::i8 && DstVT != EVT::i1)
     // All other cases should be handled by the tblgen generated code.
     return false;
-  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
+  if (SrcVT != EVT::i16 && SrcVT != EVT::i32)
     // All other cases should be handled by the tblgen generated code.
     return false;
 
@@ -1098,14 +1098,14 @@
     return false;
 
   // First issue a copy to GR16_ABCD or GR32_ABCD.
-  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
-  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
+  unsigned CopyOpc = (SrcVT == EVT::i16) ? X86::MOV16rr : X86::MOV32rr;
+  const TargetRegisterClass *CopyRC = (SrcVT == EVT::i16)
     ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
   unsigned CopyReg = createResultReg(CopyRC);
   BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
 
   // Then issue an extract_subreg.
-  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
+  unsigned ResultReg = FastEmitInst_extractsubreg(EVT::i8,
                                                   CopyReg, X86::SUBREG_8BIT);
   if (!ResultReg)
     return false;
@@ -1150,7 +1150,7 @@
     const Type *RetTy =
       cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
 
-    MVT VT;
+    EVT VT;
     if (!isTypeLegal(RetTy, VT))
       return false;
 
@@ -1164,9 +1164,9 @@
       return false;
 
     unsigned OpC = 0;
-    if (VT == MVT::i32)
+    if (VT == EVT::i32)
       OpC = X86::ADD32rr;
-    else if (VT == MVT::i64)
+    else if (VT == EVT::i64)
       OpC = X86::ADD64rr;
     else
       return false;
@@ -1185,7 +1185,7 @@
     if (DestReg1 != ResultReg)
       ResultReg = DestReg1+1;
     else
-      ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
+      ResultReg = createResultReg(TLI.getRegClassFor(EVT::i8));
     
     unsigned Opc = X86::SETBr;
     if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
@@ -1229,9 +1229,9 @@
 
   // Handle *simple* calls for now.
   const Type *RetTy = CS.getType();
-  MVT RetVT;
+  EVT RetVT;
   if (RetTy == Type::VoidTy)
-    RetVT = MVT::isVoid;
+    RetVT = EVT::isVoid;
   else if (!isTypeLegal(RetTy, RetVT, true))
     return false;
 
@@ -1251,15 +1251,15 @@
 
   // Allow calls which produce i1 results.
   bool AndToI1 = false;
-  if (RetVT == MVT::i1) {
-    RetVT = MVT::i8;
+  if (RetVT == EVT::i1) {
+    RetVT = EVT::i8;
     AndToI1 = true;
   }
 
   // Deal with call operands first.
   SmallVector<Value*, 8> ArgVals;
   SmallVector<unsigned, 8> Args;
-  SmallVector<MVT, 8> ArgVTs;
+  SmallVector<EVT, 8> ArgVTs;
   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
   Args.reserve(CS.arg_size());
   ArgVals.reserve(CS.arg_size());
@@ -1285,7 +1285,7 @@
       return false;
 
     const Type *ArgTy = (*i)->getType();
-    MVT ArgVT;
+    EVT ArgVT;
     if (!isTypeLegal(ArgTy, ArgVT))
       return false;
     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
@@ -1315,7 +1315,7 @@
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
     unsigned Arg = Args[VA.getValNo()];
-    MVT ArgVT = ArgVTs[VA.getValNo()];
+    EVT ArgVT = ArgVTs[VA.getValNo()];
   
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
@@ -1445,14 +1445,14 @@
   BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
 
   // Now handle call return value (if any).
-  if (RetVT.getSimpleVT() != MVT::isVoid) {
+  if (RetVT.getSimpleVT() != EVT::isVoid) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
     CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
 
     // Copy all of the result registers out of their specified physreg.
     assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
-    MVT CopyVT = RVLocs[0].getValVT();
+    EVT CopyVT = RVLocs[0].getValVT();
     TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
     TargetRegisterClass *SrcRC = DstRC;
     
@@ -1462,7 +1462,7 @@
     if ((RVLocs[0].getLocReg() == X86::ST0 ||
          RVLocs[0].getLocReg() == X86::ST1) &&
         isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
-      CopyVT = MVT::f80;
+      CopyVT = EVT::f80;
       SrcRC = X86::RSTRegisterClass;
       DstRC = X86::RFP80RegisterClass;
     }
@@ -1476,14 +1476,14 @@
       // Round the F80 the right size, which also moves to the appropriate xmm
       // register. This is accomplished by storing the F80 value in memory and
       // then loading it back. Ewww...
-      MVT ResVT = RVLocs[0].getValVT();
-      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
+      EVT ResVT = RVLocs[0].getValVT();
+      unsigned Opc = ResVT == EVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
       unsigned MemSize = ResVT.getSizeInBits()/8;
       int FI = MFI.CreateStackObject(MemSize, MemSize);
       addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
-      DstRC = ResVT == MVT::f32
+      DstRC = ResVT == EVT::f32
         ? X86::FR32RegisterClass : X86::FR64RegisterClass;
-      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+      Opc = ResVT == EVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
       ResultReg = createResultReg(DstRC);
       addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
     }
@@ -1536,8 +1536,8 @@
     return X86SelectExtractValue(I);
   case Instruction::IntToPtr: // Deliberate fall-through.
   case Instruction::PtrToInt: {
-    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
-    MVT DstVT = TLI.getValueType(I->getType());
+    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+    EVT DstVT = TLI.getValueType(I->getType());
     if (DstVT.bitsGT(SrcVT))
       return X86SelectZExt(I);
     if (DstVT.bitsLT(SrcVT))
@@ -1553,7 +1553,7 @@
 }
 
 unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(C->getType(), VT))
     return false;
   
@@ -1562,24 +1562,24 @@
   const TargetRegisterClass *RC = NULL;
   switch (VT.getSimpleVT()) {
   default: return false;
-  case MVT::i8:
+  case EVT::i8:
     Opc = X86::MOV8rm;
     RC  = X86::GR8RegisterClass;
     break;
-  case MVT::i16:
+  case EVT::i16:
     Opc = X86::MOV16rm;
     RC  = X86::GR16RegisterClass;
     break;
-  case MVT::i32:
+  case EVT::i32:
     Opc = X86::MOV32rm;
     RC  = X86::GR32RegisterClass;
     break;
-  case MVT::i64:
+  case EVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
     RC  = X86::GR64RegisterClass;
     break;
-  case MVT::f32:
+  case EVT::f32:
     if (Subtarget->hasSSE1()) {
       Opc = X86::MOVSSrm;
       RC  = X86::FR32RegisterClass;
@@ -1588,7 +1588,7 @@
       RC  = X86::RFP32RegisterClass;
     }
     break;
-  case MVT::f64:
+  case EVT::f64:
     if (Subtarget->hasSSE2()) {
       Opc = X86::MOVSDrm;
       RC  = X86::FR64RegisterClass;
@@ -1597,7 +1597,7 @@
       RC  = X86::RFP64RegisterClass;
     }
     break;
-  case MVT::f80:
+  case EVT::f80:
     // No f80 support yet.
     return false;
   }
@@ -1606,7 +1606,7 @@
   if (isa<GlobalValue>(C)) {
     X86AddressMode AM;
     if (X86SelectAddress(C, AM)) {
-      if (TLI.getPointerTy() == MVT::i32)
+      if (TLI.getPointerTy() == EVT::i32)
         Opc = X86::LEA32r;
       else
         Opc = X86::LEA64r;
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 1a99b08..4aee493 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -186,7 +186,7 @@
   private:
     SDNode *Select(SDValue N);
     SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
-    SDNode *SelectAtomicLoadAdd(SDNode *Node, MVT NVT);
+    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
 
     bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
     bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
@@ -233,40 +233,40 @@
       // These are 32-bit even in 64-bit mode since RIP relative offset
       // is 32-bit.
       if (AM.GV)
-        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
+        Disp = CurDAG->getTargetGlobalAddress(AM.GV, EVT::i32, AM.Disp,
                                               AM.SymbolFlags);
       else if (AM.CP)
-        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
+        Disp = CurDAG->getTargetConstantPool(AM.CP, EVT::i32,
                                              AM.Align, AM.Disp, AM.SymbolFlags);
       else if (AM.ES)
-        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
+        Disp = CurDAG->getTargetExternalSymbol(AM.ES, EVT::i32, AM.SymbolFlags);
       else if (AM.JT != -1)
-        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
+        Disp = CurDAG->getTargetJumpTable(AM.JT, EVT::i32, AM.SymbolFlags);
       else
-        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
+        Disp = CurDAG->getTargetConstant(AM.Disp, EVT::i32);
 
       if (AM.Segment.getNode())
         Segment = AM.Segment;
       else
-        Segment = CurDAG->getRegister(0, MVT::i32);
+        Segment = CurDAG->getRegister(0, EVT::i32);
     }
 
     /// getI8Imm - Return a target constant with the specified value, of type
     /// i8.
     inline SDValue getI8Imm(unsigned Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i8);
+      return CurDAG->getTargetConstant(Imm, EVT::i8);
     }
 
     /// getI16Imm - Return a target constant with the specified value, of type
     /// i16.
     inline SDValue getI16Imm(unsigned Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i16);
+      return CurDAG->getTargetConstant(Imm, EVT::i16);
     }
 
     /// getI32Imm - Return a target constant with the specified value, of type
     /// i32.
     inline SDValue getI32Imm(unsigned Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i32);
+      return CurDAG->getTargetConstant(Imm, EVT::i32);
     }
 
     /// getGlobalBaseReg - Return an SDNode that returns the value of
@@ -408,7 +408,7 @@
         Ops.push_back(Chain.getOperand(i));
     SDValue NewChain =
       CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
-                      MVT::Other, &Ops[0], Ops.size());
+                      EVT::Other, &Ops[0], Ops.size());
     Ops.clear();
     Ops.push_back(NewChain);
   }
@@ -599,8 +599,8 @@
     
     // If the source and destination are SSE registers, then this is a legal
     // conversion that should not be lowered.
-    MVT SrcVT = N->getOperand(0).getValueType();
-    MVT DstVT = N->getValueType(0);
+    EVT SrcVT = N->getOperand(0).getValueType();
+    EVT DstVT = N->getValueType(0);
     bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
     bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
     if (SrcIsSSE && DstIsSSE)
@@ -618,7 +618,7 @@
     // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
     // FPStack has extload and truncstore.  SSE can fold direct loads into other
     // operations.  Based on this, decide what we want to do.
-    MVT MemVT;
+    EVT MemVT;
     if (N->getOpcode() == ISD::FP_ROUND)
       MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
     else
@@ -764,7 +764,7 @@
     }
 
     if (N.getOpcode() == X86ISD::WrapperRIP)
-      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
+      AM.setBaseReg(CurDAG->getRegister(X86::RIP, EVT::i64));
     return false;
   }
 
@@ -1001,7 +1001,7 @@
         RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
         RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
         (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
-         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
+         RHS.getNode()->getOperand(0).getValueType() == EVT::i32))
       ++Cost;
     // If the base is a register with multiple uses, this
     // transformation may save a mov.
@@ -1111,13 +1111,13 @@
       unsigned ScaleLog = 8 - C1->getZExtValue();
       if (ScaleLog > 0 && ScaleLog < 4 &&
           C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
-        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
+        SDValue Eight = CurDAG->getConstant(8, EVT::i8);
         SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
         SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                       X, Eight);
         SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
                                       Srl, Mask);
-        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
+        SDValue ShlCount = CurDAG->getConstant(ScaleLog, EVT::i8);
         SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       And, ShlCount);
 
@@ -1267,7 +1267,7 @@
   if (!Done && MatchAddress(N, AM))
     return false;
 
-  MVT VT = N.getValueType();
+  EVT VT = N.getValueType();
   if (AM.BaseType == X86ISelAddressMode::RegBase) {
     if (!AM.Base.Reg.getNode())
       AM.Base.Reg = CurDAG->getRegister(0, VT);
@@ -1333,14 +1333,14 @@
   // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
   // segments.
   SDValue Copy = AM.Segment;
-  SDValue T = CurDAG->getRegister(0, MVT::i32);
+  SDValue T = CurDAG->getRegister(0, EVT::i32);
   AM.Segment = T;
   if (MatchAddress(N, AM))
     return false;
   assert (T == AM.Segment);
   AM.Segment = Copy;
 
-  MVT VT = N.getValueType();
+  EVT VT = N.getValueType();
   unsigned Complexity = 0;
   if (AM.BaseType == X86ISelAddressMode::RegBase)
     if (AM.Base.Reg.getNode())
@@ -1400,11 +1400,11 @@
   AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
   AM.SymbolFlags = GA->getTargetFlags();
 
-  if (N.getValueType() == MVT::i32) {
+  if (N.getValueType() == EVT::i32) {
     AM.Scale = 1;
-    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
+    AM.IndexReg = CurDAG->getRegister(X86::EBX, EVT::i32);
   } else {
-    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
+    AM.IndexReg = CurDAG->getRegister(0, EVT::i64);
   }
   
   SDValue Segment;
@@ -1435,7 +1435,7 @@
 
 static SDNode *FindCallStartFromCall(SDNode *Node) {
   if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
-    assert(Node->getOperand(0).getValueType() == MVT::Other &&
+    assert(Node->getOperand(0).getValueType() == EVT::Other &&
          "Node doesn't have a token chain argument!");
   return FindCallStartFromCall(Node->getOperand(0).getNode());
 }
@@ -1451,11 +1451,11 @@
   SDValue LSI = Node->getOperand(4);    // MemOperand
   const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain};
   return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
-                               MVT::i32, MVT::i32, MVT::Other, Ops,
+                               EVT::i32, EVT::i32, EVT::Other, Ops,
                                array_lengthof(Ops));
 }
 
-SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, MVT NVT) {
+SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
   if (Node->hasAnyUseOfValue(0))
     return 0;
 
@@ -1497,7 +1497,7 @@
   unsigned Opc = 0;
   switch (NVT.getSimpleVT()) {
   default: return 0;
-  case MVT::i8:
+  case EVT::i8:
     if (isInc)
       Opc = X86::LOCK_INC8m;
     else if (isDec)
@@ -1514,7 +1514,7 @@
         Opc = X86::LOCK_ADD8mr;
     }
     break;
-  case MVT::i16:
+  case EVT::i16:
     if (isInc)
       Opc = X86::LOCK_INC16m;
     else if (isDec)
@@ -1537,7 +1537,7 @@
         Opc = X86::LOCK_ADD16mr;
     }
     break;
-  case MVT::i32:
+  case EVT::i32:
     if (isInc)
       Opc = X86::LOCK_INC32m;
     else if (isDec)
@@ -1560,7 +1560,7 @@
         Opc = X86::LOCK_ADD32mr;
     }
     break;
-  case MVT::i64:
+  case EVT::i64:
     if (isInc)
       Opc = X86::LOCK_INC64m;
     else if (isDec)
@@ -1591,12 +1591,12 @@
   SDValue MemOp = CurDAG->getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
   if (isInc || isDec) {
     SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, MemOp, Chain };
-    SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 7), 0);
+    SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 7), 0);
     SDValue RetVals[] = { Undef, Ret };
     return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
   } else {
     SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, MemOp, Chain };
-    SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 8), 0);
+    SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 8), 0);
     SDValue RetVals[] = { Undef, Ret };
     return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
   }
@@ -1604,7 +1604,7 @@
 
 SDNode *X86DAGToDAGISel::Select(SDValue N) {
   SDNode *Node = N.getNode();
-  MVT NVT = Node->getValueType(0);
+  EVT NVT = Node->getValueType(0);
   unsigned Opc, MOpc;
   unsigned Opcode = Node->getOpcode();
   DebugLoc dl = Node->getDebugLoc();
@@ -1666,28 +1666,28 @@
     if (!isSigned) {
       switch (NVT.getSimpleVT()) {
       default: llvm_unreachable("Unsupported VT!");
-      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
-      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
-      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
-      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
+      case EVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
+      case EVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
+      case EVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
+      case EVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
       }
     } else {
       switch (NVT.getSimpleVT()) {
       default: llvm_unreachable("Unsupported VT!");
-      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
-      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
-      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
-      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
+      case EVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
+      case EVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
+      case EVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
+      case EVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
       }
     }
 
     unsigned LoReg, HiReg;
     switch (NVT.getSimpleVT()) {
     default: llvm_unreachable("Unsupported VT!");
-    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
-    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
-    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
-    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
+    case EVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
+    case EVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
+    case EVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
+    case EVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
     }
 
     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
@@ -1706,14 +1706,14 @@
       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                         InFlag };
       SDNode *CNode =
-        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
+        CurDAG->getTargetNode(MOpc, dl, EVT::Other, EVT::Flag, Ops,
                               array_lengthof(Ops));
       InFlag = SDValue(CNode, 1);
       // Update the chain.
       ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
     } else {
       InFlag =
-        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
+        SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Flag, N1, InFlag), 0);
     }
 
     // Copy the low half of the result, if it is needed.
@@ -1737,15 +1737,15 @@
         // Prevent use of AH in a REX instruction by referencing AX instead.
         // Shift it down 8 bits.
         Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
-                                        X86::AX, MVT::i16, InFlag);
+                                        X86::AX, EVT::i16, InFlag);
         InFlag = Result.getValue(2);
-        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
+        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, EVT::i16,
                                                Result,
-                                   CurDAG->getTargetConstant(8, MVT::i8)), 0);
+                                   CurDAG->getTargetConstant(8, EVT::i8)), 0);
         // Then truncate it down to i8.
-        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
+        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, EVT::i32);
         Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
-                                                 MVT::i8, Result, SRIdx), 0);
+                                                 EVT::i8, Result, SRIdx), 0);
       } else {
         Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                         HiReg, NVT, InFlag);
@@ -1777,18 +1777,18 @@
     if (!isSigned) {
       switch (NVT.getSimpleVT()) {
       default: llvm_unreachable("Unsupported VT!");
-      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
-      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
-      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
-      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
+      case EVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
+      case EVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
+      case EVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
+      case EVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
       }
     } else {
       switch (NVT.getSimpleVT()) {
       default: llvm_unreachable("Unsupported VT!");
-      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
-      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
-      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
-      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
+      case EVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
+      case EVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
+      case EVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
+      case EVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
       }
     }
 
@@ -1796,22 +1796,22 @@
     unsigned ClrOpcode, SExtOpcode;
     switch (NVT.getSimpleVT()) {
     default: llvm_unreachable("Unsupported VT!");
-    case MVT::i8:
+    case EVT::i8:
       LoReg = X86::AL;  HiReg = X86::AH;
       ClrOpcode  = 0;
       SExtOpcode = X86::CBW;
       break;
-    case MVT::i16:
+    case EVT::i16:
       LoReg = X86::AX;  HiReg = X86::DX;
       ClrOpcode  = X86::MOV16r0;
       SExtOpcode = X86::CWD;
       break;
-    case MVT::i32:
+    case EVT::i32:
       LoReg = X86::EAX; HiReg = X86::EDX;
       ClrOpcode  = X86::MOV32r0;
       SExtOpcode = X86::CDQ;
       break;
-    case MVT::i64:
+    case EVT::i64:
       LoReg = X86::RAX; HiReg = X86::RDX;
       ClrOpcode  = ~0U; // NOT USED.
       SExtOpcode = X86::CQO;
@@ -1823,21 +1823,21 @@
     bool signBitIsZero = CurDAG->SignBitIsZero(N0);
 
     SDValue InFlag;
-    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
+    if (NVT == EVT::i8 && (!isSigned || signBitIsZero)) {
       // Special case for div8, just use a move with zero extension to AX to
       // clear the upper 8 bits (AH).
       SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
       if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
         SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
         Move =
-          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
-                                        MVT::Other, Ops,
+          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, EVT::i16,
+                                        EVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
         Chain = Move.getValue(1);
         ReplaceUses(N0.getValue(1), Chain);
       } else {
         Move =
-          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
+          SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, EVT::i16, N0),0);
         Chain = CurDAG->getEntryNode();
       }
       Chain  = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
@@ -1849,24 +1849,24 @@
       if (isSigned && !signBitIsZero) {
         // Sign extend the low part into the high part.
         InFlag =
-          SDValue(CurDAG->getTargetNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
+          SDValue(CurDAG->getTargetNode(SExtOpcode, dl, EVT::Flag, InFlag),0);
       } else {
         // Zero out the high part, effectively zero extending the input.
         SDValue ClrNode;
 
-        if (NVT.getSimpleVT() == MVT::i64) {
-          ClrNode = SDValue(CurDAG->getTargetNode(X86::MOV32r0, dl, MVT::i32),
+        if (NVT.getSimpleVT() == EVT::i64) {
+          ClrNode = SDValue(CurDAG->getTargetNode(X86::MOV32r0, dl, EVT::i32),
                             0);
           // We just did a 32-bit clear, insert it into a 64-bit register to
           // clear the whole 64-bit reg.
           SDValue Undef =
             SDValue(CurDAG->getTargetNode(TargetInstrInfo::IMPLICIT_DEF,
-                                          dl, MVT::i64), 0);
+                                          dl, EVT::i64), 0);
           SDValue SubRegNo =
-            CurDAG->getTargetConstant(X86::SUBREG_32BIT, MVT::i32);
+            CurDAG->getTargetConstant(X86::SUBREG_32BIT, EVT::i32);
           ClrNode =
             SDValue(CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl,
-                                          MVT::i64, Undef, ClrNode, SubRegNo),
+                                          EVT::i64, Undef, ClrNode, SubRegNo),
                     0);
         } else {
           ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, dl, NVT), 0);
@@ -1881,14 +1881,14 @@
       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                         InFlag };
       SDNode *CNode =
-        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
+        CurDAG->getTargetNode(MOpc, dl, EVT::Other, EVT::Flag, Ops,
                               array_lengthof(Ops));
       InFlag = SDValue(CNode, 1);
       // Update the chain.
       ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
     } else {
       InFlag =
-        SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
+        SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Flag, N1, InFlag), 0);
     }
 
     // Copy the division (low) result, if it is needed.
@@ -1912,16 +1912,16 @@
         // Prevent use of AH in a REX instruction by referencing AX instead.
         // Shift it down 8 bits.
         Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
-                                        X86::AX, MVT::i16, InFlag);
+                                        X86::AX, EVT::i16, InFlag);
         InFlag = Result.getValue(2);
-        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
+        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, EVT::i16,
                                       Result,
-                                      CurDAG->getTargetConstant(8, MVT::i8)),
+                                      CurDAG->getTargetConstant(8, EVT::i8)),
                          0);
         // Then truncate it down to i8.
-        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
+        SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, EVT::i32);
         Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
-                                                 MVT::i8, Result, SRIdx), 0);
+                                                 EVT::i8, Result, SRIdx), 0);
       } else {
         Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                         HiReg, NVT, InFlag);
@@ -1981,7 +1981,7 @@
                                                   TLI.getPointerTy());
     SDValue Ops[] = { Tmp1, Tmp2, Chain };
     return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
-                                 MVT::Other, Ops,
+                                 EVT::Other, Ops,
                                  array_lengthof(Ops));
   }
   }
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 3fbb846..8551673 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -48,7 +48,7 @@
 DisableMMX("disable-mmx", cl::Hidden, cl::desc("Disable use of MMX"));
 
 // Forward declarations.
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                        SDValue V2);
 
 static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
@@ -79,7 +79,7 @@
   // Set up the TargetLowering object.
 
   // X86 is weird, it always uses i8 for shift amounts and setcc results.
-  setShiftAmountType(MVT::i8);
+  setShiftAmountType(EVT::i8);
   setBooleanContents(ZeroOrOneBooleanContent);
   setSchedulingPreference(SchedulingForRegPressure);
   setStackPointerRegisterToSaveRestore(X86StackPtr);
@@ -98,113 +98,113 @@
   }
 
   // Set up the register classes.
-  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
-  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
-  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
+  addRegisterClass(EVT::i8, X86::GR8RegisterClass);
+  addRegisterClass(EVT::i16, X86::GR16RegisterClass);
+  addRegisterClass(EVT::i32, X86::GR32RegisterClass);
   if (Subtarget->is64Bit())
-    addRegisterClass(MVT::i64, X86::GR64RegisterClass);
+    addRegisterClass(EVT::i64, X86::GR64RegisterClass);
 
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
 
   // We don't accept any truncstore of integer registers.
-  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
-  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
-  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
-  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
-  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
-  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);
+  setTruncStoreAction(EVT::i64, EVT::i32, Expand);
+  setTruncStoreAction(EVT::i64, EVT::i16, Expand);
+  setTruncStoreAction(EVT::i64, EVT::i8 , Expand);
+  setTruncStoreAction(EVT::i32, EVT::i16, Expand);
+  setTruncStoreAction(EVT::i32, EVT::i8 , Expand);
+  setTruncStoreAction(EVT::i16, EVT::i8,  Expand);
 
   // SETOEQ and SETUNE require checking two conditions.
-  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
-  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
-  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
+  setCondCodeAction(ISD::SETOEQ, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETOEQ, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETOEQ, EVT::f80, Expand);
+  setCondCodeAction(ISD::SETUNE, EVT::f32, Expand);
+  setCondCodeAction(ISD::SETUNE, EVT::f64, Expand);
+  setCondCodeAction(ISD::SETUNE, EVT::f80, Expand);
 
   // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
   // operation.
-  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
-  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
-  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);
+  setOperationAction(ISD::UINT_TO_FP       , EVT::i1   , Promote);
+  setOperationAction(ISD::UINT_TO_FP       , EVT::i8   , Promote);
+  setOperationAction(ISD::UINT_TO_FP       , EVT::i16  , Promote);
 
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
-    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
+    setOperationAction(ISD::UINT_TO_FP     , EVT::i32  , Promote);
+    setOperationAction(ISD::UINT_TO_FP     , EVT::i64  , Expand);
   } else if (!UseSoftFloat) {
     if (X86ScalarSSEf64) {
       // We have an impenetrably clever algorithm for ui64->double only.
-      setOperationAction(ISD::UINT_TO_FP   , MVT::i64  , Custom);
+      setOperationAction(ISD::UINT_TO_FP   , EVT::i64  , Custom);
     }
     // We have an algorithm for SSE2, and we turn this into a 64-bit
     // FILD for other targets.
-    setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Custom);
+    setOperationAction(ISD::UINT_TO_FP   , EVT::i32  , Custom);
   }
 
   // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
   // this operation.
-  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
-  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
+  setOperationAction(ISD::SINT_TO_FP       , EVT::i1   , Promote);
+  setOperationAction(ISD::SINT_TO_FP       , EVT::i8   , Promote);
 
   if (!UseSoftFloat) {
     // SSE has no i16 to fp conversion, only i32
     if (X86ScalarSSEf32) {
-      setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
+      setOperationAction(ISD::SINT_TO_FP     , EVT::i16  , Promote);
       // f32 and f64 cases are Legal, f80 case is not
-      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
+      setOperationAction(ISD::SINT_TO_FP     , EVT::i32  , Custom);
     } else {
-      setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
-      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
+      setOperationAction(ISD::SINT_TO_FP     , EVT::i16  , Custom);
+      setOperationAction(ISD::SINT_TO_FP     , EVT::i32  , Custom);
     }
   } else {
-    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
-    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Promote);
+    setOperationAction(ISD::SINT_TO_FP     , EVT::i16  , Promote);
+    setOperationAction(ISD::SINT_TO_FP     , EVT::i32  , Promote);
   }
 
   // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
   // are Legal, f80 is custom lowered.
-  setOperationAction(ISD::FP_TO_SINT     , MVT::i64  , Custom);
-  setOperationAction(ISD::SINT_TO_FP     , MVT::i64  , Custom);
+  setOperationAction(ISD::FP_TO_SINT     , EVT::i64  , Custom);
+  setOperationAction(ISD::SINT_TO_FP     , EVT::i64  , Custom);
 
   // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
   // this operation.
-  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
-  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);
+  setOperationAction(ISD::FP_TO_SINT       , EVT::i1   , Promote);
+  setOperationAction(ISD::FP_TO_SINT       , EVT::i8   , Promote);
 
   if (X86ScalarSSEf32) {
-    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
+    setOperationAction(ISD::FP_TO_SINT     , EVT::i16  , Promote);
     // f32 and f64 cases are Legal, f80 case is not
-    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
+    setOperationAction(ISD::FP_TO_SINT     , EVT::i32  , Custom);
   } else {
-    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
-    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
+    setOperationAction(ISD::FP_TO_SINT     , EVT::i16  , Custom);
+    setOperationAction(ISD::FP_TO_SINT     , EVT::i32  , Custom);
   }
 
   // Handle FP_TO_UINT by promoting the destination to a larger signed
   // conversion.
-  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
-  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
-  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);
+  setOperationAction(ISD::FP_TO_UINT       , EVT::i1   , Promote);
+  setOperationAction(ISD::FP_TO_UINT       , EVT::i8   , Promote);
+  setOperationAction(ISD::FP_TO_UINT       , EVT::i16  , Promote);
 
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
-    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
+    setOperationAction(ISD::FP_TO_UINT     , EVT::i64  , Expand);
+    setOperationAction(ISD::FP_TO_UINT     , EVT::i32  , Promote);
   } else if (!UseSoftFloat) {
     if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
       // Expand FP_TO_UINT into a select.
       // FIXME: We would like to use a Custom expander here eventually to do
       // the optimal thing for SSE vs. the default expansion in the legalizer.
-      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
+      setOperationAction(ISD::FP_TO_UINT   , EVT::i32  , Expand);
     else
       // With SSE3 we can use fisttpll to convert to a signed i64; without
       // SSE, we're stuck with a fistpll.
-      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Custom);
+      setOperationAction(ISD::FP_TO_UINT   , EVT::i32  , Custom);
   }
 
   // TODO: when we have SSE, these could be more efficient, by using movd/movq.
   if (!X86ScalarSSEf64) {
-    setOperationAction(ISD::BIT_CONVERT      , MVT::f32  , Expand);
-    setOperationAction(ISD::BIT_CONVERT      , MVT::i32  , Expand);
+    setOperationAction(ISD::BIT_CONVERT      , EVT::f32  , Expand);
+    setOperationAction(ISD::BIT_CONVERT      , EVT::i32  , Expand);
   }
 
   // Scalar integer divide and remainder are lowered to use operations that
@@ -217,150 +217,150 @@
   // (low) operations are left as Legal, as there are single-result
   // instructions for this in x86. Using the two-result multiply instructions
   // when both high and low results are needed must be arranged by dagcombine.
-  setOperationAction(ISD::MULHS           , MVT::i8    , Expand);
-  setOperationAction(ISD::MULHU           , MVT::i8    , Expand);
-  setOperationAction(ISD::SDIV            , MVT::i8    , Expand);
-  setOperationAction(ISD::UDIV            , MVT::i8    , Expand);
-  setOperationAction(ISD::SREM            , MVT::i8    , Expand);
-  setOperationAction(ISD::UREM            , MVT::i8    , Expand);
-  setOperationAction(ISD::MULHS           , MVT::i16   , Expand);
-  setOperationAction(ISD::MULHU           , MVT::i16   , Expand);
-  setOperationAction(ISD::SDIV            , MVT::i16   , Expand);
-  setOperationAction(ISD::UDIV            , MVT::i16   , Expand);
-  setOperationAction(ISD::SREM            , MVT::i16   , Expand);
-  setOperationAction(ISD::UREM            , MVT::i16   , Expand);
-  setOperationAction(ISD::MULHS           , MVT::i32   , Expand);
-  setOperationAction(ISD::MULHU           , MVT::i32   , Expand);
-  setOperationAction(ISD::SDIV            , MVT::i32   , Expand);
-  setOperationAction(ISD::UDIV            , MVT::i32   , Expand);
-  setOperationAction(ISD::SREM            , MVT::i32   , Expand);
-  setOperationAction(ISD::UREM            , MVT::i32   , Expand);
-  setOperationAction(ISD::MULHS           , MVT::i64   , Expand);
-  setOperationAction(ISD::MULHU           , MVT::i64   , Expand);
-  setOperationAction(ISD::SDIV            , MVT::i64   , Expand);
-  setOperationAction(ISD::UDIV            , MVT::i64   , Expand);
-  setOperationAction(ISD::SREM            , MVT::i64   , Expand);
-  setOperationAction(ISD::UREM            , MVT::i64   , Expand);
+  setOperationAction(ISD::MULHS           , EVT::i8    , Expand);
+  setOperationAction(ISD::MULHU           , EVT::i8    , Expand);
+  setOperationAction(ISD::SDIV            , EVT::i8    , Expand);
+  setOperationAction(ISD::UDIV            , EVT::i8    , Expand);
+  setOperationAction(ISD::SREM            , EVT::i8    , Expand);
+  setOperationAction(ISD::UREM            , EVT::i8    , Expand);
+  setOperationAction(ISD::MULHS           , EVT::i16   , Expand);
+  setOperationAction(ISD::MULHU           , EVT::i16   , Expand);
+  setOperationAction(ISD::SDIV            , EVT::i16   , Expand);
+  setOperationAction(ISD::UDIV            , EVT::i16   , Expand);
+  setOperationAction(ISD::SREM            , EVT::i16   , Expand);
+  setOperationAction(ISD::UREM            , EVT::i16   , Expand);
+  setOperationAction(ISD::MULHS           , EVT::i32   , Expand);
+  setOperationAction(ISD::MULHU           , EVT::i32   , Expand);
+  setOperationAction(ISD::SDIV            , EVT::i32   , Expand);
+  setOperationAction(ISD::UDIV            , EVT::i32   , Expand);
+  setOperationAction(ISD::SREM            , EVT::i32   , Expand);
+  setOperationAction(ISD::UREM            , EVT::i32   , Expand);
+  setOperationAction(ISD::MULHS           , EVT::i64   , Expand);
+  setOperationAction(ISD::MULHU           , EVT::i64   , Expand);
+  setOperationAction(ISD::SDIV            , EVT::i64   , Expand);
+  setOperationAction(ISD::UDIV            , EVT::i64   , Expand);
+  setOperationAction(ISD::SREM            , EVT::i64   , Expand);
+  setOperationAction(ISD::UREM            , EVT::i64   , Expand);
 
-  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
-  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
-  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
-  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
+  setOperationAction(ISD::BR_JT            , EVT::Other, Expand);
+  setOperationAction(ISD::BRCOND           , EVT::Other, Custom);
+  setOperationAction(ISD::BR_CC            , EVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC        , EVT::Other, Expand);
   if (Subtarget->is64Bit())
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
-  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
-  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
-  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
-  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
-  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);
+    setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i32, Legal);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i16  , Legal);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i8   , Legal);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1   , Expand);
+  setOperationAction(ISD::FP_ROUND_INREG   , EVT::f32  , Expand);
+  setOperationAction(ISD::FREM             , EVT::f32  , Expand);
+  setOperationAction(ISD::FREM             , EVT::f64  , Expand);
+  setOperationAction(ISD::FREM             , EVT::f80  , Expand);
+  setOperationAction(ISD::FLT_ROUNDS_      , EVT::i32  , Custom);
 
-  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
-  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
-  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
-  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
-  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
-  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
-  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
-  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
-  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
+  setOperationAction(ISD::CTPOP            , EVT::i8   , Expand);
+  setOperationAction(ISD::CTTZ             , EVT::i8   , Custom);
+  setOperationAction(ISD::CTLZ             , EVT::i8   , Custom);
+  setOperationAction(ISD::CTPOP            , EVT::i16  , Expand);
+  setOperationAction(ISD::CTTZ             , EVT::i16  , Custom);
+  setOperationAction(ISD::CTLZ             , EVT::i16  , Custom);
+  setOperationAction(ISD::CTPOP            , EVT::i32  , Expand);
+  setOperationAction(ISD::CTTZ             , EVT::i32  , Custom);
+  setOperationAction(ISD::CTLZ             , EVT::i32  , Custom);
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
-    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
-    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
+    setOperationAction(ISD::CTPOP          , EVT::i64  , Expand);
+    setOperationAction(ISD::CTTZ           , EVT::i64  , Custom);
+    setOperationAction(ISD::CTLZ           , EVT::i64  , Custom);
   }
 
-  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
-  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);
+  setOperationAction(ISD::READCYCLECOUNTER , EVT::i64  , Custom);
+  setOperationAction(ISD::BSWAP            , EVT::i16  , Expand);
 
   // These should be promoted to a larger select which is supported.
-  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
-  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
+  setOperationAction(ISD::SELECT           , EVT::i1   , Promote);
+  setOperationAction(ISD::SELECT           , EVT::i8   , Promote);
   // X86 wants to expand cmov itself.
-  setOperationAction(ISD::SELECT          , MVT::i16  , Custom);
-  setOperationAction(ISD::SELECT          , MVT::i32  , Custom);
-  setOperationAction(ISD::SELECT          , MVT::f32  , Custom);
-  setOperationAction(ISD::SELECT          , MVT::f64  , Custom);
-  setOperationAction(ISD::SELECT          , MVT::f80  , Custom);
-  setOperationAction(ISD::SETCC           , MVT::i8   , Custom);
-  setOperationAction(ISD::SETCC           , MVT::i16  , Custom);
-  setOperationAction(ISD::SETCC           , MVT::i32  , Custom);
-  setOperationAction(ISD::SETCC           , MVT::f32  , Custom);
-  setOperationAction(ISD::SETCC           , MVT::f64  , Custom);
-  setOperationAction(ISD::SETCC           , MVT::f80  , Custom);
+  setOperationAction(ISD::SELECT          , EVT::i16  , Custom);
+  setOperationAction(ISD::SELECT          , EVT::i32  , Custom);
+  setOperationAction(ISD::SELECT          , EVT::f32  , Custom);
+  setOperationAction(ISD::SELECT          , EVT::f64  , Custom);
+  setOperationAction(ISD::SELECT          , EVT::f80  , Custom);
+  setOperationAction(ISD::SETCC           , EVT::i8   , Custom);
+  setOperationAction(ISD::SETCC           , EVT::i16  , Custom);
+  setOperationAction(ISD::SETCC           , EVT::i32  , Custom);
+  setOperationAction(ISD::SETCC           , EVT::f32  , Custom);
+  setOperationAction(ISD::SETCC           , EVT::f64  , Custom);
+  setOperationAction(ISD::SETCC           , EVT::f80  , Custom);
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::SELECT        , MVT::i64  , Custom);
-    setOperationAction(ISD::SETCC         , MVT::i64  , Custom);
+    setOperationAction(ISD::SELECT        , EVT::i64  , Custom);
+    setOperationAction(ISD::SETCC         , EVT::i64  , Custom);
   }
-  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
+  setOperationAction(ISD::EH_RETURN       , EVT::Other, Custom);
 
   // Darwin ABI issue.
-  setOperationAction(ISD::ConstantPool    , MVT::i32  , Custom);
-  setOperationAction(ISD::JumpTable       , MVT::i32  , Custom);
-  setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i32  , Custom);
+  setOperationAction(ISD::ConstantPool    , EVT::i32  , Custom);
+  setOperationAction(ISD::JumpTable       , EVT::i32  , Custom);
+  setOperationAction(ISD::GlobalAddress   , EVT::i32  , Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i32  , Custom);
   if (Subtarget->is64Bit())
-    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
-  setOperationAction(ISD::ExternalSymbol  , MVT::i32  , Custom);
+    setOperationAction(ISD::GlobalTLSAddress, EVT::i64, Custom);
+  setOperationAction(ISD::ExternalSymbol  , EVT::i32  , Custom);
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::ConstantPool  , MVT::i64  , Custom);
-    setOperationAction(ISD::JumpTable     , MVT::i64  , Custom);
-    setOperationAction(ISD::GlobalAddress , MVT::i64  , Custom);
-    setOperationAction(ISD::ExternalSymbol, MVT::i64  , Custom);
+    setOperationAction(ISD::ConstantPool  , EVT::i64  , Custom);
+    setOperationAction(ISD::JumpTable     , EVT::i64  , Custom);
+    setOperationAction(ISD::GlobalAddress , EVT::i64  , Custom);
+    setOperationAction(ISD::ExternalSymbol, EVT::i64  , Custom);
   }
   // 64-bit addm sub, shl, sra, srl (iff 32-bit x86)
-  setOperationAction(ISD::SHL_PARTS       , MVT::i32  , Custom);
-  setOperationAction(ISD::SRA_PARTS       , MVT::i32  , Custom);
-  setOperationAction(ISD::SRL_PARTS       , MVT::i32  , Custom);
+  setOperationAction(ISD::SHL_PARTS       , EVT::i32  , Custom);
+  setOperationAction(ISD::SRA_PARTS       , EVT::i32  , Custom);
+  setOperationAction(ISD::SRL_PARTS       , EVT::i32  , Custom);
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::SHL_PARTS     , MVT::i64  , Custom);
-    setOperationAction(ISD::SRA_PARTS     , MVT::i64  , Custom);
-    setOperationAction(ISD::SRL_PARTS     , MVT::i64  , Custom);
+    setOperationAction(ISD::SHL_PARTS     , EVT::i64  , Custom);
+    setOperationAction(ISD::SRA_PARTS     , EVT::i64  , Custom);
+    setOperationAction(ISD::SRL_PARTS     , EVT::i64  , Custom);
   }
 
   if (Subtarget->hasSSE1())
-    setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);
+    setOperationAction(ISD::PREFETCH      , EVT::Other, Legal);
 
   if (!Subtarget->hasSSE2())
-    setOperationAction(ISD::MEMBARRIER    , MVT::Other, Expand);
+    setOperationAction(ISD::MEMBARRIER    , EVT::Other, Expand);
 
   // Expand certain atomics
-  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i8, Custom);
-  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i16, Custom);
-  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
-  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, EVT::i8, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, EVT::i16, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, EVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, EVT::i64, Custom);
 
-  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i8, Custom);
-  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i16, Custom);
-  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
-  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, EVT::i8, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, EVT::i16, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, EVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, EVT::i64, Custom);
 
   if (!Subtarget->is64Bit()) {
-    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
-    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
-    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);
-    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i64, Custom);
-    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom);
-    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom);
-    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_LOAD_ADD, EVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_LOAD_SUB, EVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_LOAD_AND, EVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_LOAD_OR, EVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_LOAD_XOR, EVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_LOAD_NAND, EVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_SWAP, EVT::i64, Custom);
   }
 
   // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
   // FIXME - use subtarget debug flags
   if (!Subtarget->isTargetDarwin() &&
       !Subtarget->isTargetELF() &&
       !Subtarget->isTargetCygMing()) {
-    setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
-    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
+    setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
+    setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
   }
 
-  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
-  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
-  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
-  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
+  setOperationAction(ISD::EXCEPTIONADDR, EVT::i64, Expand);
+  setOperationAction(ISD::EHSELECTION,   EVT::i64, Expand);
+  setOperationAction(ISD::EXCEPTIONADDR, EVT::i32, Expand);
+  setOperationAction(ISD::EHSELECTION,   EVT::i32, Expand);
   if (Subtarget->is64Bit()) {
     setExceptionPointerRegister(X86::RAX);
     setExceptionSelectorRegister(X86::RDX);
@@ -368,56 +368,56 @@
     setExceptionPointerRegister(X86::EAX);
     setExceptionSelectorRegister(X86::EDX);
   }
-  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
-  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
+  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, EVT::i32, Custom);
+  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, EVT::i64, Custom);
 
-  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+  setOperationAction(ISD::TRAMPOLINE, EVT::Other, Custom);
 
-  setOperationAction(ISD::TRAP, MVT::Other, Legal);
+  setOperationAction(ISD::TRAP, EVT::Other, Legal);
 
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
-  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
-  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
+  setOperationAction(ISD::VASTART           , EVT::Other, Custom);
+  setOperationAction(ISD::VAEND             , EVT::Other, Expand);
   if (Subtarget->is64Bit()) {
-    setOperationAction(ISD::VAARG           , MVT::Other, Custom);
-    setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
+    setOperationAction(ISD::VAARG           , EVT::Other, Custom);
+    setOperationAction(ISD::VACOPY          , EVT::Other, Custom);
   } else {
-    setOperationAction(ISD::VAARG           , MVT::Other, Expand);
-    setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
+    setOperationAction(ISD::VAARG           , EVT::Other, Expand);
+    setOperationAction(ISD::VACOPY          , EVT::Other, Expand);
   }
 
-  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
+  setOperationAction(ISD::STACKSAVE,          EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE,       EVT::Other, Expand);
   if (Subtarget->is64Bit())
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64, Expand);
   if (Subtarget->isTargetCygMing())
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Custom);
   else
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+    setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Expand);
 
   if (!UseSoftFloat && X86ScalarSSEf64) {
     // f32 and f64 use SSE.
     // Set up the FP register classes.
-    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
-    addRegisterClass(MVT::f64, X86::FR64RegisterClass);
+    addRegisterClass(EVT::f32, X86::FR32RegisterClass);
+    addRegisterClass(EVT::f64, X86::FR64RegisterClass);
 
     // Use ANDPD to simulate FABS.
-    setOperationAction(ISD::FABS , MVT::f64, Custom);
-    setOperationAction(ISD::FABS , MVT::f32, Custom);
+    setOperationAction(ISD::FABS , EVT::f64, Custom);
+    setOperationAction(ISD::FABS , EVT::f32, Custom);
 
     // Use XORP to simulate FNEG.
-    setOperationAction(ISD::FNEG , MVT::f64, Custom);
-    setOperationAction(ISD::FNEG , MVT::f32, Custom);
+    setOperationAction(ISD::FNEG , EVT::f64, Custom);
+    setOperationAction(ISD::FNEG , EVT::f32, Custom);
 
     // Use ANDPD and ORPD to simulate FCOPYSIGN.
-    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f64, Custom);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f32, Custom);
 
     // We don't support sin/cos/fmod
-    setOperationAction(ISD::FSIN , MVT::f64, Expand);
-    setOperationAction(ISD::FCOS , MVT::f64, Expand);
-    setOperationAction(ISD::FSIN , MVT::f32, Expand);
-    setOperationAction(ISD::FCOS , MVT::f32, Expand);
+    setOperationAction(ISD::FSIN , EVT::f64, Expand);
+    setOperationAction(ISD::FCOS , EVT::f64, Expand);
+    setOperationAction(ISD::FSIN , EVT::f32, Expand);
+    setOperationAction(ISD::FCOS , EVT::f32, Expand);
 
     // Expand FP immediates into loads from the stack, except for the special
     // cases we handle.
@@ -426,24 +426,24 @@
   } else if (!UseSoftFloat && X86ScalarSSEf32) {
     // Use SSE for f32, x87 for f64.
     // Set up the FP register classes.
-    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
-    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+    addRegisterClass(EVT::f32, X86::FR32RegisterClass);
+    addRegisterClass(EVT::f64, X86::RFP64RegisterClass);
 
     // Use ANDPS to simulate FABS.
-    setOperationAction(ISD::FABS , MVT::f32, Custom);
+    setOperationAction(ISD::FABS , EVT::f32, Custom);
 
     // Use XORP to simulate FNEG.
-    setOperationAction(ISD::FNEG , MVT::f32, Custom);
+    setOperationAction(ISD::FNEG , EVT::f32, Custom);
 
-    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
+    setOperationAction(ISD::UNDEF,     EVT::f64, Expand);
 
     // Use ANDPS and ORPS to simulate FCOPYSIGN.
-    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f32, Custom);
 
     // We don't support sin/cos/fmod
-    setOperationAction(ISD::FSIN , MVT::f32, Expand);
-    setOperationAction(ISD::FCOS , MVT::f32, Expand);
+    setOperationAction(ISD::FSIN , EVT::f32, Expand);
+    setOperationAction(ISD::FCOS , EVT::f32, Expand);
 
     // Special cases we handle for FP constants.
     addLegalFPImmediate(APFloat(+0.0f)); // xorps
@@ -453,23 +453,23 @@
     addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
 
     if (!UnsafeFPMath) {
-      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
-      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
+      setOperationAction(ISD::FSIN           , EVT::f64  , Expand);
+      setOperationAction(ISD::FCOS           , EVT::f64  , Expand);
     }
   } else if (!UseSoftFloat) {
     // f32 and f64 in x87.
     // Set up the FP register classes.
-    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
-    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
+    addRegisterClass(EVT::f64, X86::RFP64RegisterClass);
+    addRegisterClass(EVT::f32, X86::RFP32RegisterClass);
 
-    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
-    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+    setOperationAction(ISD::UNDEF,     EVT::f64, Expand);
+    setOperationAction(ISD::UNDEF,     EVT::f32, Expand);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f32, Expand);
 
     if (!UnsafeFPMath) {
-      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
-      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
+      setOperationAction(ISD::FSIN           , EVT::f64  , Expand);
+      setOperationAction(ISD::FCOS           , EVT::f64  , Expand);
     }
     addLegalFPImmediate(APFloat(+0.0)); // FLD0
     addLegalFPImmediate(APFloat(+1.0)); // FLD1
@@ -483,9 +483,9 @@
 
   // Long double always uses X87.
   if (!UseSoftFloat) {
-    addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
-    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
+    addRegisterClass(EVT::f80, X86::RFP80RegisterClass);
+    setOperationAction(ISD::UNDEF,     EVT::f80, Expand);
+    setOperationAction(ISD::FCOPYSIGN, EVT::f80, Expand);
     {
       bool ignored;
       APFloat TmpFlt(+0.0);
@@ -503,220 +503,220 @@
     }
 
     if (!UnsafeFPMath) {
-      setOperationAction(ISD::FSIN           , MVT::f80  , Expand);
-      setOperationAction(ISD::FCOS           , MVT::f80  , Expand);
+      setOperationAction(ISD::FSIN           , EVT::f80  , Expand);
+      setOperationAction(ISD::FCOS           , EVT::f80  , Expand);
     }
   }
 
   // Always use a library call for pow.
-  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
-  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
-  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
+  setOperationAction(ISD::FPOW             , EVT::f32  , Expand);
+  setOperationAction(ISD::FPOW             , EVT::f64  , Expand);
+  setOperationAction(ISD::FPOW             , EVT::f80  , Expand);
 
-  setOperationAction(ISD::FLOG, MVT::f80, Expand);
-  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
-  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
-  setOperationAction(ISD::FEXP, MVT::f80, Expand);
-  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
+  setOperationAction(ISD::FLOG, EVT::f80, Expand);
+  setOperationAction(ISD::FLOG2, EVT::f80, Expand);
+  setOperationAction(ISD::FLOG10, EVT::f80, Expand);
+  setOperationAction(ISD::FEXP, EVT::f80, Expand);
+  setOperationAction(ISD::FEXP2, EVT::f80, Expand);
 
   // First set operation action for all vector types to either promote
   // (for widening) or expand (for scalarization). Then we will selectively
   // turn on ones that can be effectively codegen'd.
-  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
-       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
-    setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand);
-    setOperationAction(ISD::EXTRACT_SUBVECTOR,(MVT::SimpleValueType)VT,Expand);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FP_TO_UINT, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::FP_TO_SINT, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::UINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::SINT_TO_FP, (MVT::SimpleValueType)VT, Expand);
+  for (unsigned VT = (unsigned)EVT::FIRST_VECTOR_VALUETYPE;
+       VT <= (unsigned)EVT::LAST_VECTOR_VALUETYPE; ++VT) {
+    setOperationAction(ISD::ADD , (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SUB , (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FADD, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FNEG, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FSUB, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::MUL , (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FMUL, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SDIV, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UDIV, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FDIV, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SREM, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UREM, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::LOAD, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::VECTOR_SHUFFLE, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT,(EVT::SimpleValueType)VT,Expand);
+    setOperationAction(ISD::EXTRACT_SUBVECTOR,(EVT::SimpleValueType)VT,Expand);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,(EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FABS, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FSIN, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FCOS, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FREM, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FPOWI, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FSQRT, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FCOPYSIGN, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SMUL_LOHI, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UMUL_LOHI, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SDIVREM, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UDIVREM, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FPOW, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::CTPOP, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::CTTZ, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::CTLZ, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SHL, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SRA, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SRL, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::ROTL, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::ROTR, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::BSWAP, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::VSETCC, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FLOG, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FLOG2, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FLOG10, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FEXP, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FEXP2, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FP_TO_UINT, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FP_TO_SINT, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::UINT_TO_FP, (EVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::SINT_TO_FP, (EVT::SimpleValueType)VT, Expand);
   }
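
The loop above establishes a conservative baseline: every vector value type starts with every operation marked Expand, and later subtarget-gated blocks flip individual (opcode, type) pairs back to Legal, Custom, or Promote. Below is a minimal standalone sketch of that pattern, using stand-in enums and a plain table rather than the real TargetLowering interface; names such as OpActions and NUM_OPCODES are illustrative only.

#include <cstdio>

namespace sketch {
// Stand-ins for EVT::SimpleValueType, ISD opcodes, and the legalize actions.
enum SimpleValueType { v16i8, v8i16, v4i32, v2i64, v4f32, v2f64,
                       FIRST_VECTOR_VALUETYPE = v16i8,
                       LAST_VECTOR_VALUETYPE  = v2f64 };
enum Opcode { ADD, MUL, NUM_OPCODES };
enum LegalizeAction { Legal, Promote, Expand, Custom };

// One action per (opcode, type) pair, analogous to TargetLowering's table.
LegalizeAction OpActions[NUM_OPCODES][LAST_VECTOR_VALUETYPE + 1];

void setOperationAction(Opcode Op, SimpleValueType VT, LegalizeAction A) {
  OpActions[Op][VT] = A;
}
} // namespace sketch

int main() {
  using namespace sketch;
  // Default every vector type to Expand, exactly as the loop in the patch does.
  for (unsigned VT = FIRST_VECTOR_VALUETYPE; VT <= LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ADD, (SimpleValueType)VT, Expand);
    setOperationAction(MUL, (SimpleValueType)VT, Expand);
  }
  // A later, feature-gated block selectively re-enables what the ISA supports.
  setOperationAction(ADD, v4i32, Legal);
  std::printf("ADD v4i32 action = %d (0 == Legal)\n", OpActions[ADD][v4i32]);
  return 0;
}
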
 
  // FIXME: To prevent SSE instructions from being expanded to MMX ones
   // with -msoft-float, disable use of MMX as well.
   if (!UseSoftFloat && !DisableMMX && Subtarget->hasMMX()) {
-    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
-    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
-    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
-    addRegisterClass(MVT::v2f32, X86::VR64RegisterClass);
-    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);
+    addRegisterClass(EVT::v8i8,  X86::VR64RegisterClass);
+    addRegisterClass(EVT::v4i16, X86::VR64RegisterClass);
+    addRegisterClass(EVT::v2i32, X86::VR64RegisterClass);
+    addRegisterClass(EVT::v2f32, X86::VR64RegisterClass);
+    addRegisterClass(EVT::v1i64, X86::VR64RegisterClass);
 
-    setOperationAction(ISD::ADD,                MVT::v8i8,  Legal);
-    setOperationAction(ISD::ADD,                MVT::v4i16, Legal);
-    setOperationAction(ISD::ADD,                MVT::v2i32, Legal);
-    setOperationAction(ISD::ADD,                MVT::v1i64, Legal);
+    setOperationAction(ISD::ADD,                EVT::v8i8,  Legal);
+    setOperationAction(ISD::ADD,                EVT::v4i16, Legal);
+    setOperationAction(ISD::ADD,                EVT::v2i32, Legal);
+    setOperationAction(ISD::ADD,                EVT::v1i64, Legal);
 
-    setOperationAction(ISD::SUB,                MVT::v8i8,  Legal);
-    setOperationAction(ISD::SUB,                MVT::v4i16, Legal);
-    setOperationAction(ISD::SUB,                MVT::v2i32, Legal);
-    setOperationAction(ISD::SUB,                MVT::v1i64, Legal);
+    setOperationAction(ISD::SUB,                EVT::v8i8,  Legal);
+    setOperationAction(ISD::SUB,                EVT::v4i16, Legal);
+    setOperationAction(ISD::SUB,                EVT::v2i32, Legal);
+    setOperationAction(ISD::SUB,                EVT::v1i64, Legal);
 
-    setOperationAction(ISD::MULHS,              MVT::v4i16, Legal);
-    setOperationAction(ISD::MUL,                MVT::v4i16, Legal);
+    setOperationAction(ISD::MULHS,              EVT::v4i16, Legal);
+    setOperationAction(ISD::MUL,                EVT::v4i16, Legal);
 
-    setOperationAction(ISD::AND,                MVT::v8i8,  Promote);
-    AddPromotedToType (ISD::AND,                MVT::v8i8,  MVT::v1i64);
-    setOperationAction(ISD::AND,                MVT::v4i16, Promote);
-    AddPromotedToType (ISD::AND,                MVT::v4i16, MVT::v1i64);
-    setOperationAction(ISD::AND,                MVT::v2i32, Promote);
-    AddPromotedToType (ISD::AND,                MVT::v2i32, MVT::v1i64);
-    setOperationAction(ISD::AND,                MVT::v1i64, Legal);
+    setOperationAction(ISD::AND,                EVT::v8i8,  Promote);
+    AddPromotedToType (ISD::AND,                EVT::v8i8,  EVT::v1i64);
+    setOperationAction(ISD::AND,                EVT::v4i16, Promote);
+    AddPromotedToType (ISD::AND,                EVT::v4i16, EVT::v1i64);
+    setOperationAction(ISD::AND,                EVT::v2i32, Promote);
+    AddPromotedToType (ISD::AND,                EVT::v2i32, EVT::v1i64);
+    setOperationAction(ISD::AND,                EVT::v1i64, Legal);
 
-    setOperationAction(ISD::OR,                 MVT::v8i8,  Promote);
-    AddPromotedToType (ISD::OR,                 MVT::v8i8,  MVT::v1i64);
-    setOperationAction(ISD::OR,                 MVT::v4i16, Promote);
-    AddPromotedToType (ISD::OR,                 MVT::v4i16, MVT::v1i64);
-    setOperationAction(ISD::OR,                 MVT::v2i32, Promote);
-    AddPromotedToType (ISD::OR,                 MVT::v2i32, MVT::v1i64);
-    setOperationAction(ISD::OR,                 MVT::v1i64, Legal);
+    setOperationAction(ISD::OR,                 EVT::v8i8,  Promote);
+    AddPromotedToType (ISD::OR,                 EVT::v8i8,  EVT::v1i64);
+    setOperationAction(ISD::OR,                 EVT::v4i16, Promote);
+    AddPromotedToType (ISD::OR,                 EVT::v4i16, EVT::v1i64);
+    setOperationAction(ISD::OR,                 EVT::v2i32, Promote);
+    AddPromotedToType (ISD::OR,                 EVT::v2i32, EVT::v1i64);
+    setOperationAction(ISD::OR,                 EVT::v1i64, Legal);
 
-    setOperationAction(ISD::XOR,                MVT::v8i8,  Promote);
-    AddPromotedToType (ISD::XOR,                MVT::v8i8,  MVT::v1i64);
-    setOperationAction(ISD::XOR,                MVT::v4i16, Promote);
-    AddPromotedToType (ISD::XOR,                MVT::v4i16, MVT::v1i64);
-    setOperationAction(ISD::XOR,                MVT::v2i32, Promote);
-    AddPromotedToType (ISD::XOR,                MVT::v2i32, MVT::v1i64);
-    setOperationAction(ISD::XOR,                MVT::v1i64, Legal);
+    setOperationAction(ISD::XOR,                EVT::v8i8,  Promote);
+    AddPromotedToType (ISD::XOR,                EVT::v8i8,  EVT::v1i64);
+    setOperationAction(ISD::XOR,                EVT::v4i16, Promote);
+    AddPromotedToType (ISD::XOR,                EVT::v4i16, EVT::v1i64);
+    setOperationAction(ISD::XOR,                EVT::v2i32, Promote);
+    AddPromotedToType (ISD::XOR,                EVT::v2i32, EVT::v1i64);
+    setOperationAction(ISD::XOR,                EVT::v1i64, Legal);
 
-    setOperationAction(ISD::LOAD,               MVT::v8i8,  Promote);
-    AddPromotedToType (ISD::LOAD,               MVT::v8i8,  MVT::v1i64);
-    setOperationAction(ISD::LOAD,               MVT::v4i16, Promote);
-    AddPromotedToType (ISD::LOAD,               MVT::v4i16, MVT::v1i64);
-    setOperationAction(ISD::LOAD,               MVT::v2i32, Promote);
-    AddPromotedToType (ISD::LOAD,               MVT::v2i32, MVT::v1i64);
-    setOperationAction(ISD::LOAD,               MVT::v2f32, Promote);
-    AddPromotedToType (ISD::LOAD,               MVT::v2f32, MVT::v1i64);
-    setOperationAction(ISD::LOAD,               MVT::v1i64, Legal);
+    setOperationAction(ISD::LOAD,               EVT::v8i8,  Promote);
+    AddPromotedToType (ISD::LOAD,               EVT::v8i8,  EVT::v1i64);
+    setOperationAction(ISD::LOAD,               EVT::v4i16, Promote);
+    AddPromotedToType (ISD::LOAD,               EVT::v4i16, EVT::v1i64);
+    setOperationAction(ISD::LOAD,               EVT::v2i32, Promote);
+    AddPromotedToType (ISD::LOAD,               EVT::v2i32, EVT::v1i64);
+    setOperationAction(ISD::LOAD,               EVT::v2f32, Promote);
+    AddPromotedToType (ISD::LOAD,               EVT::v2f32, EVT::v1i64);
+    setOperationAction(ISD::LOAD,               EVT::v1i64, Legal);
 
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v8i8,  Custom);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4i16, Custom);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i32, Custom);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f32, Custom);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v8i8,  Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v4i16, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v2i32, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v2f32, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v1i64, Custom);
 
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v8i8,  Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4i16, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i32, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v1i64, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v8i8,  Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v4i16, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v2i32, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v1i64, Custom);
 
-    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v2f32, Custom);
-    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i8,  Custom);
-    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v4i16, Custom);
-    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v1i64, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v2f32, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v8i8,  Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v4i16, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v1i64, Custom);
 
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i16, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4i16, Custom);
 
-    setTruncStoreAction(MVT::v8i16,             MVT::v8i8, Expand);
-    setOperationAction(ISD::TRUNCATE,           MVT::v8i8, Expand);
-    setOperationAction(ISD::SELECT,             MVT::v8i8, Promote);
-    setOperationAction(ISD::SELECT,             MVT::v4i16, Promote);
-    setOperationAction(ISD::SELECT,             MVT::v2i32, Promote);
-    setOperationAction(ISD::SELECT,             MVT::v1i64, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v8i8, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v4i16, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v2i32, Custom);
+    setTruncStoreAction(EVT::v8i16,             EVT::v8i8, Expand);
+    setOperationAction(ISD::TRUNCATE,           EVT::v8i8, Expand);
+    setOperationAction(ISD::SELECT,             EVT::v8i8, Promote);
+    setOperationAction(ISD::SELECT,             EVT::v4i16, Promote);
+    setOperationAction(ISD::SELECT,             EVT::v2i32, Promote);
+    setOperationAction(ISD::SELECT,             EVT::v1i64, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v8i8, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v4i16, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v2i32, Custom);
   }
 
   if (!UseSoftFloat && Subtarget->hasSSE1()) {
-    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
+    addRegisterClass(EVT::v4f32, X86::VR128RegisterClass);
 
-    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
-    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
-    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
-    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
-    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
-    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
-    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
-    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v4f32, Custom);
+    setOperationAction(ISD::FADD,               EVT::v4f32, Legal);
+    setOperationAction(ISD::FSUB,               EVT::v4f32, Legal);
+    setOperationAction(ISD::FMUL,               EVT::v4f32, Legal);
+    setOperationAction(ISD::FDIV,               EVT::v4f32, Legal);
+    setOperationAction(ISD::FSQRT,              EVT::v4f32, Legal);
+    setOperationAction(ISD::FNEG,               EVT::v4f32, Custom);
+    setOperationAction(ISD::LOAD,               EVT::v4f32, Legal);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v4f32, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v4f32, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v4f32, Custom);
+    setOperationAction(ISD::SELECT,             EVT::v4f32, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v4f32, Custom);
   }
 
   if (!UseSoftFloat && Subtarget->hasSSE2()) {
-    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
+    addRegisterClass(EVT::v2f64, X86::VR128RegisterClass);
 
    // FIXME: Unfortunately -soft-float and -no-implicit-float mean XMM
     // registers cannot be used even for integer operations.
-    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
-    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
-    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
-    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);
+    addRegisterClass(EVT::v16i8, X86::VR128RegisterClass);
+    addRegisterClass(EVT::v8i16, X86::VR128RegisterClass);
+    addRegisterClass(EVT::v4i32, X86::VR128RegisterClass);
+    addRegisterClass(EVT::v2i64, X86::VR128RegisterClass);
 
-    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
-    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
-    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
-    setOperationAction(ISD::ADD,                MVT::v2i64, Legal);
-    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
-    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
-    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
-    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
-    setOperationAction(ISD::SUB,                MVT::v2i64, Legal);
-    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
-    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
-    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
-    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
-    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);
-    setOperationAction(ISD::FSQRT,              MVT::v2f64, Legal);
-    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
+    setOperationAction(ISD::ADD,                EVT::v16i8, Legal);
+    setOperationAction(ISD::ADD,                EVT::v8i16, Legal);
+    setOperationAction(ISD::ADD,                EVT::v4i32, Legal);
+    setOperationAction(ISD::ADD,                EVT::v2i64, Legal);
+    setOperationAction(ISD::MUL,                EVT::v2i64, Custom);
+    setOperationAction(ISD::SUB,                EVT::v16i8, Legal);
+    setOperationAction(ISD::SUB,                EVT::v8i16, Legal);
+    setOperationAction(ISD::SUB,                EVT::v4i32, Legal);
+    setOperationAction(ISD::SUB,                EVT::v2i64, Legal);
+    setOperationAction(ISD::MUL,                EVT::v8i16, Legal);
+    setOperationAction(ISD::FADD,               EVT::v2f64, Legal);
+    setOperationAction(ISD::FSUB,               EVT::v2f64, Legal);
+    setOperationAction(ISD::FMUL,               EVT::v2f64, Legal);
+    setOperationAction(ISD::FDIV,               EVT::v2f64, Legal);
+    setOperationAction(ISD::FSQRT,              EVT::v2f64, Legal);
+    setOperationAction(ISD::FNEG,               EVT::v2f64, Custom);
 
-    setOperationAction(ISD::VSETCC,             MVT::v2f64, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v16i8, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v8i16, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v4i32, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v2f64, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v16i8, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v8i16, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v4i32, Custom);
 
-    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
-    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v16i8, Custom);
+    setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v8i16, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v8i16, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4i32, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4f32, Custom);
 
     // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
-    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
-      MVT VT = (MVT::SimpleValueType)i;
+    for (unsigned i = (unsigned)EVT::v16i8; i != (unsigned)EVT::v2i64; ++i) {
+      EVT VT = (EVT::SimpleValueType)i;
       // Do not attempt to custom lower non-power-of-2 vectors
       if (!isPowerOf2_32(VT.getVectorNumElements()))
         continue;
@@ -728,138 +728,138 @@
       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT.getSimpleVT(), Custom);
     }
 
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v2f64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v2i64, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v2f64, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v2i64, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v2f64, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v2f64, Custom);
 
     if (Subtarget->is64Bit()) {
-      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
-      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
+      setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v2i64, Custom);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v2i64, Custom);
     }
 
     // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
-    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; i++) {
-      MVT::SimpleValueType SVT = (MVT::SimpleValueType)i;
-      MVT VT = SVT;
+    for (unsigned i = (unsigned)EVT::v16i8; i != (unsigned)EVT::v2i64; i++) {
+      EVT::SimpleValueType SVT = (EVT::SimpleValueType)i;
+      EVT VT = SVT;
 
       // Do not attempt to promote non-128-bit vectors
       if (!VT.is128BitVector()) {
         continue;
       }
       setOperationAction(ISD::AND,    SVT, Promote);
-      AddPromotedToType (ISD::AND,    SVT, MVT::v2i64);
+      AddPromotedToType (ISD::AND,    SVT, EVT::v2i64);
       setOperationAction(ISD::OR,     SVT, Promote);
-      AddPromotedToType (ISD::OR,     SVT, MVT::v2i64);
+      AddPromotedToType (ISD::OR,     SVT, EVT::v2i64);
       setOperationAction(ISD::XOR,    SVT, Promote);
-      AddPromotedToType (ISD::XOR,    SVT, MVT::v2i64);
+      AddPromotedToType (ISD::XOR,    SVT, EVT::v2i64);
       setOperationAction(ISD::LOAD,   SVT, Promote);
-      AddPromotedToType (ISD::LOAD,   SVT, MVT::v2i64);
+      AddPromotedToType (ISD::LOAD,   SVT, EVT::v2i64);
       setOperationAction(ISD::SELECT, SVT, Promote);
-      AddPromotedToType (ISD::SELECT, SVT, MVT::v2i64);
+      AddPromotedToType (ISD::SELECT, SVT, EVT::v2i64);
     }
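
The Promote entries in this loop always travel with an AddPromotedToType call recording which wider type the operation is actually carried out in (v2i64 for these 128-bit integer vectors). A rough sketch of that two-table bookkeeping follows, with stand-in enums and std::map instead of the real TargetLowering members; the names Actions and PromoteTo are hypothetical.

#include <initializer_list>
#include <map>
#include <utility>

enum VT  { v16i8, v8i16, v4i32, v2i64 };
enum Op  { AND, OR, XOR, LOAD, SELECT };
enum Act { Legal, Promote, Expand, Custom };

std::map<std::pair<Op, VT>, Act> Actions;   // what to do with (op, type)
std::map<std::pair<Op, VT>, VT>  PromoteTo; // and, if Promote, in which type

void setOperationAction(Op O, VT T, Act A) { Actions[{O, T}] = A; }
void AddPromotedToType(Op O, VT T, VT To)  { PromoteTo[{O, T}] = To; }

void configureSSE2IntegerVectors() {
  // Bitwise ops, loads, and selects on the narrower 128-bit integer vectors
  // are all performed in the v2i64 domain, mirroring the loop above.
  for (VT T : {v16i8, v8i16, v4i32}) {
    for (Op O : {AND, OR, XOR, LOAD, SELECT}) {
      setOperationAction(O, T, Promote);
      AddPromotedToType(O, T, v2i64);
    }
  }
}
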
 
-    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+    setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 
     // Custom lower v2i64 and v2f64 selects.
-    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
-    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
-    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
-    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
+    setOperationAction(ISD::LOAD,               EVT::v2f64, Legal);
+    setOperationAction(ISD::LOAD,               EVT::v2i64, Legal);
+    setOperationAction(ISD::SELECT,             EVT::v2f64, Custom);
+    setOperationAction(ISD::SELECT,             EVT::v2i64, Custom);
 
-    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
-    setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Legal);
+    setOperationAction(ISD::FP_TO_SINT,         EVT::v4i32, Legal);
+    setOperationAction(ISD::SINT_TO_FP,         EVT::v4i32, Legal);
     if (!DisableMMX && Subtarget->hasMMX()) {
-      setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
-      setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
+      setOperationAction(ISD::FP_TO_SINT,         EVT::v2i32, Custom);
+      setOperationAction(ISD::SINT_TO_FP,         EVT::v2i32, Custom);
     }
   }
 
   if (Subtarget->hasSSE41()) {
     // FIXME: Do we need to handle scalar-to-vector here?
-    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
+    setOperationAction(ISD::MUL,                EVT::v4i32, Legal);
 
     // i8 and i16 vectors are custom, because the source register and source
     // memory operand types are not the same width.  f32 vectors are
     // custom since the immediate controlling the insert encodes additional
     // information.
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v16i8, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v8i16, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4i32, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4f32, Custom);
 
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v16i8, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v8i16, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v4i32, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v4f32, Custom);
 
     if (Subtarget->is64Bit()) {
-      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Legal);
-      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
+      setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v2i64, Legal);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v2i64, Legal);
     }
   }
 
   if (Subtarget->hasSSE42()) {
-    setOperationAction(ISD::VSETCC,             MVT::v2i64, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v2i64, Custom);
   }
 
   if (!UseSoftFloat && Subtarget->hasAVX()) {
-    addRegisterClass(MVT::v8f32, X86::VR256RegisterClass);
-    addRegisterClass(MVT::v4f64, X86::VR256RegisterClass);
-    addRegisterClass(MVT::v8i32, X86::VR256RegisterClass);
-    addRegisterClass(MVT::v4i64, X86::VR256RegisterClass);
+    addRegisterClass(EVT::v8f32, X86::VR256RegisterClass);
+    addRegisterClass(EVT::v4f64, X86::VR256RegisterClass);
+    addRegisterClass(EVT::v8i32, X86::VR256RegisterClass);
+    addRegisterClass(EVT::v4i64, X86::VR256RegisterClass);
 
-    setOperationAction(ISD::LOAD,               MVT::v8f32, Legal);
-    setOperationAction(ISD::LOAD,               MVT::v8i32, Legal);
-    setOperationAction(ISD::LOAD,               MVT::v4f64, Legal);
-    setOperationAction(ISD::LOAD,               MVT::v4i64, Legal);
-    setOperationAction(ISD::FADD,               MVT::v8f32, Legal);
-    setOperationAction(ISD::FSUB,               MVT::v8f32, Legal);
-    setOperationAction(ISD::FMUL,               MVT::v8f32, Legal);
-    setOperationAction(ISD::FDIV,               MVT::v8f32, Legal);
-    setOperationAction(ISD::FSQRT,              MVT::v8f32, Legal);
-    setOperationAction(ISD::FNEG,               MVT::v8f32, Custom);
-    //setOperationAction(ISD::BUILD_VECTOR,       MVT::v8f32, Custom);
-    //setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v8f32, Custom);
-    //setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8f32, Custom);
-    //setOperationAction(ISD::SELECT,             MVT::v8f32, Custom);
-    //setOperationAction(ISD::VSETCC,             MVT::v8f32, Custom);
+    setOperationAction(ISD::LOAD,               EVT::v8f32, Legal);
+    setOperationAction(ISD::LOAD,               EVT::v8i32, Legal);
+    setOperationAction(ISD::LOAD,               EVT::v4f64, Legal);
+    setOperationAction(ISD::LOAD,               EVT::v4i64, Legal);
+    setOperationAction(ISD::FADD,               EVT::v8f32, Legal);
+    setOperationAction(ISD::FSUB,               EVT::v8f32, Legal);
+    setOperationAction(ISD::FMUL,               EVT::v8f32, Legal);
+    setOperationAction(ISD::FDIV,               EVT::v8f32, Legal);
+    setOperationAction(ISD::FSQRT,              EVT::v8f32, Legal);
+    setOperationAction(ISD::FNEG,               EVT::v8f32, Custom);
+    //setOperationAction(ISD::BUILD_VECTOR,       EVT::v8f32, Custom);
+    //setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v8f32, Custom);
+    //setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v8f32, Custom);
+    //setOperationAction(ISD::SELECT,             EVT::v8f32, Custom);
+    //setOperationAction(ISD::VSETCC,             EVT::v8f32, Custom);
 
     // Operations to consider are commented out: v16i16, v32i8
-    //setOperationAction(ISD::ADD,                MVT::v16i16, Legal);
-    setOperationAction(ISD::ADD,                MVT::v8i32, Custom);
-    setOperationAction(ISD::ADD,                MVT::v4i64, Custom);
-    //setOperationAction(ISD::SUB,                MVT::v32i8, Legal);
-    //setOperationAction(ISD::SUB,                MVT::v16i16, Legal);
-    setOperationAction(ISD::SUB,                MVT::v8i32, Custom);
-    setOperationAction(ISD::SUB,                MVT::v4i64, Custom);
-    //setOperationAction(ISD::MUL,                MVT::v16i16, Legal);
-    setOperationAction(ISD::FADD,               MVT::v4f64, Legal);
-    setOperationAction(ISD::FSUB,               MVT::v4f64, Legal);
-    setOperationAction(ISD::FMUL,               MVT::v4f64, Legal);
-    setOperationAction(ISD::FDIV,               MVT::v4f64, Legal);
-    setOperationAction(ISD::FSQRT,              MVT::v4f64, Legal);
-    setOperationAction(ISD::FNEG,               MVT::v4f64, Custom);
+    //setOperationAction(ISD::ADD,                EVT::v16i16, Legal);
+    setOperationAction(ISD::ADD,                EVT::v8i32, Custom);
+    setOperationAction(ISD::ADD,                EVT::v4i64, Custom);
+    //setOperationAction(ISD::SUB,                EVT::v32i8, Legal);
+    //setOperationAction(ISD::SUB,                EVT::v16i16, Legal);
+    setOperationAction(ISD::SUB,                EVT::v8i32, Custom);
+    setOperationAction(ISD::SUB,                EVT::v4i64, Custom);
+    //setOperationAction(ISD::MUL,                EVT::v16i16, Legal);
+    setOperationAction(ISD::FADD,               EVT::v4f64, Legal);
+    setOperationAction(ISD::FSUB,               EVT::v4f64, Legal);
+    setOperationAction(ISD::FMUL,               EVT::v4f64, Legal);
+    setOperationAction(ISD::FDIV,               EVT::v4f64, Legal);
+    setOperationAction(ISD::FSQRT,              EVT::v4f64, Legal);
+    setOperationAction(ISD::FNEG,               EVT::v4f64, Custom);
 
-    setOperationAction(ISD::VSETCC,             MVT::v4f64, Custom);
-    // setOperationAction(ISD::VSETCC,             MVT::v32i8, Custom);
-    // setOperationAction(ISD::VSETCC,             MVT::v16i16, Custom);
-    setOperationAction(ISD::VSETCC,             MVT::v8i32, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v4f64, Custom);
+    // setOperationAction(ISD::VSETCC,             EVT::v32i8, Custom);
+    // setOperationAction(ISD::VSETCC,             EVT::v16i16, Custom);
+    setOperationAction(ISD::VSETCC,             EVT::v8i32, Custom);
 
-    // setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v32i8, Custom);
-    // setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i16, Custom);
-    // setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i16, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i32, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8f32, Custom);
+    // setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v32i8, Custom);
+    // setOperationAction(ISD::SCALAR_TO_VECTOR,   EVT::v16i16, Custom);
+    // setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v16i16, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v8i32, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v8f32, Custom);
 
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f64, Custom);
-    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4i64, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f64, Custom);
-    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4i64, Custom);
-    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f64, Custom);
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v4f64, Custom);
+    setOperationAction(ISD::BUILD_VECTOR,       EVT::v4i64, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v4f64, Custom);
+    setOperationAction(ISD::VECTOR_SHUFFLE,     EVT::v4i64, Custom);
+    setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4f64, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v4f64, Custom);
 
 #if 0
     // Not sure we want to do this since there are no 256-bit integer
@@ -867,8 +867,8 @@
 
     // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
     // This includes 256-bit vectors
-    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; ++i) {
-      MVT VT = (MVT::SimpleValueType)i;
+    for (unsigned i = (unsigned)EVT::v16i8; i != (unsigned)EVT::v4i64; ++i) {
+      EVT VT = (EVT::SimpleValueType)i;
 
       // Do not attempt to custom lower non-power-of-2 vectors
       if (!isPowerOf2_32(VT.getVectorNumElements()))
@@ -880,8 +880,8 @@
     }
 
     if (Subtarget->is64Bit()) {
-      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i64, Custom);
-      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i64, Custom);
+      setOperationAction(ISD::INSERT_VECTOR_ELT,  EVT::v4i64, Custom);
+      setOperationAction(ISD::EXTRACT_VECTOR_ELT, EVT::v4i64, Custom);
     }    
 #endif
 
@@ -891,42 +891,42 @@
 
     // Promote v32i8, v16i16, v8i32 load, select, and, or, xor to v4i64.
     // Including 256-bit vectors
-    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v4i64; i++) {
-      MVT VT = (MVT::SimpleValueType)i;
+    for (unsigned i = (unsigned)EVT::v16i8; i != (unsigned)EVT::v4i64; i++) {
+      EVT VT = (EVT::SimpleValueType)i;
 
       if (!VT.is256BitVector()) {
         continue;
       }
       setOperationAction(ISD::AND,    VT, Promote);
-      AddPromotedToType (ISD::AND,    VT, MVT::v4i64);
+      AddPromotedToType (ISD::AND,    VT, EVT::v4i64);
       setOperationAction(ISD::OR,     VT, Promote);
-      AddPromotedToType (ISD::OR,     VT, MVT::v4i64);
+      AddPromotedToType (ISD::OR,     VT, EVT::v4i64);
       setOperationAction(ISD::XOR,    VT, Promote);
-      AddPromotedToType (ISD::XOR,    VT, MVT::v4i64);
+      AddPromotedToType (ISD::XOR,    VT, EVT::v4i64);
       setOperationAction(ISD::LOAD,   VT, Promote);
-      AddPromotedToType (ISD::LOAD,   VT, MVT::v4i64);
+      AddPromotedToType (ISD::LOAD,   VT, EVT::v4i64);
       setOperationAction(ISD::SELECT, VT, Promote);
-      AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
+      AddPromotedToType (ISD::SELECT, VT, EVT::v4i64);
     }
 
-    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+    setTruncStoreAction(EVT::f64, EVT::f32, Expand);
 #endif
   }
 
   // We want to custom lower some of our intrinsics.
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_WO_CHAIN, EVT::Other, Custom);
 
   // Add/Sub/Mul with overflow operations are custom lowered.
-  setOperationAction(ISD::SADDO, MVT::i32, Custom);
-  setOperationAction(ISD::SADDO, MVT::i64, Custom);
-  setOperationAction(ISD::UADDO, MVT::i32, Custom);
-  setOperationAction(ISD::UADDO, MVT::i64, Custom);
-  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
-  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
-  setOperationAction(ISD::USUBO, MVT::i32, Custom);
-  setOperationAction(ISD::USUBO, MVT::i64, Custom);
-  setOperationAction(ISD::SMULO, MVT::i32, Custom);
-  setOperationAction(ISD::SMULO, MVT::i64, Custom);
+  setOperationAction(ISD::SADDO, EVT::i32, Custom);
+  setOperationAction(ISD::SADDO, EVT::i64, Custom);
+  setOperationAction(ISD::UADDO, EVT::i32, Custom);
+  setOperationAction(ISD::UADDO, EVT::i64, Custom);
+  setOperationAction(ISD::SSUBO, EVT::i32, Custom);
+  setOperationAction(ISD::SSUBO, EVT::i64, Custom);
+  setOperationAction(ISD::USUBO, EVT::i32, Custom);
+  setOperationAction(ISD::USUBO, EVT::i64, Custom);
+  setOperationAction(ISD::SMULO, EVT::i32, Custom);
+  setOperationAction(ISD::SMULO, EVT::i64, Custom);
 
   if (!Subtarget->is64Bit()) {
     // These libcalls are not available in 32-bit.
@@ -960,8 +960,8 @@
 }
 
 
-MVT::SimpleValueType X86TargetLowering::getSetCCResultType(MVT VT) const {
-  return MVT::i8;
+EVT::SimpleValueType X86TargetLowering::getSetCCResultType(EVT VT) const {
+  return EVT::i8;
 }
 
 
@@ -1012,9 +1012,9 @@
 
 /// getOptimalMemOpType - Returns the target-specific optimal type for load
 /// and store operations as a result of memset, memcpy, and memmove
-/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
+/// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
 /// determining it.
-MVT
+EVT
 X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
                                        bool isSrcConst, bool isSrcStr,
                                        SelectionDAG &DAG) const {
@@ -1025,13 +1025,13 @@
   bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
   if (!NoImplicitFloatOps && Subtarget->getStackAlignment() >= 16) {
     if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
-      return MVT::v4i32;
+      return EVT::v4i32;
     if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
-      return MVT::v4f32;
+      return EVT::v4f32;
   }
   if (Subtarget->is64Bit() && Size >= 8)
-    return MVT::i64;
-  return MVT::i32;
+    return EVT::i64;
+  return EVT::i32;
 }
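
getOptimalMemOpType encodes a simple policy: use a 16-byte SSE type for large, constant- or string-backed copies when the stack is 16-byte aligned and implicit FP use is allowed, otherwise fall back to the widest cheap integer. The sketch below restates that decision order with plain parameters standing in for the Subtarget and function-attribute queries; it is an illustration, not the X86TargetLowering interface.

#include <cstdint>

enum class MemOpVT { i32, i64, v4f32, v4i32 };

MemOpVT pickMemOpType(uint64_t Size, bool SrcIsConstOrStr,
                      bool HasSSE1, bool HasSSE2, bool Is64Bit,
                      unsigned StackAlign, bool NoImplicitFloat) {
  // Prefer 128-bit SSE moves for big copies when alignment and FP policy allow.
  if (!NoImplicitFloat && StackAlign >= 16 && SrcIsConstOrStr && Size >= 16) {
    if (HasSSE2) return MemOpVT::v4i32;
    if (HasSSE1) return MemOpVT::v4f32;
  }
  // Otherwise use the widest integer the target moves cheaply.
  if (Is64Bit && Size >= 8)
    return MemOpVT::i64;
  return MemOpVT::i32;
}
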
 
 /// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
@@ -1083,7 +1083,7 @@
   SmallVector<SDValue, 6> RetOps;
   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
   // Operand #1 = Bytes To Pop
-  RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
+  RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), EVT::i16));
 
   // Copy the result values into the output registers.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
@@ -1098,7 +1098,7 @@
       // If this is a copy from an xmm register to ST(0), use an FPExtend to
       // change the value to the FP stack register class.
       if (isScalarFPTypeInSSEReg(VA.getValVT()))
-        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
+        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f80, ValToCopy);
       RetOps.push_back(ValToCopy);
       // Don't emit a copytoreg.
       continue;
@@ -1107,11 +1107,11 @@
     // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
     // which is returned in RAX / RDX.
     if (Subtarget->is64Bit()) {
-      MVT ValVT = ValToCopy.getValueType();
+      EVT ValVT = ValToCopy.getValueType();
       if (ValVT.isVector() && ValVT.getSizeInBits() == 64) {
-        ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
+        ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i64, ValToCopy);
         if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1)
-          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, ValToCopy);
+          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v2i64, ValToCopy);
       }
     }
 
@@ -1129,7 +1129,7 @@
     X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
     unsigned Reg = FuncInfo->getSRetReturnReg();
     if (!Reg) {
-      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
+      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(EVT::i64));
       FuncInfo->setSRetReturnReg(Reg);
     }
     SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
@@ -1145,7 +1145,7 @@
     RetOps.push_back(Flag);
 
   return DAG.getNode(X86ISD::RET_FLAG, dl,
-                     MVT::Other, &RetOps[0], RetOps.size());
+                     EVT::Other, &RetOps[0], RetOps.size());
 }
 
 /// LowerCallResult - Lower the result values of a call into the
@@ -1168,10 +1168,10 @@
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
     CCValAssign &VA = RVLocs[i];
-    MVT CopyVT = VA.getValVT();
+    EVT CopyVT = VA.getValVT();
 
     // If this is x86-64, and we disabled SSE, we can't return FP values
-    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
+    if ((CopyVT == EVT::f32 || CopyVT == EVT::f64) &&
         ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
       llvm_report_error("SSE register return with SSE disabled");
     }
@@ -1182,7 +1182,7 @@
     if ((VA.getLocReg() == X86::ST0 ||
          VA.getLocReg() == X86::ST1) &&
         isScalarFPTypeInSSEReg(VA.getValVT())) {
-      CopyVT = MVT::f80;
+      CopyVT = EVT::f80;
     }
 
     SDValue Val;
@@ -1190,13 +1190,13 @@
       // For x86-64, MMX values are returned in XMM0 / XMM1 except for v1i64.
       if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
         Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
-                                   MVT::v2i64, InFlag).getValue(1);
+                                   EVT::v2i64, InFlag).getValue(1);
         Val = Chain.getValue(0);
-        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
-                          Val, DAG.getConstant(0, MVT::i64));
+        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i64,
+                          Val, DAG.getConstant(0, EVT::i64));
       } else {
         Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
-                                   MVT::i64, InFlag).getValue(1);
+                                   EVT::i64, InFlag).getValue(1);
         Val = Chain.getValue(0);
       }
       Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val);
@@ -1307,7 +1307,7 @@
 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                           DebugLoc dl) {
-  SDValue SizeNode     = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+  SDValue SizeNode     = DAG.getConstant(Flags.getByValSize(), EVT::i32);
   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                        /*AlwaysInline=*/true, NULL, 0, NULL, 0);
 }
@@ -1384,15 +1384,15 @@
     LastVal = VA.getValNo();
 
     if (VA.isRegLoc()) {
-      MVT RegVT = VA.getLocVT();
+      EVT RegVT = VA.getLocVT();
       TargetRegisterClass *RC = NULL;
-      if (RegVT == MVT::i32)
+      if (RegVT == EVT::i32)
         RC = X86::GR32RegisterClass;
-      else if (Is64Bit && RegVT == MVT::i64)
+      else if (Is64Bit && RegVT == EVT::i64)
         RC = X86::GR64RegisterClass;
-      else if (RegVT == MVT::f32)
+      else if (RegVT == EVT::f32)
         RC = X86::FR32RegisterClass;
-      else if (RegVT == MVT::f64)
+      else if (RegVT == EVT::f64)
         RC = X86::FR64RegisterClass;
       else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
         RC = X86::VR128RegisterClass;
@@ -1419,8 +1419,8 @@
       if (VA.isExtInLoc()) {
         // Handle MMX values passed in XMM regs.
         if (RegVT.isVector()) {
-          ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
-                                 ArgValue, DAG.getConstant(0, MVT::i64));
+          ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i64,
+                                 ArgValue, DAG.getConstant(0, EVT::i64));
           ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
         } else
           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
@@ -1444,11 +1444,11 @@
     X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
     unsigned Reg = FuncInfo->getSRetReturnReg();
     if (!Reg) {
-      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
+      Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(EVT::i64));
       FuncInfo->setSRetReturnReg(Reg);
     }
     SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Copy, Chain);
   }
 
   unsigned StackSize = CCInfo.getNextStackOffset();
@@ -1521,7 +1521,7 @@
       for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
         unsigned VReg = MF.addLiveIn(GPR64ArgRegs[NumIntRegs],
                                      X86::GR64RegisterClass);
-        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
+        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i64);
         SDValue Store =
           DAG.getStore(Val.getValue(1), dl, Val, FIN,
                        PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
@@ -1536,7 +1536,7 @@
       for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
         unsigned VReg = MF.addLiveIn(XMMArgRegs[NumXMMRegs],
                                      X86::VR128RegisterClass);
-        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::v4f32);
+        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, EVT::v4f32);
         SDValue Store =
           DAG.getStore(Val.getValue(1), dl, Val, FIN,
                        PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
@@ -1545,7 +1545,7 @@
                           DAG.getIntPtrConstant(16));
       }
       if (!MemOps.empty())
-          Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+          Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                              &MemOps[0], MemOps.size());
     }
   }
@@ -1603,7 +1603,7 @@
   if (!IsTailCall || FPDiff==0) return Chain;
 
   // Adjust the Return address stack slot.
-  MVT VT = getPointerTy();
+  EVT VT = getPointerTy();
   OutRetAddr = getReturnAddressFrameIndex(DAG);
 
   // Load the "old" Return address.
@@ -1623,7 +1623,7 @@
   int SlotSize = Is64Bit ? 8 : 4;
   int NewReturnAddrFI =
     MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
-  MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
+  EVT VT = Is64Bit ? EVT::i64 : EVT::i32;
   SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
   Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                        PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0);
@@ -1687,7 +1687,7 @@
   // of tail call optimization arguments are handled later.
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    MVT RegVT = VA.getLocVT();
+    EVT RegVT = VA.getLocVT();
     SDValue Arg = Outs[i].Val;
     ISD::ArgFlagsTy Flags = Outs[i].Flags;
     bool isByVal = Flags.isByVal();
@@ -1705,9 +1705,9 @@
     case CCValAssign::AExt:
       if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
         // Special case: passing MMX values in XMM registers.
-        Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
-        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
-        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
+        Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i64, Arg);
+        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v2i64, Arg);
+        Arg = getMOVL(DAG, dl, EVT::v2i64, DAG.getUNDEF(EVT::v2i64), Arg);
       } else
         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
       break;
@@ -1740,7 +1740,7 @@
   }
 
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token chain
@@ -1805,7 +1805,7 @@
            && "SSE registers cannot be used when SSE is disabled");
 
     Chain = DAG.getCopyToReg(Chain, dl, X86::AL,
-                             DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
+                             DAG.getConstant(NumXMMRegs, EVT::i8), InFlag);
     InFlag = Chain.getValue(1);
   }
 
@@ -1858,7 +1858,7 @@
     }
 
     if (!MemOpChains2.empty())
-      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+      Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                           &MemOpChains2[0], MemOpChains2.size());
 
     // Copy arguments to their registers.
@@ -1933,7 +1933,7 @@
   }
 
   // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
 
   if (isTailCall) {
@@ -1946,7 +1946,7 @@
   Ops.push_back(Callee);
 
   if (isTailCall)
-    Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
+    Ops.push_back(DAG.getConstant(FPDiff, EVT::i32));
 
   // Add argument registers to the end of the list so that they are known live
   // into the call.
@@ -1960,7 +1960,7 @@
 
   // Add an implicit use of AL for x86 vararg functions.
   if (Is64Bit && isVarArg)
-    Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
+    Ops.push_back(DAG.getRegister(X86::AL, EVT::i8));
 
   if (InFlag.getNode())
     Ops.push_back(InFlag);
@@ -2277,10 +2277,10 @@
 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
 /// is suitable for input to PSHUFD or PSHUFW.  That is, it doesn't reference
 /// the second operand.
-static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, MVT VT) {
-  if (VT == MVT::v4f32 || VT == MVT::v4i32 || VT == MVT::v4i16)
+static bool isPSHUFDMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+  if (VT == EVT::v4f32 || VT == EVT::v4i32 || VT == EVT::v4i16)
     return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
-  if (VT == MVT::v2f64 || VT == MVT::v2i64)
+  if (VT == EVT::v2f64 || VT == EVT::v2i64)
     return (Mask[0] < 2 && Mask[1] < 2);
   return false;
 }
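
isPSHUFDMask only has to establish that the shuffle never reads the second operand, i.e. every mask index stays below the element count (undef indices, encoded as negative values, pass trivially). A self-contained sketch of that check, using a std::vector<int> mask in place of SmallVectorImpl:

#include <vector>

// Returns true if every mask index selects from the first operand only
// (an index >= NumElems would reach into the second operand).
bool referencesFirstOperandOnly(const std::vector<int> &Mask, int NumElems) {
  for (int Idx : Mask)
    if (Idx >= NumElems)
      return false;
  return true;
}

// e.g. {2, 1, 0, 3} with NumElems == 4 -> true; {0, 4, 1, 5} -> false.
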
@@ -2293,8 +2293,8 @@
 
 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
 /// is suitable for input to PSHUFHW.
-static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, MVT VT) {
-  if (VT != MVT::v8i16)
+static bool isPSHUFHWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+  if (VT != EVT::v8i16)
     return false;
   
   // Lower quadword copied in order or undef.
@@ -2318,8 +2318,8 @@
 
 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
 /// is suitable for input to PSHUFLW.
-static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, MVT VT) {
-  if (VT != MVT::v8i16)
+static bool isPSHUFLWMask(const SmallVectorImpl<int> &Mask, EVT VT) {
+  if (VT != EVT::v8i16)
     return false;
   
   // Upper quadword copied in order.
@@ -2343,7 +2343,7 @@
 
 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
 /// specifies a shuffle of elements that is suitable for input to SHUFP*.
-static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
   int NumElems = VT.getVectorNumElements();
   if (NumElems != 2 && NumElems != 4)
     return false;
@@ -2369,7 +2369,7 @@
 /// the reverse of what x86 shuffles want. x86 shuffles require the lower
 /// half elements to come from vector 1 (which would equal the dest.) and
 /// the upper half to come from vector 2.
-static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isCommutedSHUFPMask(const SmallVectorImpl<int> &Mask, EVT VT) {
   int NumElems = VT.getVectorNumElements();
   
   if (NumElems != 2 && NumElems != 4) 
@@ -2460,7 +2460,7 @@
 
 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
-static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, MVT VT,
+static bool isUNPCKLMask(const SmallVectorImpl<int> &Mask, EVT VT,
                          bool V2IsSplat = false) {
   int NumElts = VT.getVectorNumElements();
   if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
@@ -2490,7 +2490,7 @@
 
 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
-static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, MVT VT, 
+static bool isUNPCKHMask(const SmallVectorImpl<int> &Mask, EVT VT, 
                          bool V2IsSplat = false) {
   int NumElts = VT.getVectorNumElements();
   if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
@@ -2521,7 +2521,7 @@
 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
 /// <0, 0, 1, 1>
-static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isUNPCKL_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
   int NumElems = VT.getVectorNumElements();
   if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
     return false;
@@ -2546,7 +2546,7 @@
 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
 /// <2, 2, 3, 3>
-static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isUNPCKH_v_undef_Mask(const SmallVectorImpl<int> &Mask, EVT VT) {
   int NumElems = VT.getVectorNumElements();
   if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
     return false;
@@ -2571,7 +2571,7 @@
 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
 /// specifies a shuffle of elements that is suitable for input to MOVSS,
 /// MOVSD, and MOVD, i.e. setting the lowest element.
-static bool isMOVLMask(const SmallVectorImpl<int> &Mask, MVT VT) {
+static bool isMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT) {
   if (VT.getVectorElementType().getSizeInBits() < 32)
     return false;
 
@@ -2596,7 +2596,7 @@
 /// isCommutedMOVL - Returns true if the shuffle mask is the reverse of what
 /// x86 movss wants: the lowest element comes from vector 2 and the other
 /// elements come from vector 1 in order.
-static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, MVT VT,
+static bool isCommutedMOVLMask(const SmallVectorImpl<int> &Mask, EVT VT,
                                bool V2IsSplat = false, bool V2IsUndef = false) {
   int NumOps = VT.getVectorNumElements();
   if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
@@ -2751,7 +2751,7 @@
 /// their permute mask.
 static SDValue CommuteVectorShuffle(ShuffleVectorSDNode *SVOp,
                                     SelectionDAG &DAG) {
-  MVT VT = SVOp->getValueType(0);
+  EVT VT = SVOp->getValueType(0);
   unsigned NumElems = VT.getVectorNumElements();
   SmallVector<int, 8> MaskVec;
   
@@ -2770,7 +2770,7 @@
 
 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
 /// the two vector operands have swapped position.
-static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, MVT VT) {
+static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask, EVT VT) {
   unsigned NumElems = VT.getVectorNumElements();
   for (unsigned i = 0; i != NumElems; ++i) {
     int idx = Mask[i];
@@ -2883,7 +2883,7 @@
 
 /// getZeroVector - Returns a vector of specified type with all zero elements.
 ///
-static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG,
+static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
                              DebugLoc dl) {
   assert(VT.isVector() && "Expected a vector type");
 
@@ -2891,31 +2891,31 @@
   // type.  This ensures they get CSE'd.
   SDValue Vec;
   if (VT.getSizeInBits() == 64) { // MMX
-    SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
+    SDValue Cst = DAG.getTargetConstant(0, EVT::i32);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i32, Cst, Cst);
   } else if (HasSSE2) {  // SSE2
-    SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+    SDValue Cst = DAG.getTargetConstant(0, EVT::i32);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32, Cst, Cst, Cst, Cst);
   } else { // SSE1
-    SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
+    SDValue Cst = DAG.getTargetConstantFP(+0.0, EVT::f32);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4f32, Cst, Cst, Cst, Cst);
   }
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
 }
 
 /// getOnesVector - Returns a vector of specified type with all bits set.
 ///
-static SDValue getOnesVector(MVT VT, SelectionDAG &DAG, DebugLoc dl) {
+static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   assert(VT.isVector() && "Expected a vector type");
 
   // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
   // type.  This ensures they get CSE'd.
-  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
+  SDValue Cst = DAG.getTargetConstant(~0U, EVT::i32);
   SDValue Vec;
   if (VT.getSizeInBits() == 64)  // MMX
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v2i32, Cst, Cst);
   else                                              // SSE
-    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
+    Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32, Cst, Cst, Cst, Cst);
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
 }
 
@@ -2923,7 +2923,7 @@
 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
 /// that point to V2 point to its first element.
 static SDValue NormalizeMask(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
-  MVT VT = SVOp->getValueType(0);
+  EVT VT = SVOp->getValueType(0);
   unsigned NumElems = VT.getVectorNumElements();
   
   bool Changed = false;
@@ -2944,7 +2944,7 @@
 
 /// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
 /// operation of specified width.
-static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getMOVL(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                        SDValue V2) {
   unsigned NumElems = VT.getVectorNumElements();
   SmallVector<int, 8> Mask;
@@ -2955,7 +2955,7 @@
 }
 
 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
-static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getUnpackl(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                           SDValue V2) {
   unsigned NumElems = VT.getVectorNumElements();
   SmallVector<int, 8> Mask;
@@ -2967,7 +2967,7 @@
 }
 
 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
-static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, MVT VT, SDValue V1,
+static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1,
                           SDValue V2) {
   unsigned NumElems = VT.getVectorNumElements();
   unsigned Half = NumElems/2;
@@ -2985,8 +2985,8 @@
   if (SV->getValueType(0).getVectorNumElements() <= 4)
     return SDValue(SV, 0);
   
-  MVT PVT = MVT::v4f32;
-  MVT VT = SV->getValueType(0);
+  EVT PVT = EVT::v4f32;
+  EVT VT = SV->getValueType(0);
   DebugLoc dl = SV->getDebugLoc();
   SDValue V1 = SV->getOperand(0);
   int NumElems = VT.getVectorNumElements();
@@ -3017,7 +3017,7 @@
 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                              bool isZero, bool HasSSE2,
                                              SelectionDAG &DAG) {
-  MVT VT = V2.getValueType();
+  EVT VT = V2.getValueType();
   SDValue V1 = isZero
     ? getZeroVector(VT, HasSSE2, DAG, V2.getDebugLoc()) : DAG.getUNDEF(VT);
   unsigned NumElems = VT.getVectorNumElements();
@@ -3105,9 +3105,9 @@
     bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
     if (ThisIsNonZero && First) {
       if (NumZero)
-        V = getZeroVector(MVT::v8i16, true, DAG, dl);
+        V = getZeroVector(EVT::v8i16, true, DAG, dl);
       else
-        V = DAG.getUNDEF(MVT::v8i16);
+        V = DAG.getUNDEF(EVT::v8i16);
       First = false;
     }
 
@@ -3116,24 +3116,24 @@
       bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
       if (LastIsNonZero) {
         LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
-                              MVT::i16, Op.getOperand(i-1));
+                              EVT::i16, Op.getOperand(i-1));
       }
       if (ThisIsNonZero) {
-        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
-        ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
-                              ThisElt, DAG.getConstant(8, MVT::i8));
+        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i16, Op.getOperand(i));
+        ThisElt = DAG.getNode(ISD::SHL, dl, EVT::i16,
+                              ThisElt, DAG.getConstant(8, EVT::i8));
         if (LastIsNonZero)
-          ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
+          ThisElt = DAG.getNode(ISD::OR, dl, EVT::i16, ThisElt, LastElt);
       } else
         ThisElt = LastElt;
 
       if (ThisElt.getNode())
-        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
+        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v8i16, V, ThisElt,
                         DAG.getIntPtrConstant(i/2));
     }
   }
 
-  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V);
+  return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, V);
 }
 
 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
@@ -3152,13 +3152,13 @@
     if (isNonZero) {
       if (First) {
         if (NumZero)
-          V = getZeroVector(MVT::v8i16, true, DAG, dl);
+          V = getZeroVector(EVT::v8i16, true, DAG, dl);
         else
-          V = DAG.getUNDEF(MVT::v8i16);
+          V = DAG.getUNDEF(EVT::v8i16);
         First = false;
       }
       V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
-                      MVT::v8i16, V, Op.getOperand(i),
+                      EVT::v8i16, V, Op.getOperand(i),
                       DAG.getIntPtrConstant(i));
     }
   }
@@ -3168,11 +3168,11 @@
 
 /// getVShift - Return a vector logical shift node.
 ///
-static SDValue getVShift(bool isLeft, MVT VT, SDValue SrcOp,
+static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
                          unsigned NumBits, SelectionDAG &DAG,
                          const TargetLowering &TLI, DebugLoc dl) {
   bool isMMX = VT.getSizeInBits() == 64;
-  MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
+  EVT ShVT = isMMX ? EVT::v1i64 : EVT::v2i64;
   unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
   SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
@@ -3189,7 +3189,7 @@
     // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
     // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
     // eliminated on x86-32 hosts.
-    if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
+    if (Op.getValueType() == EVT::v4i32 || Op.getValueType() == EVT::v2i32)
       return Op;
 
     if (ISD::isBuildVectorAllOnes(Op.getNode()))
@@ -3197,9 +3197,9 @@
     return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG, dl);
   }
 
-  MVT VT = Op.getValueType();
-  MVT EVT = VT.getVectorElementType();
-  unsigned EVTBits = EVT.getSizeInBits();
+  EVT VT = Op.getValueType();
+  EVT ExtVT = VT.getVectorElementType();
+  unsigned EVTBits = ExtVT.getSizeInBits();
 
   unsigned NumElems = Op.getNumOperands();
   unsigned NumZero  = 0;
@@ -3238,16 +3238,16 @@
     // insertion that way.  Only do this if the value is non-constant or if the
     // value is a constant being inserted into element 0.  It is cheaper to do
     // a constant pool load than it is to do a movd + shuffle.
-    if (EVT == MVT::i64 && !Subtarget->is64Bit() &&
+    if (ExtVT == EVT::i64 && !Subtarget->is64Bit() &&
         (!IsAllConstants || Idx == 0)) {
       if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
         // Handle MMX and SSE both.
-        MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
-        unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
+        EVT VecVT = VT == EVT::v2i64 ? EVT::v4i32 : EVT::v2i32;
+        unsigned VecElts = VT == EVT::v2i64 ? 4 : 2;
 
         // Truncate the value (which may itself be a constant) to i32, and
         // convert it to a vector with movd (S2V+shuffle to zero extend).
-        Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
+        Item = DAG.getNode(ISD::TRUNCATE, dl, EVT::i32, Item);
         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
         Item = getShuffleVectorZeroOrUndef(Item, 0, true,
                                            Subtarget->hasSSE2(), DAG);
@@ -3274,15 +3274,15 @@
     if (Idx == 0) {
       if (NumZero == 0) {
         return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
-      } else if (EVT == MVT::i32 || EVT == MVT::f32 || EVT == MVT::f64 ||
-          (EVT == MVT::i64 && Subtarget->is64Bit())) {
+      } else if (ExtVT == EVT::i32 || ExtVT == EVT::f32 || ExtVT == EVT::f64 ||
+          (ExtVT == EVT::i64 && Subtarget->is64Bit())) {
         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
         // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
         return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget->hasSSE2(),
                                            DAG);
-      } else if (EVT == MVT::i16 || EVT == MVT::i8) {
-        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
-        MVT MiddleVT = VT.getSizeInBits() == 64 ? MVT::v2i32 : MVT::v4i32;
+      } else if (ExtVT == EVT::i16 || ExtVT == EVT::i8) {
+        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i32, Item);
+        EVT MiddleVT = VT.getSizeInBits() == 64 ? EVT::v2i32 : EVT::v4i32;
         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
         Item = getShuffleVectorZeroOrUndef(Item, 0, true,
                                            Subtarget->hasSSE2(), DAG);
@@ -3508,10 +3508,10 @@
     SmallVector<int, 8> MaskV;
     MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
     MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
-    NewV = DAG.getVectorShuffle(MVT::v2i64, dl, 
-                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1),
-                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]);
-    NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV);
+    NewV = DAG.getVectorShuffle(EVT::v2i64, dl, 
+                  DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2i64, V1),
+                  DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2i64, V2), &MaskV[0]);
+    NewV = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, NewV);
 
     // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
     // source words for the shuffle, to aid later transformations.
@@ -3548,8 +3548,8 @@
     // If we've eliminated the use of V2, and the new mask is a pshuflw or
     // pshufhw, that's as cheap as it gets.  Return the new shuffle.
     if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
-      return DAG.getVectorShuffle(MVT::v8i16, dl, NewV, 
-                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
+      return DAG.getVectorShuffle(EVT::v8i16, dl, NewV, 
+                                  DAG.getUNDEF(EVT::v8i16), &MaskVals[0]);
     }
   }
   
@@ -3567,19 +3567,19 @@
     for (unsigned i = 0; i != 8; ++i) {
       int EltIdx = MaskVals[i] * 2;
       if (TwoInputs && (EltIdx >= 16)) {
-        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
-        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
+        pshufbMask.push_back(DAG.getConstant(0x80, EVT::i8));
+        pshufbMask.push_back(DAG.getConstant(0x80, EVT::i8));
         continue;
       }
-      pshufbMask.push_back(DAG.getConstant(EltIdx,   MVT::i8));
-      pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
+      pshufbMask.push_back(DAG.getConstant(EltIdx,   EVT::i8));
+      pshufbMask.push_back(DAG.getConstant(EltIdx+1, EVT::i8));
     }
-    V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1);
-    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, 
+    V1 = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, V1);
+    V1 = DAG.getNode(X86ISD::PSHUFB, dl, EVT::v16i8, V1, 
                      DAG.getNode(ISD::BUILD_VECTOR, dl,
-                                 MVT::v16i8, &pshufbMask[0], 16));
+                                 EVT::v16i8, &pshufbMask[0], 16));
     if (!TwoInputs)
-      return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
+      return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, V1);
     
     // Calculate the shuffle mask for the second input, shuffle it, and
     // OR it with the first shuffled input.
@@ -3587,19 +3587,19 @@
     for (unsigned i = 0; i != 8; ++i) {
       int EltIdx = MaskVals[i] * 2;
       if (EltIdx < 16) {
-        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
-        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
+        pshufbMask.push_back(DAG.getConstant(0x80, EVT::i8));
+        pshufbMask.push_back(DAG.getConstant(0x80, EVT::i8));
         continue;
       }
-      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
-      pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
+      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, EVT::i8));
+      pshufbMask.push_back(DAG.getConstant(EltIdx - 15, EVT::i8));
     }
-    V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2);
-    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, 
+    V2 = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, V2);
+    V2 = DAG.getNode(X86ISD::PSHUFB, dl, EVT::v16i8, V2, 
                      DAG.getNode(ISD::BUILD_VECTOR, dl,
-                                 MVT::v16i8, &pshufbMask[0], 16));
-    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
+                                 EVT::v16i8, &pshufbMask[0], 16));
+    V1 = DAG.getNode(ISD::OR, dl, EVT::v16i8, V1, V2);
+    return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, V1);
   }
 
   // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
@@ -3621,7 +3621,7 @@
     }
     for (unsigned i = 4; i != 8; ++i)
       MaskV.push_back(i);
-    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
+    NewV = DAG.getVectorShuffle(EVT::v8i16, dl, NewV, DAG.getUNDEF(EVT::v8i16),
                                 &MaskV[0]);
   }
   
@@ -3643,7 +3643,7 @@
         MaskV.push_back(-1);
       }
     }
-    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
+    NewV = DAG.getVectorShuffle(EVT::v8i16, dl, NewV, DAG.getUNDEF(EVT::v8i16),
                                 &MaskV[0]);
   }
   
@@ -3665,11 +3665,11 @@
     if (EltIdx < 0)
       continue;
     SDValue ExtOp = (EltIdx < 8)
-    ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
+    ? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i16, V1,
                   DAG.getIntPtrConstant(EltIdx))
-    : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
+    : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i16, V2,
                   DAG.getIntPtrConstant(EltIdx - 8));
-    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
+    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v8i16, NewV, ExtOp,
                        DAG.getIntPtrConstant(i));
   }
   return NewV;
@@ -3718,18 +3718,18 @@
     for (unsigned i = 0; i != 16; ++i) {
       int EltIdx = MaskVals[i];
       if (EltIdx < 0 || (TwoInputs && EltIdx >= 16)) {
-        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
+        pshufbMask.push_back(DAG.getConstant(0x80, EVT::i8));
         continue;
       }
-      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
+      pshufbMask.push_back(DAG.getConstant(EltIdx, EVT::i8));
     }
     // If all the elements are from V2, assign it to V1 and return after
     // building the first pshufb.
     if (V2Only)
       V1 = V2;
-    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
+    V1 = DAG.getNode(X86ISD::PSHUFB, dl, EVT::v16i8, V1,
                      DAG.getNode(ISD::BUILD_VECTOR, dl,
-                                 MVT::v16i8, &pshufbMask[0], 16));
+                                 EVT::v16i8, &pshufbMask[0], 16));
     if (!TwoInputs)
       return V1;
     
@@ -3739,22 +3739,22 @@
     for (unsigned i = 0; i != 16; ++i) {
       int EltIdx = MaskVals[i];
       if (EltIdx < 16) {
-        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
+        pshufbMask.push_back(DAG.getConstant(0x80, EVT::i8));
         continue;
       }
-      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
+      pshufbMask.push_back(DAG.getConstant(EltIdx - 16, EVT::i8));
     }
-    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
+    V2 = DAG.getNode(X86ISD::PSHUFB, dl, EVT::v16i8, V2,
                      DAG.getNode(ISD::BUILD_VECTOR, dl,
-                                 MVT::v16i8, &pshufbMask[0], 16));
-    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
+                                 EVT::v16i8, &pshufbMask[0], 16));
+    return DAG.getNode(ISD::OR, dl, EVT::v16i8, V1, V2);
   }
   
   // No SSSE3 - Calculate the in-place words and then fix all out-of-place
   // words with 0-16 extracts & inserts.  Worst case is 16 bytes out of order
   // from the 16 different words that comprise the two doublequadword input vectors.
-  V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
-  V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2);
+  V1 = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, V1);
+  V2 = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v8i16, V2);
   SDValue NewV = V2Only ? V2 : V1;
   for (int i = 0; i != 8; ++i) {
     int Elt0 = MaskVals[i*2];
@@ -3777,9 +3777,9 @@
     // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
     // using a single extract together, load it and store it.
     if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
-      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
+      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i16, Elt1Src,
                            DAG.getIntPtrConstant(Elt1 / 2));
-      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
+      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
       continue;
     }
@@ -3788,35 +3788,35 @@
     // source byte is not also odd, shift the extracted word left 8 bits;
     // otherwise clear the bottom 8 bits if we need to do an or.
     if (Elt1 >= 0) {
-      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
+      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i16, Elt1Src,
                            DAG.getIntPtrConstant(Elt1 / 2));
       if ((Elt1 & 1) == 0)
-        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
+        InsElt = DAG.getNode(ISD::SHL, dl, EVT::i16, InsElt,
                              DAG.getConstant(8, TLI.getShiftAmountTy()));
       else if (Elt0 >= 0)
-        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
-                             DAG.getConstant(0xFF00, MVT::i16));
+        InsElt = DAG.getNode(ISD::AND, dl, EVT::i16, InsElt,
+                             DAG.getConstant(0xFF00, EVT::i16));
     }
     // If Elt0 is defined, extract it from the appropriate source.  If the
     // source byte is not also even, shift the extracted word right 8 bits. If
     // Elt1 was also defined, OR the extracted values together before
     // inserting them in the result.
     if (Elt0 >= 0) {
-      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
+      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i16,
                                     Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
       if ((Elt0 & 1) != 0)
-        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
+        InsElt0 = DAG.getNode(ISD::SRL, dl, EVT::i16, InsElt0,
                               DAG.getConstant(8, TLI.getShiftAmountTy()));
       else if (Elt1 >= 0)
-        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
-                             DAG.getConstant(0x00FF, MVT::i16));
-      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
+        InsElt0 = DAG.getNode(ISD::AND, dl, EVT::i16, InsElt0,
+                             DAG.getConstant(0x00FF, EVT::i16));
+      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, EVT::i16, InsElt, InsElt0)
                          : InsElt0;
     }
-    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
+    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, EVT::v8i16, NewV, InsElt,
                        DAG.getIntPtrConstant(i));
   }
-  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV);
+  return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v16i8, NewV);
 }
 
 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
@@ -3828,27 +3828,27 @@
 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                  SelectionDAG &DAG,
                                  TargetLowering &TLI, DebugLoc dl) {
-  MVT VT = SVOp->getValueType(0);
+  EVT VT = SVOp->getValueType(0);
   SDValue V1 = SVOp->getOperand(0);
   SDValue V2 = SVOp->getOperand(1);
   unsigned NumElems = VT.getVectorNumElements();
   unsigned NewWidth = (NumElems == 4) ? 2 : 4;
-  MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
-  MVT MaskEltVT = MaskVT.getVectorElementType();
-  MVT NewVT = MaskVT;
+  EVT MaskVT = EVT::getIntVectorWithNumElements(NewWidth);
+  EVT MaskEltVT = MaskVT.getVectorElementType();
+  EVT NewVT = MaskVT;
   switch (VT.getSimpleVT()) {
   default: assert(false && "Unexpected!");
-  case MVT::v4f32: NewVT = MVT::v2f64; break;
-  case MVT::v4i32: NewVT = MVT::v2i64; break;
-  case MVT::v8i16: NewVT = MVT::v4i32; break;
-  case MVT::v16i8: NewVT = MVT::v4i32; break;
+  case EVT::v4f32: NewVT = EVT::v2f64; break;
+  case EVT::v4i32: NewVT = EVT::v2i64; break;
+  case EVT::v8i16: NewVT = EVT::v4i32; break;
+  case EVT::v16i8: NewVT = EVT::v4i32; break;
   }
 
   if (NewWidth == 2) {
     if (VT.isInteger())
-      NewVT = MVT::v2i64;
+      NewVT = EVT::v2i64;
     else
-      NewVT = MVT::v2f64;
+      NewVT = EVT::v2f64;
   }
   int Scale = NumElems / NewWidth;
   SmallVector<int, 8> MaskVec;
@@ -3876,23 +3876,23 @@
 
 /// getVZextMovL - Return a zero-extending vector move low node.
 ///
-static SDValue getVZextMovL(MVT VT, MVT OpVT,
+static SDValue getVZextMovL(EVT VT, EVT OpVT,
                             SDValue SrcOp, SelectionDAG &DAG,
                             const X86Subtarget *Subtarget, DebugLoc dl) {
-  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
+  if (VT == EVT::v2f64 || VT == EVT::v4f32) {
     LoadSDNode *LD = NULL;
     if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
       LD = dyn_cast<LoadSDNode>(SrcOp);
     if (!LD) {
       // movssrr and movsdrr do not clear top bits. Try to use movd, movq
       // instead.
-      MVT EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
-      if ((EVT != MVT::i64 || Subtarget->is64Bit()) &&
+      EVT EVT = (OpVT == EVT::v2f64) ? EVT::i64 : EVT::i32;
+      if ((EVT != EVT::i64 || Subtarget->is64Bit()) &&
           SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
           SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
           SrcOp.getOperand(0).getOperand(0).getValueType() == EVT) {
         // PR2108
-        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
+        OpVT = (OpVT == EVT::v2f64) ? EVT::v2i64 : EVT::v4i32;
         return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                            DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
@@ -3916,7 +3916,7 @@
   SDValue V1 = SVOp->getOperand(0);
   SDValue V2 = SVOp->getOperand(1);
   DebugLoc dl = SVOp->getDebugLoc();
-  MVT VT = SVOp->getValueType(0);
+  EVT VT = SVOp->getValueType(0);
   
   SmallVector<std::pair<int, int>, 8> Locs;
   Locs.resize(4);
@@ -4063,7 +4063,7 @@
   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
   SDValue V1 = Op.getOperand(0);
   SDValue V2 = Op.getOperand(1);
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
   unsigned NumElems = VT.getVectorNumElements();
   bool isMMX = VT.getSizeInBits() == 64;
@@ -4084,12 +4084,12 @@
 
   // If the shuffle can be profitably rewritten as a narrower shuffle, then
   // do it!
-  if (VT == MVT::v8i16 || VT == MVT::v16i8) {
+  if (VT == EVT::v8i16 || VT == EVT::v16i8) {
     SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, *this, dl);
     if (NewOp.getNode())
       return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                          LowerVECTOR_SHUFFLE(NewOp, DAG));
-  } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
+  } else if ((VT == EVT::v4i32 || (VT == EVT::v4f32 && Subtarget->hasSSE2()))) {
     // FIXME: Figure out a cleaner way to do this.
     // Try to make use of movq to zero out the top part.
     if (ISD::isBuildVectorAllZeros(V2.getNode())) {
@@ -4119,7 +4119,7 @@
   if (isShift && ShVal.hasOneUse()) {
     // If the shifted value has multiple uses, it may be cheaper to use
     // v_set0 + movlhps or movhlps, etc.
-    MVT EVT = VT.getVectorElementType();
+    EVT EVT = VT.getVectorElementType();
     ShAmt *= EVT.getSizeInBits();
     return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
   }
@@ -4147,7 +4147,7 @@
 
   if (isShift) {
     // No better options. Use a vshl / vsrl.
-    MVT EVT = VT.getVectorElementType();
+    EVT EVT = VT.getVectorElementType();
     ShAmt *= EVT.getSizeInBits();
     return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
   }
@@ -4225,13 +4225,13 @@
     return Op;
   
   // Handle v8i16 specifically since SSE can do byte extraction and insertion.
-  if (VT == MVT::v8i16) {
+  if (VT == EVT::v8i16) {
     SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(SVOp, DAG, *this);
     if (NewOp.getNode())
       return NewOp;
   }
 
-  if (VT == MVT::v16i8) {
+  if (VT == EVT::v16i8) {
     SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, DAG, *this);
     if (NewOp.getNode())
       return NewOp;
@@ -4247,30 +4247,30 @@
 SDValue
 X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
                                                 SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
   if (VT.getSizeInBits() == 8) {
-    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
+    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, EVT::i32,
                                     Op.getOperand(0), Op.getOperand(1));
-    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
+    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, EVT::i32, Extract,
                                     DAG.getValueType(VT));
     return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
   } else if (VT.getSizeInBits() == 16) {
     unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
     // If Idx is 0, it's cheaper to do a move instead of a pextrw.
     if (Idx == 0)
-      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
-                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+      return DAG.getNode(ISD::TRUNCATE, dl, EVT::i16,
+                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i32,
                                      DAG.getNode(ISD::BIT_CONVERT, dl,
-                                                 MVT::v4i32,
+                                                 EVT::v4i32,
                                                  Op.getOperand(0)),
                                      Op.getOperand(1)));
-    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
+    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EVT::i32,
                                     Op.getOperand(0), Op.getOperand(1));
-    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
+    SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, EVT::i32, Extract,
                                     DAG.getValueType(VT));
     return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
-  } else if (VT == MVT::f32) {
+  } else if (VT == EVT::f32) {
     // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
     // the result back to FR32 register. It's only worth matching if the
     // result has a single use which is a store or a bitcast to i32.  And in
@@ -4283,14 +4283,14 @@
          (isa<ConstantSDNode>(Op.getOperand(1)) &&
           cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
         (User->getOpcode() != ISD::BIT_CONVERT ||
-         User->getValueType(0) != MVT::i32))
+         User->getValueType(0) != EVT::i32))
       return SDValue();
-    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-                                  DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
+    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i32,
+                                  DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v4i32,
                                               Op.getOperand(0)),
                                               Op.getOperand(1));
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
-  } else if (VT == MVT::i32) {
+    return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, Extract);
+  } else if (VT == EVT::i32) {
     // ExtractPS works with constant index.
     if (isa<ConstantSDNode>(Op.getOperand(1)))
       return Op;
@@ -4310,20 +4310,20 @@
       return Res;
   }
 
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
   // TODO: handle v16i8.
   if (VT.getSizeInBits() == 16) {
     SDValue Vec = Op.getOperand(0);
     unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
     if (Idx == 0)
-      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
-                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+      return DAG.getNode(ISD::TRUNCATE, dl, EVT::i16,
+                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::i32,
                                      DAG.getNode(ISD::BIT_CONVERT, dl,
-                                                 MVT::v4i32, Vec),
+                                                 EVT::v4i32, Vec),
                                      Op.getOperand(1)));
     // Transform it so it matches pextrw, which produces a 32-bit result.
-    MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1);
+    EVT EVT = (EVT::SimpleValueType)(VT.getSimpleVT()+1);
     SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EVT,
                                     Op.getOperand(0), Op.getOperand(1));
     SDValue Assert  = DAG.getNode(ISD::AssertZext, dl, EVT, Extract,
@@ -4336,7 +4336,7 @@
     
     // SHUFPS the element to the lowest double word, then movss.
     int Mask[4] = { Idx, -1, -1, -1 };
-    MVT VVT = Op.getOperand(0).getValueType();
+    EVT VVT = Op.getOperand(0).getValueType();
     SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 
                                        DAG.getUNDEF(VVT), Mask);
     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
@@ -4353,7 +4353,7 @@
     // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
     // to a f64mem, the whole operation is folded into a single MOVHPDmr.
     int Mask[2] = { 1, -1 };
-    MVT VVT = Op.getOperand(0).getValueType();
+    EVT VVT = Op.getOperand(0).getValueType();
     SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0), 
                                        DAG.getUNDEF(VVT), Mask);
     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
@@ -4365,8 +4365,8 @@
 
 SDValue
 X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG){
-  MVT VT = Op.getValueType();
-  MVT EVT = VT.getVectorElementType();
+  EVT VT = Op.getValueType();
+  EVT EVT = VT.getVectorElementType();
   DebugLoc dl = Op.getDebugLoc();
 
   SDValue N0 = Op.getOperand(0);
@@ -4379,12 +4379,12 @@
                                               : X86ISD::PINSRW;
     // Transform it so it matches pinsr{b,w}, which expects a GR32 as its
     // second argument.
-    if (N1.getValueType() != MVT::i32)
-      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
-    if (N2.getValueType() != MVT::i32)
+    if (N1.getValueType() != EVT::i32)
+      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, EVT::i32, N1);
+    if (N2.getValueType() != EVT::i32)
       N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
     return DAG.getNode(Opc, dl, VT, N0, N1, N2);
-  } else if (EVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
+  } else if (EVT == EVT::f32 && isa<ConstantSDNode>(N2)) {
     // Bits [7:6] of the constant are the source select.  This will always be
     //  zero here.  The DAG Combiner may combine an extract_elt index into these
     //  bits.  For example (insert (extract, 3), 2) could be matched by putting
@@ -4395,9 +4395,9 @@
     //   combine either bitwise AND or insert of float 0.0 to set these bits.
     N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
     // Create this as a scalar to vector.
-    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
+    N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v4f32, N1);
     return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
-  } else if (EVT == MVT::i32 && isa<ConstantSDNode>(N2)) {
+  } else if (EVT == EVT::i32 && isa<ConstantSDNode>(N2)) {
     // PINSR* works with constant index.
     return Op;
   }
@@ -4406,13 +4406,13 @@
 
 SDValue
 X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
-  MVT EVT = VT.getVectorElementType();
+  EVT VT = Op.getValueType();
+  EVT EVT = VT.getVectorElementType();
 
   if (Subtarget->hasSSE41())
     return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
 
-  if (EVT == MVT::i8)
+  if (EVT == EVT::i8)
     return SDValue();
 
   DebugLoc dl = Op.getDebugLoc();
@@ -4423,9 +4423,9 @@
   if (EVT.getSizeInBits() == 16 && isa<ConstantSDNode>(N2)) {
     // Transform it so it matches pinsrw, which expects a 16-bit value in a
     // GR32 as its second argument.
-    if (N1.getValueType() != MVT::i32)
-      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
-    if (N2.getValueType() != MVT::i32)
+    if (N1.getValueType() != EVT::i32)
+      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, EVT::i32, N1);
+    if (N2.getValueType() != EVT::i32)
       N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
     return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
   }
@@ -4435,22 +4435,22 @@
 SDValue
 X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
   DebugLoc dl = Op.getDebugLoc();
-  if (Op.getValueType() == MVT::v2f32)
-    return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f32,
-                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i32,
-                                   DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
+  if (Op.getValueType() == EVT::v2f32)
+    return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2f32,
+                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v2i32,
+                                   DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32,
                                                Op.getOperand(0))));
 
-  if (Op.getValueType() == MVT::v1i64 && Op.getOperand(0).getValueType() == MVT::i64)
-    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
+  if (Op.getValueType() == EVT::v1i64 && Op.getOperand(0).getValueType() == EVT::i64)
+    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v1i64, Op.getOperand(0));
 
-  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
-  MVT VT = MVT::v2i32;
+  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, EVT::i32, Op.getOperand(0));
+  EVT VT = EVT::v2i32;
   switch (Op.getValueType().getSimpleVT()) {
   default: break;
-  case MVT::v16i8:
-  case MVT::v8i16:
-    VT = MVT::v4i32;
+  case EVT::v16i8:
+  case EVT::v8i16:
+    VT = EVT::v4i32;
     break;
   }
   return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
@@ -4623,9 +4623,9 @@
 
 static SDValue
 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
-           SDValue *InFlag, const MVT PtrVT, unsigned ReturnReg,
+           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
            unsigned char OperandFlags) {
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   DebugLoc dl = GA->getDebugLoc();
   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
                                            GA->getValueType(0),
@@ -4645,7 +4645,7 @@
 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
 static SDValue
 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
-                                const MVT PtrVT) {
+                                const EVT PtrVT) {
   SDValue InFlag;
   DebugLoc dl = GA->getDebugLoc();  // ? function entry point might be better
   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
@@ -4660,7 +4660,7 @@
 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
 static SDValue
 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
-                                const MVT PtrVT) {
+                                const EVT PtrVT) {
   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, NULL, PtrVT,
                     X86::RAX, X86II::MO_TLSGD);
 }
@@ -4668,14 +4668,14 @@
 // Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
 // "local exec" model.
 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
-                                   const MVT PtrVT, TLSModel::Model model,
+                                   const EVT PtrVT, TLSModel::Model model,
                                    bool is64Bit) {
   DebugLoc dl = GA->getDebugLoc();
   // Get the Thread Pointer
   SDValue Base = DAG.getNode(X86ISD::SegmentBaseAddress,
                              DebugLoc::getUnknownLoc(), PtrVT,
                              DAG.getRegister(is64Bit? X86::FS : X86::GS,
-                                             MVT::i32));
+                                             EVT::i32));
 
   SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Base,
                                       NULL, 0);
@@ -4749,7 +4749,7 @@
 /// take a 2 x i32 value to shift plus a shift amount.
 SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   unsigned VTBits = VT.getSizeInBits();
   DebugLoc dl = Op.getDebugLoc();
   bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
@@ -4757,7 +4757,7 @@
   SDValue ShOpHi = Op.getOperand(1);
   SDValue ShAmt  = Op.getOperand(2);
   SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
-                                     DAG.getConstant(VTBits - 1, MVT::i8))
+                                     DAG.getConstant(VTBits - 1, EVT::i8))
                        : DAG.getConstant(0, VT);
 
   SDValue Tmp2, Tmp3;
@@ -4769,13 +4769,13 @@
     Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, ShAmt);
   }
 
-  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
-                                DAG.getConstant(VTBits, MVT::i8));
+  SDValue AndNode = DAG.getNode(ISD::AND, dl, EVT::i8, ShAmt,
+                                DAG.getConstant(VTBits, EVT::i8));
   SDValue Cond = DAG.getNode(X86ISD::CMP, dl, VT,
-                             AndNode, DAG.getConstant(0, MVT::i8));
+                             AndNode, DAG.getConstant(0, EVT::i8));
 
   SDValue Hi, Lo;
-  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+  SDValue CC = DAG.getConstant(X86::COND_NE, EVT::i8);
   SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
   SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
 
@@ -4792,23 +4792,23 @@
 }
 
 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
-  MVT SrcVT = Op.getOperand(0).getValueType();
+  EVT SrcVT = Op.getOperand(0).getValueType();
 
   if (SrcVT.isVector()) {
-    if (SrcVT == MVT::v2i32 && Op.getValueType() == MVT::v2f64) {
+    if (SrcVT == EVT::v2i32 && Op.getValueType() == EVT::v2f64) {
       return Op;
     }
     return SDValue();
   }
 
-  assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
+  assert(SrcVT.getSimpleVT() <= EVT::i64 && SrcVT.getSimpleVT() >= EVT::i16 &&
          "Unknown SINT_TO_FP to lower!");
 
   // These are really Legal; return the operand so the caller accepts it as
   // Legal.
-  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
+  if (SrcVT == EVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
     return Op;
-  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
+  if (SrcVT == EVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
       Subtarget->is64Bit()) {
     return Op;
   }
@@ -4824,7 +4824,7 @@
   return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
 }
 
-SDValue X86TargetLowering::BuildFILD(SDValue Op, MVT SrcVT, SDValue Chain,
+SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                      SDValue StackSlot,
                                      SelectionDAG &DAG) {
   // Build the FILD
@@ -4832,9 +4832,9 @@
   SDVTList Tys;
   bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
   if (useSSE)
-    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
+    Tys = DAG.getVTList(EVT::f64, EVT::Other, EVT::Flag);
   else
-    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
+    Tys = DAG.getVTList(Op.getValueType(), EVT::Other);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(StackSlot);
@@ -4852,7 +4852,7 @@
     MachineFunction &MF = DAG.getMachineFunction();
     int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
     SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
-    Tys = DAG.getVTList(MVT::Other);
+    Tys = DAG.getVTList(EVT::Other);
     SmallVector<SDValue, 8> Ops;
     Ops.push_back(Chain);
     Ops.push_back(Result);
@@ -4923,31 +4923,31 @@
   Constant *C1 = ConstantVector::get(CV1);
   SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
 
-  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
-                            DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v4i32,
+                            DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
                                         Op.getOperand(0),
                                         DAG.getIntPtrConstant(1)));
-  SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
-                            DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+  SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v4i32,
+                            DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
                                         Op.getOperand(0),
                                         DAG.getIntPtrConstant(0)));
-  SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32, XR1, XR2);
-  SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
+  SDValue Unpck1 = getUnpackl(DAG, dl, EVT::v4i32, XR1, XR2);
+  SDValue CLod0 = DAG.getLoad(EVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, 16);
-  SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
-  SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2);
-  SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
+  SDValue Unpck2 = getUnpackl(DAG, dl, EVT::v4i32, Unpck1, CLod0);
+  SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2f64, Unpck2);
+  SDValue CLod1 = DAG.getLoad(EVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                               PseudoSourceValue::getConstantPool(), 0,
                               false, 16);
-  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
+  SDValue Sub = DAG.getNode(ISD::FSUB, dl, EVT::v2f64, XR2F, CLod1);
 
   // Add the halves; easiest way is to swap them into another reg first.
   int ShufMask[2] = { 1, -1 };
-  SDValue Shuf = DAG.getVectorShuffle(MVT::v2f64, dl, Sub,
-                                      DAG.getUNDEF(MVT::v2f64), ShufMask);
-  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuf, Sub);
-  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Add,
+  SDValue Shuf = DAG.getVectorShuffle(EVT::v2f64, dl, Sub,
+                                      DAG.getUNDEF(EVT::v2f64), ShufMask);
+  SDValue Add = DAG.getNode(ISD::FADD, dl, EVT::v2f64, Shuf, Sub);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64, Add,
                      DAG.getIntPtrConstant(0));
 }
 
@@ -4956,40 +4956,40 @@
   DebugLoc dl = Op.getDebugLoc();
   // FP constant to bias correct the final result.
   SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
-                                   MVT::f64);
+                                   EVT::f64);
 
   // Load the 32-bit value into an XMM register.
-  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
-                             DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v4i32,
+                             DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
                                          Op.getOperand(0),
                                          DAG.getIntPtrConstant(0)));
 
-  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
-                     DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load),
+  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64,
+                     DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2f64, Load),
                      DAG.getIntPtrConstant(0));
 
   // Or the load with the bias.
-  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
-                           DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+  SDValue Or = DAG.getNode(ISD::OR, dl, EVT::v2i64,
+                           DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2i64,
                                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
-                                                   MVT::v2f64, Load)),
-                           DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+                                                   EVT::v2f64, Load)),
+                           DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2i64,
                                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
-                                                   MVT::v2f64, Bias)));
-  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
-                   DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or),
+                                                   EVT::v2f64, Bias)));
+  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f64,
+                   DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2f64, Or),
                    DAG.getIntPtrConstant(0));
 
   // Subtract the bias.
-  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
+  SDValue Sub = DAG.getNode(ISD::FSUB, dl, EVT::f64, Or, Bias);
 
   // Handle final rounding.
-  MVT DestVT = Op.getValueType();
+  EVT DestVT = Op.getValueType();
 
-  if (DestVT.bitsLT(MVT::f64)) {
+  if (DestVT.bitsLT(EVT::f64)) {
     return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                        DAG.getIntPtrConstant(0));
-  } else if (DestVT.bitsGT(MVT::f64)) {
+  } else if (DestVT.bitsGT(EVT::f64)) {
     return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
   }
 
@@ -5007,52 +5007,52 @@
   if (DAG.SignBitIsZero(N0))
     return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
 
-  MVT SrcVT = N0.getValueType();
-  if (SrcVT == MVT::i64) {
+  EVT SrcVT = N0.getValueType();
+  if (SrcVT == EVT::i64) {
     // We only handle SSE2 f64 target here; caller can expand the rest.
-    if (Op.getValueType() != MVT::f64 || !X86ScalarSSEf64)
+    if (Op.getValueType() != EVT::f64 || !X86ScalarSSEf64)
       return SDValue();
 
     return LowerUINT_TO_FP_i64(Op, DAG);
-  } else if (SrcVT == MVT::i32 && X86ScalarSSEf64) {
+  } else if (SrcVT == EVT::i32 && X86ScalarSSEf64) {
     return LowerUINT_TO_FP_i32(Op, DAG);
   }
 
-  assert(SrcVT == MVT::i32 && "Unknown UINT_TO_FP to lower!");
+  assert(SrcVT == EVT::i32 && "Unknown UINT_TO_FP to lower!");
 
   // Make a 64-bit buffer, and use it to build an FILD.
-  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
+  SDValue StackSlot = DAG.CreateStackTemporary(EVT::i64);
   SDValue WordOff = DAG.getConstant(4, getPointerTy());
   SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                    getPointerTy(), StackSlot, WordOff);
   SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                 StackSlot, NULL, 0);
-  SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
+  SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, EVT::i32),
                                 OffsetSlot, NULL, 0);
-  return BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
+  return BuildFILD(Op, EVT::i64, Store2, StackSlot, DAG);
 }
 
 std::pair<SDValue,SDValue> X86TargetLowering::
 FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned) {
   DebugLoc dl = Op.getDebugLoc();
 
-  MVT DstTy = Op.getValueType();
+  EVT DstTy = Op.getValueType();
 
   if (!IsSigned) {
-    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
-    DstTy = MVT::i64;
+    assert(DstTy == EVT::i32 && "Unexpected FP_TO_UINT");
+    DstTy = EVT::i64;
   }
 
-  assert(DstTy.getSimpleVT() <= MVT::i64 &&
-         DstTy.getSimpleVT() >= MVT::i16 &&
+  assert(DstTy.getSimpleVT() <= EVT::i64 &&
+         DstTy.getSimpleVT() >= EVT::i16 &&
          "Unknown FP_TO_SINT to lower!");
 
   // These are really Legal.
-  if (DstTy == MVT::i32 &&
+  if (DstTy == EVT::i32 &&
       isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
     return std::make_pair(SDValue(), SDValue());
   if (Subtarget->is64Bit() &&
-      DstTy == MVT::i64 &&
+      DstTy == EVT::i64 &&
       isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
     return std::make_pair(SDValue(), SDValue());
 
@@ -5066,18 +5066,18 @@
   unsigned Opc;
   switch (DstTy.getSimpleVT()) {
   default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
-  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
-  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
-  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
+  case EVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
+  case EVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
+  case EVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
   }
 
   SDValue Chain = DAG.getEntryNode();
   SDValue Value = Op.getOperand(0);
   if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
-    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
+    assert(DstTy == EVT::i64 && "Invalid FP_TO_SINT to lower!");
     Chain = DAG.getStore(Chain, dl, Value, StackSlot,
                          PseudoSourceValue::getFixedStack(SSFI), 0);
-    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
+    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), EVT::Other);
     SDValue Ops[] = {
       Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
     };
@@ -5089,15 +5089,15 @@
 
   // Build the FP_TO_INT*_IN_MEM
   SDValue Ops[] = { Chain, Value, StackSlot };
-  SDValue FIST = DAG.getNode(Opc, dl, MVT::Other, Ops, 3);
+  SDValue FIST = DAG.getNode(Opc, dl, EVT::Other, Ops, 3);
 
   return std::make_pair(FIST, StackSlot);
 }
 
 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
   if (Op.getValueType().isVector()) {
-    if (Op.getValueType() == MVT::v2i32 &&
-        Op.getOperand(0).getValueType() == MVT::v2f64) {
+    if (Op.getValueType() == EVT::v2i32 &&
+        Op.getOperand(0).getValueType() == EVT::v2f64) {
       return Op;
     }
     return SDValue();
@@ -5126,12 +5126,12 @@
 SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) {
   LLVMContext *Context = DAG.getContext();
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
-  MVT EltVT = VT;
+  EVT VT = Op.getValueType();
+  EVT EltVT = VT;
   if (VT.isVector())
     EltVT = VT.getVectorElementType();
   std::vector<Constant*> CV;
-  if (EltVT == MVT::f64) {
+  if (EltVT == EVT::f64) {
     Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))));
     CV.push_back(C);
     CV.push_back(C);
@@ -5153,15 +5153,15 @@
 SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) {
   LLVMContext *Context = DAG.getContext();
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
-  MVT EltVT = VT;
+  EVT VT = Op.getValueType();
+  EVT EltVT = VT;
   unsigned EltNum = 1;
   if (VT.isVector()) {
     EltVT = VT.getVectorElementType();
     EltNum = VT.getVectorNumElements();
   }
   std::vector<Constant*> CV;
-  if (EltVT == MVT::f64) {
+  if (EltVT == EVT::f64) {
     Constant *C = ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63)));
     CV.push_back(C);
     CV.push_back(C);
@@ -5179,10 +5179,10 @@
                                false, 16);
   if (VT.isVector()) {
     return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
-                       DAG.getNode(ISD::XOR, dl, MVT::v2i64,
-                    DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+                       DAG.getNode(ISD::XOR, dl, EVT::v2i64,
+                    DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2i64,
                                 Op.getOperand(0)),
-                    DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask)));
+                    DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v2i64, Mask)));
   } else {
     return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
   }
@@ -5193,8 +5193,8 @@
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
   DebugLoc dl = Op.getDebugLoc();
-  MVT VT = Op.getValueType();
-  MVT SrcVT = Op1.getValueType();
+  EVT VT = Op.getValueType();
+  EVT SrcVT = Op1.getValueType();
 
   // If second operand is smaller, extend it first.
   if (SrcVT.bitsLT(VT)) {
@@ -5212,7 +5212,7 @@
 
   // First get the sign bit of second operand.
   std::vector<Constant*> CV;
-  if (SrcVT == MVT::f64) {
+  if (SrcVT == EVT::f64) {
     CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 1ULL << 63))));
     CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
   } else {
@@ -5230,18 +5230,18 @@
 
   // Shift sign bit right or left if the two operands have different types.
   if (SrcVT.bitsGT(VT)) {
-    // Op0 is MVT::f32, Op1 is MVT::f64.
-    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
-    SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
-                          DAG.getConstant(32, MVT::i32));
-    SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit);
-    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
+    // Op0 is EVT::f32, Op1 is EVT::f64.
+    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, EVT::v2f64, SignBit);
+    SignBit = DAG.getNode(X86ISD::FSRL, dl, EVT::v2f64, SignBit,
+                          DAG.getConstant(32, EVT::i32));
+    SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::v4f32, SignBit);
+    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EVT::f32, SignBit,
                           DAG.getIntPtrConstant(0));
   }
 
   // Clear first operand sign bit.
   CV.clear();
-  if (VT == MVT::f64) {
+  if (VT == EVT::f64) {
     CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))));
     CV.push_back(ConstantFP::get(*Context, APFloat(APInt(64, 0))));
   } else {
@@ -5342,7 +5342,7 @@
       break;
     }
     if (Opcode != 0) {
-      SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
+      SDVTList VTs = DAG.getVTList(Op.getValueType(), EVT::i32);
       SmallVector<SDValue, 4> Ops;
       for (unsigned i = 0; i != NumOperands; ++i)
         Ops.push_back(Op.getOperand(i));
@@ -5353,7 +5353,7 @@
   }
 
   // Otherwise just emit a CMP with 0, which is the TEST pattern.
-  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
+  return DAG.getNode(X86ISD::CMP, dl, EVT::i32, Op,
                      DAG.getConstant(0, Op.getValueType()));
 }
 
@@ -5366,11 +5366,11 @@
       return EmitTest(Op0, X86CC, DAG);
 
   DebugLoc dl = Op0.getDebugLoc();
-  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
+  return DAG.getNode(X86ISD::CMP, dl, EVT::i32, Op0, Op1);
 }
 
 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
-  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
+  assert(Op.getValueType() == EVT::i8 && "SetCC type must be 8-bit integer");
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
   DebugLoc dl = Op.getDebugLoc();
@@ -5413,18 +5413,18 @@
       // instruction.  Since the shift amount is in-range-or-undefined, we know
       // that doing a bittest on the i16 value is ok.  We extend to i32 because
       // the encoding for the i16 version is larger than the i32 version.
-      if (LHS.getValueType() == MVT::i8)
-        LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
+      if (LHS.getValueType() == EVT::i8)
+        LHS = DAG.getNode(ISD::ANY_EXTEND, dl, EVT::i32, LHS);
 
       // If the operand types disagree, extend the shift amount to match.  Since
       // BT ignores high bits (like shifts) we can use anyextend.
       if (LHS.getValueType() != RHS.getValueType())
         RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
 
-      SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
+      SDValue BT = DAG.getNode(X86ISD::BT, dl, EVT::i32, LHS, RHS);
       unsigned Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
-      return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
-                         DAG.getConstant(Cond, MVT::i8), BT);
+      return DAG.getNode(X86ISD::SETCC, dl, EVT::i8,
+                         DAG.getConstant(Cond, EVT::i8), BT);
     }
   }
 
@@ -5432,8 +5432,8 @@
   unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
 
   SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG);
-  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
-                     DAG.getConstant(X86CC, MVT::i8), Cond);
+  return DAG.getNode(X86ISD::SETCC, dl, EVT::i8,
+                     DAG.getConstant(X86CC, EVT::i8), Cond);
 }
 
 SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
@@ -5441,16 +5441,16 @@
   SDValue Op0 = Op.getOperand(0);
   SDValue Op1 = Op.getOperand(1);
   SDValue CC = Op.getOperand(2);
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
   bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
   DebugLoc dl = Op.getDebugLoc();
 
   if (isFP) {
     unsigned SSECC = 8;
-    MVT VT0 = Op0.getValueType();
-    assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64);
-    unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
+    EVT VT0 = Op0.getValueType();
+    assert(VT0 == EVT::v4f32 || VT0 == EVT::v2f64);
+    unsigned Opc = VT0 == EVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
     bool Swap = false;
 
     switch (SetCCOpcode) {
@@ -5481,20 +5481,20 @@
     if (SSECC == 8) {
       if (SetCCOpcode == ISD::SETUEQ) {
         SDValue UNORD, EQ;
-        UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, MVT::i8));
-        EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, MVT::i8));
+        UNORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(3, EVT::i8));
+        EQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(0, EVT::i8));
         return DAG.getNode(ISD::OR, dl, VT, UNORD, EQ);
       }
       else if (SetCCOpcode == ISD::SETONE) {
         SDValue ORD, NEQ;
-        ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, MVT::i8));
-        NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
+        ORD = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(7, EVT::i8));
+        NEQ = DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(4, EVT::i8));
         return DAG.getNode(ISD::AND, dl, VT, ORD, NEQ);
       }
       llvm_unreachable("Illegal FP comparison");
     }
     // Handle all other FP comparisons here.
-    return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
+    return DAG.getNode(Opc, dl, VT, Op0, Op1, DAG.getConstant(SSECC, EVT::i8));
   }
 
   // We are handling one of the integer comparisons here.  Since SSE only has
@@ -5505,13 +5505,13 @@
 
   switch (VT.getSimpleVT()) {
   default: break;
-  case MVT::v8i8:
-  case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
-  case MVT::v4i16:
-  case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
-  case MVT::v2i32:
-  case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
-  case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
+  case EVT::v8i8:
+  case EVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
+  case EVT::v4i16:
+  case EVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
+  case EVT::v2i32:
+  case EVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
+  case EVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
   }
 
   switch (SetCCOpcode) {
@@ -5533,7 +5533,7 @@
   // Since SSE has no unsigned integer comparisons, we need to flip the sign
   // bits of the inputs before performing those operations.
   if (FlipSigns) {
-    MVT EltVT = VT.getVectorElementType();
+    EVT EltVT = VT.getVectorElementType();
     SDValue SignBit = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()),
                                       EltVT);
     std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
@@ -5585,7 +5585,7 @@
 
     SDValue Cmp = Cond.getOperand(1);
     unsigned Opc = Cmp.getOpcode();
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
 
     bool IllegalFPCMov = false;
     if (VT.isFloatingPoint() && !VT.isVector() &&
@@ -5600,11 +5600,11 @@
   }
 
   if (addTest) {
-    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+    CC = DAG.getConstant(X86::COND_NE, EVT::i8);
     Cond = EmitTest(Cond, X86::COND_NE, DAG);
   }
 
-  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
+  SDVTList VTs = DAG.getVTList(Op.getValueType(), EVT::Flag);
   SmallVector<SDValue, 4> Ops;
   // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
   // the condition is true.
@@ -5712,7 +5712,7 @@
           X86::CondCode CCode =
             (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
           CCode = X86::GetOppositeBranchCondition(CCode);
-          CC = DAG.getConstant(CCode, MVT::i8);
+          CC = DAG.getConstant(CCode, EVT::i8);
           SDValue User = SDValue(*Op.getNode()->use_begin(), 0);
           // Look for an unconditional branch following this conditional branch.
           // We need this because we need to reverse the successors in order
@@ -5729,7 +5729,7 @@
             X86::CondCode CCode =
               (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
             CCode = X86::GetOppositeBranchCondition(CCode);
-            CC = DAG.getConstant(CCode, MVT::i8);
+            CC = DAG.getConstant(CCode, EVT::i8);
             Cond = Cmp;
             addTest = false;
           }
@@ -5742,14 +5742,14 @@
       X86::CondCode CCode =
         (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
       CCode = X86::GetOppositeBranchCondition(CCode);
-      CC = DAG.getConstant(CCode, MVT::i8);
+      CC = DAG.getConstant(CCode, EVT::i8);
       Cond = Cond.getOperand(0).getOperand(1);
       addTest = false;
     }
   }
 
   if (addTest) {
-    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+    CC = DAG.getConstant(X86::COND_NE, EVT::i8);
     Cond = EmitTest(Cond, X86::COND_NE, DAG);
   }
   return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
@@ -5776,15 +5776,15 @@
 
   SDValue Flag;
 
-  MVT IntPtr = getPointerTy();
-  MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
+  EVT IntPtr = getPointerTy();
+  EVT SPTy = Subtarget->is64Bit() ? EVT::i64 : EVT::i32;
 
   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));
 
   Chain = DAG.getCopyToReg(Chain, dl, X86::EAX, Size, Flag);
   Flag = Chain.getValue(1);
 
-  SDVTList  NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList  NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SDValue Ops[] = { Chain,
                       DAG.getTargetExternalSymbol("_alloca", IntPtr),
                       DAG.getRegister(X86::EAX, IntPtr),
@@ -5827,7 +5827,7 @@
 
     if (const char *bzeroEntry =  V &&
         V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
-      MVT IntPtr = getPointerTy();
+      EVT IntPtr = getPointerTy();
       const Type *IntPtrTy = TD->getIntPtrType();
       TargetLowering::ArgListTy Args;
       TargetLowering::ArgListEntry Entry;
@@ -5849,7 +5849,7 @@
 
   uint64_t SizeVal = ConstantSize->getZExtValue();
   SDValue InFlag(0, 0);
-  MVT AVT;
+  EVT AVT;
   SDValue Count;
   ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
   unsigned BytesLeft = 0;
@@ -5861,29 +5861,29 @@
     // If the value is a constant, then we can potentially use larger sets.
     switch (Align & 3) {
     case 2:   // WORD aligned
-      AVT = MVT::i16;
+      AVT = EVT::i16;
       ValReg = X86::AX;
       Val = (Val << 8) | Val;
       break;
     case 0:  // DWORD aligned
-      AVT = MVT::i32;
+      AVT = EVT::i32;
       ValReg = X86::EAX;
       Val = (Val << 8)  | Val;
       Val = (Val << 16) | Val;
       if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) {  // QWORD aligned
-        AVT = MVT::i64;
+        AVT = EVT::i64;
         ValReg = X86::RAX;
         Val = (Val << 32) | Val;
       }
       break;
     default:  // Byte aligned
-      AVT = MVT::i8;
+      AVT = EVT::i8;
       ValReg = X86::AL;
       Count = DAG.getIntPtrConstant(SizeVal);
       break;
     }
 
-    if (AVT.bitsGT(MVT::i8)) {
+    if (AVT.bitsGT(EVT::i8)) {
       unsigned UBytes = AVT.getSizeInBits() / 8;
       Count = DAG.getIntPtrConstant(SizeVal / UBytes);
       BytesLeft = SizeVal % UBytes;
@@ -5893,7 +5893,7 @@
                               InFlag);
     InFlag = Chain.getValue(1);
   } else {
-    AVT = MVT::i8;
+    AVT = EVT::i8;
     Count  = DAG.getIntPtrConstant(SizeVal);
     Chain  = DAG.getCopyToReg(Chain, dl, X86::AL, Src, InFlag);
     InFlag = Chain.getValue(1);
@@ -5908,7 +5908,7 @@
                             Dst, InFlag);
   InFlag = Chain.getValue(1);
 
-  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(DAG.getValueType(AVT));
@@ -5918,24 +5918,24 @@
   if (TwoRepStos) {
     InFlag = Chain.getValue(1);
     Count  = Size;
-    MVT CVT = Count.getValueType();
+    EVT CVT = Count.getValueType();
     SDValue Left = DAG.getNode(ISD::AND, dl, CVT, Count,
-                               DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
-    Chain  = DAG.getCopyToReg(Chain, dl, (CVT == MVT::i64) ? X86::RCX :
+                               DAG.getConstant((AVT == EVT::i64) ? 7 : 3, CVT));
+    Chain  = DAG.getCopyToReg(Chain, dl, (CVT == EVT::i64) ? X86::RCX :
                                                              X86::ECX,
                               Left, InFlag);
     InFlag = Chain.getValue(1);
-    Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+    Tys = DAG.getVTList(EVT::Other, EVT::Flag);
     Ops.clear();
     Ops.push_back(Chain);
-    Ops.push_back(DAG.getValueType(MVT::i8));
+    Ops.push_back(DAG.getValueType(EVT::i8));
     Ops.push_back(InFlag);
     Chain  = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size());
   } else if (BytesLeft) {
     // Handle the last 1 - 7 bytes.
     unsigned Offset = SizeVal - BytesLeft;
-    MVT AddrVT = Dst.getValueType();
-    MVT SizeVT = Size.getValueType();
+    EVT AddrVT = Dst.getValueType();
+    EVT SizeVT = Size.getValueType();
 
     Chain = DAG.getMemset(Chain, dl,
                           DAG.getNode(ISD::ADD, dl, AddrVT, Dst,
@@ -5970,9 +5970,9 @@
     return SDValue();
 
   // DWORD aligned
-  MVT AVT = MVT::i32;
+  EVT AVT = EVT::i32;
   if (Subtarget->is64Bit() && ((Align & 0x7) == 0))  // QWORD aligned
-    AVT = MVT::i64;
+    AVT = EVT::i64;
 
   unsigned UBytes = AVT.getSizeInBits() / 8;
   unsigned CountVal = SizeVal / UBytes;
@@ -5993,7 +5993,7 @@
                             Src, InFlag);
   InFlag = Chain.getValue(1);
 
-  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(DAG.getValueType(AVT));
@@ -6005,9 +6005,9 @@
   if (BytesLeft) {
     // Handle the last 1 - 7 bytes.
     unsigned Offset = SizeVal - BytesLeft;
-    MVT DstVT = Dst.getValueType();
-    MVT SrcVT = Src.getValueType();
-    MVT SizeVT = Size.getValueType();
+    EVT DstVT = Dst.getValueType();
+    EVT SrcVT = Src.getValueType();
+    EVT SizeVT = Size.getValueType();
     Results.push_back(DAG.getMemcpy(Chain, dl,
                                     DAG.getNode(ISD::ADD, dl, DstVT, Dst,
                                                 DAG.getConstant(Offset, DstVT)),
@@ -6019,7 +6019,7 @@
                                     SrcSV, SrcSVOff + Offset));
   }
 
-  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+  return DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                      &Results[0], Results.size());
 }
 
@@ -6043,7 +6043,7 @@
   SDValue FIN = Op.getOperand(1);
   // Store gp_offset
   SDValue Store = DAG.getStore(Op.getOperand(0), dl,
-                                 DAG.getConstant(VarArgsGPOffset, MVT::i32),
+                                 DAG.getConstant(VarArgsGPOffset, EVT::i32),
                                  FIN, SV, 0);
   MemOps.push_back(Store);
 
@@ -6051,7 +6051,7 @@
   FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                     FIN, DAG.getIntPtrConstant(4));
   Store = DAG.getStore(Op.getOperand(0), dl,
-                       DAG.getConstant(VarArgsFPOffset, MVT::i32),
+                       DAG.getConstant(VarArgsFPOffset, EVT::i32),
                        FIN, SV, 0);
   MemOps.push_back(Store);
 
@@ -6068,7 +6068,7 @@
   SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
   Store = DAG.getStore(Op.getOperand(0), dl, RSFIN, FIN, SV, 0);
   MemOps.push_back(Store);
-  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+  return DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                      &MemOps[0], MemOps.size());
 }
 
@@ -6198,10 +6198,10 @@
     SDValue LHS = Op.getOperand(1);
     SDValue RHS = Op.getOperand(2);
     unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
-    SDValue Cond = DAG.getNode(Opc, dl, MVT::i32, LHS, RHS);
-    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
-                                DAG.getConstant(X86CC, MVT::i8), Cond);
-    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+    SDValue Cond = DAG.getNode(Opc, dl, EVT::i32, LHS, RHS);
+    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, EVT::i8,
+                                DAG.getConstant(X86CC, EVT::i8), Cond);
+    return DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i32, SetCC);
   }
   // ptest intrinsics. The intrinsics these come from are designed to return
   // an integer value, not just an instruction, so lower it to the ptest
@@ -6228,10 +6228,10 @@
        
     SDValue LHS = Op.getOperand(1);
     SDValue RHS = Op.getOperand(2);
-    SDValue Test = DAG.getNode(X86ISD::PTEST, dl, MVT::i32, LHS, RHS);
-    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
-    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
-    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
+    SDValue Test = DAG.getNode(X86ISD::PTEST, dl, EVT::i32, LHS, RHS);
+    SDValue CC = DAG.getConstant(X86CC, EVT::i8);
+    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, EVT::i8, CC, Test);
+    return DAG.getNode(ISD::ZERO_EXTEND, dl, EVT::i32, SetCC);
   }
 
   // Fix vector shift instructions where the last operand is a non-immediate
@@ -6257,7 +6257,7 @@
       return SDValue();
 
     unsigned NewIntNo = 0;
-    MVT ShAmtVT = MVT::v4i32;
+    EVT ShAmtVT = EVT::v4i32;
     switch (IntNo) {
     case Intrinsic::x86_sse2_pslli_w:
       NewIntNo = Intrinsic::x86_sse2_psll_w;
@@ -6284,7 +6284,7 @@
       NewIntNo = Intrinsic::x86_sse2_psra_d;
       break;
     default: {
-      ShAmtVT = MVT::v2i32;
+      ShAmtVT = EVT::v2i32;
       switch (IntNo) {
       case Intrinsic::x86_mmx_pslli_w:
         NewIntNo = Intrinsic::x86_mmx_psll_w;
@@ -6315,11 +6315,11 @@
       break;
     }
     }
-    MVT VT = Op.getValueType();
+    EVT VT = Op.getValueType();
     ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT,
                         DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShAmtVT, ShAmt));
     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(NewIntNo, MVT::i32),
+                       DAG.getConstant(NewIntNo, EVT::i32),
                        Op.getOperand(1), ShAmt);
   }
   }
@@ -6333,7 +6333,7 @@
     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
     SDValue Offset =
       DAG.getConstant(TD->getPointerSize(),
-                      Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
+                      Subtarget->is64Bit() ? EVT::i64 : EVT::i32);
     return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                        DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                    FrameAddr, Offset),
@@ -6349,7 +6349,7 @@
 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
   MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
   MFI->setFrameAddressIsTaken(true);
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();  // FIXME probably not meaningful
   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
   unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
@@ -6384,7 +6384,7 @@
   MF.getRegInfo().addLiveOut(StoreAddrReg);
 
   return DAG.getNode(X86ISD::EH_RETURN, dl,
-                     MVT::Other,
+                     EVT::Other,
                      Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
 }
 
@@ -6417,40 +6417,40 @@
     // Load the pointer to the nested function into R11.
     unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
     SDValue Addr = Trmp;
-    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
+    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, EVT::i16),
                                 Addr, TrmpAddr, 0);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
-                       DAG.getConstant(2, MVT::i64));
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i64, Trmp,
+                       DAG.getConstant(2, EVT::i64));
     OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr, TrmpAddr, 2, false, 2);
 
     // Load the 'nest' parameter value into R10.
     // R10 is specified in X86CallingConv.td
     OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
-                       DAG.getConstant(10, MVT::i64));
-    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i64, Trmp,
+                       DAG.getConstant(10, EVT::i64));
+    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, EVT::i16),
                                 Addr, TrmpAddr, 10);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
-                       DAG.getConstant(12, MVT::i64));
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i64, Trmp,
+                       DAG.getConstant(12, EVT::i64));
     OutChains[3] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 12, false, 2);
 
     // Jump to the nested function.
     OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
-                       DAG.getConstant(20, MVT::i64));
-    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i64, Trmp,
+                       DAG.getConstant(20, EVT::i64));
+    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, EVT::i16),
                                 Addr, TrmpAddr, 20);
 
     unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
-                       DAG.getConstant(22, MVT::i64));
-    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i64, Trmp,
+                       DAG.getConstant(22, EVT::i64));
+    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, EVT::i8), Addr,
                                 TrmpAddr, 22);
 
     SDValue Ops[] =
-      { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 6) };
+      { Trmp, DAG.getNode(ISD::TokenFactor, dl, EVT::Other, OutChains, 6) };
     return DAG.getMergeValues(Ops, 2, dl);
   } else {
     const Function *Func =
@@ -6498,32 +6498,32 @@
     SDValue OutChains[4];
     SDValue Addr, Disp;
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
-                       DAG.getConstant(10, MVT::i32));
-    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i32, Trmp,
+                       DAG.getConstant(10, EVT::i32));
+    Disp = DAG.getNode(ISD::SUB, dl, EVT::i32, FPtr, Addr);
 
     const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
     const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
     OutChains[0] = DAG.getStore(Root, dl,
-                                DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
+                                DAG.getConstant(MOV32ri|N86Reg, EVT::i8),
                                 Trmp, TrmpAddr, 0);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
-                       DAG.getConstant(1, MVT::i32));
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i32, Trmp,
+                       DAG.getConstant(1, EVT::i32));
     OutChains[1] = DAG.getStore(Root, dl, Nest, Addr, TrmpAddr, 1, false, 1);
 
     const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
-                       DAG.getConstant(5, MVT::i32));
-    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i32, Trmp,
+                       DAG.getConstant(5, EVT::i32));
+    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, EVT::i8), Addr,
                                 TrmpAddr, 5, false, 1);
 
-    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
-                       DAG.getConstant(6, MVT::i32));
+    Addr = DAG.getNode(ISD::ADD, dl, EVT::i32, Trmp,
+                       DAG.getConstant(6, EVT::i32));
     OutChains[3] = DAG.getStore(Root, dl, Disp, Addr, TrmpAddr, 6, false, 1);
 
     SDValue Ops[] =
-      { Trmp, DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 4) };
+      { Trmp, DAG.getNode(ISD::TokenFactor, dl, EVT::Other, OutChains, 4) };
     return DAG.getMergeValues(Ops, 2, dl);
   }
 }
@@ -6552,37 +6552,37 @@
   const TargetMachine &TM = MF.getTarget();
   const TargetFrameInfo &TFI = *TM.getFrameInfo();
   unsigned StackAlignment = TFI.getStackAlignment();
-  MVT VT = Op.getValueType();
+  EVT VT = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
 
   // Save FP Control Word to stack slot
   int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
   SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
 
-  SDValue Chain = DAG.getNode(X86ISD::FNSTCW16m, dl, MVT::Other,
+  SDValue Chain = DAG.getNode(X86ISD::FNSTCW16m, dl, EVT::Other,
                               DAG.getEntryNode(), StackSlot);
 
   // Load FP Control Word from stack slot
-  SDValue CWD = DAG.getLoad(MVT::i16, dl, Chain, StackSlot, NULL, 0);
+  SDValue CWD = DAG.getLoad(EVT::i16, dl, Chain, StackSlot, NULL, 0);
 
   // Transform as necessary
   SDValue CWD1 =
-    DAG.getNode(ISD::SRL, dl, MVT::i16,
-                DAG.getNode(ISD::AND, dl, MVT::i16,
-                            CWD, DAG.getConstant(0x800, MVT::i16)),
-                DAG.getConstant(11, MVT::i8));
+    DAG.getNode(ISD::SRL, dl, EVT::i16,
+                DAG.getNode(ISD::AND, dl, EVT::i16,
+                            CWD, DAG.getConstant(0x800, EVT::i16)),
+                DAG.getConstant(11, EVT::i8));
   SDValue CWD2 =
-    DAG.getNode(ISD::SRL, dl, MVT::i16,
-                DAG.getNode(ISD::AND, dl, MVT::i16,
-                            CWD, DAG.getConstant(0x400, MVT::i16)),
-                DAG.getConstant(9, MVT::i8));
+    DAG.getNode(ISD::SRL, dl, EVT::i16,
+                DAG.getNode(ISD::AND, dl, EVT::i16,
+                            CWD, DAG.getConstant(0x400, EVT::i16)),
+                DAG.getConstant(9, EVT::i8));
 
   SDValue RetVal =
-    DAG.getNode(ISD::AND, dl, MVT::i16,
-                DAG.getNode(ISD::ADD, dl, MVT::i16,
-                            DAG.getNode(ISD::OR, dl, MVT::i16, CWD1, CWD2),
-                            DAG.getConstant(1, MVT::i16)),
-                DAG.getConstant(3, MVT::i16));
+    DAG.getNode(ISD::AND, dl, EVT::i16,
+                DAG.getNode(ISD::ADD, dl, EVT::i16,
+                            DAG.getNode(ISD::OR, dl, EVT::i16, CWD1, CWD2),
+                            DAG.getConstant(1, EVT::i16)),
+                DAG.getConstant(3, EVT::i16));
 
 
   return DAG.getNode((VT.getSizeInBits() < 16 ?
@@ -6590,70 +6590,70 @@
 }
 
 SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
-  MVT OpVT = VT;
+  EVT VT = Op.getValueType();
+  EVT OpVT = VT;
   unsigned NumBits = VT.getSizeInBits();
   DebugLoc dl = Op.getDebugLoc();
 
   Op = Op.getOperand(0);
-  if (VT == MVT::i8) {
+  if (VT == EVT::i8) {
     // Zero extend to i32 since there is not an i8 bsr.
-    OpVT = MVT::i32;
+    OpVT = EVT::i32;
     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
   }
 
   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
-  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
+  SDVTList VTs = DAG.getVTList(OpVT, EVT::i32);
   Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
 
   // If src is zero (i.e. bsr sets ZF), returns NumBits.
   SmallVector<SDValue, 4> Ops;
   Ops.push_back(Op);
   Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
-  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
+  Ops.push_back(DAG.getConstant(X86::COND_E, EVT::i8));
   Ops.push_back(Op.getValue(1));
   Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4);
 
   // Finally xor with NumBits-1.
   Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
 
-  if (VT == MVT::i8)
-    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
+  if (VT == EVT::i8)
+    Op = DAG.getNode(ISD::TRUNCATE, dl, EVT::i8, Op);
   return Op;
 }
 
 SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
-  MVT OpVT = VT;
+  EVT VT = Op.getValueType();
+  EVT OpVT = VT;
   unsigned NumBits = VT.getSizeInBits();
   DebugLoc dl = Op.getDebugLoc();
 
   Op = Op.getOperand(0);
-  if (VT == MVT::i8) {
-    OpVT = MVT::i32;
+  if (VT == EVT::i8) {
+    OpVT = EVT::i32;
     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
   }
 
   // Issue a bsf (scan bits forward) which also sets EFLAGS.
-  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
+  SDVTList VTs = DAG.getVTList(OpVT, EVT::i32);
   Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
 
   // If src is zero (i.e. bsf sets ZF), returns NumBits.
   SmallVector<SDValue, 4> Ops;
   Ops.push_back(Op);
   Ops.push_back(DAG.getConstant(NumBits, OpVT));
-  Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
+  Ops.push_back(DAG.getConstant(X86::COND_E, EVT::i8));
   Ops.push_back(Op.getValue(1));
   Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4);
 
-  if (VT == MVT::i8)
-    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
+  if (VT == EVT::i8)
+    Op = DAG.getNode(ISD::TRUNCATE, dl, EVT::i8, Op);
   return Op;
 }
 
 SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) {
-  MVT VT = Op.getValueType();
-  assert(VT == MVT::v2i64 && "Only know how to lower V2I64 multiply");
+  EVT VT = Op.getValueType();
+  assert(VT == EVT::v2i64 && "Only know how to lower V2I64 multiply");
   DebugLoc dl = Op.getDebugLoc();
 
   //  ulong2 Ahi = __builtin_ia32_psrlqi128( a, 32);
@@ -6670,26 +6670,26 @@
   SDValue B = Op.getOperand(1);
 
   SDValue Ahi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
-                       A, DAG.getConstant(32, MVT::i32));
+                       DAG.getConstant(Intrinsic::x86_sse2_psrli_q, EVT::i32),
+                       A, DAG.getConstant(32, EVT::i32));
   SDValue Bhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
-                       B, DAG.getConstant(32, MVT::i32));
+                       DAG.getConstant(Intrinsic::x86_sse2_psrli_q, EVT::i32),
+                       B, DAG.getConstant(32, EVT::i32));
   SDValue AloBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
+                       DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, EVT::i32),
                        A, B);
   SDValue AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
+                       DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, EVT::i32),
                        A, Bhi);
   SDValue AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, MVT::i32),
+                       DAG.getConstant(Intrinsic::x86_sse2_pmulu_dq, EVT::i32),
                        Ahi, B);
   AloBhi = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
-                       AloBhi, DAG.getConstant(32, MVT::i32));
+                       DAG.getConstant(Intrinsic::x86_sse2_pslli_q, EVT::i32),
+                       AloBhi, DAG.getConstant(32, EVT::i32));
   AhiBlo = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
-                       DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
-                       AhiBlo, DAG.getConstant(32, MVT::i32));
+                       DAG.getConstant(Intrinsic::x86_sse2_pslli_q, EVT::i32),
+                       AhiBlo, DAG.getConstant(32, EVT::i32));
   SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
   Res = DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
   return Res;
@@ -6753,29 +6753,29 @@
   }
 
   // Also sets EFLAGS.
-  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
+  SDVTList VTs = DAG.getVTList(N->getValueType(0), EVT::i32);
   SDValue Sum = DAG.getNode(BaseOp, dl, VTs, LHS, RHS);
 
   SDValue SetCC =
     DAG.getNode(X86ISD::SETCC, dl, N->getValueType(1),
-                DAG.getConstant(Cond, MVT::i32), SDValue(Sum.getNode(), 1));
+                DAG.getConstant(Cond, EVT::i32), SDValue(Sum.getNode(), 1));
 
   DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
   return Sum;
 }
 
 SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) {
-  MVT T = Op.getValueType();
+  EVT T = Op.getValueType();
   DebugLoc dl = Op.getDebugLoc();
   unsigned Reg = 0;
   unsigned size = 0;
   switch(T.getSimpleVT()) {
   default:
     assert(false && "Invalid value type!");
-  case MVT::i8:  Reg = X86::AL;  size = 1; break;
-  case MVT::i16: Reg = X86::AX;  size = 2; break;
-  case MVT::i32: Reg = X86::EAX; size = 4; break;
-  case MVT::i64:
+  case EVT::i8:  Reg = X86::AL;  size = 1; break;
+  case EVT::i16: Reg = X86::AX;  size = 2; break;
+  case EVT::i32: Reg = X86::EAX; size = 4; break;
+  case EVT::i64:
     assert(Subtarget->is64Bit() && "Node not type legal!");
     Reg = X86::RAX; size = 8;
     break;
@@ -6785,9 +6785,9 @@
   SDValue Ops[] = { cpIn.getValue(0),
                     Op.getOperand(1),
                     Op.getOperand(3),
-                    DAG.getTargetConstant(size, MVT::i8),
+                    DAG.getTargetConstant(size, EVT::i8),
                     cpIn.getValue(1) };
-  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
   SDValue Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, dl, Tys, Ops, 5);
   SDValue cpOut =
     DAG.getCopyFromReg(Result.getValue(0), dl, Reg, T, Result.getValue(1));
@@ -6797,17 +6797,17 @@
 SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                  SelectionDAG &DAG) {
   assert(Subtarget->is64Bit() && "Result not type legalized?");
-  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
   SDValue TheChain = Op.getOperand(0);
   DebugLoc dl = Op.getDebugLoc();
   SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
-  SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, MVT::i64, rd.getValue(1));
-  SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, MVT::i64,
+  SDValue rax = DAG.getCopyFromReg(rd, dl, X86::RAX, EVT::i64, rd.getValue(1));
+  SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), dl, X86::RDX, EVT::i64,
                                    rax.getValue(2));
-  SDValue Tmp = DAG.getNode(ISD::SHL, dl, MVT::i64, rdx,
-                            DAG.getConstant(32, MVT::i8));
+  SDValue Tmp = DAG.getNode(ISD::SHL, dl, EVT::i64, rdx,
+                            DAG.getConstant(32, EVT::i8));
   SDValue Ops[] = {
-    DAG.getNode(ISD::OR, dl, MVT::i64, rax, Tmp),
+    DAG.getNode(ISD::OR, dl, EVT::i64, rax, Tmp),
     rdx.getValue(1)
   };
   return DAG.getMergeValues(Ops, 2, dl);
@@ -6816,7 +6816,7 @@
 SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
   SDNode *Node = Op.getNode();
   DebugLoc dl = Node->getDebugLoc();
-  MVT T = Node->getValueType(0);
+  EVT T = Node->getValueType(0);
   SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
                               DAG.getConstant(0, T), Node->getOperand(2));
   return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
@@ -6886,24 +6886,24 @@
 void X86TargetLowering::
 ReplaceATOMIC_BINARY_64(SDNode *Node, SmallVectorImpl<SDValue>&Results,
                         SelectionDAG &DAG, unsigned NewOp) {
-  MVT T = Node->getValueType(0);
+  EVT T = Node->getValueType(0);
   DebugLoc dl = Node->getDebugLoc();
-  assert (T == MVT::i64 && "Only know how to expand i64 atomics");
+  assert (T == EVT::i64 && "Only know how to expand i64 atomics");
 
   SDValue Chain = Node->getOperand(0);
   SDValue In1 = Node->getOperand(1);
-  SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+  SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
                              Node->getOperand(2), DAG.getIntPtrConstant(0));
-  SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
+  SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
                              Node->getOperand(2), DAG.getIntPtrConstant(1));
   // This is a generalized SDNode, not an AtomicSDNode, so it doesn't
   // have a MemOperand.  Pass the info through as a normal operand.
   SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
   SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };
-  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
+  SDVTList Tys = DAG.getVTList(EVT::i32, EVT::i32, EVT::Other);
   SDValue Result = DAG.getNode(NewOp, dl, Tys, Ops, 5);
   SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
-  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
+  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, OpsF, 2));
   Results.push_back(Result.getValue(2));
 }
 
@@ -6922,42 +6922,42 @@
         FP_TO_INTHelper(SDValue(N, 0), DAG, true);
     SDValue FIST = Vals.first, StackSlot = Vals.second;
     if (FIST.getNode() != 0) {
-      MVT VT = N->getValueType(0);
+      EVT VT = N->getValueType(0);
       // Return a load from the stack slot.
       Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot, NULL, 0));
     }
     return;
   }
   case ISD::READCYCLECOUNTER: {
-    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+    SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
     SDValue TheChain = N->getOperand(0);
     SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, dl, Tys, &TheChain, 1);
-    SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, MVT::i32,
+    SDValue eax = DAG.getCopyFromReg(rd, dl, X86::EAX, EVT::i32,
                                      rd.getValue(1));
-    SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, MVT::i32,
+    SDValue edx = DAG.getCopyFromReg(eax.getValue(1), dl, X86::EDX, EVT::i32,
                                      eax.getValue(2));
     // Use a buildpair to merge the two 32-bit values into a 64-bit one.
     SDValue Ops[] = { eax, edx };
-    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Ops, 2));
+    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, Ops, 2));
     Results.push_back(edx.getValue(1));
     return;
   }
   case ISD::ATOMIC_CMP_SWAP: {
-    MVT T = N->getValueType(0);
-    assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
+    EVT T = N->getValueType(0);
+    assert (T == EVT::i64 && "Only know how to expand i64 Cmp and Swap");
     SDValue cpInL, cpInH;
-    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
-                        DAG.getConstant(0, MVT::i32));
-    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(2),
-                        DAG.getConstant(1, MVT::i32));
+    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, N->getOperand(2),
+                        DAG.getConstant(0, EVT::i32));
+    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, N->getOperand(2),
+                        DAG.getConstant(1, EVT::i32));
     cpInL = DAG.getCopyToReg(N->getOperand(0), dl, X86::EAX, cpInL, SDValue());
     cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, X86::EDX, cpInH,
                              cpInL.getValue(1));
     SDValue swapInL, swapInH;
-    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
-                          DAG.getConstant(0, MVT::i32));
-    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(3),
-                          DAG.getConstant(1, MVT::i32));
+    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, N->getOperand(3),
+                          DAG.getConstant(0, EVT::i32));
+    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, N->getOperand(3),
+                          DAG.getConstant(1, EVT::i32));
     swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl, X86::EBX, swapInL,
                                cpInH.getValue(1));
     swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl, X86::ECX, swapInH,
@@ -6965,14 +6965,14 @@
     SDValue Ops[] = { swapInH.getValue(0),
                       N->getOperand(1),
                       swapInH.getValue(1) };
-    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
+    SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
     SDValue Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, 3);
     SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, X86::EAX,
-                                        MVT::i32, Result.getValue(1));
+                                        EVT::i32, Result.getValue(1));
     SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, X86::EDX,
-                                        MVT::i32, cpOutL.getValue(2));
+                                        EVT::i32, cpOutL.getValue(2));
     SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
-    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2));
+    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, OpsF, 2));
     Results.push_back(cpOutH.getValue(1));
     return;
   }
@@ -7144,7 +7144,7 @@
   return Subtarget->is64Bit() || NumBits1 < 64;
 }
 
-bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const {
+bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
   if (!VT1.isInteger() || !VT2.isInteger())
     return false;
   unsigned NumBits1 = VT1.getSizeInBits();
@@ -7159,14 +7159,14 @@
   return Ty1 == Type::Int32Ty && Ty2 == Type::Int64Ty && Subtarget->is64Bit();
 }
 
-bool X86TargetLowering::isZExtFree(MVT VT1, MVT VT2) const {
+bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
-  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
+  return VT1 == EVT::i32 && VT2 == EVT::i64 && Subtarget->is64Bit();
 }
 
-bool X86TargetLowering::isNarrowingProfitable(MVT VT1, MVT VT2) const {
+bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
   // i16 instructions are longer (0x66 prefix) and potentially slower.
-  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
+  return !(VT1 == EVT::i32 && VT2 == EVT::i16);
 }
 
 /// isShuffleMaskLegal - Targets can use this to indicate that they only
@@ -7175,7 +7175,7 @@
 /// are assumed to be legal.
 bool
 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 
-                                      MVT VT) const {
+                                      EVT VT) const {
   // Only do shuffles on 128-bit vector types for now.
   if (VT.getSizeInBits() == 64)
     return false;
@@ -7196,7 +7196,7 @@
 
 bool
 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
-                                          MVT VT) const {
+                                          EVT VT) const {
   unsigned NumElts = VT.getVectorNumElements();
   // FIXME: This collection of masks seems suspect.
   if (NumElts == 2)
@@ -7941,7 +7941,7 @@
 }
 
 static bool EltsFromConsecutiveLoads(ShuffleVectorSDNode *N, unsigned NumElems,
-                                     MVT EVT, LoadSDNode *&LDBase,
+                                     EVT EVT, LoadSDNode *&LDBase,
                                      unsigned &LastLoadedElt,
                                      SelectionDAG &DAG, MachineFrameInfo *MFI,
                                      const TargetLowering &TLI) {
@@ -7985,8 +7985,8 @@
 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                      const TargetLowering &TLI) {
   DebugLoc dl = N->getDebugLoc();
-  MVT VT = N->getValueType(0);
-  MVT EVT = VT.getVectorElementType();
+  EVT VT = N->getValueType(0);
+  EVT EVT = VT.getVectorElementType();
   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
   unsigned NumElems = VT.getVectorNumElements();
 
@@ -8010,7 +8010,7 @@
                        LD->getSrcValue(), LD->getSrcValueOffset(),
                        LD->isVolatile(), LD->getAlignment());
   } else if (NumElems == 4 && LastLoadedElt == 1) {
-    SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
+    SDVTList Tys = DAG.getVTList(EVT::v2i64, EVT::Other);
     SDValue Ops[] = { LD->getChain(), LD->getBasePtr() };
     SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, 2);
     return DAG.getNode(ISD::BIT_CONVERT, dl, VT, ResNode);
@@ -8029,7 +8029,7 @@
   
   // If we have SSE[12] support, try to form min/max nodes.
   if (Subtarget->hasSSE2() &&
-      (LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64) &&
+      (LHS.getValueType() == EVT::f32 || LHS.getValueType() == EVT::f64) &&
       Cond.getOpcode() == ISD::SETCC) {
     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
 
@@ -8117,7 +8117,7 @@
           
           unsigned ShAmt = TrueC->getAPIntValue().logBase2();
           return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
-                             DAG.getConstant(ShAmt, MVT::i8));
+                             DAG.getConstant(ShAmt, EVT::i8));
         }
         
         // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
@@ -8135,9 +8135,9 @@
         
         // Optimize cases that will turn into an LEA instruction.  This requires
         // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
-        if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
+        if (N->getValueType(0) == EVT::i32 || N->getValueType(0) == EVT::i64) {
           uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
-          if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
+          if (N->getValueType(0) == EVT::i32) Diff = (unsigned)Diff;
           
           bool isFastMultiplier = false;
           if (Diff < 10) {
@@ -8210,15 +8210,15 @@
       // shift amount.
       if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
         SDValue Cond = N->getOperand(3);
-        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
-                           DAG.getConstant(CC, MVT::i8), Cond);
+        Cond = DAG.getNode(X86ISD::SETCC, DL, EVT::i8,
+                           DAG.getConstant(CC, EVT::i8), Cond);
       
         // Zero extend the condition if needed.
         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
         
         unsigned ShAmt = TrueC->getAPIntValue().logBase2();
         Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
-                           DAG.getConstant(ShAmt, MVT::i8));
+                           DAG.getConstant(ShAmt, EVT::i8));
         if (N->getNumValues() == 2)  // Dead flag value?
           return DCI.CombineTo(N, Cond, SDValue());
         return Cond;
@@ -8228,8 +8228,8 @@
       // for any integer data type, including i8/i16.
       if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
         SDValue Cond = N->getOperand(3);
-        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
-                           DAG.getConstant(CC, MVT::i8), Cond);
+        Cond = DAG.getNode(X86ISD::SETCC, DL, EVT::i8,
+                           DAG.getConstant(CC, EVT::i8), Cond);
         
         // Zero extend the condition if needed.
         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
@@ -8244,9 +8244,9 @@
       
       // Optimize cases that will turn into an LEA instruction.  This requires
       // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
-      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
+      if (N->getValueType(0) == EVT::i32 || N->getValueType(0) == EVT::i64) {
         uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
-        if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
+        if (N->getValueType(0) == EVT::i32) Diff = (unsigned)Diff;
        
         bool isFastMultiplier = false;
         if (Diff < 10) {
@@ -8267,8 +8267,8 @@
         if (isFastMultiplier) {
           APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
           SDValue Cond = N->getOperand(3);
-          Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
-                             DAG.getConstant(CC, MVT::i8), Cond);
+          Cond = DAG.getNode(X86ISD::SETCC, DL, EVT::i8,
+                             DAG.getConstant(CC, EVT::i8), Cond);
           // Zero extend the condition if needed.
           Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                              Cond);
@@ -8304,8 +8304,8 @@
   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
     return SDValue();
 
-  MVT VT = N->getValueType(0);
-  if (VT != MVT::i64)
+  EVT VT = N->getValueType(0);
+  if (VT != EVT::i64)
     return SDValue();
 
   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
@@ -8341,14 +8341,14 @@
     SDValue NewMul;
     if (isPowerOf2_64(MulAmt1)) 
       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
-                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
+                           DAG.getConstant(Log2_64(MulAmt1), EVT::i8));
     else
       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                            DAG.getConstant(MulAmt1, VT));
 
     if (isPowerOf2_64(MulAmt2)) 
       NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
-                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
+                           DAG.getConstant(Log2_64(MulAmt2), EVT::i8));
     else 
       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                            DAG.getConstant(MulAmt2, VT));
@@ -8371,12 +8371,12 @@
   if (!Subtarget->hasSSE2())
     return SDValue();
 
-  MVT VT = N->getValueType(0);
-  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16)
+  EVT VT = N->getValueType(0);
+  if (VT != EVT::v2i64 && VT != EVT::v4i32 && VT != EVT::v8i16)
     return SDValue();
 
   SDValue ShAmtOp = N->getOperand(1);
-  MVT EltVT = VT.getVectorElementType();
+  EVT EltVT = VT.getVectorElementType();
   DebugLoc DL = N->getDebugLoc();
   SDValue BaseShAmt;
   if (ShAmtOp.getOpcode() == ISD::BUILD_VECTOR) {
@@ -8402,10 +8402,10 @@
   } else
     return SDValue();
 
-  if (EltVT.bitsGT(MVT::i32))
-    BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, BaseShAmt);
-  else if (EltVT.bitsLT(MVT::i32))
-    BaseShAmt = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, BaseShAmt);
+  if (EltVT.bitsGT(EVT::i32))
+    BaseShAmt = DAG.getNode(ISD::TRUNCATE, DL, EVT::i32, BaseShAmt);
+  else if (EltVT.bitsLT(EVT::i32))
+    BaseShAmt = DAG.getNode(ISD::ANY_EXTEND, DL, EVT::i32, BaseShAmt);
 
   // The shift amount is identical, so we can do a vector shift.
   SDValue  ValOp = N->getOperand(0);
@@ -8414,41 +8414,41 @@
     llvm_unreachable("Unknown shift opcode!");
     break;
   case ISD::SHL:
-    if (VT == MVT::v2i64)
+    if (VT == EVT::v2i64)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_pslli_q, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_pslli_q, EVT::i32),
                          ValOp, BaseShAmt);
-    if (VT == MVT::v4i32)
+    if (VT == EVT::v4i32)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_pslli_d, EVT::i32),
                          ValOp, BaseShAmt);
-    if (VT == MVT::v8i16)
+    if (VT == EVT::v8i16)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_pslli_w, EVT::i32),
                          ValOp, BaseShAmt);
     break;
   case ISD::SRA:
-    if (VT == MVT::v4i32)
+    if (VT == EVT::v4i32)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_psrai_d, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_psrai_d, EVT::i32),
                          ValOp, BaseShAmt);
-    if (VT == MVT::v8i16)
+    if (VT == EVT::v8i16)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_psrai_w, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_psrai_w, EVT::i32),
                          ValOp, BaseShAmt);
     break;
   case ISD::SRL:
-    if (VT == MVT::v2i64)
+    if (VT == EVT::v2i64)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_psrli_q, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_psrli_q, EVT::i32),
                          ValOp, BaseShAmt);
-    if (VT == MVT::v4i32)
+    if (VT == EVT::v4i32)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_psrli_d, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_psrli_d, EVT::i32),
                          ValOp, BaseShAmt);
-    if (VT ==  MVT::v8i16)
+    if (VT ==  EVT::v8i16)
       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
-                         DAG.getConstant(Intrinsic::x86_sse2_psrli_w, MVT::i32),
+                         DAG.getConstant(Intrinsic::x86_sse2_psrli_w, EVT::i32),
                          ValOp, BaseShAmt);
     break;
   }
@@ -8465,7 +8465,7 @@
 
   // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
   StoreSDNode *St = cast<StoreSDNode>(N);
-  MVT VT = St->getValue().getValueType();
+  EVT VT = St->getValue().getValueType();
   if (VT.getSizeInBits() != 64)
     return SDValue();
 
@@ -8474,7 +8474,7 @@
   bool F64IsLegal = !UseSoftFloat && !NoImplicitFloatOps 
     && Subtarget->hasSSE2();
   if ((VT.isVector() ||
-       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
+       (VT == EVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
       isa<LoadSDNode>(St->getValue()) &&
       !cast<LoadSDNode>(St->getValue())->isVolatile() &&
       St->getChain().hasOneUse() && !St->isVolatile()) {
@@ -8514,7 +8514,7 @@
     // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
     // pair instead.
     if (Subtarget->is64Bit() || F64IsLegal) {
-      MVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
+      EVT LdVT = Subtarget->is64Bit() ? EVT::i64 : EVT::f64;
       SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(),
                                   Ld->getBasePtr(), Ld->getSrcValue(),
                                   Ld->getSrcValueOffset(), Ld->isVolatile(),
@@ -8522,7 +8522,7 @@
       SDValue NewChain = NewLd.getValue(1);
       if (TokenFactorIndex != -1) {
         Ops.push_back(NewChain);
-        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
+        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, EVT::Other, &Ops[0],
                                Ops.size());
       }
       return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
@@ -8532,13 +8532,13 @@
 
     // Otherwise, lower to two pairs of 32-bit loads / stores.
     SDValue LoAddr = Ld->getBasePtr();
-    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
-                                 DAG.getConstant(4, MVT::i32));
+    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, EVT::i32, LoAddr,
+                                 DAG.getConstant(4, EVT::i32));
 
-    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
+    SDValue LoLd = DAG.getLoad(EVT::i32, LdDL, Ld->getChain(), LoAddr,
                                Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                Ld->isVolatile(), Ld->getAlignment());
-    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
+    SDValue HiLd = DAG.getLoad(EVT::i32, LdDL, Ld->getChain(), HiAddr,
                                Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                Ld->isVolatile(),
                                MinAlign(Ld->getAlignment(), 4));
@@ -8547,13 +8547,13 @@
     if (TokenFactorIndex != -1) {
       Ops.push_back(LoLd);
       Ops.push_back(HiLd);
-      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, &Ops[0],
+      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, EVT::Other, &Ops[0],
                              Ops.size());
     }
 
     LoAddr = St->getBasePtr();
-    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
-                         DAG.getConstant(4, MVT::i32));
+    HiAddr = DAG.getNode(ISD::ADD, StDL, EVT::i32, LoAddr,
+                         DAG.getConstant(4, EVT::i32));
 
     SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                 St->getSrcValue(), St->getSrcValueOffset(),
@@ -8563,7 +8563,7 @@
                                 St->getSrcValueOffset() + 4,
                                 St->isVolatile(),
                                 MinAlign(St->getAlignment(), 4));
-    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
+    return DAG.getNode(ISD::TokenFactor, StDL, EVT::Other, LoSt, HiSt);
   }
   return SDValue();
 }
@@ -8618,7 +8618,7 @@
   SDValue Op = N->getOperand(0);
   if (Op.getOpcode() == ISD::BIT_CONVERT)
     Op = Op.getOperand(0);
-  MVT VT = N->getValueType(0), OpVT = Op.getValueType();
+  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
   if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
       VT.getVectorElementType().getSizeInBits() == 
       OpVT.getVectorElementType().getSizeInBits()) {
@@ -8833,7 +8833,7 @@
 /// with another that has more specific requirements based on the type of the
 /// corresponding operand.
 const char *X86TargetLowering::
-LowerXConstraint(MVT ConstraintVT) const {
+LowerXConstraint(EVT ConstraintVT) const {
   // FP X constraints get lowered to SSE1/2 registers if available, otherwise
   // 'f' like normal targets.
   if (ConstraintVT.isFloatingPoint()) {
@@ -8895,7 +8895,7 @@
       const ConstantInt *CI = C->getConstantIntValue();
       if (CI->isValueValidForType(Type::Int32Ty, C->getSExtValue())) {
         // Widen to 64 bits here to get it sign extended.
-        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
+        Result = DAG.getTargetConstant(C->getSExtValue(), EVT::i64);
         break;
       }
     // FIXME gcc accepts some relocatable values here too, but only in certain
@@ -8920,7 +8920,7 @@
     // Literal immediates are always ok.
     if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
       // Widen to 64 bits here to get it sign extended.
-      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
+      Result = DAG.getTargetConstant(CST->getSExtValue(), EVT::i64);
       break;
     }
 
@@ -8978,33 +8978,33 @@
 
 std::vector<unsigned> X86TargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT VT) const {
+                                  EVT VT) const {
   if (Constraint.size() == 1) {
     // FIXME: not handling fp-stack yet!
     switch (Constraint[0]) {      // GCC X86 Constraint Letters
     default: break;  // Unknown constraint letter
     case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
       if (Subtarget->is64Bit()) {
-        if (VT == MVT::i32)
+        if (VT == EVT::i32)
           return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
                                        X86::ESI, X86::EDI, X86::R8D, X86::R9D,
                                        X86::R10D,X86::R11D,X86::R12D,
                                        X86::R13D,X86::R14D,X86::R15D,
                                        X86::EBP, X86::ESP, 0);
-        else if (VT == MVT::i16)
+        else if (VT == EVT::i16)
           return make_vector<unsigned>(X86::AX,  X86::DX,  X86::CX, X86::BX,
                                        X86::SI,  X86::DI,  X86::R8W,X86::R9W,
                                        X86::R10W,X86::R11W,X86::R12W,
                                        X86::R13W,X86::R14W,X86::R15W,
                                        X86::BP,  X86::SP, 0);
-        else if (VT == MVT::i8)
+        else if (VT == EVT::i8)
           return make_vector<unsigned>(X86::AL,  X86::DL,  X86::CL, X86::BL,
                                        X86::SIL, X86::DIL, X86::R8B,X86::R9B,
                                        X86::R10B,X86::R11B,X86::R12B,
                                        X86::R13B,X86::R14B,X86::R15B,
                                        X86::BPL, X86::SPL, 0);
 
-        else if (VT == MVT::i64)
+        else if (VT == EVT::i64)
           return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX,
                                        X86::RSI, X86::RDI, X86::R8,  X86::R9,
                                        X86::R10, X86::R11, X86::R12,
@@ -9015,13 +9015,13 @@
       }
       // 32-bit fallthrough 
     case 'Q':   // Q_REGS
-      if (VT == MVT::i32)
+      if (VT == EVT::i32)
         return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
-      else if (VT == MVT::i16)
+      else if (VT == EVT::i16)
         return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
-      else if (VT == MVT::i8)
+      else if (VT == EVT::i8)
         return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
-      else if (VT == MVT::i64)
+      else if (VT == EVT::i64)
         return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
       break;
     }
@@ -9032,7 +9032,7 @@
 
 std::pair<unsigned, const TargetRegisterClass*>
 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
-                                                MVT VT) const {
+                                                EVT VT) const {
   // First, see if this is a constraint that directly corresponds to an LLVM
   // register class.
   if (Constraint.size() == 1) {
@@ -9042,19 +9042,19 @@
     case 'r':   // GENERAL_REGS
     case 'R':   // LEGACY_REGS
     case 'l':   // INDEX_REGS
-      if (VT == MVT::i8)
+      if (VT == EVT::i8)
         return std::make_pair(0U, X86::GR8RegisterClass);
-      if (VT == MVT::i16)
+      if (VT == EVT::i16)
         return std::make_pair(0U, X86::GR16RegisterClass);
-      if (VT == MVT::i32 || !Subtarget->is64Bit())
+      if (VT == EVT::i32 || !Subtarget->is64Bit())
         return std::make_pair(0U, X86::GR32RegisterClass);
       return std::make_pair(0U, X86::GR64RegisterClass);
     case 'f':  // FP Stack registers.
       // If SSE is enabled for this VT, use f80 to ensure the isel moves the
       // value to the correct fpstack register class.
-      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
+      if (VT == EVT::f32 && !isScalarFPTypeInSSEReg(VT))
         return std::make_pair(0U, X86::RFP32RegisterClass);
-      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
+      if (VT == EVT::f64 && !isScalarFPTypeInSSEReg(VT))
         return std::make_pair(0U, X86::RFP64RegisterClass);
       return std::make_pair(0U, X86::RFP80RegisterClass);
     case 'y':   // MMX_REGS if MMX allowed.
@@ -9069,19 +9069,19 @@
       switch (VT.getSimpleVT()) {
       default: break;
       // Scalar SSE types.
-      case MVT::f32:
-      case MVT::i32:
+      case EVT::f32:
+      case EVT::i32:
         return std::make_pair(0U, X86::FR32RegisterClass);
-      case MVT::f64:
-      case MVT::i64:
+      case EVT::f64:
+      case EVT::i64:
         return std::make_pair(0U, X86::FR64RegisterClass);
       // Vector types.
-      case MVT::v16i8:
-      case MVT::v8i16:
-      case MVT::v4i32:
-      case MVT::v2i64:
-      case MVT::v4f32:
-      case MVT::v2f64:
+      case EVT::v16i8:
+      case EVT::v8i16:
+      case EVT::v4i32:
+      case EVT::v2i64:
+      case EVT::v4f32:
+      case EVT::v2f64:
         return std::make_pair(0U, X86::VR128RegisterClass);
       }
       break;
@@ -9119,7 +9119,7 @@
   // really want an 8-bit or 32-bit register, map to the appropriate register
   // class and return the appropriate register.
   if (Res.second == X86::GR16RegisterClass) {
-    if (VT == MVT::i8) {
+    if (VT == EVT::i8) {
       unsigned DestReg = 0;
       switch (Res.first) {
       default: break;
@@ -9132,7 +9132,7 @@
         Res.first = DestReg;
         Res.second = X86::GR8RegisterClass;
       }
-    } else if (VT == MVT::i32) {
+    } else if (VT == EVT::i32) {
       unsigned DestReg = 0;
       switch (Res.first) {
       default: break;
@@ -9149,7 +9149,7 @@
         Res.first = DestReg;
         Res.second = X86::GR32RegisterClass;
       }
-    } else if (VT == MVT::i64) {
+    } else if (VT == EVT::i64) {
       unsigned DestReg = 0;
       switch (Res.first) {
       default: break;
@@ -9174,9 +9174,9 @@
     // wrong class.  This can happen with constraints like {xmm0} where the
     // target independent register mapper will just pick the first match it can
     // find, ignoring the required type.
-    if (VT == MVT::f32)
+    if (VT == EVT::f32)
       Res.second = X86::FR32RegisterClass;
-    else if (VT == MVT::f64)
+    else if (VT == EVT::f64)
       Res.second = X86::FR64RegisterClass;
     else if (X86::VR128RegisterClass->hasType(VT))
       Res.second = X86::VR128RegisterClass;
@@ -9191,11 +9191,11 @@
 
 /// getWidenVectorType: given a vector type, returns the type to widen
 /// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-/// If there is no vector type that we want to widen to, returns MVT::Other
+/// If there is no vector type that we want to widen to, returns EVT::Other.
 /// When and where to widen is target dependent based on the cost of
 /// scalarizing vs using the wider vector type.
 
-MVT X86TargetLowering::getWidenVectorType(MVT VT) const {
+EVT X86TargetLowering::getWidenVectorType(EVT VT) const {
   assert(VT.isVector());
   if (isTypeLegal(VT))
     return VT;
@@ -9204,21 +9204,21 @@
   //       type based on element type.  This would speed up our search (though
   //       it may not be worth it since the size of the list is relatively
   //       small).
-  MVT EltVT = VT.getVectorElementType();
+  EVT EltVT = VT.getVectorElementType();
   unsigned NElts = VT.getVectorNumElements();
 
   // On X86, it makes sense to widen any vector wider than 1 element
   if (NElts <= 1)
-    return MVT::Other;
+    return EVT::Other;
 
-  for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
-       nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
-    MVT SVT = (MVT::SimpleValueType)nVT;
+  for (unsigned nVT = EVT::FIRST_VECTOR_VALUETYPE;
+       nVT <= EVT::LAST_VECTOR_VALUETYPE; ++nVT) {
+    EVT SVT = (EVT::SimpleValueType)nVT;
 
     if (isTypeLegal(SVT) &&
         SVT.getVectorElementType() == EltVT &&
         SVT.getVectorNumElements() > NElts)
       return SVT;
   }
-  return MVT::Other;
+  return EVT::Other;
 }
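
(Aside, not part of the patch: a minimal caller sketch for the widening hook above. TLI is assumed to be the target's TargetLowering instance and VT an illegal vector type such as v7i8; both names are illustrative only.)

    // Ask the target for a wider legal vector type; EVT::Other means
    // the target would rather scalarize.
    EVT WideVT = TLI.getWidenVectorType(VT);   // e.g. v7i8 -> v8i8 on X86
    if (WideVT != EVT::Other) {
      // ... operate on WideVT and use only the low elements ...
    } else {
      // ... fall back to element-by-element scalarization ...
    }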
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 459b7be..d9c745b 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -382,10 +382,10 @@
 
     /// getOptimalMemOpType - Returns the target specific optimal type for load
     /// and store operations as a result of memset, memcpy, and memmove
-    /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
+    /// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
     /// determining it.
     virtual
-    MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
+    EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
                             bool isSrcConst, bool isSrcStr,
                             SelectionDAG &DAG) const;
 
@@ -411,7 +411,7 @@
     virtual const char *getTargetNodeName(unsigned Opcode) const;
 
     /// getSetCCResultType - Return the ISD::SETCC ValueType
-    virtual MVT::SimpleValueType getSetCCResultType(MVT VT) const;
+    virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
     /// computeMaskedBitsForTargetNode - Determine which of the bits specified 
     /// in Mask are known to be either zero or one and return them in the 
@@ -434,9 +434,9 @@
      
     std::vector<unsigned> 
       getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                        MVT VT) const;
+                                        EVT VT) const;
 
-    virtual const char *LowerXConstraint(MVT ConstraintVT) const;
+    virtual const char *LowerXConstraint(EVT ConstraintVT) const;
 
     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
     /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is
@@ -454,7 +454,7 @@
     /// error, this returns a register number of 0.
     std::pair<unsigned, const TargetRegisterClass*> 
       getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const;
+                                   EVT VT) const;
     
     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
@@ -464,7 +464,7 @@
     /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
     /// register EAX to i16 by referencing its sub-register AX.
     virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
-    virtual bool isTruncateFree(MVT VT1, MVT VT2) const;
+    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
 
     /// isZExtFree - Return true if any actual instruction that defines a
     /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
@@ -475,35 +475,35 @@
     /// all instructions that define 32-bit values implicitly zero-extend the
     /// result out to 64 bits.
     virtual bool isZExtFree(const Type *Ty1, const Type *Ty2) const;
-    virtual bool isZExtFree(MVT VT1, MVT VT2) const;
+    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
 
     /// isNarrowingProfitable - Return true if it's profitable to narrow
     /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
     /// from i32 to i8 but not from i32 to i16.
-    virtual bool isNarrowingProfitable(MVT VT1, MVT VT2) const;
+    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;
 
     /// isShuffleMaskLegal - Targets can use this to indicate that they only
     /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
     /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
     /// values are assumed to be legal.
     virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
-                                    MVT VT) const;
+                                    EVT VT) const;
 
     /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
     /// use this to indicate if there is a suitable
     /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
     /// pool entry.
     virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
-                                        MVT VT) const;
+                                        EVT VT) const;
 
     /// ShouldShrinkFPConstant - If true, then instruction selection should
     /// seek to shrink the FP constant of the specified type to a smaller type
     /// in order to save space and / or reduce runtime.
-    virtual bool ShouldShrinkFPConstant(MVT VT) const {
+    virtual bool ShouldShrinkFPConstant(EVT VT) const {
       // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
       // expensive than a straight movsd. On the other hand, it's important to
       // shrink long double fp constant since fldt is very slow.
-      return !X86ScalarSSEf64 || VT == MVT::f80;
+      return !X86ScalarSSEf64 || VT == EVT::f80;
     }
     
     /// IsEligibleForTailCallOptimization - Check whether the call is eligible
@@ -522,17 +522,17 @@
 
     /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
     /// computed in an SSE register, not on the X87 floating point stack.
-    bool isScalarFPTypeInSSEReg(MVT VT) const {
-      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
-      (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
+    bool isScalarFPTypeInSSEReg(EVT VT) const {
+      return (VT == EVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
+      (VT == EVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
     }
 
     /// getWidenVectorType: given a vector type, returns the type to widen
     /// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-    /// If there is no vector type that we want to widen to, returns MVT::Other
+    /// If there is no vector type that we want to widen to, returns EVT::Other.
     /// When and where to widen is target dependent based on the cost of
     /// scalarizing vs using the wider vector type.
-    virtual MVT getWidenVectorType(MVT VT) const;
+    virtual EVT getWidenVectorType(EVT VT) const;
 
     /// createFastISel - This method returns a target specific FastISel object,
     /// or null if the target does not support "fast" ISel.
@@ -610,7 +610,7 @@
     SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
     SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
     SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
-    SDValue BuildFILD(SDValue Op, MVT SrcVT, SDValue Chain, SDValue StackSlot,
+    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                       SelectionDAG &DAG);
     SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
     SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG);
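
(Aside, not part of the patch: a sketch of consulting the renamed isScalarFPTypeInSSEReg hook. X86Lowering stands in for the X86TargetLowering instance and Op for some scalar FP SDValue; both names are illustrative only.)

    // Decide between SSE (XMM) and x87 handling for a scalar FP value.
    EVT VT = Op.getValueType();
    if (X86Lowering.isScalarFPTypeInSSEReg(VT)) {
      // f32 with SSE1 or f64 with SSE2: keep the value in an XMM register.
    } else {
      // Otherwise the value lives on the x87 stack (the RFP register classes).
    }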
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index b59f8e8..79bd0af 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2525,24 +2525,24 @@
   SDNode *Load = 0;
   const MachineFunction &MF = DAG.getMachineFunction();
   if (FoldedLoad) {
-    MVT VT = *RC->vt_begin();
+    EVT VT = *RC->vt_begin();
     bool isAligned = (RI.getStackAlignment() >= 16) ||
       RI.needsStackRealignment(MF);
     Load = DAG.getTargetNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
-                             VT, MVT::Other, &AddrOps[0], AddrOps.size());
+                             VT, EVT::Other, &AddrOps[0], AddrOps.size());
     NewNodes.push_back(Load);
   }
 
   // Emit the data processing instruction.
-  std::vector<MVT> VTs;
+  std::vector<EVT> VTs;
   const TargetRegisterClass *DstRC = 0;
   if (TID.getNumDefs() > 0) {
     DstRC = TID.OpInfo[0].getRegClass(&RI);
     VTs.push_back(*DstRC->vt_begin());
   }
   for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
-    MVT VT = N->getValueType(i);
-    if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
+    EVT VT = N->getValueType(i);
+    if (VT != EVT::Other && i >= (unsigned)TID.getNumDefs())
       VTs.push_back(VT);
   }
   if (Load)
@@ -2561,7 +2561,7 @@
       RI.needsStackRealignment(MF);
     SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(0, DstRC,
                                                         isAligned, TM),
-                                      dl, MVT::Other,
+                                      dl, EVT::Other,
                                       &AddrOps[0], AddrOps.size());
     NewNodes.push_back(Store);
   }
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index cae6290..09457a1 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -1262,10 +1262,10 @@
 }
 
 namespace llvm {
-unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
+unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
   switch (VT.getSimpleVT()) {
   default: return Reg;
-  case MVT::i8:
+  case EVT::i8:
     if (High) {
       switch (Reg) {
       default: return 0;
@@ -1315,7 +1315,7 @@
         return X86::R15B;
       }
     }
-  case MVT::i16:
+  case EVT::i16:
     switch (Reg) {
     default: return Reg;
     case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
@@ -1351,7 +1351,7 @@
     case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
       return X86::R15W;
     }
-  case MVT::i32:
+  case EVT::i32:
     switch (Reg) {
     default: return Reg;
     case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
@@ -1387,7 +1387,7 @@
     case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
       return X86::R15D;
     }
-  case MVT::i64:
+  case EVT::i64:
     switch (Reg) {
     default: return Reg;
     case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index f6c119d..c89a57c 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -163,8 +163,8 @@
 
 // getX86SubSuperRegister - X86 utility function. It returns the sub or super
 // register of a specific X86 register.
-// e.g. getX86SubSuperRegister(X86::EAX, MVT::i16) return X86:AX
-unsigned getX86SubSuperRegister(unsigned, MVT, bool High=false);
+// e.g. getX86SubSuperRegister(X86::EAX, EVT::i16) returns X86::AX
+unsigned getX86SubSuperRegister(unsigned, EVT, bool High=false);
 
 } // End llvm namespace
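
(Aside, not part of the patch: a usage sketch of getX86SubSuperRegister with the renamed type, following the switch in X86RegisterInfo.cpp above.)

    unsigned Reg16 = getX86SubSuperRegister(X86::EAX, EVT::i16);       // X86::AX
    unsigned Reg64 = getX86SubSuperRegister(X86::EAX, EVT::i64);       // X86::RAX
    unsigned High8 = getX86SubSuperRegister(X86::EAX, EVT::i8, true);  // X86::AH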
 
diff --git a/lib/Target/XCore/XCoreISelDAGToDAG.cpp b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
index ebd0a26..750747d 100644
--- a/lib/Target/XCore/XCoreISelDAGToDAG.cpp
+++ b/lib/Target/XCore/XCoreISelDAGToDAG.cpp
@@ -54,7 +54,7 @@
     /// getI32Imm - Return a target constant with the specified value, of type
     /// i32.
     inline SDValue getI32Imm(unsigned Imm) {
-      return CurDAG->getTargetConstant(Imm, MVT::i32);
+      return CurDAG->getTargetConstant(Imm, EVT::i32);
     }
 
     // Complex Pattern Selectors.
@@ -87,8 +87,8 @@
                                   SDValue &Base, SDValue &Offset) {
   FrameIndexSDNode *FIN = 0;
   if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
-    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
+    Offset = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
   if (Addr.getOpcode() == ISD::ADD) {
@@ -97,8 +97,8 @@
       && (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
       && (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
       // Constant positive word offset from frame index
-      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
       return true;
     }
   }
@@ -109,7 +109,7 @@
                                   SDValue &Base, SDValue &Offset) {
   if (Addr.getOpcode() == XCoreISD::DPRelativeWrapper) {
     Base = Addr.getOperand(0);
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    Offset = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
   if (Addr.getOpcode() == ISD::ADD) {
@@ -119,7 +119,7 @@
       && (CN->getSExtValue() % 4 == 0)) {
       // Constant word offset from an object in the data region
       Base = Addr.getOperand(0).getOperand(0);
-      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
       return true;
     }
   }
@@ -130,7 +130,7 @@
                                   SDValue &Base, SDValue &Offset) {
   if (Addr.getOpcode() == XCoreISD::CPRelativeWrapper) {
     Base = Addr.getOperand(0);
-    Offset = CurDAG->getTargetConstant(0, MVT::i32);
+    Offset = CurDAG->getTargetConstant(0, EVT::i32);
     return true;
   }
   if (Addr.getOpcode() == ISD::ADD) {
@@ -140,7 +140,7 @@
       && (CN->getSExtValue() % 4 == 0)) {
       // Constant word offset from an object in the constant pool
       Base = Addr.getOperand(0).getOperand(0);
-      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
+      Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
       return true;
     }
   }
@@ -162,22 +162,22 @@
 SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
   SDNode *N = Op.getNode();
   DebugLoc dl = N->getDebugLoc();
-  MVT NVT = N->getValueType(0);
-  if (NVT == MVT::i32) {
+  EVT NVT = N->getValueType(0);
+  if (NVT == EVT::i32) {
     switch (N->getOpcode()) {
       default: break;
       case ISD::Constant: {
         if (Predicate_immMskBitp(N)) {
           SDValue MskSize = Transform_msksize_xform(N);
-          return CurDAG->getTargetNode(XCore::MKMSK_rus, dl, MVT::i32, MskSize);
+          return CurDAG->getTargetNode(XCore::MKMSK_rus, dl, EVT::i32, MskSize);
         }
         else if (! Predicate_immU16(N)) {
           unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
           SDValue CPIdx =
             CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
                                           TLI.getPointerTy());
-          return CurDAG->getTargetNode(XCore::LDWCP_lru6, dl, MVT::i32, 
-                                       MVT::Other, CPIdx, 
+          return CurDAG->getTargetNode(XCore::LDWCP_lru6, dl, EVT::i32, 
+                                       EVT::Other, CPIdx, 
                                        CurDAG->getEntryNode());
         }
         break;
@@ -185,11 +185,11 @@
       case ISD::SMUL_LOHI: {
         // FIXME fold addition into the macc instruction
         if (!Subtarget.isXS1A()) {
-          SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, MVT::i32,
-                                  CurDAG->getTargetConstant(0, MVT::i32)), 0);
+          SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, EVT::i32,
+                                  CurDAG->getTargetConstant(0, EVT::i32)), 0);
           SDValue Ops[] = { Zero, Zero, Op.getOperand(0), Op.getOperand(1) };
           SDNode *ResNode = CurDAG->getTargetNode(XCore::MACCS_l4r, dl,
-                                                  MVT::i32, MVT::i32, Ops, 4);
+                                                  EVT::i32, EVT::i32, Ops, 4);
           ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1));
           ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0));
           return NULL;
@@ -198,12 +198,12 @@
       }
       case ISD::UMUL_LOHI: {
         // FIXME fold addition into the macc / lmul instruction
-        SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, MVT::i32,
-                                  CurDAG->getTargetConstant(0, MVT::i32)), 0);
+        SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, EVT::i32,
+                                  CurDAG->getTargetConstant(0, EVT::i32)), 0);
         SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
                             Zero, Zero };
-        SDNode *ResNode = CurDAG->getTargetNode(XCore::LMUL_l6r, dl, MVT::i32,
-                                                MVT::i32, Ops, 4);
+        SDNode *ResNode = CurDAG->getTargetNode(XCore::LMUL_l6r, dl, EVT::i32,
+                                                EVT::i32, Ops, 4);
         ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1));
         ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0));
         return NULL;
@@ -212,7 +212,7 @@
         if (!Subtarget.isXS1A()) {
           SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
                               Op.getOperand(2) };
-          return CurDAG->getTargetNode(XCore::LADD_l5r, dl, MVT::i32, MVT::i32,
+          return CurDAG->getTargetNode(XCore::LADD_l5r, dl, EVT::i32, EVT::i32,
                                        Ops, 3);
         }
         break;
@@ -221,7 +221,7 @@
         if (!Subtarget.isXS1A()) {
           SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
                               Op.getOperand(2) };
-          return CurDAG->getTargetNode(XCore::LSUB_l5r, dl, MVT::i32, MVT::i32,
+          return CurDAG->getTargetNode(XCore::LSUB_l5r, dl, EVT::i32, EVT::i32,
                                        Ops, 3);
         }
         break;
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 38648e0..cc1f1f7 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -61,7 +61,7 @@
     Subtarget(*XTM.getSubtargetImpl()) {
 
   // Set up the register classes.
-  addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);
+  addRegisterClass(EVT::i32, XCore::GRRegsRegisterClass);
 
   // Compute derived properties from the register classes
   computeRegisterProperties();
@@ -69,7 +69,7 @@
   // Division is expensive
   setIntDivIsCheap(false);
 
-  setShiftAmountType(MVT::i32);
+  setShiftAmountType(EVT::i32);
   setStackPointerRegisterToSaveRestore(XCore::SP);
 
   setSchedulingPreference(SchedulingForRegPressure);
@@ -78,75 +78,75 @@
   setBooleanContents(ZeroOrOneBooleanContent);
 
   // XCore does not have the NodeTypes below.
-  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
-  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
-  setOperationAction(ISD::ADDC, MVT::i32, Expand);
-  setOperationAction(ISD::ADDE, MVT::i32, Expand);
-  setOperationAction(ISD::SUBC, MVT::i32, Expand);
-  setOperationAction(ISD::SUBE, MVT::i32, Expand);
+  setOperationAction(ISD::BR_CC,     EVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC, EVT::i32,   Custom);
+  setOperationAction(ISD::ADDC, EVT::i32, Expand);
+  setOperationAction(ISD::ADDE, EVT::i32, Expand);
+  setOperationAction(ISD::SUBC, EVT::i32, Expand);
+  setOperationAction(ISD::SUBE, EVT::i32, Expand);
 
   // Stop the combiner recombining select and set_cc
-  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+  setOperationAction(ISD::SELECT_CC, EVT::Other, Expand);
   
   // 64bit
   if (!Subtarget.isXS1A()) {
-    setOperationAction(ISD::ADD, MVT::i64, Custom);
-    setOperationAction(ISD::SUB, MVT::i64, Custom);
+    setOperationAction(ISD::ADD, EVT::i64, Custom);
+    setOperationAction(ISD::SUB, EVT::i64, Custom);
   }
   if (Subtarget.isXS1A()) {
-    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+    setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
   }
-  setOperationAction(ISD::MULHS, MVT::i32, Expand);
-  setOperationAction(ISD::MULHU, MVT::i32, Expand);
-  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
-  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::MULHS, EVT::i32, Expand);
+  setOperationAction(ISD::MULHU, EVT::i32, Expand);
+  setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
   
   // Bit Manipulation
-  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
-  setOperationAction(ISD::ROTL , MVT::i32, Expand);
-  setOperationAction(ISD::ROTR , MVT::i32, Expand);
+  setOperationAction(ISD::CTPOP, EVT::i32, Expand);
+  setOperationAction(ISD::ROTL , EVT::i32, Expand);
+  setOperationAction(ISD::ROTR , EVT::i32, Expand);
   
-  setOperationAction(ISD::TRAP, MVT::Other, Legal);
+  setOperationAction(ISD::TRAP, EVT::Other, Legal);
   
   // Expand jump tables for now
-  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
-  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+  setOperationAction(ISD::BR_JT, EVT::Other, Expand);
+  setOperationAction(ISD::JumpTable, EVT::i32, Custom);
 
-  setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
+  setOperationAction(ISD::GlobalAddress, EVT::i32,   Custom);
   
   // Thread Local Storage
-  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
   
   // Conversion of i64 -> double produces constantpool nodes
-  setOperationAction(ISD::ConstantPool, MVT::i32,   Custom);
+  setOperationAction(ISD::ConstantPool, EVT::i32,   Custom);
 
   // Loads
-  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+  setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
 
-  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
-  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
+  setLoadExtAction(ISD::SEXTLOAD, EVT::i8, Expand);
+  setLoadExtAction(ISD::ZEXTLOAD, EVT::i16, Expand);
 
   // Custom expand misaligned loads / stores.
-  setOperationAction(ISD::LOAD, MVT::i32, Custom);
-  setOperationAction(ISD::STORE, MVT::i32, Custom);
+  setOperationAction(ISD::LOAD, EVT::i32, Custom);
+  setOperationAction(ISD::STORE, EVT::i32, Custom);
 
   // Varargs
-  setOperationAction(ISD::VAEND, MVT::Other, Expand);
-  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
-  setOperationAction(ISD::VAARG, MVT::Other, Custom);
-  setOperationAction(ISD::VASTART, MVT::Other, Custom);
+  setOperationAction(ISD::VAEND, EVT::Other, Expand);
+  setOperationAction(ISD::VACOPY, EVT::Other, Expand);
+  setOperationAction(ISD::VAARG, EVT::Other, Custom);
+  setOperationAction(ISD::VASTART, EVT::Other, Custom);
   
   // Dynamic stack
-  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
-  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
-  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+  setOperationAction(ISD::STACKSAVE, EVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Expand);
   
   // Debug
-  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
-  setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
+  setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
+  setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
 
   maxStoresPerMemset = 4;
   maxStoresPerMemmove = maxStoresPerMemcpy = 2;
@@ -208,9 +208,9 @@
 LowerSELECT_CC(SDValue Op, SelectionDAG &DAG)
 {
   DebugLoc dl = Op.getDebugLoc();
-  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
+  SDValue Cond = DAG.getNode(ISD::SETCC, dl, EVT::i32, Op.getOperand(2),
                              Op.getOperand(3), Op.getOperand(4));
-  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
+  return DAG.getNode(ISD::SELECT, dl, EVT::i32, Cond, Op.getOperand(0),
                      Op.getOperand(1));
 }
 
@@ -220,7 +220,7 @@
   // FIXME there is no actual debug info here
   DebugLoc dl = GA.getDebugLoc();
   if (isa<Function>(GV)) {
-    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
+    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, EVT::i32, GA);
   } else if (!Subtarget.isXS1A()) {
     const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
     if (!GVar) {
@@ -230,17 +230,17 @@
     }
     bool isConst = GVar && GVar->isConstant();
     if (isConst) {
-      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
+      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, EVT::i32, GA);
     }
   }
-  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
+  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, EVT::i32, GA);
 }
 
 SDValue XCoreTargetLowering::
 LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
 {
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+  SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
   // If it's a debug information descriptor, don't mess with it.
   if (DAG.isVerifiedDebugInfoDesc(Op))
     return GA;
@@ -248,8 +248,8 @@
 }
 
 static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
-  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
-                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
+  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, EVT::i32,
+                     DAG.getConstant(Intrinsic::xcore_getid, EVT::i32));
 }
 
 static inline bool isZeroLengthArray(const Type *Ty) {
@@ -264,7 +264,7 @@
   DebugLoc dl = Op.getDebugLoc();
   // transform to label + getid() * size
   GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
-  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
+  SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
   if (!GVar) {
     // If GV is an alias then use the aliasee to determine size
@@ -286,9 +286,9 @@
   SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
   const TargetData *TD = TM.getTargetData();
   unsigned Size = TD->getTypeAllocSize(Ty);
-  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
-                       DAG.getConstant(Size, MVT::i32));
-  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
+  SDValue offset = DAG.getNode(ISD::MUL, dl, EVT::i32, BuildGetId(DAG, dl),
+                       DAG.getConstant(Size, EVT::i32));
+  return DAG.getNode(ISD::ADD, dl, EVT::i32, base, offset);
 }
 
 SDValue XCoreTargetLowering::
@@ -301,7 +301,7 @@
     llvm_unreachable("Lowering of constant pool unimplemented");
     return SDValue();
   } else {
-    MVT PtrVT = Op.getValueType();
+    EVT PtrVT = Op.getValueType();
     SDValue Res;
     if (CP->isMachineConstantPoolEntry()) {
       Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
@@ -310,7 +310,7 @@
       Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                       CP->getAlignment());
     }
-    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
+    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, EVT::i32, Res);
   }
 }
 
@@ -319,10 +319,10 @@
 {
   // FIXME there isn't really debug info here
   DebugLoc dl = Op.getDebugLoc();
-  MVT PtrVT = Op.getValueType();
+  EVT PtrVT = Op.getValueType();
   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
-  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, JTI);
+  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, EVT::i32, JTI);
 }
 
 static bool
@@ -368,12 +368,12 @@
 {
   LoadSDNode *LD = cast<LoadSDNode>(Op);
   assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "Unexpected extension type");
-  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load MVT");
+  assert(LD->getMemoryVT() == EVT::i32 && "Unexpected load EVT");
   if (allowsUnalignedMemoryAccesses()) {
     return SDValue();
   }
   unsigned ABIAlignment = getTargetData()->
-    getABITypeAlignment(LD->getMemoryVT().getTypeForMVT());
+    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT());
   // Leave aligned load alone.
   if (LD->getAlignment() >= ABIAlignment) {
     return SDValue();
@@ -397,22 +397,22 @@
     // shr low_shifted, low, (offset & 0x3) * 8
     // shl high_shifted, high, 32 - (offset & 0x3) * 8
     // or result, low_shifted, high_shifted
-    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
-    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
-    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
-    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);
+    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, EVT::i32);
+    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, EVT::i32);
+    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, EVT::i32);
+    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, EVT::i32);
     
-    SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset);
-    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset);
+    SDValue LowAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, Base, LowOffset);
+    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, Base, HighOffset);
     
     SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain,
                                LowAddr, NULL, 4);
     SDValue High = DAG.getLoad(getPointerTy(), dl, Chain,
                                HighAddr, NULL, 4);
-    SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift);
-    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift);
-    SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted);
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
+    SDValue LowShifted = DAG.getNode(ISD::SRL, dl, EVT::i32, Low, LowShift);
+    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, EVT::i32, High, HighShift);
+    SDValue Result = DAG.getNode(ISD::OR, dl, EVT::i32, LowShifted, HighShifted);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Low.getValue(1),
                              High.getValue(1));
     SDValue Ops[] = { Result, Chain };
     return DAG.getMergeValues(Ops, 2, dl);
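
(Aside, not part of the patch: a hedged worked example of the misaligned-load expansion above, with illustrative values on a little-endian target.)

    // An i32 load from Base+5 with alignment 1 computes:
    unsigned Offset    = 5;
    unsigned LowOff    = Offset & ~0x3;            // 4  -> LowAddr  = Base + 4
    unsigned HighOff   = (Offset & ~0x3) + 4;      // 8  -> HighAddr = Base + 8
    unsigned LowShift  = (Offset & 0x3) * 8;       // 8  -> Low  >> 8
    unsigned HighShift = 32 - (Offset & 0x3) * 8;  // 24 -> High << 24
    // Result = (Low >> 8) | (High << 24), i.e. bytes 5..8 of the original memory.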
@@ -420,18 +420,18 @@
   
   if (LD->getAlignment() == 2) {
     int SVOffset = LD->getSrcValueOffset();
-    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
-                                 BasePtr, LD->getSrcValue(), SVOffset, MVT::i16,
+    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, EVT::i32, Chain,
+                                 BasePtr, LD->getSrcValue(), SVOffset, EVT::i16,
                                  LD->isVolatile(), 2);
-    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
-                                   DAG.getConstant(2, MVT::i32));
-    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain,
+    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, BasePtr,
+                                   DAG.getConstant(2, EVT::i32));
+    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, EVT::i32, Chain,
                                   HighAddr, LD->getSrcValue(), SVOffset + 2,
-                                  MVT::i16, LD->isVolatile(), 2);
-    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High,
-                                      DAG.getConstant(16, MVT::i32));
-    SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted);
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
+                                  EVT::i16, LD->isVolatile(), 2);
+    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, EVT::i32, High,
+                                      DAG.getConstant(16, EVT::i32));
+    SDValue Result = DAG.getNode(ISD::OR, dl, EVT::i32, Low, HighShifted);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Low.getValue(1),
                              High.getValue(1));
     SDValue Ops[] = { Result, Chain };
     return DAG.getMergeValues(Ops, 2, dl);
@@ -464,12 +464,12 @@
 {
   StoreSDNode *ST = cast<StoreSDNode>(Op);
   assert(!ST->isTruncatingStore() && "Unexpected store type");
-  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store MVT");
+  assert(ST->getMemoryVT() == EVT::i32 && "Unexpected store EVT");
   if (allowsUnalignedMemoryAccesses()) {
     return SDValue();
   }
   unsigned ABIAlignment = getTargetData()->
-    getABITypeAlignment(ST->getMemoryVT().getTypeForMVT());
+    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT());
   // Leave aligned store alone.
   if (ST->getAlignment() >= ABIAlignment) {
     return SDValue();
@@ -482,17 +482,17 @@
   if (ST->getAlignment() == 2) {
     int SVOffset = ST->getSrcValueOffset();
     SDValue Low = Value;
-    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
-                                      DAG.getConstant(16, MVT::i32));
+    SDValue High = DAG.getNode(ISD::SRL, dl, EVT::i32, Value,
+                                      DAG.getConstant(16, EVT::i32));
     SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
-                                         ST->getSrcValue(), SVOffset, MVT::i16,
+                                         ST->getSrcValue(), SVOffset, EVT::i16,
                                          ST->isVolatile(), 2);
-    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
-                                   DAG.getConstant(2, MVT::i32));
+    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, BasePtr,
+                                   DAG.getConstant(2, EVT::i32));
     SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                           ST->getSrcValue(), SVOffset + 2,
-                                          MVT::i16, ST->isVolatile(), 2);
-    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
+                                          EVT::i16, ST->isVolatile(), 2);
+    return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, StoreLow, StoreHigh);
   }
   
   // Lower to a call to __misaligned_store(BasePtr, Value).
@@ -520,35 +520,35 @@
 SDValue XCoreTargetLowering::
 ExpandADDSUB(SDNode *N, SelectionDAG &DAG)
 {
-  assert(N->getValueType(0) == MVT::i64 &&
+  assert(N->getValueType(0) == EVT::i64 &&
          (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");
   assert(!Subtarget.isXS1A() && "Cannot custom lower ADD/SUB on xs1a");
   DebugLoc dl = N->getDebugLoc();
   
   // Extract components
-  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
-                            N->getOperand(0),  DAG.getConstant(0, MVT::i32));
-  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
-                            N->getOperand(0),  DAG.getConstant(1, MVT::i32));
-  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
-                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
-  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
-                             N->getOperand(1), DAG.getConstant(1, MVT::i32));
+  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
+                            N->getOperand(0),  DAG.getConstant(0, EVT::i32));
+  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
+                            N->getOperand(0),  DAG.getConstant(1, EVT::i32));
+  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
+                             N->getOperand(1), DAG.getConstant(0, EVT::i32));
+  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
+                             N->getOperand(1), DAG.getConstant(1, EVT::i32));
   
   // Expand
   unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                    XCoreISD::LSUB;
-  SDValue Zero = DAG.getConstant(0, MVT::i32);
-  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
+  SDValue Zero = DAG.getConstant(0, EVT::i32);
+  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(EVT::i32, EVT::i32),
                                   LHSL, RHSL, Zero);
   SDValue Lo(Carry.getNode(), 1);
   
-  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
+  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(EVT::i32, EVT::i32),
                                   LHSH, RHSH, Carry);
   SDValue Hi(Ignored.getNode(), 1);
   // Merge the pieces
-  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
+  return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, Lo, Hi);
 }
 
 SDValue XCoreTargetLowering::
@@ -559,7 +559,7 @@
   SDNode *Node = Op.getNode();
   DebugLoc dl = Node->getDebugLoc();
   const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
-  MVT VT = Node->getValueType(0);
+  EVT VT = Node->getValueType(0);
   SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                                Node->getOperand(1), V, 0);
   // Increment the pointer, VAList, to the next vararg
@@ -580,7 +580,7 @@
   // memory location argument
   MachineFunction &MF = DAG.getMachineFunction();
   XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
-  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
+  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), EVT::i32);
   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
   return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0);
 }
@@ -594,7 +594,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, 
-                            RegInfo->getFrameRegister(MF), MVT::i32);
+                            RegInfo->getFrameRegister(MF), EVT::i32);
 }
 
 //===----------------------------------------------------------------------===//
@@ -691,16 +691,16 @@
 
       int Offset = VA.getLocMemOffset();
 
-      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other, 
+      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, EVT::Other, 
                                         Chain, Arg,
-                                        DAG.getConstant(Offset/4, MVT::i32)));
+                                        DAG.getConstant(Offset/4, EVT::i32)));
     }
   }
 
   // Transform all store nodes into one single node because
   // all store nodes are independent of each other.
   if (!MemOpChains.empty())
-    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+    Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, 
                         &MemOpChains[0], MemOpChains.size());
 
   // Build a sequence of copy-to-reg nodes chained together with token 
@@ -718,15 +718,15 @@
   // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
   // Likewise ExternalSymbol -> TargetExternalSymbol.
   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
-    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
+    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i32);
   else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
-    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
+    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i32);
 
   // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
   //             = Chain, Callee, Reg#1, Reg#2, ...  
   //
   // Returns a chain & a flag for retval copy to use.
-  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
+  SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
   SmallVector<SDValue, 8> Ops;
   Ops.push_back(Chain);
   Ops.push_back(Callee);
@@ -841,7 +841,7 @@
     
     if (VA.isRegLoc()) {
       // Arguments passed in registers
-      MVT RegVT = VA.getLocVT();
+      EVT RegVT = VA.getLocVT();
       switch (RegVT.getSimpleVT()) {
       default:
         {
@@ -851,7 +851,7 @@
 #endif
           llvm_unreachable(0);
         }
-      case MVT::i32:
+      case EVT::i32:
         unsigned VReg = RegInfo.createVirtualRegister(
                           XCore::GRRegsRegisterClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
@@ -873,7 +873,7 @@
 
       // Create the SelectionDAG nodes corresponding to a load
       // from this parameter
-      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+      SDValue FIN = DAG.getFrameIndex(FI, EVT::i32);
       InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0));
     }
   }
@@ -898,18 +898,18 @@
           XFI->setVarArgsFrameIndex(FI);
         }
         offset -= StackSlotSize;
-        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
+        SDValue FIN = DAG.getFrameIndex(FI, EVT::i32);
         // Move argument from phys reg -> virt reg
         unsigned VReg = RegInfo.createVirtualRegister(
                           XCore::GRRegsRegisterClass);
         RegInfo.addLiveIn(ArgRegs[i], VReg);
-        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
         // Move argument from virt reg -> stack
         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
         MemOps.push_back(Store);
       }
       if (!MemOps.empty())
-        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+        Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
                             &MemOps[0], MemOps.size());
     } else {
       // This will point to the next argument passed via stack.
@@ -967,11 +967,11 @@
 
   // Return on XCore is always a "retsp 0"
   if (Flag.getNode())
-    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
-                       Chain, DAG.getConstant(0, MVT::i32), Flag);
+    return DAG.getNode(XCoreISD::RETSP, dl, EVT::Other,
+                       Chain, DAG.getConstant(0, EVT::i32), Flag);
   else // Return Void
-    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
-                       Chain, DAG.getConstant(0, MVT::i32));
+    return DAG.getNode(XCoreISD::RETSP, dl, EVT::Other,
+                       Chain, DAG.getConstant(0, EVT::i32));
 }
 
 //===----------------------------------------------------------------------===//
@@ -1059,7 +1059,7 @@
       break;
     }
     unsigned ABIAlignment = getTargetData()->
-      getABITypeAlignment(ST->getMemoryVT().getTypeForMVT());
+      getABITypeAlignment(ST->getMemoryVT().getTypeForEVT());
     unsigned Alignment = ST->getAlignment();
     if (Alignment >= ABIAlignment) {
       break;
@@ -1072,7 +1072,7 @@
         Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
         return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                               LD->getBasePtr(),
-                              DAG.getConstant(StoreBits/8, MVT::i32),
+                              DAG.getConstant(StoreBits/8, EVT::i32),
                               Alignment, ST->getSrcValue(),
                               ST->getSrcValueOffset(), LD->getSrcValue(),
                               LD->getSrcValueOffset());
@@ -1154,7 +1154,7 @@
 
 std::vector<unsigned> XCoreTargetLowering::
 getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  MVT VT) const 
+                                  EVT VT) const 
 {
   if (Constraint.size() != 1)
     return std::vector<unsigned>();
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index a21744e..1265934 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -129,7 +129,7 @@
     // Inline asm support
     std::vector<unsigned>
     getRegClassForInlineAsmConstraint(const std::string &Constraint,
-              MVT VT) const;
+              EVT VT) const;
   
     // Expand specifics
     SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG);