Rename MVT to EVT, in preparation for splitting SimpleValueType out into its own struct type.
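
For context, the eventual shape this rename works toward is roughly the
following. This is a sketch only, not code in this patch, and the member
names are illustrative: SimpleValueType and its enumerators would move into
a small MVT struct, while EVT keeps the extended-type interface used below
(isSimple(), getSimpleVT(), bitsGT(), and friends) and carries an IR type
pointer for the non-simple cases.

    class Type;                      // LLVM IR type, forward-declared

    class MVT {                      // simple, enum-backed value types only
    public:
      enum SimpleValueType { Other, i1, i8, i16, i32, i64, f32, f64, f80, isVoid };
      SimpleValueType SimpleTy;
    };

    class EVT {                      // a simple MVT, or an "extended" IR type
      MVT V;                         // meaningful only when LLVMTy is null
      const Type *LLVMTy;            // non-null for extended (non-simple) types
    public:
      bool isSimple() const { return LLVMTy == 0; }
      MVT getSimpleVT() const { return V; }   // only valid when isSimple()
    };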


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78610 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index a4bb1be..3046681 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -79,16 +79,16 @@
 #include "X86GenFastISel.inc"
 
 private:
-  bool X86FastEmitCompare(Value *LHS, Value *RHS, MVT VT);
+  bool X86FastEmitCompare(Value *LHS, Value *RHS, EVT VT);
   
-  bool X86FastEmitLoad(MVT VT, const X86AddressMode &AM, unsigned &RR);
+  bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
 
-  bool X86FastEmitStore(MVT VT, Value *Val,
+  bool X86FastEmitStore(EVT VT, Value *Val,
                         const X86AddressMode &AM);
-  bool X86FastEmitStore(MVT VT, unsigned Val,
+  bool X86FastEmitStore(EVT VT, unsigned Val,
                         const X86AddressMode &AM);
 
-  bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
+  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                          unsigned &ResultReg);
   
   bool X86SelectAddress(Value *V, X86AddressMode &AM);
@@ -133,36 +133,36 @@
 
   /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
   /// computed in an SSE register, not on the X87 floating point stack.
-  bool isScalarFPTypeInSSEReg(MVT VT) const {
-    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
-      (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
+  bool isScalarFPTypeInSSEReg(EVT VT) const {
+    return (VT == EVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
+      (VT == EVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
   }
 
-  bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
+  bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false);
 };
   
 } // end anonymous namespace.
 
-bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
+bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
   VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
-  if (VT == MVT::Other || !VT.isSimple())
+  if (VT == EVT::Other || !VT.isSimple())
     // Unhandled type. Halt "fast" selection and bail.
     return false;
   
   // For now, require SSE/SSE2 for performing floating-point operations,
   // since x87 requires additional work.
-  if (VT == MVT::f64 && !X86ScalarSSEf64)
+  if (VT == EVT::f64 && !X86ScalarSSEf64)
      return false;
-  if (VT == MVT::f32 && !X86ScalarSSEf32)
+  if (VT == EVT::f32 && !X86ScalarSSEf32)
      return false;
   // Similarly, no f80 support yet.
-  if (VT == MVT::f80)
+  if (VT == EVT::f80)
     return false;
   // We only handle legal types. For example, on x86-32 the instruction
   // selector contains all of the 64-bit instructions from x86-64,
   // under the assumption that i64 won't be used if the target doesn't
   // support it.
-  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
+  return (AllowI1 && VT == EVT::i1) || TLI.isTypeLegal(VT);
 }
 
 #include "X86GenCallingConv.inc"
@@ -188,31 +188,31 @@
 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
-bool X86FastISel::X86FastEmitLoad(MVT VT, const X86AddressMode &AM,
+bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
                                   unsigned &ResultReg) {
   // Get opcode and regclass of the output for the given load instruction.
   unsigned Opc = 0;
   const TargetRegisterClass *RC = NULL;
   switch (VT.getSimpleVT()) {
   default: return false;
-  case MVT::i8:
+  case EVT::i8:
     Opc = X86::MOV8rm;
     RC  = X86::GR8RegisterClass;
     break;
-  case MVT::i16:
+  case EVT::i16:
     Opc = X86::MOV16rm;
     RC  = X86::GR16RegisterClass;
     break;
-  case MVT::i32:
+  case EVT::i32:
     Opc = X86::MOV32rm;
     RC  = X86::GR32RegisterClass;
     break;
-  case MVT::i64:
+  case EVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
     RC  = X86::GR64RegisterClass;
     break;
-  case MVT::f32:
+  case EVT::f32:
     if (Subtarget->hasSSE1()) {
       Opc = X86::MOVSSrm;
       RC  = X86::FR32RegisterClass;
@@ -221,7 +221,7 @@
       RC  = X86::RFP32RegisterClass;
     }
     break;
-  case MVT::f64:
+  case EVT::f64:
     if (Subtarget->hasSSE2()) {
       Opc = X86::MOVSDrm;
       RC  = X86::FR64RegisterClass;
@@ -230,7 +230,7 @@
       RC  = X86::RFP64RegisterClass;
     }
     break;
-  case MVT::f80:
+  case EVT::f80:
     // No f80 support yet.
     return false;
   }
@@ -245,21 +245,21 @@
 /// and a displacement offset, or a GlobalAddress,
 /// i.e. V. Return true if it is possible.
 bool
-X86FastISel::X86FastEmitStore(MVT VT, unsigned Val,
+X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
                               const X86AddressMode &AM) {
   // Get opcode and regclass of the output for the given store instruction.
   unsigned Opc = 0;
   switch (VT.getSimpleVT()) {
-  case MVT::f80: // No f80 support yet.
+  case EVT::f80: // No f80 support yet.
   default: return false;
-  case MVT::i8:  Opc = X86::MOV8mr;  break;
-  case MVT::i16: Opc = X86::MOV16mr; break;
-  case MVT::i32: Opc = X86::MOV32mr; break;
-  case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
-  case MVT::f32:
+  case EVT::i8:  Opc = X86::MOV8mr;  break;
+  case EVT::i16: Opc = X86::MOV16mr; break;
+  case EVT::i32: Opc = X86::MOV32mr; break;
+  case EVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
+  case EVT::f32:
     Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
     break;
-  case MVT::f64:
+  case EVT::f64:
     Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
     break;
   }
@@ -268,7 +268,7 @@
   return true;
 }
 
-bool X86FastISel::X86FastEmitStore(MVT VT, Value *Val,
+bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
                                    const X86AddressMode &AM) {
   // Handle 'null' like i32/i64 0.
   if (isa<ConstantPointerNull>(Val))
@@ -279,10 +279,10 @@
     unsigned Opc = 0;
     switch (VT.getSimpleVT()) {
     default: break;
-    case MVT::i8:  Opc = X86::MOV8mi;  break;
-    case MVT::i16: Opc = X86::MOV16mi; break;
-    case MVT::i32: Opc = X86::MOV32mi; break;
-    case MVT::i64:
+    case EVT::i8:  Opc = X86::MOV8mi;  break;
+    case EVT::i16: Opc = X86::MOV16mi; break;
+    case EVT::i32: Opc = X86::MOV32mi; break;
+    case EVT::i64:
       // Must be a 32-bit sign extended value.
       if ((int)CI->getSExtValue() == CI->getSExtValue())
         Opc = X86::MOV64mi32;
@@ -306,8 +306,8 @@
 /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
 /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
 /// ISD::SIGN_EXTEND).
-bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
-                                    unsigned Src, MVT SrcVT,
+bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
+                                    unsigned Src, EVT SrcVT,
                                     unsigned &ResultReg) {
   unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
   
@@ -478,7 +478,7 @@
       StubAM.GV = GV;
       StubAM.GVOpFlags = GVFlags;
 
-      if (TLI.getPointerTy() == MVT::i64) {
+      if (TLI.getPointerTy() == EVT::i64) {
         Opc = X86::MOV64rm;
         RC  = X86::GR64RegisterClass;
         
@@ -605,7 +605,7 @@
 
 /// X86SelectStore - Select and emit code to implement store instructions.
 bool X86FastISel::X86SelectStore(Instruction* I) {
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(I->getOperand(0)->getType(), VT))
     return false;
 
@@ -619,7 +619,7 @@
 /// X86SelectLoad - Select and emit code to implement load instructions.
 ///
 bool X86FastISel::X86SelectLoad(Instruction *I)  {
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(I->getType(), VT))
     return false;
 
@@ -635,29 +635,29 @@
   return false;
 }
 
-static unsigned X86ChooseCmpOpcode(MVT VT) {
+static unsigned X86ChooseCmpOpcode(EVT VT) {
   switch (VT.getSimpleVT()) {
   default:       return 0;
-  case MVT::i8:  return X86::CMP8rr;
-  case MVT::i16: return X86::CMP16rr;
-  case MVT::i32: return X86::CMP32rr;
-  case MVT::i64: return X86::CMP64rr;
-  case MVT::f32: return X86::UCOMISSrr;
-  case MVT::f64: return X86::UCOMISDrr;
+  case EVT::i8:  return X86::CMP8rr;
+  case EVT::i16: return X86::CMP16rr;
+  case EVT::i32: return X86::CMP32rr;
+  case EVT::i64: return X86::CMP64rr;
+  case EVT::f32: return X86::UCOMISSrr;
+  case EVT::f64: return X86::UCOMISDrr;
   }
 }
 
 /// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
 /// of the comparison, return an opcode that works for the compare (e.g.
 /// CMP32ri) otherwise return 0.
-static unsigned X86ChooseCmpImmediateOpcode(MVT VT, ConstantInt *RHSC) {
+static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
   switch (VT.getSimpleVT()) {
   // Otherwise, we can't fold the immediate into this comparison.
   default: return 0;
-  case MVT::i8: return X86::CMP8ri;
-  case MVT::i16: return X86::CMP16ri;
-  case MVT::i32: return X86::CMP32ri;
-  case MVT::i64:
+  case EVT::i8: return X86::CMP8ri;
+  case EVT::i16: return X86::CMP16ri;
+  case EVT::i32: return X86::CMP32ri;
+  case EVT::i64:
     // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
     // field.
     if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
@@ -666,7 +666,7 @@
   }
 }
 
-bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {
+bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
   unsigned Op0Reg = getRegForValue(Op0);
   if (Op0Reg == 0) return false;
   
@@ -698,7 +698,7 @@
 bool X86FastISel::X86SelectCmp(Instruction *I) {
   CmpInst *CI = cast<CmpInst>(I);
 
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(I->getOperand(0)->getType(), VT))
     return false;
 
@@ -778,7 +778,7 @@
     unsigned ResultReg = getRegForValue(I->getOperand(0));
     if (ResultReg == 0) return false;
     // Set the high bits to zero.
-    ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
+    ResultReg = FastEmitZExtFromI1(EVT::i8, ResultReg);
     if (ResultReg == 0) return false;
     UpdateValueMap(I, ResultReg);
     return true;
@@ -798,7 +798,7 @@
   // Fold the common case of a conditional branch with a comparison.
   if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
     if (CI->hasOneUse()) {
-      MVT VT = TLI.getValueType(CI->getOperand(0)->getType());
+      EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
 
       // Try to take advantage of fallthrough opportunities.
       CmpInst::Predicate Predicate = CI->getPredicate();
@@ -975,8 +975,8 @@
     return false;
   }
 
-  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
-  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
+  if (VT == EVT::Other || !isTypeLegal(I->getType(), VT))
     return false;
 
   unsigned Op0Reg = getRegForValue(I->getOperand(0));
@@ -1009,19 +1009,19 @@
 }
 
 bool X86FastISel::X86SelectSelect(Instruction *I) {
-  MVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
-  if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
+  EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
+  if (VT == EVT::Other || !isTypeLegal(I->getType(), VT))
     return false;
   
   unsigned Opc = 0;
   const TargetRegisterClass *RC = NULL;
-  if (VT.getSimpleVT() == MVT::i16) {
+  if (VT.getSimpleVT() == EVT::i16) {
     Opc = X86::CMOVE16rr;
     RC = &X86::GR16RegClass;
-  } else if (VT.getSimpleVT() == MVT::i32) {
+  } else if (VT.getSimpleVT() == EVT::i32) {
     Opc = X86::CMOVE32rr;
     RC = &X86::GR32RegClass;
-  } else if (VT.getSimpleVT() == MVT::i64) {
+  } else if (VT.getSimpleVT() == EVT::i64) {
     Opc = X86::CMOVE64rr;
     RC = &X86::GR64RegClass;
   } else {
@@ -1081,14 +1081,14 @@
   if (Subtarget->is64Bit())
     // All other cases should be handled by the tblgen generated code.
     return false;
-  MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
-  MVT DstVT = TLI.getValueType(I->getType());
+  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+  EVT DstVT = TLI.getValueType(I->getType());
   
   // This code only handles truncation to byte right now.
-  if (DstVT != MVT::i8 && DstVT != MVT::i1)
+  if (DstVT != EVT::i8 && DstVT != EVT::i1)
     // All other cases should be handled by the tblgen generated code.
     return false;
-  if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
+  if (SrcVT != EVT::i16 && SrcVT != EVT::i32)
     // All other cases should be handled by the tblgen generated code.
     return false;
 
@@ -1098,14 +1098,14 @@
     return false;
 
   // First issue a copy to GR16_ABCD or GR32_ABCD.
-  unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
-  const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
+  unsigned CopyOpc = (SrcVT == EVT::i16) ? X86::MOV16rr : X86::MOV32rr;
+  const TargetRegisterClass *CopyRC = (SrcVT == EVT::i16)
     ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
   unsigned CopyReg = createResultReg(CopyRC);
   BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
 
   // Then issue an extract_subreg.
-  unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
+  unsigned ResultReg = FastEmitInst_extractsubreg(EVT::i8,
                                                   CopyReg, X86::SUBREG_8BIT);
   if (!ResultReg)
     return false;
@@ -1150,7 +1150,7 @@
     const Type *RetTy =
       cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
 
-    MVT VT;
+    EVT VT;
     if (!isTypeLegal(RetTy, VT))
       return false;
 
@@ -1164,9 +1164,9 @@
       return false;
 
     unsigned OpC = 0;
-    if (VT == MVT::i32)
+    if (VT == EVT::i32)
       OpC = X86::ADD32rr;
-    else if (VT == MVT::i64)
+    else if (VT == EVT::i64)
       OpC = X86::ADD64rr;
     else
       return false;
@@ -1185,7 +1185,7 @@
     if (DestReg1 != ResultReg)
       ResultReg = DestReg1+1;
     else
-      ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
+      ResultReg = createResultReg(TLI.getRegClassFor(EVT::i8));
     
     unsigned Opc = X86::SETBr;
     if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
@@ -1229,9 +1229,9 @@
 
   // Handle *simple* calls for now.
   const Type *RetTy = CS.getType();
-  MVT RetVT;
+  EVT RetVT;
   if (RetTy == Type::VoidTy)
-    RetVT = MVT::isVoid;
+    RetVT = EVT::isVoid;
   else if (!isTypeLegal(RetTy, RetVT, true))
     return false;
 
@@ -1251,15 +1251,15 @@
 
   // Allow calls which produce i1 results.
   bool AndToI1 = false;
-  if (RetVT == MVT::i1) {
-    RetVT = MVT::i8;
+  if (RetVT == EVT::i1) {
+    RetVT = EVT::i8;
     AndToI1 = true;
   }
 
   // Deal with call operands first.
   SmallVector<Value*, 8> ArgVals;
   SmallVector<unsigned, 8> Args;
-  SmallVector<MVT, 8> ArgVTs;
+  SmallVector<EVT, 8> ArgVTs;
   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
   Args.reserve(CS.arg_size());
   ArgVals.reserve(CS.arg_size());
@@ -1285,7 +1285,7 @@
       return false;
 
     const Type *ArgTy = (*i)->getType();
-    MVT ArgVT;
+    EVT ArgVT;
     if (!isTypeLegal(ArgTy, ArgVT))
       return false;
     unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
@@ -1315,7 +1315,7 @@
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
     unsigned Arg = Args[VA.getValNo()];
-    MVT ArgVT = ArgVTs[VA.getValNo()];
+    EVT ArgVT = ArgVTs[VA.getValNo()];
   
     // Promote the value if needed.
     switch (VA.getLocInfo()) {
@@ -1445,14 +1445,14 @@
   BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
 
   // Now handle call return value (if any).
-  if (RetVT.getSimpleVT() != MVT::isVoid) {
+  if (RetVT.getSimpleVT() != EVT::isVoid) {
     SmallVector<CCValAssign, 16> RVLocs;
     CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
     CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
 
     // Copy all of the result registers out of their specified physreg.
     assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
-    MVT CopyVT = RVLocs[0].getValVT();
+    EVT CopyVT = RVLocs[0].getValVT();
     TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
     TargetRegisterClass *SrcRC = DstRC;
     
@@ -1462,7 +1462,7 @@
     if ((RVLocs[0].getLocReg() == X86::ST0 ||
          RVLocs[0].getLocReg() == X86::ST1) &&
         isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
-      CopyVT = MVT::f80;
+      CopyVT = EVT::f80;
       SrcRC = X86::RSTRegisterClass;
       DstRC = X86::RFP80RegisterClass;
     }
@@ -1476,14 +1476,14 @@
       // Round the F80 the right size, which also moves to the appropriate xmm
       // register. This is accomplished by storing the F80 value in memory and
       // then loading it back. Ewww...
-      MVT ResVT = RVLocs[0].getValVT();
-      unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
+      EVT ResVT = RVLocs[0].getValVT();
+      unsigned Opc = ResVT == EVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
       unsigned MemSize = ResVT.getSizeInBits()/8;
       int FI = MFI.CreateStackObject(MemSize, MemSize);
       addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
-      DstRC = ResVT == MVT::f32
+      DstRC = ResVT == EVT::f32
         ? X86::FR32RegisterClass : X86::FR64RegisterClass;
-      Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
+      Opc = ResVT == EVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
       ResultReg = createResultReg(DstRC);
       addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
     }
@@ -1536,8 +1536,8 @@
     return X86SelectExtractValue(I);
   case Instruction::IntToPtr: // Deliberate fall-through.
   case Instruction::PtrToInt: {
-    MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
-    MVT DstVT = TLI.getValueType(I->getType());
+    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
+    EVT DstVT = TLI.getValueType(I->getType());
     if (DstVT.bitsGT(SrcVT))
       return X86SelectZExt(I);
     if (DstVT.bitsLT(SrcVT))
@@ -1553,7 +1553,7 @@
 }
 
 unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
-  MVT VT;
+  EVT VT;
   if (!isTypeLegal(C->getType(), VT))
     return false;
   
@@ -1562,24 +1562,24 @@
   const TargetRegisterClass *RC = NULL;
   switch (VT.getSimpleVT()) {
   default: return false;
-  case MVT::i8:
+  case EVT::i8:
     Opc = X86::MOV8rm;
     RC  = X86::GR8RegisterClass;
     break;
-  case MVT::i16:
+  case EVT::i16:
     Opc = X86::MOV16rm;
     RC  = X86::GR16RegisterClass;
     break;
-  case MVT::i32:
+  case EVT::i32:
     Opc = X86::MOV32rm;
     RC  = X86::GR32RegisterClass;
     break;
-  case MVT::i64:
+  case EVT::i64:
     // Must be in x86-64 mode.
     Opc = X86::MOV64rm;
     RC  = X86::GR64RegisterClass;
     break;
-  case MVT::f32:
+  case EVT::f32:
     if (Subtarget->hasSSE1()) {
       Opc = X86::MOVSSrm;
       RC  = X86::FR32RegisterClass;
@@ -1588,7 +1588,7 @@
       RC  = X86::RFP32RegisterClass;
     }
     break;
-  case MVT::f64:
+  case EVT::f64:
     if (Subtarget->hasSSE2()) {
       Opc = X86::MOVSDrm;
       RC  = X86::FR64RegisterClass;
@@ -1597,7 +1597,7 @@
       RC  = X86::RFP64RegisterClass;
     }
     break;
-  case MVT::f80:
+  case EVT::f80:
     // No f80 support yet.
     return false;
   }
@@ -1606,7 +1606,7 @@
   if (isa<GlobalValue>(C)) {
     X86AddressMode AM;
     if (X86SelectAddress(C, AM)) {
-      if (TLI.getPointerTy() == MVT::i32)
+      if (TLI.getPointerTy() == EVT::i32)
         Opc = X86::LEA32r;
       else
         Opc = X86::LEA64r;