[X86] Rename some instructions that start with Int_ to have the _Int at the end.

This matches the AVX512 version, is more consistent overall, and improves our scheduler models.

In some cases this adds _Int to instructions that didn't previously have an Int_ prefix. This is a side effect of the adjustments made to some of the multiclasses.

llvm-svn: 320325
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 03be87e..cea4dec 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2424,11 +2424,11 @@
 
   if (I->getType()->isDoubleTy()) {
     // sitofp int -> double
-    Opcode = InTy->isIntegerTy(64) ? X86::VCVTSI2SD64rr : X86::VCVTSI2SDrr;
+    Opcode = InTy->isIntegerTy(64) ? X86::VCVTSI642SDrr : X86::VCVTSI2SDrr;
     RC = &X86::FR64RegClass;
   } else if (I->getType()->isFloatTy()) {
     // sitofp int -> float
-    Opcode = InTy->isIntegerTy(64) ? X86::VCVTSI2SS64rr : X86::VCVTSI2SSrr;
+    Opcode = InTy->isIntegerTy(64) ? X86::VCVTSI642SSrr : X86::VCVTSI2SSrr;
     RC = &X86::FR32RegClass;
   } else
     return false;
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index bb18969..8cb1cb8 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -6530,16 +6530,16 @@
                                   X86VectorVTInfo DstVT, SDNode OpNode,
                                   OpndItins itins, string asm> {
   let Predicates = [HasAVX512] in {
-    def rr : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src),
+    def rr_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src),
                 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                 [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 FROUND_CURRENT)))],
                 itins.rr>, EVEX, VEX_LIG, Sched<[itins.Sched]>;
-    def rrb : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc),
+    def rrb_Int : SI<opc, MRMSrcReg, (outs DstVT.RC:$dst), (ins SrcVT.RC:$src, AVX512RC:$rc),
                  !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
                  [(set DstVT.RC:$dst, (OpNode (SrcVT.VT SrcVT.RC:$src),(i32 imm:$rc)))],
                  itins.rr>, EVEX, VEX_LIG, EVEX_B, EVEX_RC,
                  Sched<[itins.Sched]>;
-    def rm : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.IntScalarMemOp:$src),
+    def rm_Int : SI<opc, MRMSrcMem, (outs DstVT.RC:$dst), (ins SrcVT.IntScalarMemOp:$src),
                 !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
                 [(set DstVT.RC:$dst, (OpNode
                       (SrcVT.VT SrcVT.ScalarIntMemCPat:$src),
@@ -6578,21 +6578,21 @@
 // Therefore, the SSE intrinsics are mapped to the AVX512 instructions.
 let Predicates = [HasAVX512] in {
   def : Pat<(i32 (int_x86_sse_cvtss2si (v4f32 VR128X:$src))),
-            (VCVTSS2SIZrr VR128X:$src)>;
+            (VCVTSS2SIZrr_Int VR128X:$src)>;
   def : Pat<(i32 (int_x86_sse_cvtss2si sse_load_f32:$src)),
-            (VCVTSS2SIZrm sse_load_f32:$src)>;
+            (VCVTSS2SIZrm_Int sse_load_f32:$src)>;
   def : Pat<(i64 (int_x86_sse_cvtss2si64 (v4f32 VR128X:$src))),
-            (VCVTSS2SI64Zrr VR128X:$src)>;
+            (VCVTSS2SI64Zrr_Int VR128X:$src)>;
   def : Pat<(i64 (int_x86_sse_cvtss2si64 sse_load_f32:$src)),
-            (VCVTSS2SI64Zrm sse_load_f32:$src)>;
+            (VCVTSS2SI64Zrm_Int sse_load_f32:$src)>;
   def : Pat<(i32 (int_x86_sse2_cvtsd2si (v2f64 VR128X:$src))),
-            (VCVTSD2SIZrr VR128X:$src)>;
+            (VCVTSD2SIZrr_Int VR128X:$src)>;
   def : Pat<(i32 (int_x86_sse2_cvtsd2si sse_load_f64:$src)),
-            (VCVTSD2SIZrm sse_load_f64:$src)>;
+            (VCVTSD2SIZrm_Int sse_load_f64:$src)>;
   def : Pat<(i64 (int_x86_sse2_cvtsd2si64 (v2f64 VR128X:$src))),
-            (VCVTSD2SI64Zrr VR128X:$src)>;
+            (VCVTSD2SI64Zrr_Int VR128X:$src)>;
   def : Pat<(i64 (int_x86_sse2_cvtsd2si64 sse_load_f64:$src)),
-            (VCVTSD2SI64Zrm sse_load_f64:$src)>;
+            (VCVTSD2SI64Zrm_Int sse_load_f64:$src)>;
 } // HasAVX512
 
 let Predicates = [HasAVX512] in {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 68a7bb2..a4ddb31 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -559,14 +559,30 @@
     { X86::CMP32rr,         X86::CMP32rm,             0 },
     { X86::CMP64rr,         X86::CMP64rm,             0 },
     { X86::CMP8rr,          X86::CMP8rm,              0 },
+    { X86::CVTDQ2PDrr,      X86::CVTDQ2PDrm,          TB_NO_REVERSE },
+    { X86::CVTDQ2PSrr,      X86::CVTDQ2PSrm,          TB_ALIGN_16 },
+    { X86::CVTPD2DQrr,      X86::CVTPD2DQrm,          TB_ALIGN_16 },
+    { X86::CVTPD2PSrr,      X86::CVTPD2PSrm,          TB_ALIGN_16 },
+    { X86::CVTPS2DQrr,      X86::CVTPS2DQrm,          TB_ALIGN_16 },
+    { X86::CVTPS2PDrr,      X86::CVTPS2PDrm,          TB_NO_REVERSE },
+    { X86::CVTSD2SI64rr_Int, X86::CVTSD2SI64rm_Int,   TB_NO_REVERSE },
+    { X86::CVTSD2SIrr_Int,  X86::CVTSD2SIrm_Int,      TB_NO_REVERSE },
     { X86::CVTSD2SSrr,      X86::CVTSD2SSrm,          0 },
-    { X86::CVTSI2SD64rr,    X86::CVTSI2SD64rm,        0 },
+    { X86::CVTSI642SDrr,    X86::CVTSI642SDrm,        0 },
     { X86::CVTSI2SDrr,      X86::CVTSI2SDrm,          0 },
-    { X86::CVTSI2SS64rr,    X86::CVTSI2SS64rm,        0 },
+    { X86::CVTSI642SSrr,    X86::CVTSI642SSrm,        0 },
     { X86::CVTSI2SSrr,      X86::CVTSI2SSrm,          0 },
     { X86::CVTSS2SDrr,      X86::CVTSS2SDrm,          0 },
+    { X86::CVTSS2SI64rr_Int, X86::CVTSS2SI64rm_Int,   TB_NO_REVERSE },
+    { X86::CVTSS2SIrr_Int,  X86::CVTSS2SIrm_Int,      TB_NO_REVERSE },
+    { X86::CVTTPD2DQrr,     X86::CVTTPD2DQrm,         TB_ALIGN_16 },
+    { X86::CVTTPS2DQrr,     X86::CVTTPS2DQrm,         TB_ALIGN_16 },
     { X86::CVTTSD2SI64rr,   X86::CVTTSD2SI64rm,       0 },
+    { X86::CVTTSD2SI64rr_Int,X86::CVTTSD2SI64rm_Int,  TB_NO_REVERSE },
     { X86::CVTTSD2SIrr,     X86::CVTTSD2SIrm,         0 },
+    { X86::CVTTSD2SIrr_Int, X86::CVTTSD2SIrm_Int,     TB_NO_REVERSE },
+    { X86::CVTTSS2SI64rr_Int,X86::CVTTSS2SI64rm_Int,  TB_NO_REVERSE },
+    { X86::CVTTSS2SIrr_Int, X86::CVTTSS2SIrm_Int,     TB_NO_REVERSE },
     { X86::CVTTSS2SI64rr,   X86::CVTTSS2SI64rm,       0 },
     { X86::CVTTSS2SIrr,     X86::CVTTSS2SIrm,         0 },
     { X86::IMUL16rri,       X86::IMUL16rmi,           0 },
@@ -577,22 +593,6 @@
     { X86::IMUL64rri8,      X86::IMUL64rmi8,          0 },
     { X86::Int_COMISDrr,    X86::Int_COMISDrm,        TB_NO_REVERSE },
     { X86::Int_COMISSrr,    X86::Int_COMISSrm,        TB_NO_REVERSE },
-    { X86::CVTSD2SI64rr,    X86::CVTSD2SI64rm,        TB_NO_REVERSE },
-    { X86::CVTSD2SIrr,      X86::CVTSD2SIrm,          TB_NO_REVERSE },
-    { X86::CVTSS2SI64rr,    X86::CVTSS2SI64rm,        TB_NO_REVERSE },
-    { X86::CVTSS2SIrr,      X86::CVTSS2SIrm,          TB_NO_REVERSE },
-    { X86::CVTDQ2PDrr,      X86::CVTDQ2PDrm,          TB_NO_REVERSE },
-    { X86::CVTDQ2PSrr,      X86::CVTDQ2PSrm,          TB_ALIGN_16 },
-    { X86::CVTPD2DQrr,      X86::CVTPD2DQrm,          TB_ALIGN_16 },
-    { X86::CVTPD2PSrr,      X86::CVTPD2PSrm,          TB_ALIGN_16 },
-    { X86::CVTPS2DQrr,      X86::CVTPS2DQrm,          TB_ALIGN_16 },
-    { X86::CVTPS2PDrr,      X86::CVTPS2PDrm,          TB_NO_REVERSE },
-    { X86::CVTTPD2DQrr,     X86::CVTTPD2DQrm,         TB_ALIGN_16 },
-    { X86::CVTTPS2DQrr,     X86::CVTTPS2DQrm,         TB_ALIGN_16 },
-    { X86::Int_CVTTSD2SI64rr,X86::Int_CVTTSD2SI64rm,  TB_NO_REVERSE },
-    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm,     TB_NO_REVERSE },
-    { X86::Int_CVTTSS2SI64rr,X86::Int_CVTTSS2SI64rm,  TB_NO_REVERSE },
-    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm,     TB_NO_REVERSE },
     { X86::Int_UCOMISDrr,   X86::Int_UCOMISDrm,       TB_NO_REVERSE },
     { X86::Int_UCOMISSrr,   X86::Int_UCOMISSrm,       TB_NO_REVERSE },
     { X86::MOV16rr,         X86::MOV16rm,             0 },
@@ -694,17 +694,17 @@
     { X86::Int_VUCOMISDrr,  X86::Int_VUCOMISDrm,      TB_NO_REVERSE },
     { X86::Int_VUCOMISSrr,  X86::Int_VUCOMISSrm,      TB_NO_REVERSE },
     { X86::VCVTTSD2SI64rr,  X86::VCVTTSD2SI64rm,      0 },
-    { X86::Int_VCVTTSD2SI64rr,X86::Int_VCVTTSD2SI64rm,TB_NO_REVERSE },
+    { X86::VCVTTSD2SI64rr_Int,X86::VCVTTSD2SI64rm_Int,TB_NO_REVERSE },
     { X86::VCVTTSD2SIrr,    X86::VCVTTSD2SIrm,        0 },
-    { X86::Int_VCVTTSD2SIrr,X86::Int_VCVTTSD2SIrm,    TB_NO_REVERSE },
+    { X86::VCVTTSD2SIrr_Int,X86::VCVTTSD2SIrm_Int,    TB_NO_REVERSE },
     { X86::VCVTTSS2SI64rr,  X86::VCVTTSS2SI64rm,      0 },
-    { X86::Int_VCVTTSS2SI64rr,X86::Int_VCVTTSS2SI64rm,TB_NO_REVERSE },
+    { X86::VCVTTSS2SI64rr_Int,X86::VCVTTSS2SI64rm_Int,TB_NO_REVERSE },
     { X86::VCVTTSS2SIrr,    X86::VCVTTSS2SIrm,        0 },
-    { X86::Int_VCVTTSS2SIrr,X86::Int_VCVTTSS2SIrm,    TB_NO_REVERSE },
-    { X86::VCVTSD2SI64rr,   X86::VCVTSD2SI64rm,       TB_NO_REVERSE },
-    { X86::VCVTSD2SIrr,     X86::VCVTSD2SIrm,         TB_NO_REVERSE },
-    { X86::VCVTSS2SI64rr,   X86::VCVTSS2SI64rm,       TB_NO_REVERSE },
-    { X86::VCVTSS2SIrr,     X86::VCVTSS2SIrm,         TB_NO_REVERSE },
+    { X86::VCVTTSS2SIrr_Int,X86::VCVTTSS2SIrm_Int,    TB_NO_REVERSE },
+    { X86::VCVTSD2SI64rr_Int, X86::VCVTSD2SI64rm_Int, TB_NO_REVERSE },
+    { X86::VCVTSD2SIrr_Int,   X86::VCVTSD2SIrm_Int,   TB_NO_REVERSE },
+    { X86::VCVTSS2SI64rr_Int, X86::VCVTSS2SI64rm_Int, TB_NO_REVERSE },
+    { X86::VCVTSS2SIrr_Int, X86::VCVTSS2SIrm_Int,     TB_NO_REVERSE },
     { X86::VCVTDQ2PDrr,     X86::VCVTDQ2PDrm,         TB_NO_REVERSE },
     { X86::VCVTDQ2PSrr,     X86::VCVTDQ2PSrm,         0 },
     { X86::VCVTPD2DQrr,     X86::VCVTPD2DQrm,         0 },
@@ -1195,9 +1195,13 @@
     { X86::CMPPDrri,        X86::CMPPDrmi,      TB_ALIGN_16 },
     { X86::CMPPSrri,        X86::CMPPSrmi,      TB_ALIGN_16 },
     { X86::CMPSDrr,         X86::CMPSDrm,       0 },
+    { X86::CMPSDrr_Int,     X86::CMPSDrm_Int,   TB_NO_REVERSE },
     { X86::CMPSSrr,         X86::CMPSSrm,       0 },
+    { X86::CMPSSrr_Int,     X86::CMPSSrm_Int,   TB_NO_REVERSE },
     { X86::CRC32r32r32,     X86::CRC32r32m32,   0 },
     { X86::CRC32r64r64,     X86::CRC32r64m64,   0 },
+    { X86::CVTSD2SSrr_Int,  X86::CVTSD2SSrm_Int,      TB_NO_REVERSE },
+    { X86::CVTSS2SDrr_Int,  X86::CVTSS2SDrm_Int,      TB_NO_REVERSE },
     { X86::DIVPDrr,         X86::DIVPDrm,       TB_ALIGN_16 },
     { X86::DIVPSrr,         X86::DIVPSrm,       TB_ALIGN_16 },
     { X86::DIVSDrr,         X86::DIVSDrm,       0 },
@@ -1213,14 +1217,10 @@
     { X86::IMUL16rr,        X86::IMUL16rm,      0 },
     { X86::IMUL32rr,        X86::IMUL32rm,      0 },
     { X86::IMUL64rr,        X86::IMUL64rm,      0 },
-    { X86::Int_CMPSDrr,     X86::Int_CMPSDrm,   TB_NO_REVERSE },
-    { X86::Int_CMPSSrr,     X86::Int_CMPSSrm,   TB_NO_REVERSE },
-    { X86::Int_CVTSD2SSrr,  X86::Int_CVTSD2SSrm,      TB_NO_REVERSE },
-    { X86::Int_CVTSI2SD64rr,X86::Int_CVTSI2SD64rm,    0 },
-    { X86::Int_CVTSI2SDrr,  X86::Int_CVTSI2SDrm,      0 },
-    { X86::Int_CVTSI2SS64rr,X86::Int_CVTSI2SS64rm,    0 },
-    { X86::Int_CVTSI2SSrr,  X86::Int_CVTSI2SSrm,      0 },
-    { X86::Int_CVTSS2SDrr,  X86::Int_CVTSS2SDrm,      TB_NO_REVERSE },
+    { X86::CVTSI642SDrr_Int,X86::CVTSI642SDrm_Int,    0 },
+    { X86::CVTSI2SDrr_Int,  X86::CVTSI2SDrm_Int,      0 },
+    { X86::CVTSI642SSrr_Int,X86::CVTSI642SSrm_Int,    0 },
+    { X86::CVTSI2SSrr_Int,  X86::CVTSI2SSrm_Int,      0 },
     { X86::MAXPDrr,         X86::MAXPDrm,       TB_ALIGN_16 },
     { X86::MAXCPDrr,        X86::MAXCPDrm,      TB_ALIGN_16 },
     { X86::MAXPSrr,         X86::MAXPSrm,       TB_ALIGN_16 },
@@ -1465,14 +1465,14 @@
     { X86::PMULHRWrr,         X86::PMULHRWrm,         0 },
 
     // AVX 128-bit versions of foldable instructions
-    { X86::VCVTSI2SD64rr,     X86::VCVTSI2SD64rm,      0 },
-    { X86::Int_VCVTSI2SD64rr, X86::Int_VCVTSI2SD64rm,  0 },
+    { X86::VCVTSI642SDrr,     X86::VCVTSI642SDrm,      0 },
+    { X86::VCVTSI642SDrr_Int, X86::VCVTSI642SDrm_Int,  0 },
     { X86::VCVTSI2SDrr,       X86::VCVTSI2SDrm,        0 },
-    { X86::Int_VCVTSI2SDrr,   X86::Int_VCVTSI2SDrm,    0 },
-    { X86::VCVTSI2SS64rr,     X86::VCVTSI2SS64rm,      0 },
-    { X86::Int_VCVTSI2SS64rr, X86::Int_VCVTSI2SS64rm,  0 },
+    { X86::VCVTSI2SDrr_Int,   X86::VCVTSI2SDrm_Int,    0 },
+    { X86::VCVTSI642SSrr,     X86::VCVTSI642SSrm,      0 },
+    { X86::VCVTSI642SSrr_Int, X86::VCVTSI642SSrm_Int,  0 },
     { X86::VCVTSI2SSrr,       X86::VCVTSI2SSrm,        0 },
-    { X86::Int_VCVTSI2SSrr,   X86::Int_VCVTSI2SSrm,    0 },
+    { X86::VCVTSI2SSrr_Int,   X86::VCVTSI2SSrm_Int,    0 },
     { X86::VADDPDrr,          X86::VADDPDrm,           0 },
     { X86::VADDPSrr,          X86::VADDPSrm,           0 },
     { X86::VADDSDrr,          X86::VADDSDrm,           0 },
@@ -1492,7 +1492,9 @@
     { X86::VCMPPDrri,         X86::VCMPPDrmi,          0 },
     { X86::VCMPPSrri,         X86::VCMPPSrmi,          0 },
     { X86::VCMPSDrr,          X86::VCMPSDrm,           0 },
+    { X86::VCMPSDrr_Int,      X86::VCMPSDrm_Int,       TB_NO_REVERSE },
     { X86::VCMPSSrr,          X86::VCMPSSrm,           0 },
+    { X86::VCMPSSrr_Int,      X86::VCMPSSrm_Int,       TB_NO_REVERSE },
     { X86::VDIVPDrr,          X86::VDIVPDrm,           0 },
     { X86::VDIVPSrr,          X86::VDIVPSrm,           0 },
     { X86::VDIVSDrr,          X86::VDIVSDrm,           0 },
@@ -1505,8 +1507,6 @@
     { X86::VHADDPSrr,         X86::VHADDPSrm,          0 },
     { X86::VHSUBPDrr,         X86::VHSUBPDrm,          0 },
     { X86::VHSUBPSrr,         X86::VHSUBPSrm,          0 },
-    { X86::Int_VCMPSDrr,      X86::Int_VCMPSDrm,       TB_NO_REVERSE },
-    { X86::Int_VCMPSSrr,      X86::Int_VCMPSSrm,       TB_NO_REVERSE },
     { X86::VMAXCPDrr,         X86::VMAXCPDrm,          0 },
     { X86::VMAXCPSrr,         X86::VMAXCPSrm,          0 },
     { X86::VMAXCSDrr,         X86::VMAXCSDrm,          0 },
@@ -7995,12 +7995,12 @@
   switch (Opcode) {
   case X86::CVTSI2SSrr:
   case X86::CVTSI2SSrm:
-  case X86::CVTSI2SS64rr:
-  case X86::CVTSI2SS64rm:
+  case X86::CVTSI642SSrr:
+  case X86::CVTSI642SSrm:
   case X86::CVTSI2SDrr:
   case X86::CVTSI2SDrm:
-  case X86::CVTSI2SD64rr:
-  case X86::CVTSI2SD64rm:
+  case X86::CVTSI642SDrr:
+  case X86::CVTSI642SDrm:
   case X86::CVTSD2SSrr:
   case X86::CVTSD2SSrm:
   case X86::CVTSS2SDrr:
@@ -8066,28 +8066,28 @@
   switch (Opcode) {
   case X86::VCVTSI2SSrr:
   case X86::VCVTSI2SSrm:
-  case X86::Int_VCVTSI2SSrr:
-  case X86::Int_VCVTSI2SSrm:
-  case X86::VCVTSI2SS64rr:
-  case X86::VCVTSI2SS64rm:
-  case X86::Int_VCVTSI2SS64rr:
-  case X86::Int_VCVTSI2SS64rm:
+  case X86::VCVTSI2SSrr_Int:
+  case X86::VCVTSI2SSrm_Int:
+  case X86::VCVTSI642SSrr:
+  case X86::VCVTSI642SSrm:
+  case X86::VCVTSI642SSrr_Int:
+  case X86::VCVTSI642SSrm_Int:
   case X86::VCVTSI2SDrr:
   case X86::VCVTSI2SDrm:
-  case X86::Int_VCVTSI2SDrr:
-  case X86::Int_VCVTSI2SDrm:
-  case X86::VCVTSI2SD64rr:
-  case X86::VCVTSI2SD64rm:
-  case X86::Int_VCVTSI2SD64rr:
-  case X86::Int_VCVTSI2SD64rm:
+  case X86::VCVTSI2SDrr_Int:
+  case X86::VCVTSI2SDrm_Int:
+  case X86::VCVTSI642SDrr:
+  case X86::VCVTSI642SDrm:
+  case X86::VCVTSI642SDrr_Int:
+  case X86::VCVTSI642SDrm_Int:
   case X86::VCVTSD2SSrr:
   case X86::VCVTSD2SSrm:
-  case X86::Int_VCVTSD2SSrr:
-  case X86::Int_VCVTSD2SSrm:
+  case X86::VCVTSD2SSrr_Int:
+  case X86::VCVTSD2SSrm_Int:
   case X86::VCVTSS2SDrr:
   case X86::VCVTSS2SDrm:
-  case X86::Int_VCVTSS2SDrr:
-  case X86::Int_VCVTSS2SDrm:
+  case X86::VCVTSS2SDrr_Int:
+  case X86::VCVTSS2SDrm_Int:
   case X86::VRCPSSr:
   case X86::VRCPSSr_Int:
   case X86::VRCPSSm:
@@ -8661,7 +8661,7 @@
     // instruction isn't scalar (SS).
     switch (UserOpc) {
     case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
-    case X86::Int_CMPSSrr: case X86::Int_VCMPSSrr: case X86::VCMPSSZrr_Int:
+    case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
     case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
     case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
     case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
@@ -8712,7 +8712,7 @@
     // instruction isn't scalar (SD).
     switch (UserOpc) {
     case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
-    case X86::Int_CMPSDrr: case X86::Int_VCMPSDrr: case X86::VCMPSDZrr_Int:
+    case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
     case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
     case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
     case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 7150396..a86a0bf 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -1299,11 +1299,11 @@
 // where appropriate to do so.
 defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss{l}",
                                   SSE_CVT_SI2SS>, XS, VEX_4V, VEX_LIG;
-defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}",
+defm VCVTSI642SS : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}",
                                   SSE_CVT_SI2SS>, XS, VEX_4V, VEX_W, VEX_LIG;
 defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}",
                                   SSE_CVT_SI2SD>, XD, VEX_4V, VEX_LIG;
-defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}",
+defm VCVTSI642SD : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}",
                                   SSE_CVT_SI2SD>, XD, VEX_4V, VEX_W, VEX_LIG;
 
 let Predicates = [UseAVX] in {
@@ -1315,20 +1315,20 @@
   def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
             (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
   def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
-            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
+            (VCVTSI642SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
   def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
             (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
   def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
-            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
+            (VCVTSI642SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
 
   def : Pat<(f32 (sint_to_fp GR32:$src)),
             (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
   def : Pat<(f32 (sint_to_fp GR64:$src)),
-            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
+            (VCVTSI642SSrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
   def : Pat<(f64 (sint_to_fp GR32:$src)),
             (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
   def : Pat<(f64 (sint_to_fp GR64:$src)),
-            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
+            (VCVTSI642SDrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
 }
 
 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
@@ -1346,13 +1346,13 @@
 defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                       "cvtsi2ss{l}\t{$src, $dst|$dst, $src}",
                       SSE_CVT_SI2SS>, XS;
-defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
+defm CVTSI642SS : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                       "cvtsi2ss{q}\t{$src, $dst|$dst, $src}",
                       SSE_CVT_SI2SS>, XS, REX_W;
 defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                       "cvtsi2sd{l}\t{$src, $dst|$dst, $src}",
                       SSE_CVT_SI2SD>, XD;
-defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
+defm CVTSI642SD : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                       "cvtsi2sd{q}\t{$src, $dst|$dst, $src}",
                       SSE_CVT_SI2SD>, XD, REX_W;
 
@@ -1386,33 +1386,33 @@
 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
                          string asm, OpndItins itins> {
-  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
-              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
-              [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
-           Sched<[itins.Sched]>;
-  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
-              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
-              [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
-           Sched<[itins.Sched.Folded]>;
+  def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
+                  !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+                  [(set DstRC:$dst, (Int SrcRC:$src))], itins.rr>,
+               Sched<[itins.Sched]>;
+  def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
+                  !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
+                  [(set DstRC:$dst, (Int mem_cpat:$src))], itins.rm>,
+               Sched<[itins.Sched.Folded]>;
 }
 
 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                     RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                     PatFrag ld_frag, string asm, OpndItins itins,
                     bit Is2Addr = 1> {
-  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
-              !if(Is2Addr,
-                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
-                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
-              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
-              itins.rr>, Sched<[itins.Sched]>;
-  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
-              (ins DstRC:$src1, x86memop:$src2),
-              !if(Is2Addr,
-                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
-                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
-              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
-              itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
+  def rr_Int : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
+                  !if(Is2Addr,
+                      !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+                      !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+                  [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))],
+                  itins.rr>, Sched<[itins.Sched]>;
+  def rm_Int : SI<opc, MRMSrcMem, (outs DstRC:$dst),
+                  (ins DstRC:$src1, x86memop:$src2),
+                  !if(Is2Addr,
+                      !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
+                      !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
+                  [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))],
+                  itins.rm>, Sched<[itins.Sched.Folded, ReadAfterLd]>;
 }
 
 let Predicates = [UseAVX] in {
@@ -1431,32 +1431,32 @@
 
 let isCodeGenOnly = 1 in {
   let Predicates = [UseAVX] in {
-  defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+  defm VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
             int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
             SSE_CVT_SI2SS, 0>, XS, VEX_4V;
-  defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+  defm VCVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
             int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
             SSE_CVT_SI2SS, 0>, XS, VEX_4V,
             VEX_W;
-  defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+  defm VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
             int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
             SSE_CVT_SI2SD, 0>, XD, VEX_4V;
-  defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+  defm VCVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
             int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
             SSE_CVT_SI2SD, 0>, XD,
             VEX_4V, VEX_W;
   }
   let Constraints = "$src1 = $dst" in {
-    defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+    defm CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                           int_x86_sse_cvtsi2ss, i32mem, loadi32,
                           "cvtsi2ss{l}", SSE_CVT_SI2SS>, XS;
-    defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+    defm CVTSI642SS : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                           int_x86_sse_cvtsi642ss, i64mem, loadi64,
                           "cvtsi2ss{q}", SSE_CVT_SI2SS>, XS, REX_W;
-    defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+    defm CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                           int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                           "cvtsi2sd{l}", SSE_CVT_SI2SD>, XD;
-    defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+    defm CVTSI642SD : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                           int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                           "cvtsi2sd{q}", SSE_CVT_SI2SD>, XD, REX_W;
   }
@@ -1467,31 +1467,31 @@
 // Aliases for intrinsics
 let isCodeGenOnly = 1 in {
 let Predicates = [UseAVX] in {
-defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
-                                    ssmem, sse_load_f32, "cvttss2si",
-                                    SSE_CVT_SS2SI_32>, XS, VEX;
-defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
-                                   int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
-                                   "cvttss2si", SSE_CVT_SS2SI_64>,
-                                   XS, VEX, VEX_W;
-defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
-                                    sdmem, sse_load_f64, "cvttsd2si",
-                                    SSE_CVT_SD2SI>, XD, VEX;
-defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
-                                  int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
-                                  "cvttsd2si", SSE_CVT_SD2SI>,
-                                  XD, VEX, VEX_W;
+defm VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
+                                ssmem, sse_load_f32, "cvttss2si",
+                                SSE_CVT_SS2SI_32>, XS, VEX;
+defm VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+                               int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
+                               "cvttss2si", SSE_CVT_SS2SI_64>,
+                               XS, VEX, VEX_W;
+defm VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
+                                sdmem, sse_load_f64, "cvttsd2si",
+                                SSE_CVT_SD2SI>, XD, VEX;
+defm VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+                              int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
+                              "cvttsd2si", SSE_CVT_SD2SI>,
+                              XD, VEX, VEX_W;
 }
-defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
+defm CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                     ssmem, sse_load_f32, "cvttss2si",
                                     SSE_CVT_SS2SI_32>, XS;
-defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+defm CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
                                    "cvttss2si", SSE_CVT_SS2SI_64>, XS, REX_W;
-defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
+defm CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                     sdmem, sse_load_f64, "cvttsd2si",
                                     SSE_CVT_SD2SI>, XD;
-defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+defm CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                   int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
                                   "cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
 } // isCodeGenOnly = 1
@@ -1527,39 +1527,39 @@
 
 let Predicates = [UseAVX] in {
 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
-                (VCVTSS2SIrr GR32:$dst, VR128:$src), 0>;
+                (VCVTSS2SIrr_Int GR32:$dst, VR128:$src), 0>;
 def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
-                (VCVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
+                (VCVTSS2SIrm_Int GR32:$dst, ssmem:$src), 0>;
 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
-                (VCVTSD2SIrr GR32:$dst, VR128:$src), 0>;
+                (VCVTSD2SIrr_Int GR32:$dst, VR128:$src), 0>;
 def : InstAlias<"vcvtsd2si{l}\t{$src, $dst|$dst, $src}",
-                (VCVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
+                (VCVTSD2SIrm_Int GR32:$dst, sdmem:$src), 0>;
 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
-                (VCVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
+                (VCVTSS2SI64rr_Int GR64:$dst, VR128:$src), 0>;
 def : InstAlias<"vcvtss2si{q}\t{$src, $dst|$dst, $src}",
-                (VCVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
+                (VCVTSS2SI64rm_Int GR64:$dst, ssmem:$src), 0>;
 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
-                (VCVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
+                (VCVTSD2SI64rr_Int GR64:$dst, VR128:$src), 0>;
 def : InstAlias<"vcvtsd2si{q}\t{$src, $dst|$dst, $src}",
-                (VCVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
+                (VCVTSD2SI64rm_Int GR64:$dst, sdmem:$src), 0>;
 }
 
 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
-                (CVTSS2SIrr GR32:$dst, VR128:$src), 0>;
+                (CVTSS2SIrr_Int GR32:$dst, VR128:$src), 0>;
 def : InstAlias<"cvtss2si{l}\t{$src, $dst|$dst, $src}",
-                (CVTSS2SIrm GR32:$dst, ssmem:$src), 0>;
+                (CVTSS2SIrm_Int GR32:$dst, ssmem:$src), 0>;
 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
-                (CVTSD2SIrr GR32:$dst, VR128:$src), 0>;
+                (CVTSD2SIrr_Int GR32:$dst, VR128:$src), 0>;
 def : InstAlias<"cvtsd2si{l}\t{$src, $dst|$dst, $src}",
-                (CVTSD2SIrm GR32:$dst, sdmem:$src), 0>;
+                (CVTSD2SIrm_Int GR32:$dst, sdmem:$src), 0>;
 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
-                (CVTSS2SI64rr GR64:$dst, VR128:$src), 0>;
+                (CVTSS2SI64rr_Int GR64:$dst, VR128:$src), 0>;
 def : InstAlias<"cvtss2si{q}\t{$src, $dst|$dst, $src}",
-                (CVTSS2SI64rm GR64:$dst, ssmem:$src), 0>;
+                (CVTSS2SI64rm_Int GR64:$dst, ssmem:$src), 0>;
 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
-                (CVTSD2SI64rr GR64:$dst, VR128:$src), 0>;
+                (CVTSD2SI64rr_Int GR64:$dst, VR128:$src), 0>;
 def : InstAlias<"cvtsd2si{q}\t{$src, $dst|$dst, $src}",
-                (CVTSD2SI64rm GR64:$dst, sdmem:$src), 0>;
+                (CVTSD2SI64rm_Int GR64:$dst, sdmem:$src), 0>;
 
 /// SSE 2 Only
 
@@ -1594,14 +1594,14 @@
                   Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;
 
 let isCodeGenOnly = 1 in {
-def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
+def VCVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
                        IIC_SSE_CVT_Scalar_RR>, XD, VEX_4V, VEX_WIG,
                        Requires<[HasAVX]>, Sched<[WriteCvtF2F]>;
-def Int_VCVTSD2SSrm: I<0x5A, MRMSrcMem,
+def VCVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                        "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
@@ -1610,14 +1610,14 @@
                        Requires<[HasAVX]>, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
 
 let Constraints = "$src1 = $dst" in {
-def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg,
+def CVTSD2SSrr_Int: I<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))],
                        IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>,
                        Sched<[WriteCvtF2F]>;
-def Int_CVTSD2SSrm: I<0x5A, MRMSrcMem,
+def CVTSD2SSrm_Int: I<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss
@@ -1677,14 +1677,14 @@
           (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;
 
 let isCodeGenOnly = 1 in {
-def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
+def VCVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
                     IIC_SSE_CVT_Scalar_RR>, XS, VEX_4V, VEX_WIG,
                     Requires<[HasAVX]>, Sched<[WriteCvtF2F]>;
-def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
+def VCVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst,
@@ -1692,14 +1692,14 @@
                     IIC_SSE_CVT_Scalar_RM>, XS, VEX_4V, VEX_WIG,
                     Requires<[HasAVX]>, Sched<[WriteCvtF2FLd, ReadAfterLd]>;
 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
-def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
+def CVTSS2SDrr_Int: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))],
                     IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>,
                     Sched<[WriteCvtF2F]>;
-def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
+def CVTSS2SDrm_Int: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
@@ -1717,33 +1717,33 @@
                    (v4f32 VR128:$dst),
                    (v4f32 (scalar_to_vector
                      (f32 (fpround (f64 (extractelt VR128:$src, (iPTR 0))))))))),
-          (Int_VCVTSD2SSrr VR128:$dst, VR128:$src)>;
+          (VCVTSD2SSrr_Int VR128:$dst, VR128:$src)>;
 
 def : Pat<(v2f64 (X86Movsd
                    (v2f64 VR128:$dst),
                    (v2f64 (scalar_to_vector
                      (f64 (fpextend (f32 (extractelt VR128:$src, (iPTR 0))))))))),
-          (Int_VCVTSS2SDrr VR128:$dst, VR128:$src)>;
+          (VCVTSS2SDrr_Int VR128:$dst, VR128:$src)>;
 
 def : Pat<(v4f32 (X86Movss
                    (v4f32 VR128:$dst),
                    (v4f32 (scalar_to_vector (f32 (sint_to_fp GR64:$src)))))),
-          (Int_VCVTSI2SS64rr VR128:$dst, GR64:$src)>;
+          (VCVTSI642SSrr_Int VR128:$dst, GR64:$src)>;
 
 def : Pat<(v4f32 (X86Movss
                    (v4f32 VR128:$dst),
                    (v4f32 (scalar_to_vector (f32 (sint_to_fp GR32:$src)))))),
-          (Int_VCVTSI2SSrr VR128:$dst, GR32:$src)>;
+          (VCVTSI2SSrr_Int VR128:$dst, GR32:$src)>;
 
 def : Pat<(v2f64 (X86Movsd
                    (v2f64 VR128:$dst),
                    (v2f64 (scalar_to_vector (f64 (sint_to_fp GR64:$src)))))),
-          (Int_VCVTSI2SD64rr VR128:$dst, GR64:$src)>;
+          (VCVTSI642SDrr_Int VR128:$dst, GR64:$src)>;
 
 def : Pat<(v2f64 (X86Movsd
                    (v2f64 VR128:$dst),
                    (v2f64 (scalar_to_vector (f64 (sint_to_fp GR32:$src)))))),
-          (Int_VCVTSI2SDrr VR128:$dst, GR32:$src)>;
+          (VCVTSI2SDrr_Int VR128:$dst, GR32:$src)>;
 } // Predicates = [UseAVX]
 
 let Predicates = [UseSSE2] in {
@@ -1751,35 +1751,35 @@
                    (v4f32 VR128:$dst),
                    (v4f32 (scalar_to_vector
                      (f32 (fpround (f64 (extractelt VR128:$src, (iPTR 0))))))))),
-          (Int_CVTSD2SSrr VR128:$dst, VR128:$src)>;
+          (CVTSD2SSrr_Int VR128:$dst, VR128:$src)>;
 
 def : Pat<(v2f64 (X86Movsd
                    (v2f64 VR128:$dst),
                    (v2f64 (scalar_to_vector
                      (f64 (fpextend (f32 (extractelt VR128:$src, (iPTR 0))))))))),
-          (Int_CVTSS2SDrr VR128:$dst, VR128:$src)>;
+          (CVTSS2SDrr_Int VR128:$dst, VR128:$src)>;
 
 def : Pat<(v2f64 (X86Movsd
                    (v2f64 VR128:$dst),
                    (v2f64 (scalar_to_vector (f64 (sint_to_fp GR64:$src)))))),
-          (Int_CVTSI2SD64rr VR128:$dst, GR64:$src)>;
+          (CVTSI642SDrr_Int VR128:$dst, GR64:$src)>;
 
 def : Pat<(v2f64 (X86Movsd
                    (v2f64 VR128:$dst),
                    (v2f64 (scalar_to_vector (f64 (sint_to_fp GR32:$src)))))),
-          (Int_CVTSI2SDrr VR128:$dst, GR32:$src)>;
+          (CVTSI2SDrr_Int VR128:$dst, GR32:$src)>;
 } // Predicates = [UseSSE2]
 
 let Predicates = [UseSSE1] in {
 def : Pat<(v4f32 (X86Movss
                    (v4f32 VR128:$dst),
                    (v4f32 (scalar_to_vector (f32 (sint_to_fp GR64:$src)))))),
-          (Int_CVTSI2SS64rr VR128:$dst, GR64:$src)>;
+          (CVTSI642SSrr_Int VR128:$dst, GR64:$src)>;
 
 def : Pat<(v4f32 (X86Movss
                    (v4f32 VR128:$dst),
                    (v4f32 (scalar_to_vector (f32 (sint_to_fp GR32:$src)))))),
-          (Int_CVTSI2SSrr VR128:$dst, GR32:$src)>;
+          (CVTSI2SSrr_Int VR128:$dst, GR32:$src)>;
 } // Predicates = [UseSSE1]
 
 // Convert packed single/double fp to doubleword
@@ -2212,14 +2212,14 @@
 multiclass sse12_cmp_scalar_int<Operand memop, Operand CC,
                          Intrinsic Int, string asm, OpndItins itins,
                          ComplexPattern mem_cpat> {
-  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
+  def rr_Int : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                       (ins VR128:$src1, VR128:$src, CC:$cc), asm,
                         [(set VR128:$dst, (Int VR128:$src1,
                                                VR128:$src, imm:$cc))],
                                                itins.rr>,
            Sched<[itins.Sched]>;
 let mayLoad = 1 in
-  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
+  def rm_Int : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                       (ins VR128:$src1, memop:$src, CC:$cc), asm,
                         [(set VR128:$dst, (Int VR128:$src1,
                                                mem_cpat:$src, imm:$cc))],
@@ -2230,21 +2230,21 @@
 let isCodeGenOnly = 1 in {
   // Aliases to match intrinsics which expect XMM operand(s).
   let ExeDomain = SSEPackedSingle in
-  defm Int_VCMPSS  : sse12_cmp_scalar_int<ssmem, AVXCC, int_x86_sse_cmp_ss,
+  defm VCMPSS  : sse12_cmp_scalar_int<ssmem, AVXCC, int_x86_sse_cmp_ss,
                        "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                        SSE_ALU_F32S, sse_load_f32>, XS, VEX_4V;
   let ExeDomain = SSEPackedDouble in
-  defm Int_VCMPSD  : sse12_cmp_scalar_int<sdmem, AVXCC, int_x86_sse2_cmp_sd,
+  defm VCMPSD  : sse12_cmp_scalar_int<sdmem, AVXCC, int_x86_sse2_cmp_sd,
                        "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                        SSE_ALU_F32S, sse_load_f64>, // same latency as f32
                        XD, VEX_4V;
   let Constraints = "$src1 = $dst" in {
     let ExeDomain = SSEPackedSingle in
-    defm Int_CMPSS  : sse12_cmp_scalar_int<ssmem, SSECC, int_x86_sse_cmp_ss,
+    defm CMPSS  : sse12_cmp_scalar_int<ssmem, SSECC, int_x86_sse_cmp_ss,
                          "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                          SSE_ALU_F32S, sse_load_f32>, XS;
     let ExeDomain = SSEPackedDouble in
-    defm Int_CMPSD  : sse12_cmp_scalar_int<sdmem, SSECC, int_x86_sse2_cmp_sd,
+    defm CMPSD  : sse12_cmp_scalar_int<sdmem, SSECC, int_x86_sse2_cmp_sd,
                          "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                          SSE_ALU_F64S, sse_load_f64>, XD;
 }
diff --git a/llvm/lib/Target/X86/X86SchedBroadwell.td b/llvm/lib/Target/X86/X86SchedBroadwell.td
index f4c4b0a..6e5f6f3 100755
--- a/llvm/lib/Target/X86/X86SchedBroadwell.td
+++ b/llvm/lib/Target/X86/X86SchedBroadwell.td
@@ -1602,7 +1602,7 @@
 def: InstRW<[BWWriteResGroup42], (instregex "CVTPD2DQrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "CVTPD2PSrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "CVTSD2SSrr")>;
-def: InstRW<[BWWriteResGroup42], (instregex "CVTSI2SD64rr")>;
+def: InstRW<[BWWriteResGroup42], (instregex "CVTSI642SDrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "CVTSI2SDrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "CVTSI2SSrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "CVTTPD2DQrr")>;
@@ -1619,7 +1619,7 @@
 def: InstRW<[BWWriteResGroup42], (instregex "VCVTPD2PSrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "VCVTPS2PHrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "VCVTSD2SSrr")>;
-def: InstRW<[BWWriteResGroup42], (instregex "VCVTSI2SD64rr")>;
+def: InstRW<[BWWriteResGroup42], (instregex "VCVTSI642SDrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "VCVTSI2SDrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "VCVTSI2SSrr")>;
 def: InstRW<[BWWriteResGroup42], (instregex "VCVTTPD2DQrr")>;
@@ -1890,12 +1890,12 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,2];
 }
-def: InstRW<[BWWriteResGroup50], (instregex "CVTSI2SS64rr")>;
+def: InstRW<[BWWriteResGroup50], (instregex "CVTSI642SSrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "HADDPDrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "HADDPSrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "HSUBPDrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "HSUBPSrr")>;
-def: InstRW<[BWWriteResGroup50], (instregex "VCVTSI2SS64rr")>;
+def: InstRW<[BWWriteResGroup50], (instregex "VCVTSI642SSrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "VHADDPDYrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "VHADDPDrr")>;
 def: InstRW<[BWWriteResGroup50], (instregex "VHADDPSYrr")>;
diff --git a/llvm/lib/Target/X86/X86SchedHaswell.td b/llvm/lib/Target/X86/X86SchedHaswell.td
index 24dd081..c8a876a 100644
--- a/llvm/lib/Target/X86/X86SchedHaswell.td
+++ b/llvm/lib/Target/X86/X86SchedHaswell.td
@@ -3122,7 +3122,7 @@
 def: InstRW<[HWWriteResGroup73], (instregex "CVTPD2DQrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "CVTPD2PSrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "CVTSD2SSrr")>;
-def: InstRW<[HWWriteResGroup73], (instregex "CVTSI2SD64rr")>;
+def: InstRW<[HWWriteResGroup73], (instregex "CVTSI642SDrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "CVTSI2SDrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "CVTSI2SSrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "CVTTPD2DQrr")>;
@@ -3136,7 +3136,7 @@
 def: InstRW<[HWWriteResGroup73], (instregex "VCVTPD2PSrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "VCVTPS2PHrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "VCVTSD2SSrr")>;
-def: InstRW<[HWWriteResGroup73], (instregex "VCVTSI2SD64rr")>;
+def: InstRW<[HWWriteResGroup73], (instregex "VCVTSI642SDrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "VCVTSI2SDrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "VCVTSI2SSrr")>;
 def: InstRW<[HWWriteResGroup73], (instregex "VCVTTPD2DQrr")>;
@@ -3688,12 +3688,12 @@
   let NumMicroOps = 3;
   let ResourceCycles = [1,2];
 }
-def: InstRW<[HWWriteResGroup93], (instregex "CVTSI2SS64rr")>;
+def: InstRW<[HWWriteResGroup93], (instregex "CVTSI642SSrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "HADDPDrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "HADDPSrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "HSUBPDrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "HSUBPSrr")>;
-def: InstRW<[HWWriteResGroup93], (instregex "VCVTSI2SS64rr")>;
+def: InstRW<[HWWriteResGroup93], (instregex "VCVTSI642SSrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "VHADDPDYrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "VHADDPDrr")>;
 def: InstRW<[HWWriteResGroup93], (instregex "VHADDPSYrr")>;
diff --git a/llvm/lib/Target/X86/X86SchedSandyBridge.td b/llvm/lib/Target/X86/X86SchedSandyBridge.td
index fb133e9..4466d30 100644
--- a/llvm/lib/Target/X86/X86SchedSandyBridge.td
+++ b/llvm/lib/Target/X86/X86SchedSandyBridge.td
@@ -1180,7 +1180,7 @@
 def: InstRW<[SBWriteResGroup28], (instregex "CVTPD2DQrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "CVTPD2PSrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "CVTSD2SSrr")>;
-def: InstRW<[SBWriteResGroup28], (instregex "CVTSI2SD64rr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "CVTSI642SDrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "CVTSI2SDrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "CVTTPD2DQrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "MMX_CVTPD2PIirr")>;
@@ -1193,7 +1193,7 @@
 def: InstRW<[SBWriteResGroup28], (instregex "VCVTPD2PSYrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "VCVTPD2PSrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "VCVTSD2SSrr")>;
-def: InstRW<[SBWriteResGroup28], (instregex "VCVTSI2SD64rr")>;
+def: InstRW<[SBWriteResGroup28], (instregex "VCVTSI642SDrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "VCVTSI2SDrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "VCVTTPD2DQYrr")>;
 def: InstRW<[SBWriteResGroup28], (instregex "VCVTTPD2DQrr")>;
@@ -1361,13 +1361,13 @@
   let ResourceCycles = [1,2];
 }
 def: InstRW<[SBWriteResGroup35], (instregex "CLI")>;
-def: InstRW<[SBWriteResGroup35], (instregex "CVTSI2SS64rr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "CVTSI642SSrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "CVTSI2SSrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "HADDPDrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "HADDPSrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "HSUBPDrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "HSUBPSrr")>;
-def: InstRW<[SBWriteResGroup35], (instregex "VCVTSI2SS64rr")>;
+def: InstRW<[SBWriteResGroup35], (instregex "VCVTSI642SSrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "VCVTSI2SSrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "VHADDPDYrr")>;
 def: InstRW<[SBWriteResGroup35], (instregex "VHADDPDrr")>;
@@ -2272,7 +2272,7 @@
 def: InstRW<[SBWriteResGroup90], (instregex "CMPSSrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "CVTDQ2PSrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "CVTPS2DQrm")>;
-def: InstRW<[SBWriteResGroup90], (instregex "CVTSI2SD64rm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "CVTSI642SDrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "CVTSI2SDrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "CVTTPS2DQrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "MAX(C?)PDrm")>;
@@ -2307,7 +2307,7 @@
 def: InstRW<[SBWriteResGroup90], (instregex "VCMPSSrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "VCVTDQ2PSrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "VCVTPS2DQrm")>;
-def: InstRW<[SBWriteResGroup90], (instregex "VCVTSI2SD64rm")>;
+def: InstRW<[SBWriteResGroup90], (instregex "VCVTSI642SDrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "VCVTSI2SDrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "VCVTTPS2DQrm")>;
 def: InstRW<[SBWriteResGroup90], (instregex "VMAX(C?)PDrm")>;
@@ -2506,7 +2506,7 @@
 def: InstRW<[SBWriteResGroup103], (instregex "CVTPD2DQrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "CVTPD2PSrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "CVTSD2SSrm")>;
-def: InstRW<[SBWriteResGroup103], (instregex "CVTSI2SS64rm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "CVTSI642SSrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "CVTSI2SSrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "CVTTPD2DQrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "MMX_CVTPD2PIirm")>;
@@ -2517,7 +2517,7 @@
 def: InstRW<[SBWriteResGroup103], (instregex "VCVTPD2DQrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "VCVTPD2PSrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "VCVTSD2SSrm")>;
-def: InstRW<[SBWriteResGroup103], (instregex "VCVTSI2SS64rm")>;
+def: InstRW<[SBWriteResGroup103], (instregex "VCVTSI642SSrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "VCVTSI2SSrm")>;
 def: InstRW<[SBWriteResGroup103], (instregex "VCVTTPD2DQrm")>;
 
diff --git a/llvm/lib/Target/X86/X86SchedSkylakeClient.td b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
index 3e8b996..59f535c 100644
--- a/llvm/lib/Target/X86/X86SchedSkylakeClient.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeClient.td
@@ -1918,7 +1918,7 @@
 def: InstRW<[SKLWriteResGroup60], (instregex "CVTPD2PSrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "CVTPS2PDrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "CVTSD2SSrr")>;
-def: InstRW<[SKLWriteResGroup60], (instregex "CVTSI2SD64rr")>;
+def: InstRW<[SKLWriteResGroup60], (instregex "CVTSI642SDrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "CVTSI2SDrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "CVTSI2SSrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "CVTSS2SDrr")>;
@@ -1933,7 +1933,7 @@
 def: InstRW<[SKLWriteResGroup60], (instregex "VCVTPS2PDrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "VCVTPS2PHrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "VCVTSD2SSrr")>;
-def: InstRW<[SKLWriteResGroup60], (instregex "VCVTSI2SD64rr")>;
+def: InstRW<[SKLWriteResGroup60], (instregex "VCVTSI642SDrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "VCVTSI2SDrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "VCVTSI2SSrr")>;
 def: InstRW<[SKLWriteResGroup60], (instregex "VCVTSS2SDrr")>;
@@ -2265,8 +2265,8 @@
   let NumMicroOps = 3;
   let ResourceCycles = [2,1];
 }
-def: InstRW<[SKLWriteResGroup78], (instregex "CVTSI2SS64rr")>;
-def: InstRW<[SKLWriteResGroup78], (instregex "VCVTSI2SS64rr")>;
+def: InstRW<[SKLWriteResGroup78], (instregex "CVTSI642SSrr")>;
+def: InstRW<[SKLWriteResGroup78], (instregex "VCVTSI642SSrr")>;
 
 def SKLWriteResGroup79 : SchedWriteRes<[SKLPort1,SKLPort06,SKLPort0156]> {
   let Latency = 6;
diff --git a/llvm/lib/Target/X86/X86SchedSkylakeServer.td b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
index 769c821..d97c2f6 100755
--- a/llvm/lib/Target/X86/X86SchedSkylakeServer.td
+++ b/llvm/lib/Target/X86/X86SchedSkylakeServer.td
@@ -3100,7 +3100,7 @@
 def: InstRW<[SKXWriteResGroup61], (instregex "CVTPD2PSrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "CVTPS2PDrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "CVTSD2SSrr")>;
-def: InstRW<[SKXWriteResGroup61], (instregex "CVTSI2SD64rr")>;
+def: InstRW<[SKXWriteResGroup61], (instregex "CVTSI642SDrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "CVTSI2SDrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "CVTSI2SSrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "CVTSS2SDrr")>;
@@ -3125,7 +3125,7 @@
 def: InstRW<[SKXWriteResGroup61], (instregex "VCVTPS2UQQZ128rr(b?)(k?)(z?)")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "VCVTQQ2PSZ128rr(b?)(k?)(z?)")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "VCVTSD2SSrr")>;
-def: InstRW<[SKXWriteResGroup61], (instregex "VCVTSI2SD64rr")>;
+def: InstRW<[SKXWriteResGroup61], (instregex "VCVTSI642SDrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "VCVTSI2SDZrr(b?)(k?)(z?)")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "VCVTSI2SDrr")>;
 def: InstRW<[SKXWriteResGroup61], (instregex "VCVTSI2SSZrr(b?)(k?)(z?)")>;
@@ -3566,12 +3566,12 @@
   let NumMicroOps = 3;
   let ResourceCycles = [2,1];
 }
-def: InstRW<[SKXWriteResGroup82], (instregex "CVTSI2SS64rr")>;
+def: InstRW<[SKXWriteResGroup82], (instregex "CVTSI642SSrr")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "HADDPDrr")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "HADDPSrr")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "HSUBPDrr")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "HSUBPSrr")>;
-def: InstRW<[SKXWriteResGroup82], (instregex "VCVTSI2SS64rr")>;
+def: InstRW<[SKXWriteResGroup82], (instregex "VCVTSI642SSrr")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "VCVTSI642SSZrr(b?)(k?)(z?)")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "VCVTUSI642SSZrr(b?)(k?)(z?)")>;
 def: InstRW<[SKXWriteResGroup82], (instregex "VHADDPDYrr")>;