[X86] Teach fast isel to use MOV32ri64 for loading an unsigned 32-bit immediate into a 64-bit register.

Previously we used SUBREG_TO_REG+MOV32ri, but regular isel was recently changed to use the MOV32ri64 pseudo. Fast isel now does the same.
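
For reference, a rough sketch of the MIR before and after this change (the immediate and virtual register numbers are illustrative, not taken from a real compile):

  Before:
    %0:gr32 = MOV32ri 42                              ; illustrative example
    %1:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit  ; zero-extend into GR64

  After:
    %0:gr64 = MOV32ri64 42                            ; pseudo, expanded later to a 32-bit mov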

llvm-svn: 342788
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index b9a40b5..888c43a 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3744,7 +3744,7 @@
   case MVT::i32: Opc = X86::MOV32ri; break;
   case MVT::i64: {
     if (isUInt<32>(Imm))
-      Opc = X86::MOV32ri;
+      Opc = X86::MOV32ri64;
     else if (isInt<32>(Imm))
       Opc = X86::MOV64ri32;
     else
@@ -3752,14 +3752,6 @@
     break;
   }
   }
-  if (VT == MVT::i64 && Opc == X86::MOV32ri) {
-    unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm);
-    unsigned ResultReg = createResultReg(&X86::GR64RegClass);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-            TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
-      .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
-    return ResultReg;
-  }
   return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
 }
 
diff --git a/llvm/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll b/llvm/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll
index 01c225d..140614b 100644
--- a/llvm/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll
+++ b/llvm/test/CodeGen/X86/bmi-intrinsics-fast-isel-x86_64.ll
@@ -67,9 +67,9 @@
 define i64 @test__tzcnt_u64(i64 %a0) {
 ; X64-LABEL: test__tzcnt_u64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl $64, %ecx
-; X64-NEXT:    tzcntq %rdi, %rax
-; X64-NEXT:    cmovbq %rcx, %rax
+; X64-NEXT:    tzcntq %rdi, %rcx
+; X64-NEXT:    movl $64, %eax
+; X64-NEXT:    cmovaeq %rcx, %rax
 ; X64-NEXT:    retq
   %cmp = icmp ne i64 %a0, 0
   %cttz = call i64 @llvm.cttz.i64(i64 %a0, i1 true)
@@ -151,9 +151,9 @@
 define i64 @test_tzcnt_u64(i64 %a0) {
 ; X64-LABEL: test_tzcnt_u64:
 ; X64:       # %bb.0:
-; X64-NEXT:    movl $64, %ecx
-; X64-NEXT:    tzcntq %rdi, %rax
-; X64-NEXT:    cmovbq %rcx, %rax
+; X64-NEXT:    tzcntq %rdi, %rcx
+; X64-NEXT:    movl $64, %eax
+; X64-NEXT:    cmovaeq %rcx, %rax
 ; X64-NEXT:    retq
   %cmp = icmp ne i64 %a0, 0
   %cttz = call i64 @llvm.cttz.i64(i64 %a0, i1 true)