R600/SI: cleanup literal handling v3

This is a lot simpler, and also paves the
way for further improvements.

v2: rebased on master, use 0 in BUFFER_LOAD_FORMAT_XYZW,
    use VGPR0 in dummy EXP, avoid compiler warning, break
    after encoding the first literal.
v3: correctly use V_ADD_F32_e64

This is a candidate for the stable branch.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Tom Stellard <thomas.stellard@amd.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175354 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index f6d00f5..4dfd26e 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -68,7 +68,7 @@
 
 MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
                                            int64_t Imm) const {
-  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_IMM_I32), DebugLoc());
+  MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
   MachineInstrBuilder MIB(*MF, MI);
   MIB.addReg(DstReg, RegState::Define);
   MIB.addImm(Imm);
@@ -84,9 +84,6 @@
   case AMDGPU::S_MOV_B64:
   case AMDGPU::V_MOV_B32_e32:
   case AMDGPU::V_MOV_B32_e64:
-  case AMDGPU::V_MOV_IMM_F32:
-  case AMDGPU::V_MOV_IMM_I32:
-  case AMDGPU::S_MOV_IMM_I32:
     return true;
   }
 }
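
Note: for context, after this patch the isMov predicate covers only the
real hardware move opcodes. A minimal sketch of the resulting function,
assuming the function signature and the S_MOV_B32 case that sit outside
the hunk's context lines are unchanged:

    bool SIInstrInfo::isMov(unsigned Opcode) const {
      switch (Opcode) {
      default: return false;
      case AMDGPU::S_MOV_B32:    // assumed present just above the hunk context
      case AMDGPU::S_MOV_B64:
      case AMDGPU::V_MOV_B32_e32:
      case AMDGPU::V_MOV_B32_e64:
        return true;
      }
    }

Immediates are now carried as ordinary operands of these moves and
encoded as literals late, rather than through the removed
V_MOV_IMM_*/S_MOV_IMM_I32 pseudo instructions.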