AMDGPU: Use i16 for i16 shift amount

getScalarShiftAmountTy now returns MVT::i16 when the value being
shifted is i16 (previously always MVT::i32), and the reversed i16
shift patterns in VOP2Instructions.td are updated to match an i16
shift amount, selecting the _e64 forms of V_LSHLREV_B16,
V_LSHRREV_B16, and V_ASHRREV_I16.

llvm-svn: 290351
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0fc27b7..fa53831 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1811,8 +1811,10 @@
   return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
 }
 
-MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const {
-  return MVT::i32;
+MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
+  // TODO: Should i16 be used always if legal? For now it would force VALU
+  // shifts.
+  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
 }
 
 // Answering this is somewhat tricky and depends on the specific device which
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 0b18e00..1770d06 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -416,18 +416,18 @@
 multiclass Bits_OpsRev_i16_Pats <SDPatternOperator op, Instruction inst> {
 
 def : Pat<
-  (op i16:$src0, i32:$src1),
+  (op i16:$src0, i16:$src1),
   (inst $src1, $src0)
 >;
 
 def : Pat<
-  (i32 (zext (op i16:$src0, i32:$src1))),
+  (i32 (zext (op i16:$src0, i16:$src1))),
   (inst $src1, $src0)
 >;
 
 
 def : Pat<
-  (i64 (zext (op i16:$src0, i32:$src1))),
+  (i64 (zext (op i16:$src0, i16:$src1))),
    (REG_SEQUENCE VReg_64,
      (inst $src1, $src0), sub0,
      (V_MOV_B32_e32 (i32 0)), sub1)
@@ -464,9 +464,9 @@
   (V_XOR_B32_e64 $src0, $src1)
 >;
 
-defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e32>;
-defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e32>;
-defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_I16_e32>;
+defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e64>;
+defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e64>;
+defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_I16_e64>;
 
 def : ZExt_i16_i1_Pat<zext>;
 def : ZExt_i16_i1_Pat<anyext>;