[Hexagon] New HVX target features.

This patch lets the llvm tools handle the new HVX target features that
are added by the frontend (clang). The target features are of the form
"hvx-length64b" for 64 Byte HVX mode and "hvx-length128b" for 128 Byte
HVX mode. "hvx-double" is an alias for "hvx-length128b" and will soon be
deprecated. The HVX version target feature is updated from "+hvx" to
"+hvxv{version_number}", e.g. "+hvxv62".
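
For illustration, these features end up in the "target-features"
function attribute in LLVM IR. A minimal sketch for 64 Byte mode (the
function name and attribute-group number are hypothetical):

  define <16 x i32> @f(<16 x i32> %a, <16 x i32> %b) #0 {
    ; <16 x i32> is one 64-byte HVX vector register
    %v = add <16 x i32> %a, %b
    ret <16 x i32> %v
  }
  attributes #0 = { "target-cpu"="hexagonv62"
                    "target-features"="+hvxv62,+hvx-length64b" }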

For correct HVX code generation, the user must use the following
target features:
For 64B mode: "+hvxv62" "+hvx-length64b"
For 128B mode: "+hvxv62" "+hvx-length128b"
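
For example, a typical llc invocation for 64B mode might look like this
(a sketch; the input file name is hypothetical, and clang normally adds
these features automatically):

  llc -march=hexagon -mcpu=hexagonv62 -mattr=+hvxv62,+hvx-length64b foo.ll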

Clang picks a default length if none is specified. If, for some reason,
no hvx-length feature is passed to llvm, the compilation will bail out.
There is a corresponding clang patch.

Differential Revision: https://reviews.llvm.org/D38851

llvm-svn: 316101
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index d9d8dbe..957fc8ca 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -357,7 +357,7 @@
   auto &MF = State.getMachineFunction();
   auto &HST = MF.getSubtarget<HexagonSubtarget>();
 
-  if (HST.useHVXSglOps() &&
+  if (HST.useHVX64BOps() &&
       (LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||
        LocVT == MVT::v64i8 || LocVT == MVT::v512i1)) {
     if (unsigned Reg = State.AllocateReg(VecLstS)) {
@@ -368,7 +368,7 @@
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
     return false;
   }
-  if (HST.useHVXSglOps() && (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 ||
+  if (HST.useHVX64BOps() && (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 ||
                              LocVT == MVT::v64i16 || LocVT == MVT::v128i8)) {
     if (unsigned Reg = State.AllocateReg(VecLstD)) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -379,8 +379,8 @@
     return false;
   }
   // 128B Mode
-  if (HST.useHVXDblOps() && (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 ||
-                             LocVT == MVT::v128i16 || LocVT == MVT::v256i8)) {
+  if (HST.useHVX128BOps() && (LocVT == MVT::v32i64 || LocVT == MVT::v64i32 ||
+                              LocVT == MVT::v128i16 || LocVT == MVT::v256i8)) {
     if (unsigned Reg = State.AllocateReg(VecLstD)) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
       return false;
@@ -389,7 +389,7 @@
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
     return false;
   }
-  if (HST.useHVXDblOps() &&
+  if (HST.useHVX128BOps() &&
       (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||
        LocVT == MVT::v128i8 || LocVT == MVT::v1024i1)) {
     if (unsigned Reg = State.AllocateReg(VecLstS)) {
@@ -437,7 +437,7 @@
     LocInfo = CCValAssign::Full;
   } else if (LocVT == MVT::v128i8 || LocVT == MVT::v64i16 ||
              LocVT == MVT::v32i32 || LocVT == MVT::v16i64 ||
-             (LocVT == MVT::v1024i1 && HST.useHVXDblOps())) {
+             (LocVT == MVT::v1024i1 && HST.useHVX128BOps())) {
     LocVT = MVT::v32i32;
     ValVT = MVT::v32i32;
     LocInfo = CCValAssign::Full;
@@ -507,7 +507,7 @@
       return false;
     }
   } else if (LocVT == MVT::v32i32) {
-    unsigned Req = HST.useHVXDblOps() ? Hexagon::V0 : Hexagon::W0;
+    unsigned Req = HST.useHVX128BOps() ? Hexagon::V0 : Hexagon::W0;
     if (unsigned Reg = State.AllocateReg(Req)) {
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
       return false;
@@ -827,9 +827,9 @@
     DEBUG(dbgs() << "Function needs byte stack align due to call args\n");
     // V6 vectors passed by value have 64 or 128 byte alignment depending
     // on whether we are 64 byte vector mode or 128 byte.
-    bool UseHVXDbl = Subtarget.useHVXDblOps();
+    bool UseHVX128B = Subtarget.useHVX128BOps();
     assert(Subtarget.useHVXOps());
-    const unsigned ObjAlign = UseHVXDbl ? 128 : 64;
+    const unsigned ObjAlign = UseHVX128B ? 128 : 64;
     LargestAlignSeen = std::max(LargestAlignSeen, ObjAlign);
     MFI.ensureMaxAlignment(LargestAlignSeen);
   }
@@ -940,15 +940,15 @@
 
   auto &HST = static_cast<const HexagonSubtarget&>(DAG.getSubtarget());
 
-  bool ValidHVXDblType =
-      HST.useHVXDblOps() && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
-                             VT == MVT::v64i16 || VT == MVT::v128i8);
+  bool ValidHVX128BType =
+      HST.useHVX128BOps() && (VT == MVT::v32i32 || VT == MVT::v16i64 ||
+                              VT == MVT::v64i16 || VT == MVT::v128i8);
   bool ValidHVXType =
-      HST.useHVXSglOps() && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
+      HST.useHVX64BOps() && (VT == MVT::v16i32 || VT == MVT::v8i64 ||
                              VT == MVT::v32i16 || VT == MVT::v64i8);
 
-  if (ValidHVXDblType || ValidHVXType ||
-      VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
+  if (ValidHVX128BType || ValidHVXType || VT == MVT::i64 || VT == MVT::i32 ||
+      VT == MVT::i16 || VT == MVT::i8) {
     IsInc = (Ptr->getOpcode() == ISD::ADD);
     Base = Ptr->getOperand(0);
     Offset = Ptr->getOperand(1);
@@ -1182,7 +1182,7 @@
           RegInfo.createVirtualRegister(&Hexagon::HvxVRRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
-      } else if (Subtarget.useHVXDblOps() &&
+      } else if (Subtarget.useHVX128BOps() &&
                  ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||
                    RegVT == MVT::v64i16 || RegVT == MVT::v128i8))) {
         unsigned VReg =
@@ -1197,7 +1197,7 @@
           RegInfo.createVirtualRegister(&Hexagon::HvxWRRegClass);
         RegInfo.addLiveIn(VA.getLocReg(), VReg);
         InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
-      } else if (Subtarget.useHVXDblOps() &&
+      } else if (Subtarget.useHVX128BOps() &&
                  ((RegVT == MVT::v32i64 || RegVT == MVT::v64i32 ||
                    RegVT == MVT::v128i16 || RegVT == MVT::v256i8))) {
         unsigned VReg =
@@ -1743,7 +1743,7 @@
   }
 
   if (Subtarget.hasV60TOps()) {
-    if (Subtarget.useHVXSglOps()) {
+    if (Subtarget.useHVX64BOps()) {
       addRegisterClass(MVT::v64i8,  &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v32i16, &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v16i32, &Hexagon::HvxVRRegClass);
@@ -1753,7 +1753,7 @@
       addRegisterClass(MVT::v32i32, &Hexagon::HvxWRRegClass);
       addRegisterClass(MVT::v16i64, &Hexagon::HvxWRRegClass);
       addRegisterClass(MVT::v512i1, &Hexagon::HvxQRRegClass);
-    } else if (Subtarget.useHVXDblOps()) {
+    } else if (Subtarget.useHVX128BOps()) {
       addRegisterClass(MVT::v128i8,  &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v64i16,  &Hexagon::HvxVRRegClass);
       addRegisterClass(MVT::v32i32,  &Hexagon::HvxVRRegClass);
@@ -1992,7 +1992,7 @@
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
 
   if (Subtarget.useHVXOps()) {
-    if (Subtarget.useHVXSglOps()) {
+    if (Subtarget.useHVX64BOps()) {
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i8,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i16,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i32,  Custom);
@@ -2004,7 +2004,7 @@
       setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v64i8, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i16, Custom);
       setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
-    } else if (Subtarget.useHVXDblOps()) {
+    } else if (Subtarget.useHVX128BOps()) {
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v256i8,  Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v128i16, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i32,  Custom);
@@ -2082,13 +2082,13 @@
     setIndexedStoreAction(ISD::POST_INC, VT, Legal);
   }
 
-  if (Subtarget.useHVXSglOps()) {
+  if (Subtarget.useHVX64BOps()) {
     for (MVT VT : {MVT::v64i8,  MVT::v32i16, MVT::v16i32, MVT::v8i64,
                    MVT::v128i8, MVT::v64i16, MVT::v32i32, MVT::v16i64}) {
       setIndexedLoadAction(ISD::POST_INC, VT, Legal);
       setIndexedStoreAction(ISD::POST_INC, VT, Legal);
     }
-  } else if (Subtarget.useHVXDblOps()) {
+  } else if (Subtarget.useHVX128BOps()) {
     for (MVT VT : {MVT::v128i8, MVT::v64i16,  MVT::v32i32, MVT::v16i64,
                    MVT::v256i8, MVT::v128i16, MVT::v64i32, MVT::v32i64}) {
       setIndexedLoadAction(ISD::POST_INC, VT, Legal);
@@ -2353,8 +2353,8 @@
     size_t MaskLen = Mask.size();
     unsigned SizeInBits = VT.getScalarSizeInBits() * MaskLen;
 
-    if ((Subtarget.useHVXSglOps() && SizeInBits == 64 * 8) ||
-        (Subtarget.useHVXDblOps() && SizeInBits == 128 * 8)) {
+    if ((Subtarget.useHVX64BOps() && SizeInBits == 64 * 8) ||
+        (Subtarget.useHVX128BOps() && SizeInBits == 128 * 8)) {
       StridedLoadKind Pattern = isStridedLoad(Mask);
       if (Pattern == StridedLoadKind::NoPattern)
         return SDValue();
@@ -2617,11 +2617,11 @@
       return DAG.getNode(HexagonISD::COMBINE, dl, VT, Op.getOperand(1), Vec0);
 
     if (UseHVX) {
-      assert((Width ==  64*8 && Subtarget.useHVXSglOps()) ||
-             (Width == 128*8 && Subtarget.useHVXDblOps()));
+      assert((Width == 64 * 8 && Subtarget.useHVX64BOps()) ||
+             (Width == 128 * 8 && Subtarget.useHVX128BOps()));
       SDValue Vec1 = Op.getOperand(1);
-      MVT OpTy = Subtarget.useHVXSglOps() ? MVT::v16i32 : MVT::v32i32;
-      MVT ReTy = Subtarget.useHVXSglOps() ? MVT::v32i32 : MVT::v64i32;
+      MVT OpTy = Subtarget.useHVX64BOps() ? MVT::v16i32 : MVT::v32i32;
+      MVT ReTy = Subtarget.useHVX64BOps() ? MVT::v32i32 : MVT::v64i32;
       SDValue B0 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec0);
       SDValue B1 = DAG.getNode(ISD::BITCAST, dl, OpTy, Vec1);
       SDValue VC = DAG.getNode(HexagonISD::VCOMBINE, dl, ReTy, B1, B0);
@@ -2667,7 +2667,7 @@
   EVT VT = Op.getOperand(0).getValueType();
   SDLoc dl(Op);
   bool UseHVX = Subtarget.useHVXOps();
-  bool UseHVXSgl = Subtarget.useHVXSglOps();
+  bool UseHVX64B = Subtarget.useHVX64BOps();
   // Just in case...
 
   if (!VT.isVector() || !UseHVX)
@@ -2675,7 +2675,7 @@
 
   EVT ResVT = Op.getValueType();
   unsigned ResSize = ResVT.getSizeInBits();
-  unsigned VectorSizeInBits = UseHVXSgl ? (64 * 8) : (128 * 8);
+  unsigned VectorSizeInBits = UseHVX64B ? (64 * 8) : (128 * 8);
   unsigned OpSize = VT.getSizeInBits();
 
   // We deal only with cases where the result is the vector size
@@ -3001,7 +3001,7 @@
       case 512:
         return std::make_pair(0U, &Hexagon::HvxVRRegClass);
       case 1024:
-        if (Subtarget.hasV60TOps() && Subtarget.useHVXDblOps())
+        if (Subtarget.hasV60TOps() && Subtarget.useHVX128BOps())
           return std::make_pair(0U, &Hexagon::HvxVRRegClass);
         return std::make_pair(0U, &Hexagon::HvxWRRegClass);
       case 2048:
@@ -3204,7 +3204,7 @@
   case MVT::v32i32:
   case MVT::v16i64:
     if (Subtarget.hasV60TOps() && Subtarget.useHVXOps() &&
-        Subtarget.useHVXDblOps())
+        Subtarget.useHVX128BOps())
       RRC = &Hexagon::HvxVRRegClass;
     else
       RRC = &Hexagon::HvxWRRegClass;