[ARM] Add patterns for bitreverse intrinsic on MVE

BITREVERSE can be selected to VBRSR, which reverses the bits of each
element and then shifts the result right. Using the element size as the
shift operand gives a right shift of zero, leaving a plain bit reversal.
The v2i64 case is handled as a VREV64.8 followed by a byte-wise VBRSR,
as shown in the new test.
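
A minimal sketch (not part of this commit) of source that can produce
the llvm.bitreverse nodes selected here, assuming clang's
__builtin_bitreverse32 and vectorization for an MVE-enabled target such
as the thumbv8.1m.main triple used in the test below:

  // Hypothetical example; the loop vectorizer can widen the scalar
  // llvm.bitreverse.i32 calls to llvm.bitreverse.v4i32, which now
  // selects to a single VBRSR.32 with the shift register holding 32.
  void brv32(unsigned int *a, int n) {
    for (int i = 0; i < n; i++)
      a[i] = __builtin_bitreverse32(a[i]);
  }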

llvm-svn: 372001
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index d8e3b09..c5848c9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -263,6 +263,7 @@
     setOperationAction(ISD::MSTORE, VT, Legal);
     setOperationAction(ISD::CTLZ, VT, Legal);
     setOperationAction(ISD::CTTZ, VT, Expand);
+    setOperationAction(ISD::BITREVERSE, VT, Legal);
 
     // No native support for these.
     setOperationAction(ISD::UDIV, VT, Expand);
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index aa12d50..b931dd0 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -3772,6 +3772,17 @@
 def MVE_VBRSR16 : MVE_VBRSR<"vbrsr", "16", 0b01>;
 def MVE_VBRSR32 : MVE_VBRSR<"vbrsr", "32", 0b10>;
 
+let Predicates = [HasMVEInt] in {
+  def : Pat<(v16i8 (bitreverse (v16i8 MQPR:$val1))),
+            (v16i8 (MVE_VBRSR8 (v16i8 MQPR:$val1), (t2MOVi (i32 8))))>;
+
+  def : Pat<(v8i16 (bitreverse (v8i16 MQPR:$val1))),
+            (v8i16 (MVE_VBRSR16 (v8i16 MQPR:$val1), (t2MOVi (i32 16))))>;
+
+  def : Pat<(v4i32 (bitreverse (v4i32 MQPR:$val1))),
+            (v4i32 (MVE_VBRSR32 (v4i32 MQPR:$val1), (t2MOVi (i32 32))))>;
+}
+
 class MVE_VMUL_qr_int<string iname, string suffix,
                       bits<2> size, list<dag> pattern=[]>
   : MVE_qDest_rSrc<iname, suffix, "", pattern> {
diff --git a/llvm/test/CodeGen/Thumb2/mve-bitreverse.ll b/llvm/test/CodeGen/Thumb2/mve-bitreverse.ll
new file mode 100644
index 0000000..99f9506
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-bitreverse.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -verify-machineinstrs -mattr=+mve %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <2 x i64> @brv_2i64_t(<2 x i64> %src) {
+; CHECK-LABEL: brv_2i64_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vrev64.8 q1, q0
+; CHECK-NEXT:    movs r0, #8
+; CHECK-NEXT:    vbrsr.8 q0, q1, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %src)
+  ret <2 x i64> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @brv_4i32_t(<4 x i32> %src) {
+; CHECK-LABEL: brv_4i32_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r0, #32
+; CHECK-NEXT:    vbrsr.32 q0, q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %src)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @brv_8i16_t(<8 x i16> %src) {
+; CHECK-LABEL: brv_8i16_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r0, #16
+; CHECK-NEXT:    vbrsr.16 q0, q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %src)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <16 x i8> @brv_16i8_t(<16 x i8> %src) {
+; CHECK-LABEL: brv_16i8_t:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    movs r0, #8
+; CHECK-NEXT:    vbrsr.8 q0, q0, r0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %src)
+  ret <16 x i8> %0
+}
+
+declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)
+declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
+declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>)