CellSPU:
(a) Improve the extract element code: there's no need to do gymnastics with
rotates into the preferred slot when a single shuffle does the same thing
(see the sketch below).
(b) Rename a couple of SPUISD pseudo-instructions for readability and better
semantic correspondence.
(c) Fix i64 sign/any/zero extension lowering.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@59965 91177308-0d34-0410-b5e6-96231b3b80d8
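For item (a), the idea is easier to see with the control bytes written out: a
single SHUFB-style shuffle whose mask routes the wanted element's bytes into
the preferred slot replaces the rotate-into-place sequence. Below is a minimal,
stand-alone sketch of that mask construction (plain C++, not LLVM code; the
v4i32 layout, the helper name, and the don't-care filler bytes are assumptions
for illustration only):

#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical helper (illustration only): the 16 control bytes a
// SHUFB-style shuffle would use to move element `Elt` of a v4i32 into the
// preferred slot (bytes 0..3 of the 128-bit register).
static std::array<uint8_t, 16> prefSlotShuffleMask(unsigned Elt) {
  std::array<uint8_t, 16> Mask{};
  for (unsigned i = 0; i != 16; ++i)
    Mask[i] = static_cast<uint8_t>(Elt * 4 + (i % 4)); // bytes 0..3 select
                                                       // the element; the
                                                       // rest are don't-care
  return Mask;
}

int main() {
  // Extracting element 2: one shuffle with this constant mask does what the
  // old rotate-by-(Elt*4)-bytes-then-extract sequence did.
  for (uint8_t B : prefSlotShuffleMask(2))
    std::printf("%u ", B);
  std::printf("\n");
  return 0;
}

The mask is a compile-time constant per element index, so no run-time rotate
amount ever needs to be materialized.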
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index d6fb3f8..a252ee3 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -40,8 +40,8 @@
SHUFFLE_MASK, ///< Shuffle mask
CNTB, ///< Count leading ones in bytes
PROMOTE_SCALAR, ///< Promote scalar->vector
- EXTRACT_ELT0, ///< Extract element 0
- EXTRACT_ELT0_CHAINED, ///< Extract element 0, with chain
+ VEC2PREFSLOT, ///< Extract the scalar value from the vector's preferred slot
+ VEC2PREFSLOT_CHAINED, ///< Extract the scalar from the preferred slot, with chain
EXTRACT_I1_ZEXT, ///< Extract element 0 as i1, zero extend
EXTRACT_I1_SEXT, ///< Extract element 0 as i1, sign extend
EXTRACT_I8_ZEXT, ///< Extract element 0 as i8, zero extend
@@ -59,7 +59,6 @@
VEC_ROTR, ///< Vector rotate right
ROTQUAD_RZ_BYTES, ///< Rotate quad right, by bytes, zero fill
ROTQUAD_RZ_BITS, ///< Rotate quad right, by bits, zero fill
- ROTBYTES_RIGHT_S, ///< Vector rotate right, by bytes, sign fill
ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI)
ROTBYTES_LEFT_CHAINED, ///< Rotate bytes (loads -> ROTQBYI), with chain
ROTBYTES_LEFT_BITS, ///< Rotate bytes left by bit shift count