[SelectionDAG] fix bug in translating funnel shift with non-power-of-2 type

The bug is visible in the constant-folded x86 tests. We can't use the
negated shift amount when the type's bitwidth is not a power of 2:
https://rise4fun.com/Alive/US1r
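
As an illustration (the type and values here are chosen for
exposition, not taken from the tests): for i7, integer arithmetic
wraps modulo 2^7 = 128, but the shift amount must be reduced modulo
the bitwidth 7. With a shift amount of 3, the correct inverse amount
is (7 - 3) % 7 = 4, while the negated amount gives
(128 - 3) % 7 = 125 % 7 = 6. The two agree exactly when the bitwidth
divides 2^bitwidth, i.e., when the bitwidth is a power of 2.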

...so in that case, use the regular lowering that includes a select
to guard against a shift-by-bitwidth. That path is also improved here
by computing the modulo shift amount only once.
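
For reference, here is a sketch of that guarded expansion, written as
equivalent IR for an illustrative non-power-of-2 type (the names are
for exposition only, not the literal DAG nodes that get created):

  define i7 @fshl_i7(i7 %x, i7 %y, i7 %z) {
    %zmod = urem i7 %z, 7               ; modulo amount, computed once
    %inv = sub i7 7, %zmod              ; inverse shift amount
    %hi = shl i7 %x, %zmod
    %lo = lshr i7 %y, %inv              ; undefined if %zmod == 0 (%inv == 7)
    %or = or i7 %hi, %lo
    %eqz = icmp eq i7 %zmod, 0
    %r = select i1 %eqz, i7 %x, i7 %or  ; guard against shift-by-bitwidth
    ret i7 %r
  }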

Also, improve the rotate (power-of-2 size) lowering to use a negate
rather than a subtract from the bitwidth. This improves the codegen
whether we have a rotate instruction or not (although we can still
see that we're not matching to a legal rotate in all cases).
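
For a power-of-2 size such as i32, the expansion reduces to masking,
where the negate is exact and no select is needed (again a sketch in
IR, not the literal DAG nodes):

  define i32 @rotl_i32_expanded(i32 %x, i32 %z) {
    %amt = and i32 %z, 31     ; z mod 32 via mask
    %neg = sub i32 0, %z      ; negate instead of (32 - z)
    %inv = and i32 %neg, 31   ; (-z) mod 32 == (32 - z) mod 32 here
    %hi = shl i32 %x, %amt
    %lo = lshr i32 %x, %inv
    %r = or i32 %hi, %lo
    ret i32 %r
  }

Both masked amounts are less than 32, so the shifts are always
defined, and when %amt is 0 the result is x | x == x.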

llvm-svn: 338592
diff --git a/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll b/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll
index af612ea..9062c86 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll
@@ -40,8 +40,7 @@
 define i16 @rotl_i16(i16 %x, i16 %z) {
 ; CHECK-LABEL: rotl_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    orr w10, wzr, #0x10
-; CHECK-NEXT:    sub w10, w10, w1
+; CHECK-NEXT:    neg w10, w1
 ; CHECK-NEXT:    and w8, w0, #0xffff
 ; CHECK-NEXT:    and w9, w1, #0xf
 ; CHECK-NEXT:    and w10, w10, #0xf
@@ -56,8 +55,7 @@
 define i32 @rotl_i32(i32 %x, i32 %z) {
 ; CHECK-LABEL: rotl_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    orr w8, wzr, #0x20
-; CHECK-NEXT:    sub w8, w8, w1
+; CHECK-NEXT:    neg w8, w1
 ; CHECK-NEXT:    ror w0, w0, w8
 ; CHECK-NEXT:    ret
   %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
@@ -67,8 +65,7 @@
 define i64 @rotl_i64(i64 %x, i64 %z) {
 ; CHECK-LABEL: rotl_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    orr w9, wzr, #0x40
-; CHECK-NEXT:    sub w9, w9, w1
+; CHECK-NEXT:    neg w9, w1
 ; CHECK-NEXT:    lsl x8, x0, x1
 ; CHECK-NEXT:    lsr x9, x0, x9
 ; CHECK-NEXT:    orr x0, x8, x9
@@ -83,14 +80,13 @@
 ; CHECK-LABEL: rotl_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v2.4s, #31
-; CHECK-NEXT:    movi v3.4s, #32
-; CHECK-NEXT:    and v4.16b, v1.16b, v2.16b
-; CHECK-NEXT:    sub v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    neg v3.4s, v1.4s
 ; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    neg v1.4s, v1.4s
-; CHECK-NEXT:    ushl v3.4s, v0.4s, v4.4s
-; CHECK-NEXT:    ushl v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    orr v0.16b, v3.16b, v0.16b
+; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    neg v2.4s, v2.4s
+; CHECK-NEXT:    ushl v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ushl v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
 ; CHECK-NEXT:    ret
   %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f
@@ -140,10 +136,9 @@
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
 ; CHECK-NEXT:    and w9, w1, #0xf
-; CHECK-NEXT:    orr w10, wzr, #0x10
+; CHECK-NEXT:    neg w10, w1
 ; CHECK-NEXT:    lsr w8, w8, w9
-; CHECK-NEXT:    sub w9, w10, w1
-; CHECK-NEXT:    and w9, w9, #0xf
+; CHECK-NEXT:    and w9, w10, #0xf
 ; CHECK-NEXT:    lsl w9, w0, w9
 ; CHECK-NEXT:    orr w0, w9, w8
 ; CHECK-NEXT:    ret
@@ -175,14 +170,13 @@
 ; CHECK-LABEL: rotr_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    movi v2.4s, #31
-; CHECK-NEXT:    movi v3.4s, #32
-; CHECK-NEXT:    and v4.16b, v1.16b, v2.16b
-; CHECK-NEXT:    sub v1.4s, v3.4s, v1.4s
-; CHECK-NEXT:    neg v3.4s, v4.4s
+; CHECK-NEXT:    neg v3.4s, v1.4s
 ; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
-; CHECK-NEXT:    ushl v2.4s, v0.4s, v3.4s
-; CHECK-NEXT:    ushl v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    neg v1.4s, v1.4s
+; CHECK-NEXT:    ushl v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ushl v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
   ret <4 x i32> %f