Run the codegen DCE pass for all targets at all optimization levels. Previously
it was only run for x86 with fast-isel. I've found it very effective at
eliminating obvious dead code left behind by formal parameter lowering,
especially when tail call optimization eliminates the need for some of the
loads from fixed frame objects. It also shrinks a number of the tests. A couple
of tests no longer make sense and have been removed.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@95493 91177308-0d34-0410-b5e6-96231b3b80d8
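
As a minimal sketch of the pattern described above (hypothetical IR; @caller
and @callee are illustrative names, not from this commit): when @caller is
lowered, its stack-passed incoming arguments live in fixed frame objects, and
argument lowering emits loads to forward them to the callee. If the tail call
is optimized into a jump, the outgoing stack arguments can reuse the caller's
own incoming slots, leaving those loads dead for the machine DCE pass to
delete.

; Hypothetical sketch, not part of this commit.
declare i32 @callee(i32, i32, i32, i32, i32, i32)

define i32 @caller(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
entry:
  ; On a target that passes only the first few i32 arguments in registers,
  ; %e and %f arrive in fixed stack slots. With guaranteed tail call
  ; optimization the call below can reuse those slots in place, so the
  ; loads emitted to re-pass them become dead machine instructions.
  %r = tail call i32 @callee(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f)
  ret i32 %r
}
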
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index 688b7bc..76332cc 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -23,10 +23,10 @@
 define i32 @f2(i64 %x, i64 %y) {
 ; CHECK: f2
 ; CHECK:      mov     r0, r0, lsr r2
-; CHECK-NEXT: rsb     r3, r2, #32
+; CHECK-NEXT: rsb     r12, r2, #32
 ; CHECK-NEXT: sub     r2, r2, #32
 ; CHECK-NEXT: cmp     r2, #0
-; CHECK-NEXT: orr     r0, r0, r1, lsl r3
+; CHECK-NEXT: orr     r0, r0, r1, lsl r12
 ; CHECK-NEXT: movge   r0, r1, asr r2
 	%a = ashr i64 %x, %y
 	%b = trunc i64 %a to i32
@@ -36,10 +36,10 @@
 define i32 @f3(i64 %x, i64 %y) {
 ; CHECK: f3
 ; CHECK:      mov     r0, r0, lsr r2
-; CHECK-NEXT: rsb     r3, r2, #32
+; CHECK-NEXT: rsb     r12, r2, #32
 ; CHECK-NEXT: sub     r2, r2, #32
 ; CHECK-NEXT: cmp     r2, #0
-; CHECK-NEXT: orr     r0, r0, r1, lsl r3
+; CHECK-NEXT: orr     r0, r0, r1, lsl r12
 ; CHECK-NEXT: movge   r0, r1, lsr r2
 	%a = lshr i64 %x, %y
 	%b = trunc i64 %a to i32