RegisterScavenging: Followup to r305625

This improves and cleans up the recently introduced
scavengeRegisterBackwards() functionality:

- Rewrite the findSurvivorBackwards algorithm to use the existing
  LiveRegUnit::accumulateBackward() code. This also avoids the separate
  Available and Candidates bitsets and needs just one LiveRegUnit
  instance (= 1 bitset).
- Pick registers in allocation order instead of register number order
  (sketched below).
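
A minimal, self-contained sketch of both changes, using a toy
std::bitset in place of LiveRegUnit and hypothetical names throughout
(this is not the in-tree implementation): a backward scan folds every
register touched after the scavenging point into a single accumulator
set, and the survivor is then chosen by walking the allocation order
rather than ascending register numbers.

  #include <array>
  #include <bitset>
  #include <cstdio>
  #include <vector>

  // Toy machine: 8 registers; each "instruction" records which
  // registers it reads or writes.
  struct Inst { std::bitset<8> UsesOrDefs; };

  // Scan backwards from instruction `From`, accumulating every touched
  // register into one bitset (the single-LiveRegUnit idea, replacing
  // the separate Available/Candidates sets), then return the first
  // untouched register in allocation order.
  int findSurvivorBackwards(const std::vector<Inst> &MBB, size_t From,
                            const std::array<int, 8> &AllocOrder) {
    std::bitset<8> Used; // the one accumulator bitset
    for (size_t I = From + 1; I-- > 0;)
      Used |= MBB[I].UsesOrDefs; // accumulateBackward() analogue
    for (int Reg : AllocOrder)
      if (!Used.test(Reg))
        return Reg; // first survivor in allocation order
    return -1; // no survivor; the real scavenger would spill
  }

  int main() {
    std::vector<Inst> MBB = {{0b00000011}, {0b00000100}, {0b00001000}};
    // Allocation order preferring high register numbers first.
    std::array<int, 8> AllocOrder = {6, 7, 5, 4, 3, 2, 1, 0};
    std::printf("survivor: r%d\n", findSurvivorBackwards(MBB, 2, AllocOrder));
  }

With the high-numbers-first order above the scan reports r6, mirroring
the test updates below, where the scavenger now picks s6 instead of
vcc_hi.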

llvm-svn: 305817
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index df4fe09..b49d8e2 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -6,9 +6,9 @@
 ; Materialize into a mov. Make sure there isn't an unnecessary copy.
 ; GCN-LABEL: {{^}}func_mov_fi_i32:
 ; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN: s_sub_u32 vcc_hi, s5, s4
-; GCN-NEXT: v_lshr_b32_e64 [[SCALED:v[0-9]+]], vcc_hi, 6
-; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, [[SCALED]]
+; GCN: s_sub_u32 s6, s5, s4
+; GCN-NEXT: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s6, 6
+; GCN-NEXT: v_add_i32_e64 v0, s[6:7], 4, [[SCALED]]
 ; GCN-NOT: v_mov
 ; GCN: ds_write_b32 v0, v0
 define void @func_mov_fi_i32() #0 {
@@ -22,9 +22,9 @@
 
 ; GCN-LABEL: {{^}}func_add_constant_to_fi_i32:
 ; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN: s_sub_u32 vcc_hi, s5, s4
-; GCN-NEXT: v_lshr_b32_e64 [[SCALED:v[0-9]+]], vcc_hi, 6
-; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, [[SCALED]]
+; GCN: s_sub_u32 s6, s5, s4
+; GCN-NEXT: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s6, 6
+; GCN-NEXT: v_add_i32_e64 v0, s[6:7], 4, [[SCALED]]
 ; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, v0
 ; GCN-NOT: v_mov
 ; GCN: ds_write_b32 v0, v0
@@ -39,9 +39,9 @@
 ; into.
 
 ; GCN-LABEL: {{^}}func_other_fi_user_i32:
-; GCN: s_sub_u32 vcc_hi, s5, s4
-; GCN-NEXT: v_lshr_b32_e64 [[SCALED:v[0-9]+]], vcc_hi, 6
-; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, [[SCALED]]
+; GCN: s_sub_u32 s6, s5, s4
+; GCN-NEXT: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s6, 6
+; GCN-NEXT: v_add_i32_e64 v0, s[6:7], 4, [[SCALED]]
 ; GCN-NEXT: v_mul_lo_i32 v0, v0, 9
 ; GCN-NOT: v_mov
 ; GCN: ds_write_b32 v0, v0
@@ -71,8 +71,8 @@
 
 ; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr:
 ; GCN: s_waitcnt
-; GCN-NEXT: s_sub_u32 vcc_hi, s5, s4
-; GCN-NEXT: v_lshr_b32_e64 v0, vcc_hi, 6
+; GCN-NEXT: s_sub_u32 s6, s5, s4
+; GCN-NEXT: v_lshr_b32_e64 v0, s6, 6
 ; GCN-NEXT: v_add_i32_e32 v0, vcc, 4, v0
 ; GCN-NOT: v_mov
 ; GCN: ds_write_b32 v0, v0
@@ -99,8 +99,8 @@
 }
 
 ; GCN-LABEL: {{^}}void_func_byval_struct_i8_i32_ptr_nonentry_block:
-; GCN: s_sub_u32 vcc_hi, s5, s4
-; GCN: v_lshr_b32_e64 v1, vcc_hi, 6
+; GCN: s_sub_u32 s6, s5, s4
+; GCN: v_lshr_b32_e64 v1, s6, 6
 ; GCN: s_and_saveexec_b64
 
 ; GCN: v_add_i32_e32 v0, vcc, 4, v1
@@ -123,10 +123,10 @@
 
 ; Added offset can't be used with VOP3 add
 ; GCN-LABEL: {{^}}func_other_fi_user_non_inline_imm_offset_i32:
-; GCN: s_sub_u32 vcc_hi, s5, s4
-; GCN-DAG: v_lshr_b32_e64 [[SCALED:v[0-9]+]], vcc_hi, 6
-; GCN-DAG: s_movk_i32 vcc_hi, 0x204
-; GCN: v_add_i32_e32 v0, vcc, vcc_hi, [[SCALED]]
+; GCN: s_sub_u32 s6, s5, s4
+; GCN-DAG: v_lshr_b32_e64 [[SCALED:v[0-9]+]], s6, 6
+; GCN-DAG: s_movk_i32 s6, 0x204
+; GCN: v_add_i32_e64 v0, s[6:7], s6, [[SCALED]]
 ; GCN: v_mul_lo_i32 v0, v0, 9
 ; GCN: ds_write_b32 v0, v0
 define void @func_other_fi_user_non_inline_imm_offset_i32() #0 {