Fix ARM tests to be register allocator independent.

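Instead of expecting specific physical registers, the CHECK lines now match
any register of the right class and use FileCheck pattern variables to tie
operands together. Roughly, the pattern is (a sketch of the idiom used below,
not a new test):

  ; CHECK:      vmov d[[D0:[0-9]+]], d[[D1:[0-9]+]]
  ; CHECK-NEXT: vld2.8 {d[[D1]][1], d[[D0]][1]}

This only requires that the vld2.8 reuses whichever D registers the vmov
produced, so the tests should pass under -regalloc=basic (a second RUN line
is added for that) as well as the default allocator.
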
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@128680 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index 53214fd..d350937 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -1,4 +1,5 @@
 ; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -march=arm -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
 ; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
 
 %struct.int16x8_t = type { <8 x i16> }
@@ -123,9 +124,9 @@
 return2:
 ; CHECK:        %return2
 ; CHECK:        vadd.i32
-; CHECK:        vmov q9, q11
+; CHECK:        vmov {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK-NOT:    vmov
-; CHECK:        vst2.32 {d16, d17, d18, d19}
+; CHECK:        vst2.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}
   %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
   %tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
   %tmp102 = add <4 x i32> %tmp100, %tmp101              ; <<4 x i32>> [#uses=1]
@@ -137,9 +138,10 @@
 define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
 ; CHECK:        t5:
 ; CHECK:        vldmia
-; CHECK:        vmov q9, q8
+; FileCheck can't relate a matched Q register to its constituent D registers (e.g. q8 is d16/d17), so the vmov and vld2.16 operands below can't be tied together.
+; CHECK:        vmov {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK-NOT:    vmov
-; CHECK:        vld2.16 {d16[1], d18[1]}, [r0]
+; CHECK:        vld2.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
 ; CHECK-NOT:    vmov
 ; CHECK:        vadd.i16
   %tmp0 = bitcast i16* %A to i8*                  ; <i8*> [#uses=1]
@@ -154,8 +156,8 @@
 define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
 ; CHECK:        t6:
 ; CHECK:        vldr.64
-; CHECK:        vmov d17, d16
-; CHECK-NEXT:   vld2.8 {d16[1], d17[1]}
+; CHECK:        vmov d[[D0:[0-9]+]], d[[D1:[0-9]+]]
+; CHECK-NEXT:   vld2.8 {d[[D1]][1], d[[D0]][1]}
   %tmp1 = load <8 x i8>* %B                       ; <<8 x i8>> [#uses=2]
   %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
@@ -169,10 +171,10 @@
 ; CHECK:        t7:
 ; CHECK:        vld2.32
 ; CHECK:        vst2.32
-; CHECK:        vld1.32 {d16, d17},
-; CHECK:        vmov q9, q8
+; CHECK:        vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}},
+; CHECK:        vmov q[[Q0:[0-9]+]], q[[Q1:[0-9]+]]
 ; CHECK-NOT:    vmov
-; CHECK:        vuzp.32 q8, q9
+; CHECK:        vuzp.32 q[[Q1]], q[[Q0]]
 ; CHECK:        vst1.32
   %0 = bitcast i32* %iptr to i8*                  ; <i8*> [#uses=2]
   %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
@@ -271,7 +273,7 @@
 entry:
 ; CHECK: t10:
 ; CHECK: vmul.f32 q8, q8, d0[0]
-; CHECK: vmov.i32 q9, #0x3F000000
+; CHECK: vmov.i32 q[[Q0:[0-9]+]], #0x3F000000
 ; CHECK: vadd.f32 q8, q8, q8
   %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
   %1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]