[CodeGen] Print "%vreg0" as "%0" in both MIR and debug output
As part of unifying the debug output format and the MIR format, avoid
printing "vreg" for virtual registers (currently one of the possible
forms in MIR output).
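For example (taken from the coalescer-dce.ll comments updated below), an
instruction that was printed as:
  %vreg2<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg2 GR64:%vreg7
is now printed as:
  %2<def> = COPY %7:sub_32bit<kill>; GR32:%2 GR64:%7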
Basically:
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E "s/%vreg([0-9]+)/%\1/g"
* grep -nr '%vreg' . and fix if needed
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" \) -type f -print0 | xargs -0 sed -i '' -E "s/ vreg([0-9]+)/ %\1/g"
* grep -nr 'vreg[0-9]\+' . and fix if needed
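The second sed/grep pass catches references written without the leading
'%', e.g. in the lds-output-queue.ll comments below:
  vreg0 = MOV %oqap
becomes:
  %0 = MOV %oqap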
Differential Revision: https://reviews.llvm.org/D40420
llvm-svn: 319427
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index dca9d62..aa81c3af 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -43,7 +43,7 @@
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %vreg6, %vreg2; mem:ST4[%addr] GPR:%vreg6,%vreg2 (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -63,7 +63,7 @@
}
; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST6[%addr](align=8) (in function: odd_type)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
define void @odd_type(i42* %addr) {
@@ -72,7 +72,7 @@
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST28[%addr](align=32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {
@@ -91,7 +91,7 @@
}
; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %vreg2<def>(s64) = G_LOAD %vreg0; mem:LD8[%addr] GPR:%vreg2,%vreg0 (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
define i64 @atomic_ops(i64* %addr) {
@@ -132,14 +132,14 @@
}
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(s128) = G_FCONSTANT quad 2
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(s128) = G_FCONSTANT quad 2
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_quad_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_quad_dump:
define fp128 @test_quad_dump() {
ret fp128 0xL00000000000000004000000000000000
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(p0) = G_EXTRACT_VECTOR_ELT %1, %2; (in function: vector_of_pointers_extractelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
@var = global <2 x i16*> zeroinitializer
@@ -156,7 +156,7 @@
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
@@ -172,7 +172,7 @@
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
%struct96 = type { float, float, float }
@@ -182,7 +182,7 @@
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
define void @nonpow2_add_narrowing() {
@@ -193,7 +193,7 @@
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
define void @nonpow2_or_narrowing() {
@@ -204,7 +204,7 @@
ret void
}
-; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
+; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
define void @nonpow2_load_narrowing() {
@@ -213,7 +213,7 @@
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
define void @nonpow2_store_narrowing(i96* %c) {
@@ -223,7 +223,7 @@
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
define void @nonpow2_constant_narrowing() {
@@ -233,8 +233,8 @@
; Currently can't handle vector lengths that aren't an exact multiple of
; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(<7 x s64>) = G_ADD %vreg0, %vreg0; (in function: nonpow2_vector_add_fewerelements
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg2<def>(s64) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg3; (in function: nonpow2_vector_add_fewerelements)
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1<def>(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2<def>(s64) = G_EXTRACT_VECTOR_ELT %1, %3; (in function: nonpow2_vector_add_fewerelements)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
define void @nonpow2_vector_add_fewerelements() {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir b/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
index 9a2f7f7..7d0c9a3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
@@ -9,8 +9,8 @@
...
---
# CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function ***
-# CHECK: instruction: %vreg0<def>(s64) = COPY
-# CHECK: operand 0: %vreg0<def>
+# CHECK: instruction: %0<def>(s64) = COPY
+# CHECK: operand 0: %0<def>
name: test
regBankSelected: true
registers:
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir b/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
index 2149903..a182cf5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
@@ -22,11 +22,11 @@
%0 = COPY %x0
; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
- ; CHECK: instruction: %vreg1<def> = G_ADD
+ ; CHECK: instruction: %1<def> = G_ADD
%1 = G_ADD %0, %0
; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function ***
- ; CHECK: instruction: %vreg2<def>(s64) = COPY
- ; CHECK: operand 0: %vreg2<def>
+ ; CHECK: instruction: %2<def>(s64) = COPY
+ ; CHECK: operand 0: %2<def>
%2(s64) = COPY %x0
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
index 25cf313..0ee32f7 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-stp-cluster.ll
@@ -5,10 +5,10 @@
; CHECK-LABEL: stp_i64_scale:BB#0
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4): STRXui %vreg1, %vreg0, 1
-; CHECK:SU(3): STRXui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRXui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRXui %vreg1, %vreg0, 4
+; CHECK:SU(4): STRXui %1, %0, 1
+; CHECK:SU(3): STRXui %1, %0, 2
+; CHECK:SU(2): STRXui %1, %0, 3
+; CHECK:SU(5): STRXui %1, %0, 4
define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 3
@@ -26,10 +26,10 @@
; CHECK-LABEL: stp_i32_scale:BB#0
; CHECK:Cluster ld/st SU(4) - SU(3)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(4): STRWui %vreg1, %vreg0, 1
-; CHECK:SU(3): STRWui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRWui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRWui %vreg1, %vreg0, 4
+; CHECK:SU(4): STRWui %1, %0, 1
+; CHECK:SU(3): STRWui %1, %0, 2
+; CHECK:SU(2): STRWui %1, %0, 3
+; CHECK:SU(5): STRWui %1, %0, 4
define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) {
entry:
%arrayidx = getelementptr inbounds i32, i32* %P, i32 3
@@ -47,10 +47,10 @@
; CHECK-LABEL:stp_i64_unscale:BB#0 entry
; CHECK:Cluster ld/st SU(5) - SU(2)
; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5): STURXi %vreg1, %vreg0, -32
-; CHECK:SU(2): STURXi %vreg1, %vreg0, -24
-; CHECK:SU(4): STURXi %vreg1, %vreg0, -16
-; CHECK:SU(3): STURXi %vreg1, %vreg0, -8
+; CHECK:SU(5): STURXi %1, %0, -32
+; CHECK:SU(2): STURXi %1, %0, -24
+; CHECK:SU(4): STURXi %1, %0, -16
+; CHECK:SU(3): STURXi %1, %0, -8
define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 -3
@@ -68,10 +68,10 @@
; CHECK-LABEL:stp_i32_unscale:BB#0 entry
; CHECK:Cluster ld/st SU(5) - SU(2)
; CHECK:Cluster ld/st SU(4) - SU(3)
-; CHECK:SU(5): STURWi %vreg1, %vreg0, -16
-; CHECK:SU(2): STURWi %vreg1, %vreg0, -12
-; CHECK:SU(4): STURWi %vreg1, %vreg0, -8
-; CHECK:SU(3): STURWi %vreg1, %vreg0, -4
+; CHECK:SU(5): STURWi %1, %0, -16
+; CHECK:SU(2): STURWi %1, %0, -12
+; CHECK:SU(4): STURWi %1, %0, -8
+; CHECK:SU(3): STURWi %1, %0, -4
define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 {
entry:
%arrayidx = getelementptr inbounds i32, i32* %P, i32 -3
@@ -89,10 +89,10 @@
; CHECK-LABEL:stp_double:BB#0
; CHECK:Cluster ld/st SU(3) - SU(4)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3): STRDui %vreg1, %vreg0, 1
-; CHECK:SU(4): STRDui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRDui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRDui %vreg1, %vreg0, 4
+; CHECK:SU(3): STRDui %1, %0, 1
+; CHECK:SU(4): STRDui %1, %0, 2
+; CHECK:SU(2): STRDui %1, %0, 3
+; CHECK:SU(5): STRDui %1, %0, 4
define void @stp_double(double* nocapture %P, double %v) {
entry:
%arrayidx = getelementptr inbounds double, double* %P, i64 3
@@ -110,10 +110,10 @@
; CHECK-LABEL:stp_float:BB#0
; CHECK:Cluster ld/st SU(3) - SU(4)
; CHECK:Cluster ld/st SU(2) - SU(5)
-; CHECK:SU(3): STRSui %vreg1, %vreg0, 1
-; CHECK:SU(4): STRSui %vreg1, %vreg0, 2
-; CHECK:SU(2): STRSui %vreg1, %vreg0, 3
-; CHECK:SU(5): STRSui %vreg1, %vreg0, 4
+; CHECK:SU(3): STRSui %1, %0, 1
+; CHECK:SU(4): STRSui %1, %0, 2
+; CHECK:SU(2): STRSui %1, %0, 3
+; CHECK:SU(5): STRSui %1, %0, 4
define void @stp_float(float* nocapture %P, float %v) {
entry:
%arrayidx = getelementptr inbounds float, float* %P, i64 3
@@ -130,10 +130,10 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: stp_volatile:BB#0
; CHECK-NOT: Cluster ld/st
-; CHECK:SU(2): STRXui %vreg1, %vreg0, 3; mem:Volatile
-; CHECK:SU(3): STRXui %vreg1, %vreg0, 2; mem:Volatile
-; CHECK:SU(4): STRXui %vreg1, %vreg0, 1; mem:Volatile
-; CHECK:SU(5): STRXui %vreg1, %vreg0, 4; mem:Volatile
+; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile
+; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile
+; CHECK:SU(4): STRXui %1, %0, 1; mem:Volatile
+; CHECK:SU(5): STRXui %1, %0, 4; mem:Volatile
define i64 @stp_volatile(i64* nocapture %P, i64 %v) {
entry:
%arrayidx = getelementptr inbounds i64, i64* %P, i64 3
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
index 05aa969..58f4144 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
@@ -4,9 +4,9 @@
; CHECK-SSA-LABEL: Machine code for function t1
-; CHECK-SSA: [[QUOTREG:%vreg[0-9]+]]<def> = SDIVWr
+; CHECK-SSA: [[QUOTREG:%[0-9]+]]<def> = SDIVWr
; CHECK-SSA-NOT: [[QUOTREG]]<def> =
-; CHECK-SSA: {{%vreg[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
+; CHECK-SSA: {{%[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
; CHECK-SSA-LABEL: Machine code for function t2
diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
index 64e535c..ca50e11 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -6,13 +6,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int:BB#0
; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int:BB#0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
define i32 @ldr_int(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 1
%tmp1 = load i32, i32* %p1, align 2
@@ -26,13 +26,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_sext_int:BB#0
; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_sext_int:BB#0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRSWui
define i64 @ldp_sext_int(i32* %p) nounwind {
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
@@ -47,13 +47,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldur_int:BB#0
; CHECK: Cluster ld/st SU(2) - SU(1)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDURWi
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDURWi
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldur_int:BB#0
; EXYNOS: Cluster ld/st SU(2) - SU(1)
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDURWi
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDURWi
define i32 @ldur_int(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
%tmp1 = load i32, i32* %p1, align 2
@@ -67,13 +67,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_sext_zext_int:BB#0
; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3): %vreg{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(4): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(3): %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3): %vreg{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(4): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(3): %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
%tmp0 = load i64, i64* %q, align 4
%tmp = load i32, i32* %p, align 4
@@ -90,13 +90,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_zext_sext_int:BB#0
; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; CHECK: SU(4): %vreg{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(4): %{{[0-9]+}}<def> = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3): %vreg{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; EXYNOS: SU(4): %vreg{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(4): %{{[0-9]+}}<def> = LDRSWui
define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
%tmp0 = load i64, i64* %q, align 4
%tmp = load i32, i32* %p, align 4
@@ -113,13 +113,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int_volatile:BB#0
; CHECK-NOT: Cluster ld/st
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int_volatile:BB#0
; EXYNOS-NOT: Cluster ld/st
-; EXYNOS: SU(1): %vreg{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2): %vreg{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
define i32 @ldr_int_volatile(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 1
%tmp1 = load volatile i32, i32* %p1, align 2
@@ -133,8 +133,8 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldq_cluster:BB#0
; CHECK: Cluster ld/st SU(1) - SU(3)
-; CHECK: SU(1): %vreg{{[0-9]+}}<def> = LDRQui
-; CHECK: SU(3): %vreg{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(1): %{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(3): %{{[0-9]+}}<def> = LDRQui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldq_cluster:BB#0
; EXYNOS-NOT: Cluster ld/st
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
index ad4feef..b4e07fe 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -6,10 +6,10 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: shiftable
-; CHECK: SU(2): %vreg2<def> = SUBXri %vreg1, 20, 0
+; CHECK: SU(2): %2<def> = SUBXri %1, 20, 0
; CHECK: Successors:
-; CHECK-NEXT: SU(4): Data Latency=1 Reg=%vreg2
-; CHECK-NEXT: SU(3): Data Latency=2 Reg=%vreg2
+; CHECK-NEXT: SU(4): Data Latency=1 Reg=%2
+; CHECK-NEXT: SU(3): Data Latency=2 Reg=%2
; CHECK: ********** INTERVALS **********
define i64 @shiftable(i64 %A, i64 %B) {
%tmp0 = sub i64 %B, 20
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index 1b102e6..b2bfc13 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -5,15 +5,15 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: misched_bug:BB#0 entry
-; CHECK: SU(2): %vreg2<def> = LDRWui %vreg0, 1; mem:LD4[%ptr1_plus1] GPR32:%vreg2 GPR64common:%vreg0
+; CHECK: SU(2): %2<def> = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
; CHECK: Successors:
-; CHECK-NEXT: SU(5): Data Latency=4 Reg=%vreg2
+; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
; CHECK-NEXT: SU(4): Ord Latency=0
-; CHECK: SU(3): STRWui %wzr, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0
+; CHECK: SU(3): STRWui %wzr, %0, 0; mem:ST4[%ptr1] GPR64common:%0
; CHECK: Successors:
; CHECK: SU(4): Ord Latency=0
-; CHECK: SU(4): STRWui %wzr, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1
-; CHECK: SU(5): %w0<def> = COPY %vreg2; GPR32:%vreg2
+; CHECK: SU(4): STRWui %wzr, %1, 0; mem:ST4[%ptr2] GPR64common:%1
+; CHECK: SU(5): %w0<def> = COPY %2; GPR32:%2
; CHECK: ** ScheduleDAGMI::schedule picking next node
define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
entry:
diff --git a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
index 7e76dac..cb42fcc 100644
--- a/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
+++ b/llvm/test/CodeGen/AArch64/tailcall_misched_graph.ll
@@ -26,9 +26,9 @@
; CHECK: fi#-2: {{.*}} fixed, at location [SP+8]
; CHECK: fi#-1: {{.*}} fixed, at location [SP]
-; CHECK: [[VRA:%vreg.*]]<def> = LDRXui <fi#-1>
-; CHECK: [[VRB:%vreg.*]]<def> = LDRXui <fi#-2>
-; CHECK: STRXui %vreg{{.*}}, <fi#-4>
+; CHECK: [[VRA:%.*]]<def> = LDRXui <fi#-1>
+; CHECK: [[VRB:%.*]]<def> = LDRXui <fi#-2>
+; CHECK: STRXui %{{.*}}, <fi#-4>
; CHECK: STRXui [[VRB]], <fi#-3>
; Make sure that there is an dependence edge between fi#-2 and fi#-4.
@@ -40,5 +40,5 @@
; CHECK: SU([[DEPSTOREB:.*]]): Ord Latency=0
; CHECK: SU([[DEPSTOREA:.*]]): Ord Latency=0
-; CHECK: SU([[DEPSTOREA]]): STRXui %vreg{{.*}}, <fi#-4>
-; CHECK: SU([[DEPSTOREB]]): STRXui %vreg{{.*}}, <fi#-3>
+; CHECK: SU([[DEPSTOREA]]): STRXui %{{.*}}, <fi#-4>
+; CHECK: SU([[DEPSTOREB]]): STRXui %{{.*}}, <fi#-3>
diff --git a/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll b/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll
index e5df12a..f8fb12e 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-output-queue.ll
@@ -46,20 +46,20 @@
;
; The instruction selection phase will generate ISA that looks like this:
; %oqap = LDS_READ_RET
-; %vreg0 = MOV %oqap
-; %vreg1 = VTX_READ_32
-; %vreg2 = ADD_INT %vreg1, %vreg0
+; %0 = MOV %oqap
+; %1 = VTX_READ_32
+; %2 = ADD_INT %1, %0
;
; The bottom scheduler will schedule the two ALU instructions first:
;
; UNSCHEDULED:
; %oqap = LDS_READ_RET
-; %vreg1 = VTX_READ_32
+; %1 = VTX_READ_32
;
; SCHEDULED:
;
-; vreg0 = MOV %oqap
-; vreg2 = ADD_INT %vreg1, %vreg2
+; %0 = MOV %oqap
+; %2 = ADD_INT %1, %2
;
; The lack of proper aliasing results in the local memory read (LDS_READ_RET)
; to consider the global memory read (VTX_READ_32) has a chain dependency, so
@@ -69,10 +69,10 @@
; Alu clause:
; %oqap = LDS_READ_RET
; VTX clause:
-; %vreg1 = VTX_READ_32
+; %1 = VTX_READ_32
; Alu clause:
-; vreg0 = MOV %oqap
-; vreg2 = ADD_INT %vreg1, %vreg2
+; %0 = MOV %oqap
+; %2 = ADD_INT %1, %2
;
; This is an illegal program because the oqap def and use know occur in
; different ALU clauses.
diff --git a/llvm/test/CodeGen/AMDGPU/liveness.mir b/llvm/test/CodeGen/AMDGPU/liveness.mir
index 6fd8466..8bb946d 100644
--- a/llvm/test/CodeGen/AMDGPU/liveness.mir
+++ b/llvm/test/CodeGen/AMDGPU/liveness.mir
@@ -6,7 +6,7 @@
# liveranges needed it.
#
# Should see three distinct value numbers:
-# CHECK: %vreg0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
+# CHECK: %0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}}
--- |
define amdgpu_kernel void @test0() { ret void }
...
diff --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
index aceac34..1e9b6b5 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
@@ -2,7 +2,7 @@
# https://bugs.llvm.org/show_bug.cgi?id=33620
---
-# This would assert due to the empty live interval created for %vreg9
+# This would assert due to the empty live interval created for %9
# on the last S_NOP with an undef subreg use.
# CHECK-LABEL: name: expecting_non_empty_interval
diff --git a/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir b/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
index 62816da..2d353b8 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
@@ -2,11 +2,11 @@
# REQUIRES: asserts
# CHECK: INTERVALS
-# CHECK: vreg0
+# CHECK: %0
# CHECK-LABEL: Machine code for function test0:
# CHECK: INTERVALS
-# CHECK: vreg0
+# CHECK: %0
# CHECK-LABEL: Machine code for function test1:
--- |
diff --git a/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll b/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
index 7f40571..5e71eeb 100644
--- a/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
+++ b/llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
@@ -5,11 +5,11 @@
; This test calls shrinkToUses with an early-clobber redefined live range during
; spilling.
;
-; Shrink: %vreg47,1.158257e-02 = [384r,400e:0)[400e,420r:1) 0@384r 1@400e
+; Shrink: %47,1.158257e-02 = [384r,400e:0)[400e,420r:1) 0@384r 1@400e
;
; The early-clobber instruction is an str:
;
-; %vreg12<earlyclobber,def> = t2STR_PRE %vreg6, %vreg12, 32, pred:14, pred:%noreg
+; %12<earlyclobber,def> = t2STR_PRE %6, %12, 32, pred:14, pred:%noreg
;
; This tests that shrinkToUses handles the EC redef correctly.
diff --git a/llvm/test/CodeGen/ARM/Windows/dbzchk.ll b/llvm/test/CodeGen/ARM/Windows/dbzchk.ll
index aea3799..afe30b2 100644
--- a/llvm/test/CodeGen/ARM/Windows/dbzchk.ll
+++ b/llvm/test/CodeGen/ARM/Windows/dbzchk.ll
@@ -119,7 +119,7 @@
; CHECK-CFG-DAG: t2B <BB#3>
; CHECK-CFG-DAG: BB#2
-; CHECK-CFG-DAG: tCMPi8 %vreg{{[0-9]}}, 0
+; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0
; CHECK-CFG-DAG: t2Bcc <BB#5>
; CHECK-CFG-DAG: BB#4
diff --git a/llvm/test/CodeGen/ARM/crash-greedy.ll b/llvm/test/CodeGen/ARM/crash-greedy.ll
index 6a58bb8..31d6079 100644
--- a/llvm/test/CodeGen/ARM/crash-greedy.ll
+++ b/llvm/test/CodeGen/ARM/crash-greedy.ll
@@ -61,7 +61,7 @@
; CHECK: insert_elem
; This test has a sub-register copy with a kill flag:
-; %vreg6:ssub_3<def> = COPY %vreg6:ssub_2<kill>; QPR_VFP2:%vreg6
+; %6:ssub_3<def> = COPY %6:ssub_2<kill>; QPR_VFP2:%6
; The rewriter must do something sensible with that, or the scavenger crashes.
define void @insert_elem() nounwind {
entry:
diff --git a/llvm/test/CodeGen/ARM/misched-copy-arm.ll b/llvm/test/CodeGen/ARM/misched-copy-arm.ll
index 53f8b8d..bc20939 100644
--- a/llvm/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/llvm/test/CodeGen/ARM/misched-copy-arm.ll
@@ -33,9 +33,9 @@
; This case was a crasher in constrainLocalCopy.
; The problem was the t2LDR_PRE defining both the global and local lrg.
; CHECK-LABEL: *** Final schedule for BB#5 ***
-; CHECK: %[[R4:vreg[0-9]+]]<def>, %[[R1:vreg[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
-; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R1]]
-; CHECK: %vreg{{[0-9]+}}<def> = COPY %[[R4]]
+; CHECK: %[[R4:[0-9]+]]<def>, %[[R1:[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
+; CHECK: %{{[0-9]+}}<def> = COPY %[[R1]]
+; CHECK: %{{[0-9]+}}<def> = COPY %[[R4]]
; CHECK-LABEL: MACHINEINSTRS
%struct.rtx_def = type { [4 x i8], [1 x %union.rtunion_def] }
%union.rtunion_def = type { i64 }
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index 32d1e03..9c34e8e 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -37,62 +37,62 @@
}
#
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %vreg2<def> = t2MOVi32imm <ga:@g1>; rGPR:%vreg2
+# CHECK: SU(2): %2<def> = t2MOVi32imm <ga:@g1>; rGPR:%2
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 2
# CHECK_R52: Latency : 2
#
-# CHECK: SU(3): %vreg3<def> = t2LDRi12 %vreg2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%vreg3,%vreg2
+# CHECK: SU(3): %3<def> = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%3,%2
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 3
# CHECK_R52: Latency : 4
#
-# CHECK : SU(6): %vreg6<def> = t2ADDrr %vreg3, %vreg3, pred:14, pred:%noreg, opt:%noreg; rGPR:%vreg6,%vreg3,%vreg3
+# CHECK : SU(6): %6<def> = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg; rGPR:%6,%3,%3
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
-# CHECK: SU(7): %vreg7<def> = t2SDIV %vreg6, %vreg5, pred:14, pred:%noreg; rGPR:%vreg7,%vreg6,%vreg5
+# CHECK: SU(7): %7<def> = t2SDIV %6, %5, pred:14, pred:%noreg; rGPR:%7,%6,%5
# CHECK_A9: Latency : 0
# CHECK_SWIFT: Latency : 14
# CHECK_R52: Latency : 8
-# CHECK: SU(8): t2STRi12 %vreg7, %vreg2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%vreg7,%vreg2
+# CHECK: SU(8): t2STRi12 %7, %2, 0, pred:14, pred:%noreg; mem:ST4[@g1] rGPR:%7,%2
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 0
# CHECK_R52: Latency : 4
#
-# CHECK: SU(9): %vreg8<def> = t2SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; rGPR:%vreg8,%vreg1,%vreg1
+# CHECK: SU(9): %8<def> = t2SMULBB %1, %1, pred:14, pred:%noreg; rGPR:%8,%1,%1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(10): %vreg9<def> = t2SMLABB %vreg0, %vreg0, %vreg8, pred:14, pred:%noreg; rGPR:%vreg9,%vreg0,%vreg0,%vreg8
+# CHECK: SU(10): %9<def> = t2SMLABB %0, %0, %8, pred:14, pred:%noreg; rGPR:%9,%0,%0,%8
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %vreg10<def> = t2UXTH %vreg9, 0, pred:14, pred:%noreg; rGPR:%vreg10,%vreg9
+# CHECK: SU(11): %10<def> = t2UXTH %9, 0, pred:14, pred:%noreg; rGPR:%10,%9
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(12): %vreg11<def> = t2MUL %vreg10, %vreg7, pred:14, pred:%noreg; rGPR:%vreg11,%vreg10,%vreg7
+# CHECK: SU(12): %11<def> = t2MUL %10, %7, pred:14, pred:%noreg; rGPR:%11,%10,%7
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(13): %vreg12<def> = t2MLA %vreg11, %vreg11, %vreg11, pred:14, pred:%noreg; rGPR:%vreg12,%vreg11,%vreg11,%vreg11
+# CHECK: SU(13): %12<def> = t2MLA %11, %11, %11, pred:14, pred:%noreg; rGPR:%12,%11,%11,%11
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(14): %vreg13<def>, %vreg14<def> = t2UMULL %vreg12, %vreg12, pred:14, pred:%noreg; rGPR:%vreg13,%vreg14,%vreg12,%vreg12
+# CHECK: SU(14): %13<def>, %14<def> = t2UMULL %12, %12, pred:14, pred:%noreg; rGPR:%13,%14,%12,%12
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(18): %vreg19<def,tied4>, %vreg20<def,tied5> = t2UMLAL %vreg12, %vreg12, %vreg19<tied0>, %vreg20<tied1>, pred:14, pred:%noreg; rGPR:%vreg19,%vreg20,%vreg12,%vreg12,%vreg20
+# CHECK: SU(18): %19<def,tied4>, %20<def,tied5> = t2UMLAL %12, %12, %19<tied0>, %20<tied1>, pred:14, pred:%noreg; rGPR:%19,%20,%12,%12,%20
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 7
# CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/misched-int-basic.mir b/llvm/test/CodeGen/ARM/misched-int-basic.mir
index d523126..b5d61df 100644
--- a/llvm/test/CodeGen/ARM/misched-int-basic.mir
+++ b/llvm/test/CodeGen/ARM/misched-int-basic.mir
@@ -28,37 +28,37 @@
}
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %vreg2<def> = SMULBB %vreg1, %vreg1, pred:14, pred:%noreg; GPR:%vreg2,%vreg1,%vreg1
+# CHECK: SU(2): %2<def> = SMULBB %1, %1, pred:14, pred:%noreg; GPR:%2,%1,%1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(3): %vreg3<def> = SMLABB %vreg0, %vreg0, %vreg2, pred:14, pred:%noreg; GPRnopc:%vreg3,%vreg0,%vreg0 GPR:%vreg2
+# CHECK: SU(3): %3<def> = SMLABB %0, %0, %2, pred:14, pred:%noreg; GPRnopc:%3,%0,%0 GPR:%2
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(4): %vreg4<def> = UXTH %vreg3, 0, pred:14, pred:%noreg; GPRnopc:%vreg4,%vreg3
+# CHECK: SU(4): %4<def> = UXTH %3, 0, pred:14, pred:%noreg; GPRnopc:%4,%3
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(5): %vreg5<def> = MUL %vreg4, %vreg4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg5,%vreg4,%vreg4
+# CHECK: SU(5): %5<def> = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%5,%4,%4
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(6): %vreg6<def> = MLA %vreg5, %vreg5, %vreg5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg6,%vreg5,%vreg5,%vreg5
+# CHECK: SU(6): %6<def> = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%6,%5,%5,%5
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(7): %vreg7<def>, %vreg8<def> = UMULL %vreg6, %vreg6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%vreg7,%vreg8,%vreg6,%vreg6
+# CHECK: SU(7): %7<def>, %8<def> = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%7,%8,%6,%6
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %vreg13<def,tied4>, %vreg14<def,tied5> = UMLAL %vreg6, %vreg6, %vreg13<tied0>, %vreg14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%vreg13 GPRnopc:%vreg14,%vreg6,%vreg6
+# CHECK: SU(11): %13<def,tied4>, %14<def,tied5> = UMLAL %6, %6, %13<tied0>, %14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%13 GPRnopc:%14,%6,%6
# CHECK_SWIFT: Latency : 7
# CHECK_A9: Latency : 3
# CHECK_R52: Latency : 4
diff --git a/llvm/test/CodeGen/ARM/single-issue-r52.mir b/llvm/test/CodeGen/ARM/single-issue-r52.mir
index 1eba074..8dfc5df 100644
--- a/llvm/test/CodeGen/ARM/single-issue-r52.mir
+++ b/llvm/test/CodeGen/ARM/single-issue-r52.mir
@@ -20,22 +20,22 @@
# CHECK: ********** MI Scheduling **********
# CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %vreg1<def> = VLD4d8Pseudo %vreg0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%vreg1 GPR:%vreg0
+# CHECK: SU(1): %1<def> = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%1 GPR:%0
# CHECK: Latency : 8
# CHECK: Single Issue : true;
-# CHECK: SU(2): %vreg4<def> = VADDv8i8 %vreg1:dsub_0, %vreg1:dsub_1, pred:14, pred:%noreg; DPR:%vreg4 QQPR:%vreg1
+# CHECK: SU(2): %4<def> = VADDv8i8 %1:dsub_0, %1:dsub_1, pred:14, pred:%noreg; DPR:%4 QQPR:%1
# CHECK: Latency : 5
# CHECK: Single Issue : false;
-# CHECK: SU(3): %vreg5<def>, %vreg6<def> = VMOVRRD %vreg4, pred:14, pred:%noreg; GPR:%vreg5,%vreg6 DPR:%vreg4
+# CHECK: SU(3): %5<def>, %6<def> = VMOVRRD %4, pred:14, pred:%noreg; GPR:%5,%6 DPR:%4
# CHECK: Latency : 4
# CHECK: Single Issue : false;
-# TOPDOWN: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# TOPDOWN: Scheduling SU(1) %1<def> = VLD4d8Pseudo
# TOPDOWN: Bump cycle to end group
-# TOPDOWN: Scheduling SU(2) %vreg4<def> = VADDv8i8
+# TOPDOWN: Scheduling SU(2) %4<def> = VADDv8i8
-# BOTTOMUP: Scheduling SU(2) %vreg4<def> = VADDv8i8
-# BOTTOMUP: Scheduling SU(1) %vreg1<def> = VLD4d8Pseudo
+# BOTTOMUP: Scheduling SU(2) %4<def> = VADDv8i8
+# BOTTOMUP: Scheduling SU(1) %1<def> = VLD4d8Pseudo
# BOTTOMUP: Bump cycle to begin group
...
diff --git a/llvm/test/CodeGen/ARM/subreg-remat.ll b/llvm/test/CodeGen/ARM/subreg-remat.ll
index d5abfc0..616ab1e 100644
--- a/llvm/test/CodeGen/ARM/subreg-remat.ll
+++ b/llvm/test/CodeGen/ARM/subreg-remat.ll
@@ -4,10 +4,10 @@
;
; The vector %v2 is built like this:
;
-; %vreg6:ssub_1<def> = ...
-; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6
+; %6:ssub_1<def> = ...
+; %6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
;
-; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized
+; When %6 spills, the VLDRS constant pool load cannot be rematerialized
; since it implicitly reads the ssub_1 sub-register.
;
; CHECK: f1
@@ -31,7 +31,7 @@
; because the bits are undef, we should rematerialize. The vector is now built
; like this:
;
-; %vreg2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %vreg2<imp-def>; mem:LD4[ConstantPool]
+; %2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %2<imp-def>; mem:LD4[ConstantPool]
;
; The extra <imp-def> operand indicates that the instruction fully defines the
; virtual register. It doesn't read the old value.
diff --git a/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll b/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
index e6344df..64faff7 100644
--- a/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
+++ b/llvm/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
@@ -11,10 +11,10 @@
;
; BB#2: derived from LLVM BB %finish
; Predecessors according to CFG: BB#0 BB#1
-; %vreg0<def> = PHI %vreg3, <BB#0>, %vreg5, <BB#1>
-; %vreg7<def> = LDIRdK 2
-; %vreg8<def> = LDIRdK 1
-; CPRdRr %vreg2, %vreg0, %SREG<imp-def>
+; %0<def> = PHI %3, <BB#0>, %5, <BB#1>
+; %7<def> = LDIRdK 2
+; %8<def> = LDIRdK 1
+; CPRdRr %2, %0, %SREG<imp-def>
; BREQk <BB#6>, %SREG<imp-use>
; Successors according to CFG: BB#5(?%) BB#6(?%)
;
diff --git a/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll b/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
index d15b5c9..40584ca 100644
--- a/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
+++ b/llvm/test/CodeGen/Hexagon/circ_ldd_bug.ll
@@ -7,10 +7,10 @@
; UNREACHABLE executed at llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp:615!
; This happened because after unrolling a loop with a ldd_circ instruction we
; would have several TFCR and ldd_circ instruction sequences.
-; %vreg0 (CRRegs) = TFCR %vreg0 (IntRegs)
-; = ldd_circ( , , vreg0)
-; %vreg1 (CRRegs) = TFCR %vreg1 (IntRegs)
-; = ldd_circ( , , vreg0)
+; %0 (CRRegs) = TFCR %0 (IntRegs)
+; = ldd_circ( , , %0)
+; %1 (CRRegs) = TFCR %1 (IntRegs)
+; = ldd_circ( , , %0)
; The scheduler would move the CRRegs to the top of the loop. The allocator
; would try to spill the CRRegs after running out of them. We don't have code to
; spill CRRegs and the above assertion would be triggered.
diff --git a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
index e4c54c4..550e5c5 100644
--- a/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
+++ b/llvm/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
@@ -3,12 +3,12 @@
# Check that coalesced registers are removed from live intervals.
#
-# Check that vreg3 is coalesced into vreg4, and that after coalescing
+# Check that %3 is coalesced into %4, and that after coalescing
# it is no longer in live intervals.
# CHECK-LABEL: After expand-condsets
# CHECK: INTERVALS
-# CHECK-NOT: vreg3
+# CHECK-NOT: %3
# CHECK: MACHINEINSTRS
diff --git a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
index 673a9b4..688a713 100644
--- a/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
+++ b/llvm/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
@@ -3,7 +3,7 @@
; Check that the generated post-increment load has TBAA information.
; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %vreg{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
target triple = "hexagon"
diff --git a/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll b/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
index 242ee53..6279a2e 100644
--- a/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
+++ b/llvm/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
@@ -36,7 +36,7 @@
; CHECK-LABEL: SU({{.*}}): SW_RI{{.*}}, 4,
; CHECK: # preds left : 2
; CHECK: # succs left : 0
-; CHECK-LABEL: SU({{.*}}): %vreg{{.*}}<def> = LDW_RI{{.*}}, 12,
+; CHECK-LABEL: SU({{.*}}): %{{.*}}<def> = LDW_RI{{.*}}, 12,
; CHECK: # preds left : 1
; CHECK: # succs left : 4
; CHECK-LABEL: SU({{.*}}): STH_RI{{.*}}, 10,
diff --git a/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir b/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir
index f812bc7..8e80828 100644
--- a/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir
+++ b/llvm/test/CodeGen/MIR/AArch64/spill-fold.mir
@@ -22,7 +22,7 @@
...
---
# CHECK-LABEL: name: test_subreg_spill_fold2
-# Similar to test_subreg_spill_fold, but with a vreg0 register class not containing %WZR.
+# Similar to test_subreg_spill_fold, but with a %0 register class not containing %WZR.
name: test_subreg_spill_fold2
registers:
- { id: 0, class: gpr64sp }
diff --git a/llvm/test/CodeGen/PowerPC/quadint-return.ll b/llvm/test/CodeGen/PowerPC/quadint-return.ll
index 2cc995f..e968107 100644
--- a/llvm/test/CodeGen/PowerPC/quadint-return.ll
+++ b/llvm/test/CodeGen/PowerPC/quadint-return.ll
@@ -14,6 +14,6 @@
; CHECK: ********** Function: foo
; CHECK: ********** FAST REGISTER ALLOCATION **********
-; CHECK: %x3<def> = COPY %vreg
-; CHECK-NEXT: %x4<def> = COPY %vreg
+; CHECK: %x3<def> = COPY %{{[0-9]+}}
+; CHECK-NEXT: %x4<def> = COPY %{{[0-9]+}}
; CHECK-NEXT: BLR
diff --git a/llvm/test/CodeGen/WebAssembly/dbgvalue.ll b/llvm/test/CodeGen/WebAssembly/dbgvalue.ll
index a90f88a..dc108ff 100644
--- a/llvm/test/CodeGen/WebAssembly/dbgvalue.ll
+++ b/llvm/test/CodeGen/WebAssembly/dbgvalue.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s
; CHECK: BB#0
-; CHECK: #DEBUG_VALUE: usage:self <- %vreg4
+; CHECK: #DEBUG_VALUE: usage:self <- %4
; CHECK: BB#1
; CHECK: DW_TAG_variable
source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll"
diff --git a/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
index 28802fc..6e4fab5 100644
--- a/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
+++ b/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
@@ -2,17 +2,17 @@
;
; Test RegistersDefinedFromSameValue. We have multiple copies of the same vreg:
; while.body85.i:
-; vreg1 = copy vreg2
-; vreg2 = add
+; %1 = copy %2
+; %2 = add
; critical edge from land.lhs.true.i -> if.end117.i:
-; vreg27 = vreg2
+; %27 = %2
; critical edge from land.lhs.true103.i -> if.end117.i:
-; vreg27 = vreg2
+; %27 = %2
; if.then108.i:
-; vreg27 = vreg1
+; %27 = %1
;
; Prior to fixing PR10920 401.bzip miscompile, the coalescer would
-; consider vreg1 and vreg27 to be copies of the same value. It would
+; consider %1 and %27 to be copies of the same value. It would
; then remove one of the critical edge copes, which cannot safely be removed.
; There are two obvious ways the register-allocator could go here, either
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index b941d49..08a4636 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
; the fallback path.
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
define void @test_x86_fp80_dump(x86_fp80* %ptr){
diff --git a/llvm/test/CodeGen/X86/cmovcmov.ll b/llvm/test/CodeGen/X86/cmovcmov.ll
index 50860b8..22c7b3f 100644
--- a/llvm/test/CodeGen/X86/cmovcmov.ll
+++ b/llvm/test/CodeGen/X86/cmovcmov.ll
@@ -227,8 +227,8 @@
; The following test failed because llvm had a bug where a structure like:
;
-; %vreg12<def> = CMOV_GR8 %vreg7, %vreg11 ... (lt)
-; %vreg13<def> = CMOV_GR8 %vreg12, %vreg11 ... (gt)
+; %12<def> = CMOV_GR8 %7, %11 ... (lt)
+; %13<def> = CMOV_GR8 %12, %11 ... (gt)
;
; was lowered to:
;
@@ -239,9 +239,9 @@
; JG_1 BB#9
; BB#8:
; BB#9:
-; vreg12 = phi(vreg7, BB#8, vreg11, BB#0, vreg12, BB#7)
-; vreg13 = COPY vreg12
-; Which was invalid as %vreg12 is not the same value as %vreg13
+; %12 = phi(%7, BB#8, %11, BB#0, %12, BB#7)
+; %13 = COPY %12
+; Which was invalid as %12 is not the same value as %13
; CHECK-LABEL: no_cascade_opt:
; CMOV-DAG: cmpl %edx, %esi
diff --git a/llvm/test/CodeGen/X86/coalescer-dce.ll b/llvm/test/CodeGen/X86/coalescer-dce.ll
index 8d039ac..d97d11c 100644
--- a/llvm/test/CodeGen/X86/coalescer-dce.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dce.ll
@@ -4,28 +4,28 @@
; This test case has a sub-register join followed by a remat:
;
-; 256L %vreg2<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg2 GR64:%vreg7
-; Considering merging %vreg2 with %vreg7:sub_32bit
+; 256L %2<def> = COPY %7:sub_32bit<kill>; GR32:%2 GR64:%7
+; Considering merging %2 with %7:sub_32bit
; Cross-class to GR64.
-; RHS = %vreg2 = [256d,272d:0) 0@256d
-; LHS = %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d
-; updated: 272L %vreg0<def> = COPY %vreg7:sub_32bit<kill>; GR32:%vreg0 GR64:%vreg7
-; Joined. Result = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+; RHS = %2 = [256d,272d:0) 0@256d
+; LHS = %7 = [208d,256d:0)[304L,480L:0) 0@208d
+; updated: 272L %0<def> = COPY %7:sub_32bit<kill>; GR32:%0 GR64:%7
+; Joined. Result = %7 = [208d,272d:0)[304L,480L:0) 0@208d
;
-; 272L %vreg10:sub_32bit<def> = COPY %vreg7:sub_32bit<kill>, %vreg10<imp-def>; GR64:%vreg10,%vreg7
-; Considering merging %vreg7 with %vreg10
-; RHS = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
-; LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
-; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %eflags<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10
-; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
+; 272L %10:sub_32bit<def> = COPY %7:sub_32bit<kill>, %10<imp-def>; GR64:%10,%7
+; Considering merging %7 with %10
+; RHS = %7 = [208d,272d:0)[304L,480L:0) 0@208d
+; LHS = %10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
+; Remat: %10<def> = MOV64r0 %10<imp-def>, %eflags<imp-def,dead>, %10<imp-def>; GR64:%10
+; Shrink: %7 = [208d,272d:0)[304L,480L:0) 0@208d
; live-in at 240L
; live-in at 416L
; live-in at 320L
; live-in at 304L
-; Shrunk: %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d
+; Shrunk: %7 = [208d,256d:0)[304L,480L:0) 0@208d
;
; The COPY at 256L is rewritten as a partial def, and that would artificially
-; extend the live range of %vreg7 to end at 256d. When the joined copy is
+; extend the live range of %7 to end at 256d. When the joined copy is
; removed, -verify-coalescing complains about the dangling kill.
;
; <rdar://problem/9967101>
diff --git a/llvm/test/CodeGen/X86/crash.ll b/llvm/test/CodeGen/X86/crash.ll
index ea648e5..537a09b 100644
--- a/llvm/test/CodeGen/X86/crash.ll
+++ b/llvm/test/CodeGen/X86/crash.ll
@@ -481,10 +481,10 @@
; Check coalescing of IMPLICIT_DEF instructions:
;
-; %vreg1 = IMPLICIT_DEF
-; %vreg2 = MOV32r0
+; %1 = IMPLICIT_DEF
+; %2 = MOV32r0
;
-; When coalescing %vreg1 and %vreg2, the IMPLICIT_DEF instruction should be
+; When coalescing %1 and %2, the IMPLICIT_DEF instruction should be
; erased along with its value number.
;
define void @rdar12474033() nounwind ssp {
diff --git a/llvm/test/CodeGen/X86/handle-move.ll b/llvm/test/CodeGen/X86/handle-move.ll
index 8acfd7f..a152f6d 100644
--- a/llvm/test/CodeGen/X86/handle-move.ll
+++ b/llvm/test/CodeGen/X86/handle-move.ll
@@ -8,8 +8,8 @@
; %edx has a live range into the function and is used by the DIV32r.
;
; Here sinking a kill + dead def:
-; 144B -> 180B: DIV32r %vreg4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
-; %vreg4: [48r,144r:0) 0@48r
+; 144B -> 180B: DIV32r %4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+; %4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r
; --> [0B,16r:0)[128r,180r:2)[180r,180d:1) 0@0B-phi 1@180r 2@128r
@@ -25,8 +25,8 @@
}
; Same as above, but moving a kill + live def:
-; 144B -> 180B: DIV32r %vreg4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
-; %vreg4: [48r,144r:0) 0@48r
+; 144B -> 180B: DIV32r %4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+; %4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r
; --> [0B,16r:0)[128r,180r:2)[180r,184r:1) 0@0B-phi 1@180r 2@128r
@@ -41,13 +41,13 @@
ret i32 %add
}
-; Moving a use below the existing kill (%vreg5):
-; Moving a tied virtual register def (%vreg11):
+; Moving a use below the existing kill (%5):
+; Moving a tied virtual register def (%11):
;
-; 96B -> 120B: %vreg11<def,tied1> = SUB32rr %vreg11<tied0>, %vreg5
-; %vreg11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r
+; 96B -> 120B: %11<def,tied1> = SUB32rr %11<tied0>, %5
+; %11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r
; --> [80r,120r:1)[120r,144r:0) 0@120r 1@80r
-; %vreg5: [16r,112r:0) 0@16r
+; %5: [16r,112r:0) 0@16r
; --> [16r,120r:0) 0@16r
;
define i32 @f3(i32 %a, i32 %b) nounwind uwtable readnone ssp {
diff --git a/llvm/test/CodeGen/X86/invalid-liveness.mir b/llvm/test/CodeGen/X86/invalid-liveness.mir
index 28f8135..47db809 100644
--- a/llvm/test/CodeGen/X86/invalid-liveness.mir
+++ b/llvm/test/CodeGen/X86/invalid-liveness.mir
@@ -5,11 +5,11 @@
define void @func() { ret void }
...
---
-# Liveness calculation should detect that we do not have a definition for vreg0
-# on all paths; In this example a def for vreg0 is missing when jumping from
+# Liveness calculation should detect that we do not have a definition for %0
+# on all paths; In this example a def for %0 is missing when jumping from
# bb.0 to bb.3.
#
-# CHECK: Use of %vreg0 does not have a corresponding definition on every path
+# CHECK: Use of %0 does not have a corresponding definition on every path
# CHECK: ERROR: Use not jointly dominated by defs.
name: func
registers:
diff --git a/llvm/test/CodeGen/X86/liveness-local-regalloc.ll b/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
index 0954f9d..5301485 100644
--- a/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
+++ b/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
@@ -62,7 +62,7 @@
; RAFast would forget to add a super-register <imp-def> when rewriting:
-; %vreg10:sub_32bit<def,read-undef> = COPY %R9D<kill>
+; %10:sub_32bit<def,read-undef> = COPY %R9D<kill>
; This trips up the machine code verifier.
define void @autogen_SD24657(i8*, i32*, i64*, i32, i64, i8) {
BB:
diff --git a/llvm/test/CodeGen/X86/misched-copy.ll b/llvm/test/CodeGen/X86/misched-copy.ll
index 1263bf9..98890c6 100644
--- a/llvm/test/CodeGen/X86/misched-copy.ll
+++ b/llvm/test/CodeGen/X86/misched-copy.ll
@@ -10,7 +10,7 @@
;
; CHECK: *** Final schedule for BB#1 ***
; CHECK: %eax<def> = COPY
-; CHECK-NEXT: MUL32r %vreg{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
+; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK: DIVSSrm
diff --git a/llvm/test/CodeGen/X86/norex-subreg.ll b/llvm/test/CodeGen/X86/norex-subreg.ll
index 9efafe4..66e5ca1 100644
--- a/llvm/test/CodeGen/X86/norex-subreg.ll
+++ b/llvm/test/CodeGen/X86/norex-subreg.ll
@@ -41,10 +41,10 @@
; This test case extracts a sub_8bit_hi sub-register:
;
-; %vreg2<def> = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1
-; TEST8ri %vreg2, 1, %eflags<imp-def>; GR8:%vreg2
+; %2<def> = COPY %1:sub_8bit_hi; GR8:%2 GR64_ABCD:%1
+; TEST8ri %2, 1, %eflags<imp-def>; GR8:%2
;
-; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible.
+; %2 must be constrained to GR8_NOREX, or the COPY could become impossible.
;
; PR11088
diff --git a/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
index 720ed69..7839936 100644
--- a/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
+++ b/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -verify-machineinstrs -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s
; rdar://5571034
-; This requires physreg joining, %vreg13 is live everywhere:
-; 304L %cl<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13
-; 320L %vreg15<def> = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19
-; 336L %vreg15<def> = SAR32rCL %vreg15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%vreg15
+; This requires physreg joining, %13 is live everywhere:
+; 304L %cl<def> = COPY %13:sub_8bit; GR32_ABCD:%13
+; 320L %15<def> = COPY %19; GR32:%15 GR32_NOSP:%19
+; 336L %15<def> = SAR32rCL %15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%15
define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp {
; CHECK-LABEL: foo: