[CodeGen] Use MachineOperand::print in the MIRPrinter for MO_Register.

Work towards the unification of MIR and debug output by refactoring the
interfaces.

For MachineOperand::print, keep a simple version that is easy to call
from `dump()`, and a more complex one that is called from both the
MIRPrinter and MachineInstr::print.
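
A rough idea of the resulting shape, as a standalone C++ illustration (the
type, names and parameters below are placeholders, not the signatures added
by D40836):

  // Illustrative only: mirrors the "simple overload forwards to the
  // detailed one" layout described above.
  #include <iostream>
  #include <ostream>

  struct TargetRegisterInfo; // stand-in for the real LLVM type

  struct MachineOperandSketch {
    unsigned Reg;
    bool IsDef, IsKill;

    // Simple form: needs no extra context, so dump() can always call it.
    void print(std::ostream &OS) const { print(OS, /*TRI=*/nullptr); }

    // Detailed form: takes whatever context the MIRPrinter and
    // MachineInstr::print can supply (target info, flag handling, ...).
    void print(std::ostream &OS, const TargetRegisterInfo *TRI) const {
      (void)TRI; // a real printer would use TRI to resolve register names
      if (IsDef)
        OS << "def ";
      if (IsKill)
        OS << "killed ";
      OS << '%' << Reg;
    }

    void dump() const {
      print(std::cerr);
      std::cerr << '\n';
    }
  };

  int main() {
    MachineOperandSketch MO{5, false, true};
    MO.print(std::cout); // prints "killed %5"
    std::cout << '\n';
  }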

Add extra checks inside MachineOperand for detached operands (operands
with getParent() == nullptr).
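
The important part of that check is that nothing parent-dependent gets
dereferenced while printing; a minimal standalone sketch of the guard
(hypothetical types, not the patch's code):

  // Illustrative only: fall back to a bare, context-free form whenever the
  // operand has no parent instruction to walk up from.
  #include <iostream>

  struct MachineInstrSketch; // stand-in for the parent instruction type

  struct OperandSketch {
    unsigned Reg;
    const MachineInstrSketch *Parent; // nullptr when the operand is detached

    const MachineInstrSketch *getParent() const { return Parent; }

    void print(std::ostream &OS) const {
      OS << '%' << Reg;
      // Register class names, tied-operand info, etc. may only be looked up
      // through a non-null parent.
      if (getParent() == nullptr)
        return; // detached operand: stop at the context-free form
      // ... parent-dependent details would be printed here ...
    }
  };

  int main() {
    OperandSketch Detached{7, nullptr};
    Detached.print(std::cout); // safe without a parent: prints "%7"
    std::cout << '\n';
  }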

https://reviews.llvm.org/D40836

* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/kill: ([^ ]+) ([^ ]+)<def> ([^ ]+)/kill: \1 def \2 \3/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/kill: ([^ ]+) ([^ ]+) ([^ ]+)<def>/kill: \1 \2 def \3/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/kill: def ([^ ]+) ([^ ]+) ([^ ]+)<def>/kill: def \1 \2 def \3/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/<def>//g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<kill>/killed \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-use,kill>/implicit killed \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<dead>/dead \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<def[ ]*,[ ]*dead>/dead \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-def[ ]*,[ ]*dead>/implicit-def dead \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-def>/implicit-def \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-use>/implicit \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<internal>/internal \1/g'
* find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<undef>/undef \1/g'
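
Taken together these rewrites turn the trailing <...> operand annotations
into prefix keywords. For example, the AArch64LoadStoreOptimizer comment
updated below goes from

  %w1<def> = KILL %w1, %x1<imp-def>
  %x1<def> = SBFMXri %x1<kill>, 0, 31

to

  %w1 = KILL %w1, implicit-def %x1
  %x1 = SBFMXri killed %x1, 0, 31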

llvm-svn: 320022
diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 2d510a4..1135f0f 100644
--- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -161,9 +161,9 @@
 /// A Chain is a sequence of instructions that are linked together by
 /// an accumulation operand. For example:
 ///
-///   fmul d0<def>, ?
-///   fmla d1<def>, ?, ?, d0<kill>
-///   fmla d2<def>, ?, ?, d1<kill>
+///   fmul def d0, ?
+///   fmla def d1, ?, ?, killed d0
+///   fmla def d2, ?, ?, killed d1
 ///
 /// There may be other instructions interleaved in the sequence that
 /// do not belong to the chain. These other instructions must not use
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index dacb193..b88beda 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2801,7 +2801,7 @@
     LiveIntervals *LIS) const {
   // This is a bit of a hack. Consider this instruction:
   //
-  //   %0<def> = COPY %sp; GPR64all:%0
+  //   %0 = COPY %sp; GPR64all:%0
   //
   // We explicitly chose GPR64all for the virtual register so such a copy might
   // be eliminated by RegisterCoalescer. However, that may not be possible, and
@@ -2830,7 +2830,7 @@
   // Handle the case where a copy is being spilled or filled but the source
   // and destination register class don't match.  For example:
   //
-  //   %0<def> = COPY %xzr; GPR64common:%0
+  //   %0 = COPY %xzr; GPR64common:%0
   //
   // In this case we can still safely fold away the COPY and generate the
   // following spill code:
@@ -2840,7 +2840,7 @@
   // This also eliminates spilled cross register class COPYs (e.g. between x and
   // d regs) of the same size.  For example:
   //
-  //   %0<def> = COPY %1; GPR64:%0, FPR64:%1
+  //   %0 = COPY %1; GPR64:%0, FPR64:%1
   //
   // will be filled as
   //
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index de91224..c406228 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -830,8 +830,8 @@
   if (SExtIdx != -1) {
     // Generate the sign extension for the proper result of the ldp.
     // I.e., with X1, that would be:
-    // %w1<def> = KILL %w1, %x1<imp-def>
-    // %x1<def> = SBFMXri %x1<kill>, 0, 31
+    // %w1 = KILL %w1, implicit-def %x1
+    // %x1 = SBFMXri killed %x1, 0, 31
     MachineOperand &DstMO = MIB->getOperand(SExtIdx);
     // Right now, DstMO has the extended register, since it comes from an
     // extended opcode.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 5ff82c5..2091823 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -1450,8 +1450,7 @@
                                        unsigned *ReplaceReg) {
   DEBUG(dbgs() << "Shrink PHI: ");
   DEBUG(PHI.dump());
-  DEBUG(dbgs() << " to " << printReg(getPHIDestReg(PHI), TRI)
-               << "<def> = PHI(");
+  DEBUG(dbgs() << " to " << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
 
   bool Replaced = false;
   unsigned NumInputs = getPHINumInputs(PHI);
@@ -1507,8 +1506,7 @@
     SmallVector<unsigned, 2> &PHIRegionIndices) {
   DEBUG(dbgs() << "Replace PHI: ");
   DEBUG(PHI.dump());
-  DEBUG(dbgs() << " with " << printReg(getPHIDestReg(PHI), TRI)
-               << "<def> = PHI(");
+  DEBUG(dbgs() << " with " << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
 
   bool HasExternalEdge = false;
   unsigned NumInputs = getPHINumInputs(PHI);
@@ -1566,7 +1564,7 @@
     DEBUG(dbgs() << " register " << printReg(CombinedSourceReg, TRI) << "\n");
     PHI.eraseFromParent();
   } else {
-    DEBUG(dbgs() << printReg(getPHIDestReg(PHI), TRI) << "<def> = PHI(");
+    DEBUG(dbgs() << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
     MachineBasicBlock *MBB = PHI.getParent();
     MachineInstrBuilder MIB =
         BuildMI(*MBB, PHI, PHI.getDebugLoc(), TII->get(TargetOpcode::PHI),
@@ -1751,7 +1749,7 @@
     return;
   }
   DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB)
-               << "): " << printReg(DestRegister, TRI) << "<def> = PHI("
+               << "): " << printReg(DestRegister, TRI) << " = PHI("
                << printReg(IfSourceRegister, TRI) << ", "
                << printMBBReference(*IfBB) << printReg(CodeSourceRegister, TRI)
                << ", " << printMBBReference(*CodeBB) << ")\n");
@@ -2147,7 +2145,7 @@
     const DebugLoc &DL = Entry->findDebugLoc(Entry->begin());
     MachineInstrBuilder MIB = BuildMI(*Entry, Entry->instr_begin(), DL,
                                       TII->get(TargetOpcode::PHI), DestReg);
-    DEBUG(dbgs() << "Entry PHI " << printReg(DestReg, TRI) << "<def> = PHI(");
+    DEBUG(dbgs() << "Entry PHI " << printReg(DestReg, TRI) << " = PHI(");
 
     unsigned CurrentBackedgeReg = 0;
 
@@ -2172,7 +2170,7 @@
           BackedgePHI.addMBB((*SRI).second);
           CurrentBackedgeReg = NewBackedgeReg;
           DEBUG(dbgs() << "Inserting backedge PHI: "
-                       << printReg(NewBackedgeReg, TRI) << "<def> = PHI("
+                       << printReg(NewBackedgeReg, TRI) << " = PHI("
                        << printReg(CurrentBackedgeReg, TRI) << ", "
                        << printMBBReference(*getPHIPred(*PHIDefInstr, 0))
                        << ", "
@@ -2441,8 +2439,7 @@
   MachineInstrBuilder MIB =
       BuildMI(*EntrySucc, EntrySucc->instr_begin(), PHI.getDebugLoc(),
               TII->get(TargetOpcode::PHI), NewDestReg);
-  DEBUG(dbgs() << "Split Entry PHI " << printReg(NewDestReg, TRI)
-               << "<def> = PHI(");
+  DEBUG(dbgs() << "Split Entry PHI " << printReg(NewDestReg, TRI) << " = PHI(");
   MIB.addReg(PHISource);
   MIB.addMBB(Entry);
   DEBUG(dbgs() << printReg(PHISource, TRI) << ", "
diff --git a/llvm/lib/Target/AMDGPU/CaymanInstructions.td b/llvm/lib/Target/AMDGPU/CaymanInstructions.td
index 429d28e..ae40c63 100644
--- a/llvm/lib/Target/AMDGPU/CaymanInstructions.td
+++ b/llvm/lib/Target/AMDGPU/CaymanInstructions.td
@@ -144,8 +144,8 @@
   // to be caused by ALU instructions in the next instruction group that wrote
   // to the $src_gpr registers of the VTX_READ.
   // e.g.
-  // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
-  // %t2_x<def> = MOV %zero
+  // %t3_x = VTX_READ_PARAM_32_eg killed %t2_x, 24
+  // %t2_x = MOV %zero
   //Adding this constraint prevents this from happening.
   let Constraints = "$src_gpr.ptr = $dst_gpr";
 }
diff --git a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
index c25980e..5e26f97 100644
--- a/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
+++ b/llvm/lib/Target/AMDGPU/EvergreenInstructions.td
@@ -212,8 +212,8 @@
   // to be caused by ALU instructions in the next instruction group that wrote
   // to the $src_gpr registers of the VTX_READ.
   // e.g.
-  // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
-  // %t2_x<def> = MOV %zero
+  // %t3_x = VTX_READ_PARAM_32_eg killed %t2_x, 24
+  // %t2_x = MOV %zero
   //Adding this constraint prevents this from happening.
   let Constraints = "$src_gpr.ptr = $dst_gpr";
 }
diff --git a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 1bfa837..95bc7ca 100644
--- a/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/llvm/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -12,15 +12,15 @@
 /// common data and/or have enough undef subreg using swizzle abilities.
 ///
 /// For instance let's consider the following pseudo code :
-/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
+/// %5 = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
 /// ...
-/// %7<def> = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
+/// %7 = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
 /// (swizzable Inst) %7, SwizzleMask : sub0, sub1, sub2, sub3
 ///
 /// is turned into :
-/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
+/// %5 = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
 /// ...
-/// %7<def> = INSERT_SUBREG %4, sub3
+/// %7 = INSERT_SUBREG %4, sub3
 /// (swizzable Inst) %7, SwizzleMask : sub0, sub2, sub1, sub3
 ///
 /// This allow regalloc to reduce register pressure for vector registers and
diff --git a/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp b/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp
index 47db898..3f657b9 100644
--- a/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFixWWMLiveness.cpp
@@ -17,8 +17,8 @@
 /// %vgpr0 = V_MOV_B32_e32 0.0
 /// if (...) {
 ///   %vgpr1 = ...
-///   %vgpr2 = WWM %vgpr1<kill>
-///   ... = %vgpr2<kill>
+///   %vgpr2 = WWM killed %vgpr1
+///   ... = killed %vgpr2
 ///   %vgpr0 = V_MOV_B32_e32 1.0
 /// }
 /// ... = %vgpr0
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 5215740..e806ebb 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -971,9 +971,9 @@
       // Prevent folding operands backwards in the function. For example,
       // the COPY opcode must not be replaced by 1 in this example:
       //
-      //    %3<def> = COPY %vgpr0; VGPR_32:%3
+      //    %3 = COPY %vgpr0; VGPR_32:%3
       //    ...
-      //    %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
+      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
       MachineOperand &Dst = MI.getOperand(0);
       if (Dst.isReg() &&
           !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 5fda45f..7b4652e 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -480,7 +480,7 @@
   }
 
   // If this is not immediate then it can be copy of immediate value, e.g.:
-  // %1<def> = S_MOV_B32 255;
+  // %1 = S_MOV_B32 255;
   if (Op.isReg()) {
     for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
       if (!isSameReg(Op, Def))
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index f9505be..b2f4a52 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1447,7 +1447,7 @@
   DEBUG(dbgs() << "widening:    " << MI);
   MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
 
-  // Get rid of the old <imp-def> of DstRegD.  Leave it if it defines a Q-reg
+  // Get rid of the old implicit-def of DstRegD.  Leave it if it defines a Q-reg
   // or some other super-register.
   int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
   if (ImpDefIdx != -1)
@@ -1650,7 +1650,7 @@
     }
 
     for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
-      // %12<def> = PICLDR %11, 0, pred:14, pred:%noreg
+      // %12 = PICLDR %11, 0, pred:14, pred:%noreg
       const MachineOperand &MO0 = MI0.getOperand(i);
       const MachineOperand &MO1 = MI1.getOperand(i);
       if (!MO0.isIdenticalTo(MO1))
@@ -4668,7 +4668,7 @@
       NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
                        DDst);
 
-      // On the first instruction, both DSrc and DDst may be <undef> if present.
+      // On the first instruction, both DSrc and DDst may be undef if present.
       // Specifically when the original instruction didn't have them as an
       // <imp-use>.
       unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
@@ -4688,7 +4688,7 @@
       MIB.addReg(DDst, RegState::Define);
 
       // On the second instruction, DDst has definitely been defined above, so
-      // it is not <undef>. DSrc, if present, can be <undef> as above.
+      // it is not undef. DSrc, if present, can be undef as above.
       CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
       CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
       MIB.addReg(CurReg, getUndefRegState(CurUndef));
@@ -4771,7 +4771,7 @@
 
   // We must be able to clobber the whole D-reg.
   if (TargetRegisterInfo::isVirtualRegister(Reg)) {
-    // Virtual register must be a foo:ssub_0<def,undef> operand.
+    // Virtual register must be a def undef foo:ssub_0 operand.
     if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
       return 0;
   } else if (ARM::SPRRegClass.contains(Reg)) {
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index bf67bbd..eab84ae 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -922,7 +922,7 @@
   // .Lloadcmp:
   //     ldrexd rDestLo, rDestHi, [rAddr]
   //     cmp rDestLo, rDesiredLo
-  //     sbcs rTempReg<dead>, rDestHi, rDesiredHi
+  //     sbcs dead rTempReg, rDestHi, rDesiredHi
   //     bne .Ldone
   unsigned LDREXD = IsThumb ? ARM::t2LDREXD : ARM::LDREXD;
   MachineInstrBuilder MIB;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 1726d6b..fe9562a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -9168,7 +9168,7 @@
   // operand is still set to noreg. If needed, set the optional operand's
   // register to CPSR, and remove the redundant implicit def.
   //
-  // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
+  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).
 
   // Rename pseudo opcodes.
   unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 2b63e0c..c61e72e 100644
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1697,7 +1697,7 @@
       if (OddReg == EvenReg && EvenDeadKill) {
         // If the two source operands are the same, the kill marker is
         // probably on the first one. e.g.
-        // t2STRDi8 %r5<kill>, %r5, %r9<kill>, 0, 14, %reg0
+        // t2STRDi8 killed %r5, %r5, killed %r9, 0, 14, %reg0
         EvenDeadKill = false;
         OddDeadKill = true;
       }
diff --git a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 283359c..61b04d1 100644
--- a/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/llvm/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -573,7 +573,7 @@
       return;
   } else {
     // The PHI node looks like:
-    //   %2<def> = PHI %0, <%bb.1>, %1, <%bb.3>
+    //   %2 = PHI %0, <%bb.1>, %1, <%bb.3>
     // Trace each incoming definition, e.g., (%0, %bb.1) and (%1, %bb.3)
     // The AND operation can be removed if both %0 in %bb.1 and %1 in
     // %bb.3 are defined with a load matching the MaskN.
diff --git a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 1953439..ff915ca 100644
--- a/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -368,7 +368,7 @@
       }
     }
     // Defs and clobbers can overlap, e.g.
-    // %d0<def,dead> = COPY %5, %r0<imp-def>, %r1<imp-def>
+    // dead %d0 = COPY %5, implicit-def %r0, implicit-def %r1
     for (RegisterRef R : Defs)
       Clobbers.erase(R);
 
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 80db360..c59cc50 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -187,7 +187,7 @@
 
     // Mapping: vreg -> cell
     // The keys are registers _without_ subregisters. This won't allow
-    // definitions in the form of "vreg:subreg<def> = ...". Such definitions
+    // definitions in the form of "vreg:subreg = ...". Such definitions
     // would be questionable from the point of view of SSA, since the "vreg"
     // could not be initialized in its entirety (specifically, an instruction
     // defining the "other part" of "vreg" would also count as a definition
@@ -1977,7 +1977,7 @@
     {
       const MachineOperand &VO = MI.getOperand(1);
       // The operand of CONST32 can be a blockaddress, e.g.
-      //   %0<def> = CONST32 <blockaddress(@eat, %l)>
+      //   %0 = CONST32 <blockaddress(@eat, %l)>
       // Do this check for all instructions for safety.
       if (!VO.isImm())
         return false;
@@ -3147,7 +3147,7 @@
       BrI.setDesc(JD);
       while (BrI.getNumOperands() > 0)
         BrI.RemoveOperand(0);
-      // This ensures that all implicit operands (e.g. %r31<imp-def>, etc)
+      // This ensures that all implicit operands (e.g. implicit-def %r31, etc)
       // are present in the rewritten branch.
       for (auto &Op : NI->operands())
         BrI.addOperand(Op);
diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 2dfd7b7..d8135e9 100644
--- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -351,11 +351,11 @@
       //   kill flag for a register (a removeRegisterKilled() analogous to
       //   addRegisterKilled) that handles aliased register correctly.
       //   * or has a killed aliased register use of I1's use reg
-      //           %d4<def> = A2_tfrpi 16
-      //           %r6<def> = A2_tfr %r9
-      //           %r8<def> = KILL %r8, %d4<imp-use,kill>
+      //           %d4 = A2_tfrpi 16
+      //           %r6 = A2_tfr %r9
+      //           %r8 = KILL %r8, implicit killed %d4
       //      If we want to move R6 = across the KILL instruction we would have
-      //      to remove the %d4<imp-use,kill> operand. For now, we are
+      //      to remove the implicit killed %d4 operand. For now, we are
       //      conservative and disallow the move.
       // we can't move I1 across it.
       if (MI.isDebugValue()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 652ea13..93ad2e7 100644
--- a/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -25,38 +25,38 @@
 //
 // Example:
 //
-//         %40<def> = L2_loadrub_io %39<kill>, 1
-//         %41<def> = S2_tstbit_i %40<kill>, 0
-//         J2_jumpt %41<kill>, <%bb.5>, %pc<imp-def,dead>
-//         J2_jump <%bb.4>, %pc<imp-def,dead>
+//         %40 = L2_loadrub_io killed %39, 1
+//         %41 = S2_tstbit_i killed %40, 0
+//         J2_jumpt killed %41, <%bb.5>, implicit dead %pc
+//         J2_jump <%bb.4>, implicit dead %pc
 //     Successors according to CFG: %bb.4(62) %bb.5(62)
 //
 // %bb.4: derived from LLVM BB %if.then
 //     Predecessors according to CFG: %bb.3
-//         %11<def> = A2_addp %6, %10
+//         %11 = A2_addp %6, %10
 //         S2_storerd_io %32, 16, %11
 //     Successors according to CFG: %bb.5
 //
 // %bb.5: derived from LLVM BB %if.end
 //     Predecessors according to CFG: %bb.3 %bb.4
-//         %12<def> = PHI %6, <%bb.3>, %11, <%bb.4>
-//         %13<def> = A2_addp %7, %12
-//         %42<def> = C2_cmpeqi %9, 10
-//         J2_jumpf %42<kill>, <%bb.3>, %pc<imp-def,dead>
-//         J2_jump <%bb.6>, %pc<imp-def,dead>
+//         %12 = PHI %6, <%bb.3>, %11, <%bb.4>
+//         %13 = A2_addp %7, %12
+//         %42 = C2_cmpeqi %9, 10
+//         J2_jumpf killed %42, <%bb.3>, implicit dead %pc
+//         J2_jump <%bb.6>, implicit dead %pc
 //     Successors according to CFG: %bb.6(4) %bb.3(124)
 //
 // would become:
 //
-//         %40<def> = L2_loadrub_io %39<kill>, 1
-//         %41<def> = S2_tstbit_i %40<kill>, 0
-// spec->  %11<def> = A2_addp %6, %10
+//         %40 = L2_loadrub_io killed %39, 1
+//         %41 = S2_tstbit_i killed %40, 0
+// spec->  %11 = A2_addp %6, %10
 // pred->  S2_pstorerdf_io %41, %32, 16, %11
-//         %46<def> = PS_pselect %41, %6, %11
-//         %13<def> = A2_addp %7, %46
-//         %42<def> = C2_cmpeqi %9, 10
-//         J2_jumpf %42<kill>, <%bb.3>, %pc<imp-def,dead>
-//         J2_jump <%bb.6>, %pc<imp-def,dead>
+//         %46 = PS_pselect %41, %6, %11
+//         %13 = A2_addp %7, %46
+//         %42 = C2_cmpeqi %9, 10
+//         J2_jumpf killed %42, <%bb.3>, implicit dead %pc
+//         J2_jump <%bb.6>, implicit dead %pc
 //     Successors according to CFG: %bb.6 %bb.3
 
 #include "Hexagon.h"
diff --git a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 78c7c10..d9f4322 100644
--- a/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -28,14 +28,14 @@
 // definitions are predicable, then in the second step, the conditional
 // transfers will then be rewritten as predicated instructions. E.g.
 //   %0 = A2_or %1, %2
-//   %3 = A2_tfrt %99, %0<kill>
+//   %3 = A2_tfrt %99, killed %0
 // will be rewritten as
 //   %3 = A2_port %99, %1, %2
 //
 // This replacement has two variants: "up" and "down". Consider this case:
 //   %0 = A2_or %1, %2
 //   ... [intervening instructions] ...
-//   %3 = A2_tfrt %99, %0<kill>
+//   %3 = A2_tfrt %99, killed %0
 // variant "up":
 //   %3 = A2_port %99, %1, %2
 //   ... [intervening instructions, %0->vreg3] ...
@@ -65,15 +65,15 @@
 // will see both instructions as actual definitions, and will mark the
 // first one as dead. The definition is not actually dead, and this
 // situation will need to be fixed. For example:
-//   %1<def,dead> = A2_tfrt ...  ; marked as dead
-//   %1<def> = A2_tfrf ...
+//   dead %1 = A2_tfrt ...  ; marked as dead
+//   %1 = A2_tfrf ...
 //
 // Since any of the individual predicated transfers may end up getting
 // removed (in case it is an identity copy), some pre-existing def may
 // be marked as dead after live interval recomputation:
-//   %1<def,dead> = ...          ; marked as dead
+//   dead %1 = ...          ; marked as dead
 //   ...
-//   %1<def> = A2_tfrf ...       ; if A2_tfrt is removed
+//   %1 = A2_tfrf ...       ; if A2_tfrt is removed
 // This case happens if %1 was used as a source in A2_tfrt, which means
 // that is it actually live at the A2_tfrf, and so the now dead definition
 // of %1 will need to be updated to non-dead at some point.
diff --git a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index b5fa068..d814fa7 100644
--- a/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1720,7 +1720,7 @@
     MachineOperand &MO = PredDef->getOperand(i);
     if (MO.isReg()) {
       // Skip all implicit references.  In one case there was:
-      //   %140<def> = FCMPUGT32_rr %138, %139, %usr<imp-use>
+      //   %140 = FCMPUGT32_rr %138, %139, implicit %usr
       if (MO.isImplicit())
         continue;
       if (MO.isUse()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index cb00bc7..eb643d0 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1615,8 +1615,8 @@
 }
 
 // Inspired by this pair:
-//  %r13<def> = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
-//  S2_storeri_io %r29, 132, %r1<kill>; flags:  mem:ST4[FixedStack1]
+//  %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
+//  S2_storeri_io %r29, 132, killed %r1; flags:  mem:ST4[FixedStack1]
 // Currently AA considers the addresses in these instructions to be aliasing.
 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
     MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
@@ -3515,7 +3515,7 @@
   case Hexagon::EH_RETURN_JMPR:
   case Hexagon::PS_jmpret:
     // jumpr r31
-    // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
+    // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0.
     DstReg = MI.getOperand(0).getReg();
     if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
       return HexagonII::HSIG_L2;
@@ -3705,7 +3705,7 @@
   case Hexagon::C2_cmovenewif:
     // if ([!]P0[.new]) Rd = #0
     // Actual form:
-    // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
+    // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
     DstReg = MI.getOperand(0).getReg();
     SrcReg = MI.getOperand(1).getReg();
     if (isIntRegForSubInst(DstReg) &&
diff --git a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 8f177d9a..99c16f1 100644
--- a/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -129,9 +129,9 @@
   // using -- if (QRI->isSubRegister(feederReg, cmpReg1) logic
   // before the callsite of this function
   // But we can not as it comes in the following fashion.
-  //    %d0<def> = Hexagon_S2_lsr_r_p %d0<kill>, %r2<kill>
-  //    %r0<def> = KILL %r0, %d0<imp-use,kill>
-  //    %p0<def> = CMPEQri %r0<kill>, 0
+  //    %d0 = Hexagon_S2_lsr_r_p killed %d0, killed %r2
+  //    %r0 = KILL %r0, implicit killed %d0
+  //    %p0 = CMPEQri killed %r0, 0
   // Hence, we need to check if it's a KILL instruction.
   if (II->getOpcode() == TargetOpcode::KILL)
     return false;
@@ -196,9 +196,9 @@
     // to new value jump. If they are in the path, bail out.
     // KILL sets kill flag on the opcode. It also sets up a
     // single register, out of pair.
-    //    %d0<def> = S2_lsr_r_p %d0<kill>, %r2<kill>
-    //    %r0<def> = KILL %r0, %d0<imp-use,kill>
-    //    %p0<def> = C2_cmpeqi %r0<kill>, 0
+    //    %d0 = S2_lsr_r_p killed %d0, killed %r2
+    //    %r0 = KILL %r0, implicit killed %d0
+    //    %p0 = C2_cmpeqi killed %r0, 0
     // PHI can be anything after RA.
     // COPY can rematerialize things in between feeder, compare and nvj.
     if (MII->getOpcode() == TargetOpcode::KILL ||
diff --git a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
index 7f82a5c..581761c 100644
--- a/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -8,27 +8,27 @@
 // This peephole pass optimizes in the following cases.
 // 1. Optimizes redundant sign extends for the following case
 //    Transform the following pattern
-//    %170<def> = SXTW %166
+//    %170 = SXTW %166
 //    ...
-//    %176<def> = COPY %170:isub_lo
+//    %176 = COPY %170:isub_lo
 //
 //    Into
-//    %176<def> = COPY %166
+//    %176 = COPY %166
 //
 //  2. Optimizes redundant negation of predicates.
-//     %15<def> = CMPGTrr %6, %2
+//     %15 = CMPGTrr %6, %2
 //     ...
-//     %16<def> = NOT_p %15<kill>
+//     %16 = NOT_p killed %15
 //     ...
-//     JMP_c %16<kill>, <%bb.1>, %pc<imp-def,dead>
+//     JMP_c killed %16, <%bb.1>, implicit dead %pc
 //
 //     Into
-//     %15<def> = CMPGTrr %6, %2;
+//     %15 = CMPGTrr %6, %2;
 //     ...
-//     JMP_cNot %15<kill>, <%bb.1>, %pc<imp-def,dead>;
+//     JMP_cNot killed %15, <%bb.1>, implicit dead %pc;
 //
 // Note: The peephole pass makes the instructions like
-// %170<def> = SXTW %166 or %16<def> = NOT_p %15<kill>
+// %170 = SXTW %166 or %16 = NOT_p killed %15
 // redundant and relies on some form of dead removal instructions, like
 // DCE or DIE to actually eliminate them.
 
@@ -132,7 +132,7 @@
       NextI = std::next(I);
       MachineInstr &MI = *I;
       // Look for sign extends:
-      // %170<def> = SXTW %166
+      // %170 = SXTW %166
       if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) {
         assert(MI.getNumOperands() == 2);
         MachineOperand &Dst = MI.getOperand(0);
@@ -143,13 +143,13 @@
         if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
             TargetRegisterInfo::isVirtualRegister(SrcReg)) {
           // Map the following:
-          // %170<def> = SXTW %166
+          // %170 = SXTW %166
           // PeepholeMap[170] = %166
           PeepholeMap[DstReg] = SrcReg;
         }
       }
 
-      // Look for  %170<def> = COMBINE_ir_V4 (0, %169)
+      // Look for  %170 = COMBINE_ir_V4 (0, %169)
       // %170:DoublRegs, %169:IntRegs
       if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) {
         assert(MI.getNumOperands() == 3);
@@ -192,14 +192,14 @@
         if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
             TargetRegisterInfo::isVirtualRegister(SrcReg)) {
           // Map the following:
-          // %170<def> = NOT_xx %166
+          // %170 = NOT_xx %166
           // PeepholeMap[170] = %166
           PeepholeMap[DstReg] = SrcReg;
         }
       }
 
       // Look for copy:
-      // %176<def> = COPY %170:isub_lo
+      // %176 = COPY %170:isub_lo
       if (!DisableOptSZExt && MI.isCopy()) {
         assert(MI.getNumOperands() == 2);
         MachineOperand &Dst = MI.getOperand(0);
diff --git a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index a8c5dea..0f5e297 100644
--- a/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -772,8 +772,8 @@
 
   // If data definition is because of implicit definition of the register,
   // do not newify the store. Eg.
-  // %r9<def> = ZXTH %r12, %d6<imp-use>, %r12<imp-def>
-  // S2_storerh_io %r8, 2, %r12<kill>; mem:ST2[%scevgep343]
+  // %r9 = ZXTH %r12, implicit %d6, implicit-def %r12
+  // S2_storerh_io %r8, 2, killed %r12; mem:ST2[%scevgep343]
   for (auto &MO : PacketMI.operands()) {
     if (MO.isRegMask() && MO.clobbersPhysReg(DepReg))
       return false;
@@ -787,8 +787,8 @@
   // Handle imp-use of super reg case. There is a target independent side
   // change that should prevent this situation but I am handling it for
   // just-in-case. For example, we cannot newify R2 in the following case:
-  // %r3<def> = A2_tfrsi 0;
-  // S2_storeri_io %r0<kill>, 0, %r2<kill>, %d1<imp-use,kill>;
+  // %r3 = A2_tfrsi 0;
+  // S2_storeri_io killed %r0, 0, killed %r2, implicit killed %d1;
   for (auto &MO : MI.operands()) {
     if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == DepReg)
       return false;
@@ -892,12 +892,12 @@
 // Go through the packet instructions and search for an anti dependency between
 // them and DepReg from MI. Consider this case:
 // Trying to add
-// a) %r1<def> = TFRI_cdNotPt %p3, 2
+// a) %r1 = TFRI_cdNotPt %p3, 2
 // to this packet:
 // {
-//   b) %p0<def> = C2_or %p3<kill>, %p0<kill>
-//   c) %p3<def> = C2_tfrrp %r23
-//   d) %r1<def> = C2_cmovenewit %p3, 4
+//   b) %p0 = C2_or killed %p3, killed %p0
+//   c) %p3 = C2_tfrrp %r23
+//   d) %r1 = C2_cmovenewit %p3, 4
 //  }
 // The P3 from a) and d) will be complements after
 // a)'s P3 is converted to .new form
@@ -962,11 +962,11 @@
 
   // One corner case deals with the following scenario:
   // Trying to add
-  // a) %r24<def> = A2_tfrt %p0, %r25
+  // a) %r24 = A2_tfrt %p0, %r25
   // to this packet:
   // {
-  //   b) %r25<def> = A2_tfrf %p0, %r24
-  //   c) %p0<def> = C2_cmpeqi %r26, 1
+  //   b) %r25 = A2_tfrf %p0, %r24
+  //   c) %p0 = C2_cmpeqi %r26, 1
   // }
   //
   // On general check a) and b) are complements, but presence of c) will
@@ -1543,7 +1543,7 @@
 
     // There are certain anti-dependencies that cannot be ignored.
     // Specifically:
-    //   J2_call ... %r0<imp-def>   ; SUJ
+    //   J2_call ... implicit-def %r0   ; SUJ
     //   R0 = ...                   ; SUI
     // Those cannot be packetized together, since the call will observe
     // the effect of the assignment to R0.
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index 7dd89c6..c5f3d43 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -272,7 +272,7 @@
   case Hexagon::J2_jumpr:
   case Hexagon::PS_jmpret:
     // jumpr r31
-    // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
+    // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0.
     DstReg = MCI.getOperand(0).getReg();
     if (Hexagon::R31 == DstReg)
       return HexagonII::HSIG_L2;
@@ -471,7 +471,7 @@
   case Hexagon::C2_cmovenewif:
     // if ([!]P0[.new]) Rd = #0
     // Actual form:
-    // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
+    // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
     DstReg = MCI.getOperand(0).getReg();  // Rd
     PredReg = MCI.getOperand(1).getReg(); // P0
     if (HexagonMCInstrInfo::isIntRegForSubInst(DstReg) &&
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
index 3a4a41c..7bd54fd 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
@@ -113,9 +113,10 @@
 
   if (!HexagonMCInstrInfo::bundleSize(MCB)) {
     // There once was a bundle:
-    //    BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
-    //      * %d2<def> = IMPLICIT_DEF; flags:
-    //      * %d7<def> = IMPLICIT_DEF; flags:
+    //    BUNDLE implicit-def %d2, implicit-def %r4, implicit-def %r5,
+    //    implicit-def %d7, ...
+    //      * %d2 = IMPLICIT_DEF; flags:
+    //      * %d7 = IMPLICIT_DEF; flags:
     // After the IMPLICIT_DEFs were removed by the asm printer, the bundle
     // became empty.
     DEBUG(dbgs() << "Skipping empty bundle");
@@ -137,9 +138,10 @@
 
   if (!HexagonMCInstrInfo::bundleSize(MCB)) {
     // There once was a bundle:
-    //    BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
-    //      * %d2<def> = IMPLICIT_DEF; flags:
-    //      * %d7<def> = IMPLICIT_DEF; flags:
+    //    BUNDLE implicit-def %d2, implicit-def %r4, implicit-def %r5,
+    //    implicit-def %d7, ...
+    //      * %d2 = IMPLICIT_DEF; flags:
+    //      * %d7 = IMPLICIT_DEF; flags:
     // After the IMPLICIT_DEFs were removed by the asm printer, the bundle
     // became empty.
     DEBUG(dbgs() << "Skipping empty bundle");
diff --git a/llvm/lib/Target/Hexagon/RDFGraph.h b/llvm/lib/Target/Hexagon/RDFGraph.h
index 25c4b672..e3abb0e 100644
--- a/llvm/lib/Target/Hexagon/RDFGraph.h
+++ b/llvm/lib/Target/Hexagon/RDFGraph.h
@@ -183,7 +183,7 @@
 //   This is typically used to prevent keeping registers artificially live
 //   in cases when they are defined via predicated instructions. For example:
 //     r0 = add-if-true cond, r10, r11                (1)
-//     r0 = add-if-false cond, r12, r13, r0<imp-use>  (2)
+//     r0 = add-if-false cond, r12, r13, implicit r0  (2)
 //     ... = r0                                       (3)
 //   Before (1), r0 is not intended to be live, and the use of r0 in (3) is
 //   not meant to be reached by any def preceding (1). However, since the
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index 74394d0..589c8f9 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -480,7 +480,7 @@
   MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
 
   // For MIPSR6 JI*C requires an immediate 0 as an operand, JIALC(64) an
-  // immediate 0 as an operand and requires the removal of it's %ra<imp-def>
+  // immediate 0 as an operand and requires the removal of its implicit-def %ra
   // implicit operand as copying the implicit operations of the instruction we're
   // looking at will give us the correct flags.
   if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
diff --git a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
index f33655a..415889d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -22,11 +22,11 @@
 // This peephole pass optimizes these cases, for example
 //
 // It will transform the following pattern
-//    %0<def> = LEA_ADDRi64 %VRFrame, 4
-//    %1<def> = cvta_to_local_yes_64 %0
+//    %0 = LEA_ADDRi64 %VRFrame, 4
+//    %1 = cvta_to_local_yes_64 %0
 //
 // into
-//    %1<def> = LEA_ADDRi64 %VRFrameLocal, 4
+//    %1 = LEA_ADDRi64 %VRFrameLocal, 4
 //
 // %VRFrameLocal is the virtual register name of %SPL
 //
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index cd07897..48b94a5 100644
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -62,9 +62,9 @@
 /// %bb.0: derived from LLVM BB %entry
 ///    Live Ins: %f1 %f3 %x6
 ///        <SNIP1>
-///        %0<def> = COPY %f1; F8RC:%0
-///        %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
-///        %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+///        %0 = COPY %f1; F8RC:%0
+///        %5 = CMPLWI killed %4, 0; CRRC:%5 GPRC:%4
+///        %8 = LXSDX %zero8, killed %7, implicit %rm;
 ///                    mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
 ///        BCC 76, %5, <%bb.2>; CRRC:%5
 ///    Successors according to CFG: %bb.1(?%) %bb.2(?%)
@@ -75,7 +75,7 @@
 ///
 /// %bb.2: derived from LLVM BB %entry
 ///    Predecessors according to CFG: %bb.0 %bb.1
-///        %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
+///        %9 = PHI %8, <%bb.1>, %0, <%bb.0>;
 ///                    F8RC:%9,%8,%0
 ///        <SNIP2>
 ///        BCC 76, %5, <%bb.4>; CRRC:%5
@@ -87,10 +87,10 @@
 ///
 /// %bb.4: derived from LLVM BB %entry
 ///    Predecessors according to CFG: %bb.2 %bb.3
-///        %13<def> = PHI %12, <%bb.3>, %2, <%bb.2>;
+///        %13 = PHI %12, <%bb.3>, %2, <%bb.2>;
 ///                     F8RC:%13,%12,%2
 ///        <SNIP3>
-///        BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
+///        BLR8 implicit %lr8, implicit %rm, implicit %f1
 ///
 /// When this pattern is detected, branch coalescing will try to collapse
 /// it by moving code in %bb.2 to %bb.0 and/or %bb.4 and removing %bb.3.
@@ -100,9 +100,9 @@
 /// %bb.0: derived from LLVM BB %entry
 ///    Live Ins: %f1 %f3 %x6
 ///        <SNIP1>
-///        %0<def> = COPY %f1; F8RC:%0
-///        %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
-///        %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+///        %0 = COPY %f1; F8RC:%0
+///        %5 = CMPLWI killed %4, 0; CRRC:%5 GPRC:%4
+///        %8 = LXSDX %zero8, killed %7, implicit %rm;
 ///                     mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
 ///        <SNIP2>
 ///        BCC 76, %5, <%bb.4>; CRRC:%5
@@ -115,12 +115,12 @@
 ///
 /// %bb.4: derived from LLVM BB %entry
 ///    Predecessors according to CFG: %bb.0 %bb.1
-///        %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
+///        %9 = PHI %8, <%bb.1>, %0, <%bb.0>;
 ///                    F8RC:%9,%8,%0
-///        %13<def> = PHI %12, <%bb.1>, %2, <%bb.0>;
+///        %13 = PHI %12, <%bb.1>, %2, <%bb.0>;
 ///                     F8RC:%13,%12,%2
 ///        <SNIP3>
-///        BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
+///        BLR8 implicit %lr8, implicit %rm, implicit %f1
 ///
 /// Branch Coalescing does not split blocks, it moves everything in the same
 /// direction ensuring it does not break use/definition semantics.
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 15cc1c7..fcc38e2 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2315,10 +2315,10 @@
 
       // For a method return value, we check the ZExt/SExt flags in attribute.
       // We assume the following code sequence for method call.
-      //   ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
+      //   ADJCALLSTACKDOWN 32, implicit dead %r1, implicit %r1
       //   BL8_NOP <ga:@func>,...
-      //   ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
-      //   %5<def> = COPY %x3; G8RC:%5
+      //   ADJCALLSTACKUP 32, 0, implicit dead %r1, implicit %r1
+      //   %5 = COPY %x3; G8RC:%5
       if (SrcReg == PPC::X3) {
         const MachineBasicBlock *MBB = MI.getParent();
         MachineBasicBlock::const_instr_iterator II =
diff --git a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
index c6fcea7..05eb756 100644
--- a/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -585,8 +585,8 @@
         // We can eliminate RLDICL (e.g. for zero-extension)
         // if all bits to clear are already zero in the input.
         // This code assumes the following code sequence for zero-extension.
-        //   %6<def> = COPY %5:sub_32; (optional)
-        //   %8<def> = IMPLICIT_DEF;
+        //   %6 = COPY %5:sub_32; (optional)
+        //   %8 = IMPLICIT_DEF;
         //   %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
         if (!EnableZExtElimination) break;
 
@@ -685,7 +685,7 @@
           DEBUG(dbgs() << "Optimizing LI to ADDI: ");
           DEBUG(LiMI->dump());
 
-          // There could be repeated registers in the PHI, e.g: %1<def> =
+          // There could be repeated registers in the PHI, e.g: %1 =
           // PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've
           // already replaced the def instruction, skip.
           if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
diff --git a/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
index 1039416..544c7f2 100644
--- a/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
+++ b/llvm/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
@@ -79,8 +79,8 @@
       }
 
       // We're looking for a sequence like this:
-      // %f0<def> = LFD 0, %x3<kill>, %qf0<imp-def>; mem:LD8[%a](tbaa=!2)
-      // %qf1<def> = QVESPLATI %qf0<kill>, 0, %rm<imp-use>
+      // %f0 = LFD 0, killed %x3, implicit-def %qf0; mem:LD8[%a](tbaa=!2)
+      // %qf1 = QVESPLATI killed %qf0, 0, implicit %rm
 
       for (auto SI = Splats.begin(); SI != Splats.end();) {
         MachineInstr *SMI = *SI;
diff --git a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 4d001c0..422bb7b 100644
--- a/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -90,21 +90,21 @@
         // This pass is run after register coalescing, and so we're looking for
         // a situation like this:
         //   ...
-        //   %5<def> = COPY %9; VSLRC:%5,%9
+        //   %5 = COPY %9; VSLRC:%5,%9
         //   %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
-        //                         %rm<imp-use>; VSLRC:%5,%17,%16
+        //                         implicit %rm; VSLRC:%5,%17,%16
         //   ...
         //   %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
-        //                         %rm<imp-use>; VSLRC:%9,%17,%19
+        //                         implicit %rm; VSLRC:%9,%17,%19
         //   ...
         // Where we can eliminate the copy by changing from the A-type to the
         // M-type instruction. Specifically, for this example, this means:
         //   %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
-        //                         %rm<imp-use>; VSLRC:%5,%17,%16
+        //                         implicit %rm; VSLRC:%5,%17,%16
         // is replaced by:
         //   %16<def,tied1> = XSMADDMDP %16<tied0>, %18, %9,
-        //                         %rm<imp-use>; VSLRC:%16,%18,%9
-        // and we remove: %5<def> = COPY %9; VSLRC:%5,%9
+        //                         implicit %rm; VSLRC:%16,%18,%9
+        // and we remove: %5 = COPY %9; VSLRC:%5,%9
 
         SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
 
@@ -150,10 +150,10 @@
         // walking the MIs we may as well test liveness here.
         //
         // FIXME: There is a case that occurs in practice, like this:
-        //   %9<def> = COPY %f1; VSSRC:%9
+        //   %9 = COPY %f1; VSSRC:%9
         //   ...
-        //   %6<def> = COPY %9; VSSRC:%6,%9
-        //   %7<def> = COPY %9; VSSRC:%7,%9
+        //   %6 = COPY %9; VSSRC:%6,%9
+        //   %7 = COPY %9; VSSRC:%7,%9
         //   %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
         //   %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
         //   %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC:
diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index 8009341..ca82740 100644
--- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -436,8 +436,8 @@
   // Also do a forward search to handle cases where an instruction after the
   // compare can be converted like
   //
-  // LTEBRCompare %f0s, %f0s, %cc<imp-def> LTEBRCompare %f0s, %f0s, %cc<imp-def>
-  // %f2s<def> = LER %f0s
+  // LTEBRCompare %f0s, %f0s, implicit-def %cc LTEBRCompare %f0s, %f0s, implicit-def %cc
+  // %f2s = LER %f0s
   //
   MBBI = Compare, MBBE = MBB.end();
   while (++MBBI != MBBE) {
diff --git a/llvm/lib/Target/X86/README-X86-64.txt b/llvm/lib/Target/X86/README-X86-64.txt
index 1385648..a3ea459 100644
--- a/llvm/lib/Target/X86/README-X86-64.txt
+++ b/llvm/lib/Target/X86/README-X86-64.txt
@@ -103,20 +103,20 @@
   
 Before regalloc, we have:
 
-        %reg1025<def> = IMUL32rri8 %reg1024, 45, %eflags<imp-def>
+        %reg1025 = IMUL32rri8 %reg1024, 45, implicit-def %eflags
         JMP mbb<bb2,0x203afb0>
     Successors according to CFG: 0x203afb0 (#3)
 
 bb1: 0x203af60, LLVM BB @0x1e02310, ID#2:
     Predecessors according to CFG: 0x203aec0 (#0)
-        %reg1026<def> = IMUL32rri8 %reg1024, 78, %eflags<imp-def>
+        %reg1026 = IMUL32rri8 %reg1024, 78, implicit-def %eflags
     Successors according to CFG: 0x203afb0 (#3)
 
 bb2: 0x203afb0, LLVM BB @0x1e02340, ID#3:
     Predecessors according to CFG: 0x203af10 (#1) 0x203af60 (#2)
-        %reg1027<def> = PHI %reg1025, mbb<bb,0x203af10>,
+        %reg1027 = PHI %reg1025, mbb<bb,0x203af10>,
                             %reg1026, mbb<bb1,0x203af60>
-        %reg1029<def> = MOVZX64rr32 %reg1027
+        %reg1029 = MOVZX64rr32 %reg1027
 
 so we'd have to know that IMUL32rri8 leaves the high word zero extended and to
 be able to recognize the zero extend.  This could also presumably be implemented
diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
index 2e39cb0..2f7dd58 100644
--- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp
@@ -191,15 +191,15 @@
 ///   %bb.2: derived from LLVM BB %if.then
 ///   Live Ins: %rdi
 ///   Predecessors according to CFG: %bb.0
-///   %ax<def> = MOV16rm %rdi<kill>, 1, %noreg, 0, %noreg, %eax<imp-def>;
+///   %ax = MOV16rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax;
 ///   mem:LD2[%p]
-///                                             No %eax<imp-use>
+///                                             No implicit %eax
 ///   Successors according to CFG: %bb.3(?%)
 ///
 ///   %bb.3: derived from LLVM BB %if.end
 ///   Live Ins: %eax                            Only %ax is actually live
 ///   Predecessors according to CFG: %bb.2 %bb.1
-///   %ax<def> = KILL %ax, %eax<imp-use,kill>
+///   %ax = KILL %ax, implicit killed %eax
 ///   RET 0, %ax
 static bool isLive(const MachineInstr &MI,
                    const LivePhysRegs &LiveRegs,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 96f19d3..8eeb571 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4469,7 +4469,7 @@
     unsigned leaInReg2 = 0;
     MachineInstr *InsMI2 = nullptr;
     if (Src == Src2) {
-      // ADD16rr %reg1028<kill>, %reg1028
+      // ADD16rr killed %reg1028, %reg1028
       // just a single insert_subreg.
       addRegReg(MIB, leaInReg, true, leaInReg, false);
     } else {
@@ -7633,7 +7633,7 @@
 /// This is used for mapping:
 ///   %xmm4 = V_SET0
 /// to:
-///   %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef>
+///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
 ///
 static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
                              const MCInstrDesc &Desc) {
@@ -8197,12 +8197,12 @@
 ///
 /// This catches the VCVTSI2SD family of instructions:
 ///
-/// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
+/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
 ///
 /// We should be careful *not* to catch VXOR idioms which are presumably
 /// handled specially in the pipeline:
 ///
-/// vxorps %xmm1<undef>, %xmm1<undef>, %xmm1
+/// vxorps undef %xmm1, undef %xmm1, %xmm1
 ///
 /// Like getPartialRegUpdateClearance, this makes a strong assumption that the
 /// high bits that are passed-through are not live.
@@ -10895,7 +10895,7 @@
   // FIXME: There are instructions which are being manually built without
   // explicit uses/defs so we also have to check the MCInstrDesc. We should be
   // able to remove the extra checks once those are fixed up. For example,
-  // sometimes we might get something like %rax<def> = POP64r 1. This won't be
+  // sometimes we might get something like %rax = POP64r 1. This won't be
   // caught by modifiesRegister or readsRegister even though the instruction
   // really ought to be formed so that modifiesRegister/readsRegister would
   // catch it.
diff --git a/llvm/lib/Target/X86/X86VZeroUpper.cpp b/llvm/lib/Target/X86/X86VZeroUpper.cpp
index 5999591..0b67e81 100644
--- a/llvm/lib/Target/X86/X86VZeroUpper.cpp
+++ b/llvm/lib/Target/X86/X86VZeroUpper.cpp
@@ -235,7 +235,7 @@
     // If the call has no RegMask, skip it as well. It usually happens on
     // helper function calls (such as '_chkstk', '_ftol2') where standard
     // calling convention is not used (RegMask is not used to mark register
-    // clobbered and register usage (def/imp-def/use) is well-defined and
+    // clobbered and register usage (def/implicit-def/use) is well-defined and
     // explicitly specified.
     if (IsCall && !callHasRegMask(MI))
       continue;