Add a bundle-aware API for querying instruction properties and switch the code
generator to it. For non-bundle instructions, these queries behave exactly the
same as the MC layer API.
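
As a rough illustration of the shape of such an API (the types and the
hasProperty helper below are self-contained stand-ins for illustration, not
the actual MachineInstr/MCInstrDesc classes), a bundle-aware query can be
expressed as one dispatcher over three query modes:

  #include <vector>

  // Stand-in for the per-opcode MC descriptor flags.
  struct MCDescStub {
    bool MayLoad = false, MayStore = false;
    bool Predicable = false, CanFoldAsLoad = false;
  };

  // Stand-in for a machine instruction; a non-empty Bundle means this
  // instruction heads a bundle of the contained instructions.
  struct InstrStub {
    MCDescStub Desc;
    std::vector<MCDescStub> Bundle;
  };

  enum QueryType {
    IgnoreBundle, // Query this instruction's own descriptor only.
    AnyInBundle,  // True if any bundled instruction has the property.
    AllInBundle   // True only if all bundled instructions have it.
  };

  template <typename Pred>
  bool hasProperty(const InstrStub &MI, Pred HasFlag, QueryType Type) {
    // Non-bundle instructions behave exactly like the MC layer API.
    if (Type == IgnoreBundle || MI.Bundle.empty())
      return HasFlag(MI.Desc);
    for (const MCDescStub &D : MI.Bundle) {
      if (HasFlag(D)) {
        if (Type == AnyInBundle)
          return true;
      } else if (Type == AllInBundle) {
        return false;
      }
    }
    return Type == AllInBundle;
  }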

For properties like mayLoad / mayStore, the query looks into the bundle and
returns true if any of the bundled instructions has the property.
For properties like isPredicable, it returns true only if *all* of the bundled
instructions have the property.
For properties like canFoldAsLoad and isCompare, it conservatively returns
false for bundles, as sketched below.
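
Under the sketch above, these three classes of properties map onto the query
modes like so (again purely illustrative, using the same stub types):

  // Any-of semantics: a bundle may load if any member may load.
  bool mayLoad(const InstrStub &MI) {
    return hasProperty(MI, [](const MCDescStub &D) { return D.MayLoad; },
                       AnyInBundle);
  }

  // All-of semantics: a bundle is predicable only if every member is.
  bool isPredicable(const InstrStub &MI) {
    return hasProperty(MI, [](const MCDescStub &D) { return D.Predicable; },
                       AllInBundle);
  }

  // Conservative semantics: canFoldAsLoad is simply false for any bundle.
  bool canFoldAsLoad(const InstrStub &MI) {
    if (!MI.Bundle.empty())
      return false;
    return MI.Desc.CanFoldAsLoad;
  }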


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146026 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index b382169..2bb173b 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -157,7 +157,7 @@
   MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
   ExitSU.setInstr(ExitMI);
   bool AllDepKnown = ExitMI &&
-    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
+    (ExitMI->isCall() || ExitMI->isBarrier());
   if (ExitMI && AllDepKnown) {
     // If it's a call or a barrier, add dependencies on the defs and uses of
     // instruction.
@@ -238,13 +238,12 @@
       continue;
     }
 
-    const MCInstrDesc &MCID = MI->getDesc();
-    assert(!MCID.isTerminator() && !MI->isLabel() &&
+    assert(!MI->isTerminator() && !MI->isLabel() &&
            "Cannot schedule terminators or labels!");
     // Create the SUnit for this MI.
     SUnit *SU = NewSUnit(MI);
-    SU->isCall = MCID.isCall();
-    SU->isCommutable = MCID.isCommutable();
+    SU->isCall = MI->isCall();
+    SU->isCommutable = MI->isCommutable();
 
     // Assign the Latency field of SU using target-provided information.
     if (UnitLatencies)
@@ -315,7 +314,7 @@
             int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
             assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
             if (RegUseIndex >= 0 &&
-                (UseMCID.mayLoad() || UseMCID.mayStore()) &&
+                (UseMI->mayLoad() || UseMI->mayStore()) &&
                 (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
                 UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
               LDataLatency += SpecialAddressLatency;
@@ -419,9 +418,9 @@
     // produce more precise dependence information.
 #define STORE_LOAD_LATENCY 1
     unsigned TrueMemOrderLatency = 0;
-    if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
+    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasVolatileMemoryRef() &&
-         (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+         (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
       // Be conservative with these and add dependencies on all memory
       // references, even those that are known to not alias.
       for (std::map<const Value *, SUnit *>::iterator I =
@@ -460,7 +459,7 @@
       PendingLoads.clear();
       AliasMemDefs.clear();
       AliasMemUses.clear();
-    } else if (MCID.mayStore()) {
+    } else if (MI->mayStore()) {
       bool MayAlias = true;
       TrueMemOrderLatency = STORE_LOAD_LATENCY;
       if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
@@ -516,7 +515,7 @@
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
-    } else if (MCID.mayLoad()) {
+    } else if (MI->mayLoad()) {
       bool MayAlias = true;
       TrueMemOrderLatency = 0;
       if (MI->isInvariantLoad(AA)) {
@@ -576,7 +575,7 @@
 
     // Simplistic target-independent heuristic: assume that loads take
     // extra time.
-    if (SU->getInstr()->getDesc().mayLoad())
+    if (SU->getInstr()->mayLoad())
       SU->Latency += 2;
   } else {
     SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());