[llvm-mca] Make the LSUnit a HardwareUnit, and allow derived classes to implement a different memory consistency model.

The LSUnit is now a HardwareUnit, and it is owned by the mca::Context.
Derived classes can now implement a different consistency model by overriding
method `LSUnit::isReady()`.
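
A rough sketch of what such a subclass could look like is below. It is for
illustration only and is not part of this patch: the class name StrictLSUnit
is invented, and the headers and accessors used (LSUnit.h, Instruction.h,
getInstruction(), getDesc(), MayLoad, isSQEmpty()) are assumptions about the
llvm-mca interfaces of this period rather than verbatim API.

  // Hypothetical subclass that models a stricter consistency policy: loads
  // are kept waiting while any store is still sitting in the store queue.
  #include "Instruction.h"
  #include "LSUnit.h"

  namespace mca {

  class StrictLSUnit : public LSUnit {
  public:
    StrictLSUnit(unsigned LQSize, unsigned SQSize, bool AssumeNoAlias)
        : LSUnit(LQSize, SQSize, AssumeNoAlias) {}

    bool isReady(const InstRef &IR) const override {
      const InstrDesc &Desc = IR.getInstruction()->getDesc();
      // Do not let a load start until the store queue has fully drained.
      if (Desc.MayLoad && !isSQEmpty())
        return false;
      // Otherwise defer to the default (base class) readiness check.
      return LSUnit::isReady(IR);
    }
  };

  } // namespace mca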

This patch also slightly refactors the Scheduler interface in an attempt to
simplify the interaction between the ExecuteStage and the underlying Scheduler.
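
The intended flow after the refactoring is roughly: ask the Scheduler whether
an instruction can be accepted (isAvailable), reserve its buffer and LSUnit
resources (dispatch), then issue it once it is ready or must bypass the
ReadySet. The sketch below compresses that flow into a single invented helper
(tryDispatchAndIssue); in the real pipeline the work is split between the
DispatchStage and the ExecuteStage, so treat this as an illustration of the
new interface, not as the actual stage code.

  // Invented helper for illustration; it drives the refactored Scheduler
  // interface end-to-end for a single instruction.
  #include "Instruction.h"
  #include "Scheduler.h"
  #include "llvm/ADT/SmallVector.h"

  static bool tryDispatchAndIssue(mca::Scheduler &S, mca::InstRef &IR) {
    // Hardware buffers or LSU queues are full: report a stall to the caller.
    if (S.isAvailable(IR) != mca::Scheduler::SC_AVAILABLE)
      return false;

    // Reserve buffer entries and load/store queue slots for IR.
    S.dispatch(IR);

    // Issue right away if IR has no pending dependencies, or if it must skip
    // the ReadySet (zero-latency or dispatch-hazard case).
    if (S.isReady(IR) || S.mustIssueImmediately(IR)) {
      llvm::SmallVector<std::pair<mca::ResourceRef, double>, 4> Used;
      S.issueInstruction(IR, Used);
    }
    return true;
  }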

llvm-svn: 340176
diff --git a/llvm/tools/llvm-mca/Scheduler.h b/llvm/tools/llvm-mca/Scheduler.h
index 018519d..1fc9b8d 100644
--- a/llvm/tools/llvm-mca/Scheduler.h
+++ b/llvm/tools/llvm-mca/Scheduler.h
@@ -321,7 +321,7 @@
 
   // Returns true if all resources are in-order, and there is at least one
   // resource which is a dispatch hazard (BufferSize = 0).
-  bool mustIssueImmediately(const InstrDesc &Desc);
+  bool mustIssueImmediately(const InstrDesc &Desc) const;
 
   bool canBeIssued(const InstrDesc &Desc) const;
 
@@ -364,10 +364,10 @@
 /// leaves the IssuedSet when it reaches the write-back stage.
 class Scheduler : public HardwareUnit {
   const llvm::MCSchedModel &SM;
+  LSUnit *LSU;
 
   // Hardware resources that are managed by this scheduler.
   std::unique_ptr<ResourceManager> Resources;
-  std::unique_ptr<LSUnit> LSU;
 
   std::vector<InstRef> WaitSet;
   std::vector<InstRef> ReadySet;
@@ -379,54 +379,49 @@
       llvm::SmallVectorImpl<std::pair<ResourceRef, double>> &Pipes);
 
 public:
-  Scheduler(const llvm::MCSchedModel &Model, unsigned LoadQueueSize,
-            unsigned StoreQueueSize, bool AssumeNoAlias)
-      : SM(Model), Resources(llvm::make_unique<ResourceManager>(SM)),
-        LSU(llvm::make_unique<LSUnit>(LoadQueueSize, StoreQueueSize,
-                                      AssumeNoAlias)) {}
+  Scheduler(const llvm::MCSchedModel &Model, LSUnit *Lsu)
+      : SM(Model), LSU(Lsu), Resources(llvm::make_unique<ResourceManager>(SM)) {
+  }
 
-  // Stalls generated by the scheduler.
+  // Status values returned by method isAvailable().
-  enum StallKind {
-    NoStall,
-    LoadQueueFull,
-    StoreQueueFull,
-    SchedulerQueueFull,
-    DispatchGroupStall
+  enum Status {
+    SC_AVAILABLE,
+    SC_LOAD_QUEUE_FULL,
+    SC_STORE_QUEUE_FULL,
+    SC_BUFFERS_FULL,
+    SC_DISPATCH_GROUP_STALL,
   };
 
-  /// Check if the instruction in 'IR' can be dispatched.
+  /// Checks if the instruction in 'IR' can be dispatched and returns an
+  /// answer in the form of a Status value.
   ///
   /// The DispatchStage is responsible for querying the Scheduler before
   /// dispatching new instructions. This routine is used for performing such
-  /// a query.  If the instruction 'IR' can be dispatched, then true is
-  /// returned, otherwise false is returned with Event set to the stall type.
+  /// a query.  If the instruction 'IR' can be dispatched, then SC_AVAILABLE
+  /// is returned; otherwise, the returned Status value identifies the reason
+  /// for the stall.
-  bool canBeDispatched(const InstRef &IR, StallKind &Event) const;
+  /// Internally, it also checks if the load/store unit is available.
+  Status isAvailable(const InstRef &IR) const;
 
-  /// Returns true if there is availibility for IR in the LSU.
-  bool isReady(const InstRef &IR) const { return LSU->isReady(IR); }
+  /// Reserves buffer and LSUnit queue resources that are necessary to issue
+  /// this instruction.
+  ///
+  /// Note that this operation cannot fail; it assumes that a previous call
+  /// to method `isAvailable(IR)` returned `SC_AVAILABLE`.
+  void dispatch(const InstRef &IR);
+
+  /// Returns true if IR is ready to be executed by the underlying pipelines.
+  /// This method assumes that IR has been previously dispatched.
+  bool isReady(const InstRef &IR) const;
 
   /// Issue an instruction.  The Used container is populated with
   /// the resource objects consumed on behalf of issuing this instruction.
-  void
-  issueInstruction(InstRef &IR,
-                   llvm::SmallVectorImpl<std::pair<ResourceRef, double>> &Used);
+  void issueInstruction(
+      InstRef &IR,
+      llvm::SmallVectorImpl<std::pair<ResourceRef, double>> &Used);
 
-  /// This routine will attempt to issue an instruction immediately (for
-  /// zero-latency instructions).
-  ///
-  /// Returns true if the instruction is issued immediately.  If this does not
-  /// occur, then the instruction will be added to the Scheduler's ReadySet.
-  bool issueImmediately(InstRef &IR);
-
-  /// Reserve one entry in each buffered resource.
-  void reserveBuffers(llvm::ArrayRef<uint64_t> Buffers) {
-    Resources->reserveBuffers(Buffers);
-  }
-
-  /// Release buffer entries previously allocated by method reserveBuffers.
-  void releaseBuffers(llvm::ArrayRef<uint64_t> Buffers) {
-    Resources->releaseBuffers(Buffers);
-  }
+  /// Returns true if IR must be issued immediately: that is, if IR is a
+  /// zero-latency instruction, or if the resources consumed by IR force a
+  /// dispatch hazard.
+  bool mustIssueImmediately(const InstRef &IR) const;
 
   /// Update the resources managed by the scheduler.
   /// This routine is to be called at the start of a new cycle, and is
@@ -444,21 +439,12 @@
   /// Update the issued queue.
   void updateIssuedSet(llvm::SmallVectorImpl<InstRef> &Executed);
 
-  /// Updates the Scheduler's resources to reflect that an instruction has just
-  /// been executed.
-  void onInstructionExecuted(const InstRef &IR);
-
   /// Obtain the processor's resource identifier for the given
   /// resource mask.
   unsigned getResourceID(uint64_t Mask) {
     return Resources->resolveResourceMask(Mask);
   }
 
-  /// Reserve resources necessary to issue the instruction.
-  /// Returns true if the resources are ready and the (LSU) can
-  /// execute the given instruction immediately.
-  bool reserveResources(InstRef &IR);
-
   /// Select the next instruction to issue from the ReadySet.
   /// This method gives priority to older instructions.
   InstRef select();