//===------------------------- LSUnit.h --------------------------*- C++-*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// A Load/Store unit class that models load/store queues and that implements
/// a simple weak memory consistency model.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TOOLS_LLVM_MCA_LSUNIT_H
#define LLVM_TOOLS_LLVM_MCA_LSUNIT_H

#include "HardwareUnits/HardwareUnit.h"
#include <set>

namespace mca {

class InstRef;
struct InstrDesc;

/// A Load/Store Unit implementing load and store queues.
///
/// This class implements a load queue and a store queue to emulate the
/// out-of-order execution of memory operations.
/// Each load (or store) consumes an entry in the load (or store) queue.
///
/// Rules are:
/// 1) A younger load is allowed to pass an older load only if there are no
///    stores nor barriers in between the two loads.
/// 2) A younger store is not allowed to pass an older store.
/// 3) A younger store is not allowed to pass an older load.
/// 4) A younger load is allowed to pass an older store only if the load does
///    not alias with the store.
///
/// This class optimistically assumes that loads don't alias store operations.
/// Under this assumption, younger loads are always allowed to pass older
/// stores (this would only affect rule 4).
/// Essentially, this LSUnit doesn't attempt to run any sort of alias analysis
/// to predict when loads and stores don't alias with each other.
///
/// To enforce aliasing between loads and stores, flag `AssumeNoAlias` must be
/// set to `false` by the constructor of LSUnit.
///
/// In the case of write-combining memory, rule 2 could be relaxed to allow
/// reordering of non-aliasing store operations. At the moment, this is not
/// allowed.
/// To put it another way, there is no option to specify a different memory
/// type for memory operations (example: write-through, write-combining, etc.).
/// Also, there is no way to weaken the memory model, and this unit currently
/// doesn't support write-combining behavior.
///
/// No assumptions are made on the size of the store buffer.
/// As mentioned before, this class doesn't perform alias analysis.
/// Consequently, LSUnit doesn't know how to identify cases where
/// store-to-load forwarding may occur.
///
/// LSUnit doesn't attempt to predict whether a load or store hits or misses
/// the L1 cache. To be more specific, LSUnit doesn't know anything about
/// the cache hierarchy and memory types.
/// It only knows if an instruction "mayLoad" and/or "mayStore". For loads, the
/// scheduling model provides an "optimistic" load-to-use latency (which usually
/// matches the load-to-use latency for when there is a hit in the L1D).
///
/// Class MCInstrDesc in LLVM doesn't know about serializing operations, nor
/// memory-barrier like instructions.
/// LSUnit conservatively assumes that an instruction which `mayLoad` and has
/// `unmodeled side effects` behaves like a "soft" load-barrier. That means, it
/// serializes loads without forcing a flush of the load queue.
/// Similarly, instructions that both `mayStore` and have `unmodeled side
/// effects` are treated like store barriers. A full memory
/// barrier is a 'mayLoad' and 'mayStore' instruction with unmodeled side
/// effects. This is obviously inaccurate, but this is the best that we can do
/// at the moment.
///
/// Each load/store barrier consumes one entry in the load/store queue. A
/// load/store barrier enforces ordering of loads/stores:
/// - A younger load cannot pass a load barrier.
/// - A younger store cannot pass a store barrier.
///
/// A younger load has to wait for the memory load barrier to execute.
/// A load/store barrier is "executed" when it becomes the oldest entry in
/// the load/store queue(s). That also means that all the older loads/stores
/// have already been executed.
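///
/// A minimal construction sketch (the variable names and queue sizes below are
/// illustrative only, not values taken from any real scheduling model):
///
/// \code
///   // Bounded queues; a load must wait on potentially aliasing older stores.
///   mca::LSUnit LSU(/*LQ=*/32, /*SQ=*/20, /*AssumeNoAlias=*/false);
///
///   // Unbounded queues; loads are optimistically assumed to never alias
///   // with stores.
///   mca::LSUnit OptimisticLSU(/*LQ=*/0, /*SQ=*/0, /*AssumeNoAlias=*/true);
/// \endcode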
class LSUnit : public HardwareUnit {
  // Load queue size.
  // LQ_Size == 0 means that there are infinite slots in the load queue.
  unsigned LQ_Size;

  // Store queue size.
  // SQ_Size == 0 means that there are infinite slots in the store queue.
  unsigned SQ_Size;

  // If true, loads will never alias with stores. This is the default.
  bool NoAlias;

  std::set<unsigned> LoadQueue;
  std::set<unsigned> StoreQueue;

  void assignLQSlot(unsigned Index);
  void assignSQSlot(unsigned Index);
  bool isReadyNoAlias(unsigned Index) const;

  // An instruction that both 'mayStore' and 'HasUnmodeledSideEffects' is
  // conservatively treated as a store barrier. It forces older stores to be
  // executed before newer stores are issued.
  std::set<unsigned> StoreBarriers;

  // An instruction that both 'mayLoad' and 'HasUnmodeledSideEffects' is
  // conservatively treated as a load barrier. It forces older loads to execute
  // before newer loads are issued.
  std::set<unsigned> LoadBarriers;

  bool isSQEmpty() const { return StoreQueue.empty(); }
  bool isLQEmpty() const { return LoadQueue.empty(); }
  bool isSQFull() const { return SQ_Size != 0 && StoreQueue.size() == SQ_Size; }
  bool isLQFull() const { return LQ_Size != 0 && LoadQueue.size() == LQ_Size; }

public:
  LSUnit(unsigned LQ = 0, unsigned SQ = 0, bool AssumeNoAlias = false)
      : LQ_Size(LQ), SQ_Size(SQ), NoAlias(AssumeNoAlias) {}

#ifndef NDEBUG
  void dump() const;
#endif

  enum Status {
    LSU_AVAILABLE = 0,
    LSU_LQUEUE_FULL,
    LSU_SQUEUE_FULL
  };

  // Returns LSU_AVAILABLE if there are enough load/store queue entries to
  // serve IR. It also returns LSU_AVAILABLE if IR is not a memory operation.
  Status isAvailable(const InstRef &IR) const;

  // Allocates load/store queue resources for IR.
  //
  // This method assumes that a previous call to `isAvailable(IR)` returned
  // LSU_AVAILABLE, and that IR is a memory operation.
  void dispatch(const InstRef &IR);

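  // A hedged illustration of the intended call pattern for a memory operation
  // IR (the dispatch-stage driver sketched here is hypothetical, not part of
  // this class):
  //
  //   if (LSU.isAvailable(IR) == LSUnit::LSU_AVAILABLE)
  //     LSU.dispatch(IR);   // Reserve load/store queue entries for IR.
  //   else
  //     stallDispatch(IR);  // Hypothetical back-pressure handler.
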
  // By default, rules are:
  // 1. A store may not pass a previous store.
  // 2. A load may not pass a previous store unless flag 'NoAlias' is set.
  // 3. A load may pass a previous load.
  // 4. A store may not pass a previous load (regardless of flag 'NoAlias').
  // 5. A load has to wait until an older load barrier is fully executed.
  // 6. A store has to wait until an older store barrier is fully executed.
  virtual bool isReady(const InstRef &IR) const;
  void onInstructionExecuted(const InstRef &IR);
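
  // A hedged sketch of how a simulated scheduler might drive the two methods
  // above (the IssuedSet container and the retirement step are hypothetical):
  //
  //   if (LSU.isReady(IR))
  //     IssuedSet.insert(IR);        // Memory ordering allows IR to issue.
  //   ...
  //   // Once IR has completed execution:
  //   LSU.onInstructionExecuted(IR); // Release IR's load/store queue entries.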
};

} // namespace mca

#endif