//===-- SIInsertWaits.cpp - Insert Wait Instructions ----------------------===//
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | /// \file |
| 11 | /// \brief Insert wait instructions for memory reads and writes. |
| 12 | /// |
| 13 | /// Memory reads and writes are issued asynchronously, so we need to insert |
| 14 | /// S_WAITCNT instructions when we want to access any of their results or |
| 15 | /// overwrite any register that's used asynchronously. |
| 16 | // |
| 17 | //===----------------------------------------------------------------------===// |
| 18 | |
| 19 | #include "AMDGPU.h" |
Eric Christopher | d913448 | 2014-08-04 21:25:23 +0000 | [diff] [blame] | 20 | #include "AMDGPUSubtarget.h" |
Matt Arsenault | 9783e00 | 2014-09-29 15:50:26 +0000 | [diff] [blame] | 21 | #include "SIDefines.h" |
Matt Arsenault | 1fd0c62 | 2014-09-29 15:53:15 +0000 | [diff] [blame] | 22 | #include "SIInstrInfo.h" |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 23 | #include "SIMachineFunctionInfo.h" |
Konstantin Zhuravlyov | 836cbff | 2016-09-30 17:01:40 +0000 | [diff] [blame] | 24 | #include "Utils/AMDGPUBaseInfo.h" |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 25 | #include "llvm/CodeGen/MachineFunction.h" |
| 26 | #include "llvm/CodeGen/MachineFunctionPass.h" |
| 27 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
| 28 | #include "llvm/CodeGen/MachineRegisterInfo.h" |
| 29 | |
Tom Stellard | 6e1967e | 2016-02-05 17:42:38 +0000 | [diff] [blame] | 30 | #define DEBUG_TYPE "si-insert-waits" |
| 31 | |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 32 | using namespace llvm; |
Konstantin Zhuravlyov | 836cbff | 2016-09-30 17:01:40 +0000 | [diff] [blame] | 33 | using namespace llvm::AMDGPU; |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 34 | |
| 35 | namespace { |
| 36 | |
/// \brief One value per hardware counter (VM, EXP, LGKM).
///
/// The union lets the pass address the counters either by name
/// (\c Named.VM etc.) or uniformly by index (\c Array[0..2]).
union Counters {
  struct {
    unsigned VM;
    unsigned EXP;
    unsigned LGKM;
  } Named;
  unsigned Array[3];
};
| 47 | |
/// \brief Coarse classification of the previously issued instruction,
/// used to detect back-to-back VMEM/SMEM clauses.
enum InstType {
  OTHER,
  SMEM,
  VMEM
};
| 53 | |
/// \brief Per-register counter snapshot, indexed by hardware register
/// encoding value (512 entries — presumably covers all SGPR/VGPR encodings;
/// TODO confirm against the register encoding space).
typedef Counters RegCounters[512];
/// \brief Half-open range [first, second) of register encoding values.
typedef std::pair<unsigned, unsigned> RegInterval;
| 56 | |
/// \brief Machine pass that inserts S_WAITCNT instructions so that results of
/// asynchronously-issued memory reads/writes are waited on before being used
/// or overwritten.
class SIInsertWaits : public MachineFunctionPass {

private:
  // Target/function context, set up in runOnMachineFunction.
  const SISubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const MachineRegisterInfo *MRI;
  // ISA version, used to encode/decode S_WAITCNT immediates.
  IsaVersion IV;

  /// \brief Constant zero value
  static const Counters ZeroCounts;

  /// \brief Hardware limits (maximum encodable value per counter).
  Counters HardwareLimits;

  /// \brief Counter values we have already waited on.
  Counters WaitedOn;

  /// \brief Counter values that we must wait on before the next counter
  /// increase.
  Counters DelayedWaitOn;

  /// \brief Counter values for last instruction issued.
  Counters LastIssued;

  /// \brief Registers used by async instructions.
  RegCounters UsedRegs;

  /// \brief Registers defined by async instructions.
  RegCounters DefinedRegs;

  /// \brief Different export instruction types seen since last wait.
  unsigned ExpInstrTypesSeen;

  /// \brief Type of the last opcode.
  InstType LastOpcodeType;

  /// \brief Whether the previous instruction wrote M0 (see handleSendMsg).
  bool LastInstWritesM0;

  /// Whether or not we have flat operations outstanding.
  bool IsFlatOutstanding;

  /// \brief Whether the machine function returns void
  bool ReturnsVoid;

  /// Whether the VCCZ bit is possibly corrupt
  bool VCCZCorrupt;

  /// \brief Get increment/decrement amount for this instruction.
  Counters getHwCounts(MachineInstr &MI);

  /// \brief Is operand relevant for async execution?
  bool isOpRelevant(MachineOperand &Op);

  /// \brief Get register interval an operand affects.
  RegInterval getRegInterval(const TargetRegisterClass *RC,
                             const MachineOperand &Reg) const;

  /// \brief Handle instructions async components
  void pushInstruction(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator I,
                       const Counters& Increment);

  /// \brief Insert the actual wait instruction
  bool insertWait(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator I,
                  const Counters &Counts);

  /// \brief Handle existing wait instructions (from intrinsics)
  void handleExistingWait(MachineBasicBlock::iterator I);

  /// \brief Do we need def2def checks?
  bool unorderedDefines(MachineInstr &MI);

  /// \brief Resolve all operand dependencies to counter requirements
  Counters handleOperands(MachineInstr &MI);

  /// \brief Insert S_NOP between an instruction writing M0 and S_SENDMSG.
  void handleSendMsg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);

  /// Return true if there are LGKM instructions that haven't been waited on
  /// yet.
  bool hasOutstandingLGKM() const;

public:
  static char ID;

  // NOTE: remaining members (counters, LastOpcodeType, flags, ...) are
  // (re)initialized at the start of runOnMachineFunction.
  SIInsertWaits() :
    MachineFunctionPass(ID),
    ST(nullptr),
    TII(nullptr),
    TRI(nullptr),
    ExpInstrTypesSeen(0),
    VCCZCorrupt(false) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert wait instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The pass only inserts/removes instructions inside blocks.
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
| 163 | |
| 164 | } // End anonymous namespace |
| 165 | |
// Register the pass with the PassRegistry so it can be referenced by name
// ("si-insert-waits") and scheduled by the target pass pipeline.
INITIALIZE_PASS_BEGIN(SIInsertWaits, DEBUG_TYPE,
                      "SI Insert Waits", false, false)
INITIALIZE_PASS_END(SIInsertWaits, DEBUG_TYPE,
                    "SI Insert Waits", false, false)

char SIInsertWaits::ID = 0;

// Exposed ID so other code can refer to this pass (e.g. in pass ordering).
char &llvm::SIInsertWaitsID = SIInsertWaits::ID;

// Factory used by the AMDGPU target machine to create the pass instance.
FunctionPass *llvm::createSIInsertWaitsPass() {
  return new SIInsertWaits();
}

// All three hardware counters start at zero.
const Counters SIInsertWaits::ZeroCounts = { { 0, 0, 0 } };
| 180 | |
Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 181 | static bool readsVCCZ(const MachineInstr &MI) { |
| 182 | unsigned Opc = MI.getOpcode(); |
| 183 | return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) && |
| 184 | !MI.getOperand(1).isUndef(); |
Tom Stellard | 3096176 | 2016-02-08 19:49:20 +0000 | [diff] [blame] | 185 | } |
| 186 | |
// Return true if LGKM operations have been issued that we have not yet
// waited on (issued count is ahead of the waited-on count).
bool SIInsertWaits::hasOutstandingLGKM() const {
  return WaitedOn.Named.LGKM != LastIssued.Named.LGKM;
}
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 190 | |
/// \brief Return how much \p MI increments each hardware counter, derived
/// from the instruction's TSFlags.
Counters SIInsertWaits::getHwCounts(MachineInstr &MI) {
  uint64_t TSFlags = MI.getDesc().TSFlags;
  Counters Result = { { 0, 0, 0 } };

  // VM_CNT increments by one for any VM-counted instruction.
  Result.Named.VM = !!(TSFlags & SIInstrFlags::VM_CNT);

  // Only consider stores or EXP for EXP_CNT
  Result.Named.EXP = !!(TSFlags & SIInstrFlags::EXP_CNT) && MI.mayStore();

  // LGKM may use larger values
  if (TSFlags & SIInstrFlags::LGKM_CNT) {

    if (TII->isSMRD(MI)) {

      if (MI.getNumOperands() != 0) {
        assert(MI.getOperand(0).isReg() &&
               "First LGKM operand must be a register!");

        // XXX - What if this is a write into a super register?
        const TargetRegisterClass *RC = TII->getOpRegClass(MI, 0);
        unsigned Size = RC->getSize();
        // SMRD loads wider than 32 bits count as 2, otherwise 1.
        Result.Named.LGKM = Size > 4 ? 2 : 1;
      } else {
        // s_dcache_inv etc. do not have a destination register. Assume we
        // want a wait on these.
        // XXX - What is the right value?
        Result.Named.LGKM = 1;
      }
    } else {
      // DS
      Result.Named.LGKM = 1;
    }

  } else {
    Result.Named.LGKM = 0;
  }

  return Result;
}
| 230 | |
/// \brief Decide whether operand \p Op must be tracked for asynchronous
/// execution: register defs always are; register uses only when they carry
/// the data being exported or stored.
bool SIInsertWaits::isOpRelevant(MachineOperand &Op) {
  // Constants are always irrelevant
  if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
    return false;

  // Defines are always relevant
  if (Op.isDef())
    return true;

  // For exports all registers are relevant.
  // TODO: Skip undef/disabled registers.
  MachineInstr &MI = *Op.getParent();
  if (TII->isEXP(MI))
    return true;

  // For stores the stored value is also relevant
  if (!MI.getDesc().mayStore())
    return false;

  // Check if this operand is the value being stored.
  // Special case for DS/FLAT instructions, since the address
  // operand comes before the value operand and it may have
  // multiple data operands.

  if (TII->isDS(MI)) {
    // DS stores may carry up to two data operands (data0/data1).
    MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
    if (Data0 && Op.isIdenticalTo(*Data0))
      return true;

    MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1);
    return Data1 && Op.isIdenticalTo(*Data1);
  }

  if (TII->isFLAT(MI)) {
    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
    if (Data && Op.isIdenticalTo(*Data))
      return true;
  }

  // NOTE: This assumes that the value operand is before the
  // address operand, and that there is only one value operand.
  for (MachineInstr::mop_iterator I = MI.operands_begin(),
       E = MI.operands_end(); I != E; ++I) {

    if (I->isReg() && I->isUse())
      return Op.isIdenticalTo(*I);
  }

  return false;
}
| 281 | |
Matt Arsenault | d1d499a | 2015-10-01 21:43:15 +0000 | [diff] [blame] | 282 | RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC, |
| 283 | const MachineOperand &Reg) const { |
| 284 | unsigned Size = RC->getSize(); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 285 | assert(Size >= 4); |
| 286 | |
| 287 | RegInterval Result; |
Matt Arsenault | d1d499a | 2015-10-01 21:43:15 +0000 | [diff] [blame] | 288 | Result.first = TRI->getEncodingValue(Reg.getReg()); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 289 | Result.second = Result.first + Size / 4; |
| 290 | |
| 291 | return Result; |
| 292 | } |
| 293 | |
/// \brief Account for the counter increments of the instruction at \p I:
/// advance LastIssued, snapshot per-register counter values for its relevant
/// defs and uses, and break VMEM/SMEM clauses with S_NOP on VI.
void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const Counters &Increment) {

  // Get the hardware counter increments and sum them up
  Counters Limit = ZeroCounts;
  unsigned Sum = 0;

  // Flat accesses may complete out of order, so remember one is in flight
  // (checked later when deciding whether VM_CNT is ordered).
  if (TII->mayAccessFlatAddressSpace(*I))
    IsFlatOutstanding = true;

  for (unsigned i = 0; i < 3; ++i) {
    LastIssued.Array[i] += Increment.Array[i];
    // Limit records, per counter this instruction touches, the issue count
    // a dependent instruction must wait for.
    if (Increment.Array[i])
      Limit.Array[i] = LastIssued.Array[i];
    Sum += Increment.Array[i];
  }

  // If we don't increase anything then that's it
  if (Sum == 0) {
    LastOpcodeType = OTHER;
    return;
  }

  if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
    // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
    // or SMEM clause, respectively.
    //
    // The temporary workaround is to break the clauses with S_NOP.
    //
    // The proper solution would be to allocate registers such that all source
    // and destination registers don't overlap, e.g. this is illegal:
    // r0 = load r2
    // r2 = load r0
    if (LastOpcodeType == VMEM && Increment.Named.VM) {
      // Insert a NOP to break the clause.
      BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
        .addImm(0);
      LastInstWritesM0 = false;
    }

    if (TII->isSMRD(*I))
      LastOpcodeType = SMEM;
    else if (Increment.Named.VM)
      LastOpcodeType = VMEM;
  }

  // Remember which export instructions we have seen
  if (Increment.Named.EXP) {
    // Bit 1 = EXP seen, bit 2 = other EXP_CNT (VM-write) seen.
    ExpInstrTypesSeen |= TII->isEXP(*I) ? 1 : 2;
  }

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = I->getOperand(i);
    if (!isOpRelevant(Op))
      continue;

    const TargetRegisterClass *RC = TII->getOpRegClass(*I, i);
    RegInterval Interval = getRegInterval(RC, Op);
    for (unsigned j = Interval.first; j < Interval.second; ++j) {

      // Remember which registers we define
      if (Op.isDef())
        DefinedRegs[j] = Limit;

      // and which one we are using
      if (Op.isUse())
        UsedRegs[j] = Limit;
    }
  }
}
| 365 | |
/// \brief Emit an S_WAITCNT before \p I if the \p Required counter values
/// have not been waited on yet. Returns true iff an instruction was inserted.
bool SIInsertWaits::insertWait(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               const Counters &Required) {

  // End of program? No need to wait on anything
  // A function not returning void needs to wait, because other bytecode will
  // be appended after it and we don't know what it will be.
  if (I != MBB.end() && I->getOpcode() == AMDGPU::S_ENDPGM && ReturnsVoid)
    return false;

  // Figure out if the async instructions execute in order
  bool Ordered[3];

  // VM_CNT is always ordered except when there are flat instructions, which
  // can return out of order.
  Ordered[0] = !IsFlatOutstanding;

  // EXP_CNT is unordered if we have both EXP & VM-writes
  Ordered[1] = ExpInstrTypesSeen == 3;

  // LGKM_CNT is handled as always unordered. TODO: Handle LDS and GDS
  Ordered[2] = false;

  // The values we are going to put into the S_WAITCNT instruction
  Counters Counts = HardwareLimits;

  // Do we really need to wait?
  bool NeedWait = false;

  for (unsigned i = 0; i < 3; ++i) {

    // Already waited far enough for this counter.
    if (Required.Array[i] <= WaitedOn.Array[i])
      continue;

    NeedWait = true;

    if (Ordered[i]) {
      // For in-order counters, wait until at most this many operations
      // remain outstanding.
      unsigned Value = LastIssued.Array[i] - Required.Array[i];

      // Adjust the value to the real hardware possibilities.
      Counts.Array[i] = std::min(Value, HardwareLimits.Array[i]);

    } else
      // Unordered counters must be drained completely.
      Counts.Array[i] = 0;

    // Remember on what we have waited on.
    WaitedOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
  }

  if (!NeedWait)
    return false;

  // Reset EXP_CNT instruction types
  if (Counts.Named.EXP == 0)
    ExpInstrTypesSeen = 0;

  // Build the wait instruction
  BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
    .addImm(encodeWaitcnt(IV,
                          Counts.Named.VM,
                          Counts.Named.EXP,
                          Counts.Named.LGKM));

  LastOpcodeType = OTHER;
  LastInstWritesM0 = false;
  IsFlatOutstanding = false;
  return true;
}
| 434 | |
| 435 | /// \brief helper function for handleOperands |
| 436 | static void increaseCounters(Counters &Dst, const Counters &Src) { |
| 437 | |
| 438 | for (unsigned i = 0; i < 3; ++i) |
| 439 | Dst.Array[i] = std::max(Dst.Array[i], Src.Array[i]); |
| 440 | } |
| 441 | |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 442 | /// \brief check whether any of the counters is non-zero |
| 443 | static bool countersNonZero(const Counters &Counter) { |
| 444 | for (unsigned i = 0; i < 3; ++i) |
| 445 | if (Counter.Array[i]) |
| 446 | return true; |
| 447 | return false; |
| 448 | } |
| 449 | |
/// \brief Fold the counts of a pre-existing S_WAITCNT (e.g. emitted for an
/// intrinsic) into DelayedWaitOn so it is honored before the next counter
/// increase.
void SIInsertWaits::handleExistingWait(MachineBasicBlock::iterator I) {
  assert(I->getOpcode() == AMDGPU::S_WAITCNT);

  unsigned Imm = I->getOperand(0).getImm();
  Counters Counts, WaitOn;

  // Decode the per-counter fields of the S_WAITCNT immediate.
  Counts.Named.VM = decodeVmcnt(IV, Imm);
  Counts.Named.EXP = decodeExpcnt(IV, Imm);
  Counts.Named.LGKM = decodeLgkmcnt(IV, Imm);

  // Translate "allow N outstanding" into the absolute issue count that must
  // be waited on (clamped at zero).
  for (unsigned i = 0; i < 3; ++i) {
    if (Counts.Array[i] <= LastIssued.Array[i])
      WaitOn.Array[i] = LastIssued.Array[i] - Counts.Array[i];
    else
      WaitOn.Array[i] = 0;
  }

  increaseCounters(DelayedWaitOn, WaitOn);
}
| 469 | |
/// \brief Resolve all register dependencies of \p MI into the counter values
/// that must be waited on before it can execute.
Counters SIInsertWaits::handleOperands(MachineInstr &MI) {

  Counters Result = ZeroCounts;

  // For each register affected by this instruction increase the result
  // sequence.
  //
  // TODO: We could probably just look at explicit operands if we removed VCC /
  // EXEC from SMRD dest reg classes.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI.getOperand(i);
    if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()))
      continue;

    const TargetRegisterClass *RC = TII->getOpRegClass(MI, i);
    RegInterval Interval = getRegInterval(RC, Op);
    for (unsigned j = Interval.first; j < Interval.second; ++j) {

      // A def must wait on earlier reads (WAR) and earlier writes (WAW)
      // of the same register slot.
      if (Op.isDef()) {
        increaseCounters(Result, UsedRegs[j]);
        increaseCounters(Result, DefinedRegs[j]);
      }

      // A use must wait on earlier writes (RAW).
      if (Op.isUse())
        increaseCounters(Result, DefinedRegs[j]);
    }
  }

  return Result;
}
| 500 | |
Marek Olsak | 1bd2463 | 2015-02-03 17:37:52 +0000 | [diff] [blame] | 501 | void SIInsertWaits::handleSendMsg(MachineBasicBlock &MBB, |
| 502 | MachineBasicBlock::iterator I) { |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 503 | if (ST->getGeneration() < SISubtarget::VOLCANIC_ISLANDS) |
Marek Olsak | 1bd2463 | 2015-02-03 17:37:52 +0000 | [diff] [blame] | 504 | return; |
| 505 | |
| 506 | // There must be "S_NOP 0" between an instruction writing M0 and S_SENDMSG. |
| 507 | if (LastInstWritesM0 && I->getOpcode() == AMDGPU::S_SENDMSG) { |
| 508 | BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0); |
| 509 | LastInstWritesM0 = false; |
| 510 | return; |
| 511 | } |
| 512 | |
| 513 | // Set whether this instruction sets M0 |
| 514 | LastInstWritesM0 = false; |
| 515 | |
| 516 | unsigned NumOperands = I->getNumOperands(); |
| 517 | for (unsigned i = 0; i < NumOperands; i++) { |
| 518 | const MachineOperand &Op = I->getOperand(i); |
| 519 | |
| 520 | if (Op.isReg() && Op.isDef() && Op.getReg() == AMDGPU::M0) |
| 521 | LastInstWritesM0 = true; |
| 522 | } |
| 523 | } |
| 524 | |
Matt Arsenault | a0050b0 | 2014-06-19 01:19:19 +0000 | [diff] [blame] | 525 | // FIXME: Insert waits listed in Table 4.2 "Required User-Inserted Wait States" |
| 526 | // around other non-memory instructions. |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 527 | bool SIInsertWaits::runOnMachineFunction(MachineFunction &MF) { |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 528 | bool Changes = false; |
| 529 | |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 530 | ST = &MF.getSubtarget<SISubtarget>(); |
| 531 | TII = ST->getInstrInfo(); |
| 532 | TRI = &TII->getRegisterInfo(); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 533 | MRI = &MF.getRegInfo(); |
Konstantin Zhuravlyov | 836cbff | 2016-09-30 17:01:40 +0000 | [diff] [blame] | 534 | IV = getIsaVersion(ST->getFeatureBits()); |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 535 | const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 536 | |
Konstantin Zhuravlyov | cdd4547 | 2016-10-11 18:58:22 +0000 | [diff] [blame] | 537 | HardwareLimits.Named.VM = getVmcntBitMask(IV); |
| 538 | HardwareLimits.Named.EXP = getExpcntBitMask(IV); |
| 539 | HardwareLimits.Named.LGKM = getLgkmcntBitMask(IV); |
| 540 | |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 541 | WaitedOn = ZeroCounts; |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 542 | DelayedWaitOn = ZeroCounts; |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 543 | LastIssued = ZeroCounts; |
Marek Olsak | fa58e5e | 2014-12-07 17:17:43 +0000 | [diff] [blame] | 544 | LastOpcodeType = OTHER; |
Marek Olsak | 1bd2463 | 2015-02-03 17:37:52 +0000 | [diff] [blame] | 545 | LastInstWritesM0 = false; |
Tom Stellard | 6695ba0 | 2016-10-28 23:53:48 +0000 | [diff] [blame] | 546 | IsFlatOutstanding = false; |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 547 | ReturnsVoid = MFI->returnsVoid(); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 548 | |
| 549 | memset(&UsedRegs, 0, sizeof(UsedRegs)); |
| 550 | memset(&DefinedRegs, 0, sizeof(DefinedRegs)); |
| 551 | |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 552 | SmallVector<MachineInstr *, 4> RemoveMI; |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 553 | SmallVector<MachineBasicBlock *, 4> EndPgmBlocks; |
| 554 | |
| 555 | bool HaveScalarStores = false; |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 556 | |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 557 | for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); |
| 558 | BI != BE; ++BI) { |
| 559 | |
| 560 | MachineBasicBlock &MBB = *BI; |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 561 | |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 562 | for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); |
| 563 | I != E; ++I) { |
| 564 | |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 565 | if (!HaveScalarStores && TII->isScalarStore(*I)) |
| 566 | HaveScalarStores = true; |
| 567 | |
Matt Arsenault | 43e92fe | 2016-06-24 06:30:11 +0000 | [diff] [blame] | 568 | if (ST->getGeneration() <= SISubtarget::SEA_ISLANDS) { |
Tom Stellard | 3096176 | 2016-02-08 19:49:20 +0000 | [diff] [blame] | 569 | // There is a hardware bug on CI/SI where SMRD instruction may corrupt |
| 570 | // vccz bit, so when we detect that an instruction may read from a |
| 571 | // corrupt vccz bit, we need to: |
| 572 | // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD operations to |
| 573 | // complete. |
| 574 | // 2. Restore the correct value of vccz by writing the current value |
| 575 | // of vcc back to vcc. |
| 576 | |
| 577 | if (TII->isSMRD(I->getOpcode())) { |
| 578 | VCCZCorrupt = true; |
| 579 | } else if (!hasOutstandingLGKM() && I->modifiesRegister(AMDGPU::VCC, TRI)) { |
| 580 | // FIXME: We only care about SMRD instructions here, not LDS or GDS. |
| 581 | // Whenever we store a value in vcc, the correct value of vccz is |
| 582 | // restored. |
| 583 | VCCZCorrupt = false; |
| 584 | } |
| 585 | |
| 586 | // Check if we need to apply the bug work-around |
Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 587 | if (VCCZCorrupt && readsVCCZ(*I)) { |
Tom Stellard | 3096176 | 2016-02-08 19:49:20 +0000 | [diff] [blame] | 588 | DEBUG(dbgs() << "Inserting vccz bug work-around before: " << *I << '\n'); |
| 589 | |
| 590 | // Wait on everything, not just LGKM. vccz reads usually come from |
| 591 | // terminators, and we always wait on everything at the end of the |
| 592 | // block, so if we only wait on LGKM here, we might end up with |
| 593 | // another s_waitcnt inserted right after this if there are non-LGKM |
| 594 | // instructions still outstanding. |
| 595 | insertWait(MBB, I, LastIssued); |
| 596 | |
| 597 | // Restore the vccz bit. Any time a value is written to vcc, the vcc |
| 598 | // bit is updated, so we can restore the bit by reading the value of |
| 599 | // vcc and then writing it back to the register. |
| 600 | BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), |
| 601 | AMDGPU::VCC) |
Matt Arsenault | 52f14ec | 2016-11-07 19:09:27 +0000 | [diff] [blame] | 602 | .addReg(AMDGPU::VCC); |
Tom Stellard | 3096176 | 2016-02-08 19:49:20 +0000 | [diff] [blame] | 603 | } |
| 604 | } |
| 605 | |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 606 | // Record pre-existing, explicitly requested waits |
| 607 | if (I->getOpcode() == AMDGPU::S_WAITCNT) { |
| 608 | handleExistingWait(*I); |
Duncan P. N. Exon Smith | 4d29511 | 2016-07-08 19:16:05 +0000 | [diff] [blame] | 609 | RemoveMI.push_back(&*I); |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 610 | continue; |
| 611 | } |
Marek Olsak | 1bd2463 | 2015-02-03 17:37:52 +0000 | [diff] [blame] | 612 | |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 613 | Counters Required; |
| 614 | |
| 615 | // Wait for everything before a barrier. |
| 616 | // |
| 617 | // S_SENDMSG implicitly waits for all outstanding LGKM transfers to finish, |
| 618 | // but we also want to wait for any other outstanding transfers before |
| 619 | // signalling other hardware blocks |
Konstantin Zhuravlyov | d7bdf24 | 2016-09-30 16:50:36 +0000 | [diff] [blame] | 620 | if ((I->getOpcode() == AMDGPU::S_BARRIER && |
| 621 | ST->needWaitcntBeforeBarrier()) || |
| 622 | I->getOpcode() == AMDGPU::S_SENDMSG) |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 623 | Required = LastIssued; |
| 624 | else |
| 625 | Required = handleOperands(*I); |
| 626 | |
| 627 | Counters Increment = getHwCounts(*I); |
| 628 | |
| 629 | if (countersNonZero(Required) || countersNonZero(Increment)) |
| 630 | increaseCounters(Required, DelayedWaitOn); |
| 631 | |
| 632 | Changes |= insertWait(MBB, I, Required); |
| 633 | |
| 634 | pushInstruction(MBB, I, Increment); |
Marek Olsak | 1bd2463 | 2015-02-03 17:37:52 +0000 | [diff] [blame] | 635 | handleSendMsg(MBB, I); |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 636 | |
| 637 | if (I->getOpcode() == AMDGPU::S_ENDPGM || |
| 638 | I->getOpcode() == AMDGPU::SI_RETURN) |
| 639 | EndPgmBlocks.push_back(&MBB); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 640 | } |
| 641 | |
| 642 | // Wait for everything at the end of the MBB |
| 643 | Changes |= insertWait(MBB, MBB.getFirstTerminator(), LastIssued); |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 644 | } |
| 645 | |
Marek Olsak | 79c0587 | 2016-11-25 17:37:09 +0000 | [diff] [blame] | 646 | if (HaveScalarStores) { |
| 647 | // If scalar writes are used, the cache must be flushed or else the next |
| 648 | // wave to reuse the same scratch memory can be clobbered. |
| 649 | // |
| 650 | // Insert s_dcache_wb at wave termination points if there were any scalar |
| 651 | // stores, and only if the cache hasn't already been flushed. This could be |
| 652 | // improved by looking across blocks for flushes in postdominating blocks |
| 653 | // from the stores but an explicitly requested flush is probably very rare. |
| 654 | for (MachineBasicBlock *MBB : EndPgmBlocks) { |
| 655 | bool SeenDCacheWB = false; |
| 656 | |
| 657 | for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); |
| 658 | I != E; ++I) { |
| 659 | |
| 660 | if (I->getOpcode() == AMDGPU::S_DCACHE_WB) |
| 661 | SeenDCacheWB = true; |
| 662 | else if (TII->isScalarStore(*I)) |
| 663 | SeenDCacheWB = false; |
| 664 | |
| 665 | // FIXME: It would be better to insert this before a waitcnt if any. |
| 666 | if ((I->getOpcode() == AMDGPU::S_ENDPGM || |
| 667 | I->getOpcode() == AMDGPU::SI_RETURN) && !SeenDCacheWB) { |
| 668 | Changes = true; |
| 669 | BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB)); |
| 670 | } |
| 671 | } |
| 672 | } |
| 673 | } |
| 674 | |
Nicolai Haehnle | f66bdb5 | 2016-04-27 15:46:01 +0000 | [diff] [blame] | 675 | for (MachineInstr *I : RemoveMI) |
| 676 | I->eraseFromParent(); |
| 677 | |
Tom Stellard | c4cabef | 2013-01-18 21:15:53 +0000 | [diff] [blame] | 678 | return Changes; |
| 679 | } |