//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds instructions to enable whole quad mode for pixel
/// shaders, and whole wavefront mode for all programs.
///
/// Whole quad mode is required for derivative computations, but it interferes
/// with shader side effects (stores and atomics). This pass is run on the
/// scheduled machine IR but before register coalescing, so that machine SSA is
/// available for analysis. It ensures that WQM is enabled when necessary, but
/// disabled around stores and atomics.
///
/// When necessary, this pass creates a function prolog
///
///   S_MOV_B64 LiveMask, EXEC
///   S_WQM_B64 EXEC, EXEC
///
/// to enter WQM at the top of the function and surrounds blocks of Exact
/// instructions by
///
///   S_AND_SAVEEXEC_B64 Tmp, LiveMask
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// We also compute when a sequence of instructions requires Whole Wavefront
/// Mode (WWM) and insert instructions to save and restore it:
///
///   S_OR_SAVEEXEC_B64 Tmp, -1
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// In order to avoid excessive switching during sequences of Exact
/// instructions, the pass first analyzes which instructions must be run in WQM
/// (aka which instructions produce values that lead to derivative
/// computations).
///
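/// For example, in a typical pixel shader the values that feed an image sample
/// (which implicitly computes derivatives from neighboring quad lanes) must be
/// computed in WQM, while the final store must run in Exact mode so that
/// helper lanes do not write to memory; schematically (illustrative pseudo
/// code, not the exact machine opcodes):
///
///   %coord = ...                  ; marked WQM: feeds a derivative
///   %color = IMAGE_SAMPLE %coord  ; sampling requires WQM inputs
///   BUFFER_STORE %color           ; a shader side effect: must be Exact
///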
/// Basic blocks are always exited in WQM as long as some successor needs WQM.
///
/// There is room for improvement given better control flow analysis:
///
///  (1) at the top level (outside of control flow statements, and as long as
///      kill hasn't been used), one SGPR can be saved by recovering WQM from
///      the LiveMask (this is implemented for the entry block).
///
///  (2) when entire regions (e.g. if-else blocks or entire loops) only
///      consist of exact and don't-care instructions, the switch only has to
///      be done at the entry and exit points rather than potentially in each
///      block of the region.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-wqm"

namespace {

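// Execution states used throughout the analysis. These are bitmask flags: the
// Needs/Disabled/InNeeds/OutNeeds fields below hold combinations of them,
// e.g. StateWQM | StateExact means that either of the two states is
// acceptable.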
enum {
  StateWQM = 0x1,
  StateWWM = 0x2,
  StateExact = 0x4,
};

struct PrintState {
public:
  int State;

  explicit PrintState(int State) : State(State) {}
};

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
  if (PS.State & StateWQM)
    OS << "WQM";
  if (PS.State & StateWWM) {
    if (PS.State & StateWQM)
      OS << '|';
    OS << "WWM";
  }
  if (PS.State & StateExact) {
    if (PS.State & (StateWQM | StateWWM))
      OS << '|';
    OS << "Exact";
  }

  return OS;
}
#endif

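// Per-instruction analysis results: Needs holds the states this instruction
// requires, Disabled holds states that must not be active when it executes,
// and OutNeeds holds states required after it executes (propagated backwards
// from later instructions and successor blocks).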
struct InstrInfo {
  char Needs = 0;
  char Disabled = 0;
  char OutNeeds = 0;
};

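// Per-block analysis results: Needs is the union of states required inside
// the block, InNeeds holds states that must be available on entry (what
// predecessors must provide), and OutNeeds holds states that must be
// available on exit because some successor needs them.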
struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
};

struct WorkItem {
  MachineBasicBlock *MBB = nullptr;
  MachineInstr *MI = nullptr;

  WorkItem() = default;
  WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(MachineInstr *MI) : MI(MI) {}
};

class SIWholeQuadMode : public MachineFunctionPass {
private:
  CallingConv::ID CallingConv;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  DenseMap<MachineBasicBlock *, BlockInfo> Blocks;
  SmallVector<MachineInstr *, 1> LiveMaskQueries;
  SmallVector<MachineInstr *, 4> LowerToCopyInstrs;

  void printInfo();

  void markInstruction(MachineInstr &MI, char Flag,
                       std::vector<WorkItem> &Worklist);
  void markInstructionUses(const MachineInstr &MI, char Flag,
                           std::vector<WorkItem> &Worklist);
  char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
  void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
  void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
  char analyzeFunction(MachineFunction &MF);

  bool requiresCorrectState(const MachineInstr &MI) const;

  MachineBasicBlock::iterator saveSCC(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator Before);
  MachineBasicBlock::iterator
  prepareInsertion(MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
                   MachineBasicBlock::iterator Last, bool PreferLast,
                   bool SaveSCC);
  void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SaveWQM, unsigned LiveMaskReg);
  void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SavedWQM);
  void toWWM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SaveOrig);
  void fromWWM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SavedOrig);
  void processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg, bool isEntry);

  void lowerLiveMaskQueries(unsigned LiveMaskReg);
  void lowerCopyInstrs();

public:
  static char ID;

  SIWholeQuadMode() :
    MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Whole Quad Mode"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIWholeQuadMode::ID = 0;

INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                    false)

char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;

FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}

#ifndef NDEBUG
LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
  for (const auto &BII : Blocks) {
    dbgs() << "\n"
           << printMBBReference(*BII.first) << ":\n"
           << "  InNeeds = " << PrintState(BII.second.InNeeds)
           << ", Needs = " << PrintState(BII.second.Needs)
           << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";

    for (const MachineInstr &MI : *BII.first) {
      auto III = Instructions.find(&MI);
      if (III == Instructions.end())
        continue;

      dbgs() << "  " << MI << "    Needs = " << PrintState(III->second.Needs)
             << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
    }
  }
}
#endif

void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
                                      std::vector<WorkItem> &Worklist) {
  InstrInfo &II = Instructions[&MI];

  assert(!(Flag & StateExact) && Flag != 0);

  // Remove any disabled states from the flag. The user that required it gets
  // an undefined value in the helper lanes. For example, this can happen if
  // the result of an atomic is used by an instruction that requires WQM, where
  // ignoring the request for WQM is correct as per the relevant specs.
  Flag &= ~II.Disabled;

  // Ignore if the flag is already encompassed by the existing needs, or we
  // just disabled everything.
  if ((II.Needs & Flag) == Flag)
    return;

  II.Needs |= Flag;
  Worklist.push_back(&MI);
}

/// Mark all instructions defining the uses in \p MI with \p Flag.
void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
                                          std::vector<WorkItem> &Worklist) {
  for (const MachineOperand &Use : MI.uses()) {
    if (!Use.isReg() || !Use.isUse())
      continue;

    unsigned Reg = Use.getReg();

    // Handle physical registers that we need to track; this is mostly relevant
    // for VCC, which can appear as the (implicit) input of a uniform branch,
    // e.g. when a loop counter is stored in a VGPR.
    if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (Reg == AMDGPU::EXEC)
        continue;

      for (MCRegUnitIterator RegUnit(Reg, TRI); RegUnit.isValid(); ++RegUnit) {
        LiveRange &LR = LIS->getRegUnit(*RegUnit);
        const VNInfo *Value = LR.Query(LIS->getInstructionIndex(MI)).valueIn();
        if (!Value)
          continue;

        // Since we're in machine SSA, we do not need to track physical
        // registers across basic blocks.
        if (Value->isPHIDef())
          continue;

        markInstruction(*LIS->getInstructionFromIndex(Value->def), Flag,
                        Worklist);
      }

      continue;
    }

    for (MachineInstr &DefMI : MRI->def_instructions(Use.getReg()))
      markInstruction(DefMI, Flag, Worklist);
  }
}

// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
  SmallVector<MachineInstr *, 4> SetInactiveInstrs;

  // We need to visit the basic blocks in reverse post-order so that we visit
  // defs before uses, in particular so that we don't accidentally mark an
  // instruction as needing e.g. WQM before visiting it and realizing it needs
  // WQM disabled.
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (auto BI = RPOT.begin(), BE = RPOT.end(); BI != BE; ++BI) {
    MachineBasicBlock &MBB = **BI;
    BlockInfo &BBI = Blocks[&MBB];

    for (auto II = MBB.begin(), IE = MBB.end(); II != IE; ++II) {
      MachineInstr &MI = *II;
      InstrInfo &III = Instructions[&MI];
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isWQM(Opcode)) {
        // Sampling instructions don't need to produce results for all pixels
        // in a quad, they just require all inputs of a quad to have been
        // computed for derivatives.
        markInstructionUses(MI, StateWQM, Worklist);
        GlobalFlags |= StateWQM;
        continue;
      } else if (Opcode == AMDGPU::WQM) {
        // The WQM intrinsic requires its output to have all the helper lanes
        // correct, so we need it to be in WQM.
        Flags = StateWQM;
        LowerToCopyInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::WWM) {
        // The WWM intrinsic doesn't make the same guarantee, and it also needs
        // to be executed in WQM or Exact so that its copy doesn't clobber
        // inactive lanes.
        markInstructionUses(MI, StateWWM, Worklist);
        GlobalFlags |= StateWWM;
        LowerToCopyInstrs.push_back(&MI);
        continue;
      } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 ||
                 Opcode == AMDGPU::V_SET_INACTIVE_B64) {
        III.Disabled = StateWWM;
        MachineOperand &Inactive = MI.getOperand(2);
        if (Inactive.isReg()) {
          if (Inactive.isUndef()) {
            LowerToCopyInstrs.push_back(&MI);
          } else {
            unsigned Reg = Inactive.getReg();
            if (TargetRegisterInfo::isVirtualRegister(Reg)) {
              for (MachineInstr &DefMI : MRI->def_instructions(Reg))
                markInstruction(DefMI, StateWWM, Worklist);
            }
          }
        }
        SetInactiveInstrs.push_back(&MI);
        continue;
      } else if (TII->isDisableWQM(MI)) {
        BBI.Needs |= StateExact;
        if (!(BBI.InNeeds & StateExact)) {
          BBI.InNeeds |= StateExact;
          Worklist.push_back(&MBB);
        }
        GlobalFlags |= StateExact;
        III.Disabled = StateWQM | StateWWM;
        continue;
      } else {
        if (Opcode == AMDGPU::SI_PS_LIVE) {
          LiveMaskQueries.push_back(&MI);
        } else if (WQMOutputs) {
          // The function is in machine SSA form, which means that physical
          // VGPRs correspond to shader inputs and outputs. Inputs are
          // only used, outputs are only defined.
          for (const MachineOperand &MO : MI.defs()) {
            if (!MO.isReg())
              continue;

            unsigned Reg = MO.getReg();

            if (!TRI->isVirtualRegister(Reg) &&
                TRI->hasVGPRs(TRI->getPhysRegClass(Reg))) {
              Flags = StateWQM;
              break;
            }
          }
        }

        if (!Flags)
          continue;
      }

      markInstruction(MI, Flags, Worklist);
      GlobalFlags |= Flags;
    }
  }

  // Make sure that any SET_INACTIVE instructions are computed in WQM if WQM is
  // ever used anywhere in the function. This implements the corresponding
  // semantics of @llvm.amdgcn.set.inactive.
  if (GlobalFlags & StateWQM) {
    for (MachineInstr *MI : SetInactiveInstrs)
      markInstruction(*MI, StateWQM, Worklist);
  }

  return GlobalFlags;
}

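// Propagate the requirements of a single instruction: mark terminators and
// stores to temporary memory that are followed by WQM computations as WQM
// themselves, fold the instruction's needs into its block, push OutNeeds
// backwards to the previous instruction, and mark the instructions that
// define its inputs.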
void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
                                           std::vector<WorkItem>& Worklist) {
  MachineBasicBlock *MBB = MI.getParent();
  InstrInfo II = Instructions[&MI]; // take a copy to prevent dangling references
  BlockInfo &BI = Blocks[MBB];

  // Control flow-type instructions and stores to temporary memory that are
  // followed by WQM computations must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Disabled & StateWQM) &&
      (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level
  if (II.Needs & StateWQM) {
    BI.Needs |= StateWQM;
    if (!(BI.InNeeds & StateWQM)) {
      BI.InNeeds |= StateWQM;
      Worklist.push_back(MBB);
    }
  }

  // Propagate backwards within block
  if (MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = (II.Needs & ~StateWWM) | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.push_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs
  assert(!(II.Needs & StateExact));

  if (II.Needs != 0)
    markInstructionUses(MI, II.Needs, Worklist);

  // Ensure we process a block containing WWM, even if it does not require any
  // WQM transitions.
  if (II.Needs & StateWWM)
    BI.Needs |= StateWWM;
}

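// Propagate a block's requirements: to the last instruction in the block, to
// the predecessors (which must provide the state we need on entry), and to
// the successors (which must be prepared to accept the state we exit in).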
void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
                                     std::vector<WorkItem>& Worklist) {
  BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.

  // Propagate through instructions
  if (!MBB.empty()) {
    MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.push_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.push_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.push_back(Succ);
  }
}

char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}

/// Whether \p MI really requires the exec state computed during analysis.
///
/// Scalar instructions must occasionally be marked WQM for correct propagation
/// (e.g. thread masks leading up to branches), but when it comes to actual
/// execution, they don't care about EXEC.
bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const {
  if (MI.isTerminator())
    return true;

  // Skip instructions that are not affected by EXEC
  if (TII->isScalarUnit(MI))
    return false;

  // Generic instructions such as COPY will either disappear by register
  // coalescing or be lowered to SALU or VALU instructions.
  if (MI.isTransient()) {
    if (MI.getNumExplicitOperands() >= 1) {
      const MachineOperand &Op = MI.getOperand(0);
      if (Op.isReg()) {
        if (TRI->isSGPRReg(*MRI, Op.getReg())) {
          // SGPR instructions are not affected by EXEC
          return false;
        }
      }
    }
  }

  return true;
}

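// Insert a pair of copies that save and restore SCC immediately before
// \p Before. The restore copy is returned so that callers can insert
// SCC-clobbering instructions between the save and the restore.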
MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Before) {
  unsigned SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr *Save =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
          .addReg(AMDGPU::SCC);
  MachineInstr *Restore =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
          .addReg(SaveReg);

  LIS->InsertMachineInstrInMaps(*Save);
  LIS->InsertMachineInstrInMaps(*Restore);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  return Restore;
}

// Return an iterator in the (inclusive) range [First, Last] at which
// instructions can be safely inserted, keeping in mind that some of the
// instructions we want to add necessarily clobber SCC.
MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
  if (!SaveSCC)
    return PreferLast ? Last : First;

  LiveRange &LR = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));
  auto MBBE = MBB.end();
  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
                                     : LIS->getMBBEndIdx(&MBB);
  SlotIndex LastIdx =
      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
  const LiveRange::Segment *S;

  for (;;) {
    S = LR.getSegmentContaining(Idx);
    if (!S)
      break;

    if (PreferLast) {
      SlotIndex Next = S->start.getBaseIndex();
      if (Next < FirstIdx)
        break;
      Idx = Next;
    } else {
      SlotIndex Next = S->end.getNextIndex().getBaseIndex();
      if (Next > LastIdx)
        break;
      Idx = Next;
    }
  }

  MachineBasicBlock::iterator MBBI;

  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
    MBBI = MI;
  else {
    assert(Idx == LIS->getMBBEndIdx(&MBB));
    MBBI = MBB.end();
  }

  if (S)
    MBBI = saveSCC(MBB, MBBI);

  return MBBI;
}

void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SaveWQM, unsigned LiveMaskReg) {
  MachineInstr *MI;

  if (SaveWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
                 SaveWQM)
             .addReg(LiveMaskReg);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64),
                 AMDGPU::EXEC)
             .addReg(AMDGPU::EXEC)
             .addReg(LiveMaskReg);
  }

  LIS->InsertMachineInstrInMaps(*MI);
}

void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SavedWQM) {
  MachineInstr *MI;

  if (SavedWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC)
             .addReg(SavedWQM);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
                 AMDGPU::EXEC)
             .addReg(AMDGPU::EXEC);
  }

  LIS->InsertMachineInstrInMaps(*MI);
}

void SIWholeQuadMode::toWWM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SaveOrig) {
  MachineInstr *MI;

  assert(SaveOrig);
  MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_OR_SAVEEXEC_B64),
               SaveOrig)
           .addImm(-1);
  LIS->InsertMachineInstrInMaps(*MI);
}

void SIWholeQuadMode::fromWWM(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SavedOrig) {
  MachineInstr *MI;

  assert(SavedOrig);
  MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_WWM), AMDGPU::EXEC)
           .addReg(SavedOrig);
  LIS->InsertMachineInstrInMaps(*MI);
}

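// Rewrite a block: insert the EXEC manipulation instructions needed to switch
// between Exact, WQM and WWM as dictated by the per-instruction needs
// computed during analysis.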
void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
                                   bool isEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!isEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact)
    return;

  LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
                    << ":\n");

  unsigned SavedWQMReg = 0;
  unsigned SavedNonWWMReg = 0;
  bool WQMFromExec = isEntry;
  char State = (isEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
  char NonWWMState = 0;

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  if (isEntry)
    ++II; // Skip the instruction that saves LiveMask

  // This stores the first instruction where it's safe to switch from WQM to
  // Exact or vice versa.
  MachineBasicBlock::iterator FirstWQM = IE;

  // This stores the first instruction where it's safe to switch from WWM to
  // Exact/WQM or to switch to WWM. It must always be the same as, or after,
  // FirstWQM since if it's safe to switch to/from WWM, it must be safe to
  // switch to/from WQM as well.
  MachineBasicBlock::iterator FirstWWM = IE;
  for (;;) {
    MachineBasicBlock::iterator Next = II;
    char Needs = StateExact | StateWQM; // WWM is disabled by default
    char OutNeeds = 0;

    if (FirstWQM == IE)
      FirstWQM = II;

    if (FirstWWM == IE)
      FirstWWM = II;

    // First, figure out the allowed states (Needs) based on the propagated
    // flags.
    if (II != IE) {
      MachineInstr &MI = *II;

      if (requiresCorrectState(MI)) {
        auto III = Instructions.find(&MI);
        if (III != Instructions.end()) {
          if (III->second.Needs & StateWWM)
            Needs = StateWWM;
          else if (III->second.Needs & StateWQM)
            Needs = StateWQM;
          else
            Needs &= ~III->second.Disabled;
          OutNeeds = III->second.OutNeeds;
        }
      } else {
        // If the instruction doesn't actually need a correct EXEC, then we can
        // safely leave WWM enabled.
        Needs = StateExact | StateWQM | StateWWM;
      }

      if (MI.isTerminator() && OutNeeds == StateExact)
        Needs = StateExact;

      if (MI.getOpcode() == AMDGPU::SI_ELSE && BI.OutNeeds == StateExact)
        MI.getOperand(3).setImm(1);

      ++Next;
    } else {
      // End of basic block
      if (BI.OutNeeds & StateWQM)
        Needs = StateWQM;
      else if (BI.OutNeeds == StateExact)
        Needs = StateExact;
      else
        Needs = StateWQM | StateExact;
    }

    // Now, transition if necessary.
    if (!(Needs & State)) {
      MachineBasicBlock::iterator First;
      if (State == StateWWM || Needs == StateWWM) {
        // We must switch to or from WWM
        First = FirstWWM;
      } else {
        // We only need to switch to/from WQM, so we can use FirstWQM
        First = FirstWQM;
      }

      MachineBasicBlock::iterator Before =
          prepareInsertion(MBB, First, II, Needs == StateWQM,
                           Needs == StateExact || WQMFromExec);

      if (State == StateWWM) {
        assert(SavedNonWWMReg);
        fromWWM(MBB, Before, SavedNonWWMReg);
        State = NonWWMState;
      }

      if (Needs == StateWWM) {
        NonWWMState = State;
        SavedNonWWMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
        toWWM(MBB, Before, SavedNonWWMReg);
        State = StateWWM;
      } else {
        if (State == StateWQM && (Needs & StateExact) && !(Needs & StateWQM)) {
          if (!WQMFromExec && (OutNeeds & StateWQM))
            SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

          toExact(MBB, Before, SavedWQMReg, LiveMaskReg);
          State = StateExact;
        } else if (State == StateExact && (Needs & StateWQM) &&
                   !(Needs & StateExact)) {
          assert(WQMFromExec == (SavedWQMReg == 0));

          toWQM(MBB, Before, SavedWQMReg);

          if (SavedWQMReg) {
            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
            SavedWQMReg = 0;
          }
          State = StateWQM;
        } else {
          // We can get here if we transitioned from WWM to a non-WWM state that
          // already matches our needs, but we shouldn't need to do anything.
          assert(Needs & State);
        }
      }
    }

    if (Needs != (StateExact | StateWQM | StateWWM)) {
      if (Needs != (StateExact | StateWQM))
        FirstWQM = IE;
      FirstWWM = IE;
    }

    if (II == IE)
      break;
    II = Next;
  }
}

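// Replace the SI_PS_LIVE pseudo instructions collected during scanning with
// copies of the live mask register.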
void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) {
  for (MachineInstr *MI : LiveMaskQueries) {
    const DebugLoc &DL = MI->getDebugLoc();
    unsigned Dest = MI->getOperand(0).getReg();
    MachineInstr *Copy =
        BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
            .addReg(LiveMaskReg);

    LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
    MI->eraseFromParent();
  }
}

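// Rewrite the WQM/WWM/V_SET_INACTIVE pseudo instructions collected during
// scanning into plain COPYs of their first source operand, dropping any
// remaining operands.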
void SIWholeQuadMode::lowerCopyInstrs() {
  for (MachineInstr *MI : LowerToCopyInstrs) {
    for (unsigned i = MI->getNumExplicitOperands() - 1; i > 1; i--)
      MI->RemoveOperand(i);
    MI->setDesc(TII->get(AMDGPU::COPY));
  }
}

bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  Instructions.clear();
  Blocks.clear();
  LiveMaskQueries.clear();
  LowerToCopyInstrs.clear();
  CallingConv = MF.getFunction().getCallingConv();

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervals>();

  char GlobalFlags = analyzeFunction(MF);
  unsigned LiveMaskReg = 0;
  if (!(GlobalFlags & StateWQM)) {
    lowerLiveMaskQueries(AMDGPU::EXEC);
    if (!(GlobalFlags & StateWWM))
      return !LiveMaskQueries.empty();
  } else {
    // Store a copy of the original live mask when required
    MachineBasicBlock &Entry = MF.front();
    MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();

    if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) {
      LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
      MachineInstr *MI = BuildMI(Entry, EntryMI, DebugLoc(),
                                 TII->get(AMDGPU::COPY), LiveMaskReg)
                             .addReg(AMDGPU::EXEC);
      LIS->InsertMachineInstrInMaps(*MI);
    }

    lowerLiveMaskQueries(LiveMaskReg);

    if (GlobalFlags == StateWQM) {
      // For a shader that needs only WQM, we can just set it once.
      BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
              AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC);

      lowerCopyInstrs();
      // EntryMI may become invalid here
      return true;
    }
  }

  LLVM_DEBUG(printInfo());

  lowerCopyInstrs();

  // Handle the general case
  for (auto BII : Blocks)
    processBlock(*BII.first, LiveMaskReg, BII.first == &*MF.begin());

  // Physical registers like SCC aren't tracked by default anyway, so just
  // removing the ranges we computed is the simplest option for maintaining
  // the analysis results.
  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));

  return true;
}