blob: 62e1b7e84c46f55ea29750f98e000b33828d2865 [file] [log] [blame]
Nicolai Haehnle213e87f2016-03-21 20:28:33 +00001//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode -----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief This pass adds instructions to enable whole quad mode for pixel
12/// shaders.
13///
14/// Whole quad mode is required for derivative computations, but it interferes
15/// with shader side effects (stores and atomics). This pass is run on the
16/// scheduled machine IR but before register coalescing, so that machine SSA is
17/// available for analysis. It ensures that WQM is enabled when necessary, but
18/// disabled around stores and atomics.
19///
20/// When necessary, this pass creates a function prolog
21///
22/// S_MOV_B64 LiveMask, EXEC
23/// S_WQM_B64 EXEC, EXEC
24///
25/// to enter WQM at the top of the function and surrounds blocks of Exact
26/// instructions by
27///
28/// S_AND_SAVEEXEC_B64 Tmp, LiveMask
29/// ...
30/// S_MOV_B64 EXEC, Tmp
31///
32/// In order to avoid excessive switching during sequences of Exact
33/// instructions, the pass first analyzes which instructions must be run in WQM
34/// (aka which instructions produce values that lead to derivative
35/// computations).
36///
37/// Basic blocks are always exited in WQM as long as some successor needs WQM.
38///
39/// There is room for improvement given better control flow analysis:
40///
41/// (1) at the top level (outside of control flow statements, and as long as
42/// kill hasn't been used), one SGPR can be saved by recovering WQM from
43/// the LiveMask (this is implemented for the entry block).
44///
45/// (2) when entire regions (e.g. if-else blocks or entire loops) only
46/// consist of exact and don't-care instructions, the switch only has to
47/// be done at the entry and exit points rather than potentially in each
48/// block of the region.
49///
50//===----------------------------------------------------------------------===//
51
52#include "AMDGPU.h"
53#include "AMDGPUSubtarget.h"
54#include "SIInstrInfo.h"
55#include "SIMachineFunctionInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000056#include "llvm/ADT/DenseMap.h"
57#include "llvm/ADT/SmallVector.h"
58#include "llvm/ADT/StringRef.h"
59#include "llvm/CodeGen/LiveInterval.h"
60#include "llvm/CodeGen/LiveIntervalAnalysis.h"
61#include "llvm/CodeGen/MachineBasicBlock.h"
Nicolai Haehnle213e87f2016-03-21 20:28:33 +000062#include "llvm/CodeGen/MachineFunction.h"
63#include "llvm/CodeGen/MachineFunctionPass.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000064#include "llvm/CodeGen/MachineInstr.h"
Nicolai Haehnle213e87f2016-03-21 20:28:33 +000065#include "llvm/CodeGen/MachineInstrBuilder.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000066#include "llvm/CodeGen/MachineOperand.h"
Nicolai Haehnle213e87f2016-03-21 20:28:33 +000067#include "llvm/CodeGen/MachineRegisterInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000068#include "llvm/CodeGen/SlotIndexes.h"
69#include "llvm/IR/CallingConv.h"
70#include "llvm/IR/DebugLoc.h"
71#include "llvm/MC/MCRegisterInfo.h"
72#include "llvm/Pass.h"
73#include "llvm/Support/Debug.h"
74#include "llvm/Support/raw_ostream.h"
75#include "llvm/Target/TargetRegisterInfo.h"
76#include <cassert>
77#include <vector>
Nicolai Haehnle213e87f2016-03-21 20:28:33 +000078
79using namespace llvm;
80
81#define DEBUG_TYPE "si-wqm"
82
83namespace {
84
// Execution-state flags tracked per instruction and per block. These are
// bitmask values: the propagation fields (Needs/InNeeds/OutNeeds) may hold
// the OR of both states.
enum {
  StateWQM = 0x1,   // whole quad mode (helper lanes enabled)
  StateExact = 0x2, // exact mode (EXEC == live mask)
};
89
/// Small wrapper that lets a WQM/Exact state bitmask be streamed to a
/// raw_ostream in symbolic form via the operator<< below.
struct PrintState {
  int State;

  explicit PrintState(int S) : State(S) {}
};
96
97static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
98 if (PS.State & StateWQM)
99 OS << "WQM";
100 if (PS.State & StateExact) {
101 if (PS.State & StateWQM)
102 OS << '|';
103 OS << "Exact";
104 }
105
106 return OS;
107}
108
// Analysis state for a single instruction.
struct InstrInfo {
  char Needs = 0;    // State (StateWQM/StateExact) this instruction requires.
  char OutNeeds = 0; // States required immediately after this instruction.
};
113
// Analysis state for a basic block.
struct BlockInfo {
  char Needs = 0;    // Union of states needed by instructions in the block.
  char InNeeds = 0;  // States that must be available at block entry.
  char OutNeeds = 0; // States that must be available at block exit.
};
119
// Worklist entry for the fixed-point propagation in analyzeFunction: either a
// basic block or an instruction whose state changed and must be re-visited.
// Exactly one of the two pointers is non-null for a queued item.
struct WorkItem {
  MachineBasicBlock *MBB = nullptr;
  MachineInstr *MI = nullptr;

  WorkItem() = default;
  WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(MachineInstr *MI) : MI(MI) {}
};
128
class SIWholeQuadMode : public MachineFunctionPass {
private:
  // Cached target/function info, set up in runOnMachineFunction.
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  // Per-instruction and per-block analysis results.
  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  DenseMap<MachineBasicBlock *, BlockInfo> Blocks;
  // SI_PS_LIVE pseudos to be rewritten into copies of the live mask.
  SmallVector<MachineInstr *, 1> LiveMaskQueries;
  // WQM pseudos to be demoted to plain COPYs once analysis is done.
  SmallVector<MachineInstr *, 4> LowerToCopyInstrs;

  // Dump the analysis results (debug builds only).
  void printInfo();

  // Record that \p MI needs state \p Flag and queue it for propagation.
  void markInstruction(MachineInstr &MI, char Flag,
                       std::vector<WorkItem> &Worklist);
  // Mark all instructions defining the uses of \p MI as WQM.
  void markUsesWQM(const MachineInstr &MI, std::vector<WorkItem> &Worklist);
  // Seed the worklist from instruction opcodes; returns the global state mask.
  char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
  void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
  void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
  // Run the full analysis; returns the union of states needed anywhere.
  char analyzeFunction(MachineFunction &MF);

  // Whether \p MI actually cares about the EXEC state at execution time.
  bool requiresCorrectState(const MachineInstr &MI) const;

  // Insert a save/restore pair for SCC around the insertion point.
  MachineBasicBlock::iterator saveSCC(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator Before);
  // Find a point in [First, Last] where EXEC-manipulating code (which
  // clobbers SCC) can be inserted safely.
  MachineBasicBlock::iterator
  prepareInsertion(MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
                   MachineBasicBlock::iterator Last, bool PreferLast,
                   bool SaveSCC);
  // Emit the EXEC manipulation switching to Exact / back to WQM.
  void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SaveWQM, unsigned LiveMaskReg);
  void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SavedWQM);
  // Insert all state transitions required within one basic block.
  void processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg, bool isEntry);

  // Rewrite SI_PS_LIVE pseudos into copies of \p LiveMaskReg.
  void lowerLiveMaskQueries(unsigned LiveMaskReg);
  // Demote recorded WQM pseudos to plain COPYs.
  void lowerCopyInstrs();

public:
  static char ID;

  SIWholeQuadMode() :
    MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Whole Quad Mode"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // LiveIntervals is required and kept up to date as instructions are
    // inserted/replaced; the CFG itself is never modified.
    AU.addRequired<LiveIntervals>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
184
Eugene Zelenko2bc2f332016-12-09 22:06:55 +0000185} // end anonymous namespace
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000186
// Pass identification; the address of ID serves as the pass's unique token.
char SIWholeQuadMode::ID = 0;

// Register the pass and declare its dependency on LiveIntervals, which it
// both requires and keeps up to date.
INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                    false)

// Exposed so the target pass pipeline can refer to this pass by ID.
char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;

// Factory used by the AMDGPU target to instantiate the pass.
FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}
200
/// Dump the per-block and per-instruction analysis results to the debug
/// stream. Instructions without an entry in Instructions are skipped.
void SIWholeQuadMode::printInfo() {
  for (const auto &BII : Blocks) {
    dbgs() << "\nBB#" << BII.first->getNumber() << ":\n"
           << " InNeeds = " << PrintState(BII.second.InNeeds)
           << ", Needs = " << PrintState(BII.second.Needs)
           << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";

    for (const MachineInstr &MI : *BII.first) {
      auto III = Instructions.find(&MI);
      if (III == Instructions.end())
        continue;

      dbgs() << " " << MI << " Needs = " << PrintState(III->second.Needs)
             << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
    }
  }
}
218
/// Record that \p MI must execute in state \p Flag and queue it so the
/// requirement is propagated to its neighbors and inputs.
void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
                                      std::vector<WorkItem> &Worklist) {
  InstrInfo &II = Instructions[&MI];

  // Only a single, definite state may be requested here; combined masks are
  // never valid for Needs.
  assert(Flag == StateWQM || Flag == StateExact);

  // Ignore if the instruction is already marked. The typical case is that we
  // mark an instruction WQM multiple times, but for atomics it can happen that
  // Flag is StateWQM, but Needs is already set to StateExact. In this case,
  // letting the atomic run in StateExact is correct as per the relevant specs.
  if (II.Needs)
    return;

  II.Needs = Flag;
  Worklist.push_back(&MI);
}
235
Nicolai Haehnle3bba6a82016-09-03 12:26:38 +0000236/// Mark all instructions defining the uses in \p MI as WQM.
237void SIWholeQuadMode::markUsesWQM(const MachineInstr &MI,
238 std::vector<WorkItem> &Worklist) {
239 for (const MachineOperand &Use : MI.uses()) {
240 if (!Use.isReg() || !Use.isUse())
241 continue;
242
243 unsigned Reg = Use.getReg();
244
245 // Handle physical registers that we need to track; this is mostly relevant
246 // for VCC, which can appear as the (implicit) input of a uniform branch,
247 // e.g. when a loop counter is stored in a VGPR.
248 if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
249 if (Reg == AMDGPU::EXEC)
250 continue;
251
252 for (MCRegUnitIterator RegUnit(Reg, TRI); RegUnit.isValid(); ++RegUnit) {
253 LiveRange &LR = LIS->getRegUnit(*RegUnit);
254 const VNInfo *Value = LR.Query(LIS->getInstructionIndex(MI)).valueIn();
255 if (!Value)
256 continue;
257
258 // Since we're in machine SSA, we do not need to track physical
259 // registers across basic blocks.
260 if (Value->isPHIDef())
261 continue;
262
263 markInstruction(*LIS->getInstructionFromIndex(Value->def), StateWQM,
264 Worklist);
265 }
266
267 continue;
268 }
269
270 for (MachineInstr &DefMI : MRI->def_instructions(Use.getReg()))
271 markInstruction(DefMI, StateWQM, Worklist);
272 }
273}
274
// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
//
// Returns the union of all states seeded anywhere in the function; also
// populates LiveMaskQueries (SI_PS_LIVE) and LowerToCopyInstrs (WQM pseudos)
// as side effects.
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction()->hasFnAttribute("amdgpu-ps-wqm-outputs");

  for (auto BI = MF.begin(), BE = MF.end(); BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;

    for (auto II = MBB.begin(), IE = MBB.end(); II != IE; ++II) {
      MachineInstr &MI = *II;
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isDS(Opcode)) {
        // LDS accesses are marked WQM themselves.
        Flags = StateWQM;
      } else if (TII->isWQM(Opcode)) {
        // Sampling instructions don't need to produce results for all pixels
        // in a quad, they just require all inputs of a quad to have been
        // computed for derivatives.
        markUsesWQM(MI, Worklist);
        GlobalFlags |= StateWQM;
        continue;
      } else if (Opcode == AMDGPU::WQM) {
        // The WQM intrinsic requires its output to have all the helper lanes
        // correct, so we need it to be in WQM.
        Flags = StateWQM;
        LowerToCopyInstrs.push_back(&MI);
      } else if (TII->isDisableWQM(MI)) {
        Flags = StateExact;
      } else {
        if (Opcode == AMDGPU::SI_PS_LIVE) {
          // Live-mask queries are lowered later; they impose no state here.
          LiveMaskQueries.push_back(&MI);
        } else if (WQMOutputs) {
          // The function is in machine SSA form, which means that physical
          // VGPRs correspond to shader inputs and outputs. Inputs are
          // only used, outputs are only defined.
          for (const MachineOperand &MO : MI.defs()) {
            if (!MO.isReg())
              continue;

            unsigned Reg = MO.getReg();

            if (!TRI->isVirtualRegister(Reg) &&
                TRI->hasVGPRs(TRI->getPhysRegClass(Reg))) {
              Flags = StateWQM;
              break;
            }
          }
        }

        if (!Flags)
          continue;
      }

      markInstruction(MI, Flags, Worklist);
      GlobalFlags |= Flags;
    }
  }

  return GlobalFlags;
}
338
/// Propagate the state requirements of a single instruction to its enclosing
/// block, its in-block predecessor, and (for WQM) the instructions defining
/// its inputs.
void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
                                           std::vector<WorkItem>& Worklist) {
  MachineBasicBlock *MBB = MI.getParent();
  InstrInfo II = Instructions[&MI]; // take a copy to prevent dangling references
  BlockInfo &BI = Blocks[MBB];

  // Control flow-type instructions and stores to temporary memory that are
  // followed by WQM computations must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !II.Needs &&
      (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
    // Update both the map entry and the local copy so the logic below sees
    // the new state.
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level
  BI.Needs |= II.Needs;
  if ((BI.InNeeds | II.Needs) != BI.InNeeds) {
    BI.InNeeds |= II.Needs;
    Worklist.push_back(MBB);
  }

  // Propagate backwards within block
  if (MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = II.Needs | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      // Only re-queue the predecessor if its OutNeeds actually changed.
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.push_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs
  assert(II.Needs != (StateWQM | StateExact));

  if (II.Needs == StateWQM)
    markUsesWQM(MI, Worklist);
}
378
/// Propagate a block's state requirements to its last instruction, its
/// predecessors (which must provide InNeeds) and its successors (which must
/// accept OutNeeds).
void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
                                     std::vector<WorkItem>& Worklist) {
  BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.

  // Propagate through instructions
  if (!MBB.empty()) {
    MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.push_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.push_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.push_back(Succ);
  }
}
414
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000415char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000416 std::vector<WorkItem> Worklist;
417 char GlobalFlags = scanInstructions(MF, Worklist);
418
419 while (!Worklist.empty()) {
420 WorkItem WI = Worklist.back();
421 Worklist.pop_back();
422
423 if (WI.MI)
424 propagateInstruction(*WI.MI, Worklist);
425 else
426 propagateBlock(*WI.MBB, Worklist);
427 }
428
429 return GlobalFlags;
430}
431
Nicolai Haehnlee58e0e32016-09-12 16:25:20 +0000432/// Whether \p MI really requires the exec state computed during analysis.
433///
434/// Scalar instructions must occasionally be marked WQM for correct propagation
435/// (e.g. thread masks leading up to branches), but when it comes to actual
436/// execution, they don't care about EXEC.
437bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const {
438 if (MI.isTerminator())
439 return true;
440
441 // Skip instructions that are not affected by EXEC
442 if (TII->isScalarUnit(MI))
443 return false;
444
445 // Generic instructions such as COPY will either disappear by register
446 // coalescing or be lowered to SALU or VALU instructions.
447 if (MI.isTransient()) {
448 if (MI.getNumExplicitOperands() >= 1) {
449 const MachineOperand &Op = MI.getOperand(0);
450 if (Op.isReg()) {
451 if (TRI->isSGPRReg(*MRI, Op.getReg())) {
452 // SGPR instructions are not affected by EXEC
453 return false;
454 }
455 }
456 }
457 }
458
459 return true;
460}
461
/// Insert a copy-out/copy-in pair that preserves SCC across code inserted at
/// \p Before (the EXEC-manipulating instructions this pass emits clobber SCC).
/// Returns an iterator pointing at the restore copy, so callers can insert
/// the clobbering code between the save and the restore.
MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Before) {
  unsigned SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr *Save =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
          .addReg(AMDGPU::SCC);
  MachineInstr *Restore =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
          .addReg(SaveReg);

  // Register the new instructions and the new interval with LiveIntervals so
  // the analysis stays valid.
  LIS->InsertMachineInstrInMaps(*Save);
  LIS->InsertMachineInstrInMaps(*Restore);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  return Restore;
}
480
// Return an iterator in the (inclusive) range [First, Last] at which
// instructions can be safely inserted, keeping in mind that some of the
// instructions we want to add necessarily clobber SCC.
//
// When SaveSCC is set, walk the SCC live range (via its register unit)
// looking for a gap inside the window; if SCC is live across the whole
// window, fall back to inserting an explicit save/restore via saveSCC.
MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
  if (!SaveSCC)
    return PreferLast ? Last : First;

  LiveRange &LR = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));
  auto MBBE = MBB.end();
  // An end() iterator has no SlotIndex of its own; use the block end index.
  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
                                     : LIS->getMBBEndIdx(&MBB);
  SlotIndex LastIdx =
      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
  const LiveRange::Segment *S;

  // Step Idx out of any live segment of SCC, moving toward the preferred end
  // of the window but never past its bounds.
  for (;;) {
    S = LR.getSegmentContaining(Idx);
    if (!S)
      break;

    if (PreferLast) {
      SlotIndex Next = S->start.getBaseIndex();
      if (Next < FirstIdx)
        break;
      Idx = Next;
    } else {
      SlotIndex Next = S->end.getNextIndex().getBaseIndex();
      if (Next > LastIdx)
        break;
      Idx = Next;
    }
  }

  MachineBasicBlock::iterator MBBI;

  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
    MBBI = MI;
  else {
    assert(Idx == LIS->getMBBEndIdx(&MBB));
    MBBI = MBB.end();
  }

  // SCC is live at every candidate position: protect it explicitly.
  if (S)
    MBBI = saveSCC(MBB, MBBI);

  return MBBI;
}
531
/// Switch EXEC to Exact mode by ANDing it with the live mask.
///
/// If \p SaveWQM is a valid register, the current (WQM) exec mask is saved
/// into it with S_AND_SAVEEXEC_B64 so toWQM can restore it later; otherwise
/// EXEC is narrowed in place with S_AND_B64.
void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SaveWQM, unsigned LiveMaskReg) {
  MachineInstr *MI;

  if (SaveWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
                 SaveWQM)
             .addReg(LiveMaskReg);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_AND_B64),
                 AMDGPU::EXEC)
             .addReg(AMDGPU::EXEC)
             .addReg(LiveMaskReg);
  }

  // Keep LiveIntervals in sync with the newly inserted instruction.
  LIS->InsertMachineInstrInMaps(*MI);
}
550
/// Switch EXEC back to whole quad mode.
///
/// If \p SavedWQM is a valid register (filled in by a preceding toExact),
/// restore it with a plain COPY; otherwise recompute WQM from the current
/// EXEC with S_WQM_B64 (the entry-block path, where EXEC was derived from
/// the live mask — see WQMFromExec in processBlock).
void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SavedWQM) {
  MachineInstr *MI;

  if (SavedWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::EXEC)
             .addReg(SavedWQM);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
                 AMDGPU::EXEC)
             .addReg(AMDGPU::EXEC);
  }

  // Keep LiveIntervals in sync with the newly inserted instruction.
  LIS->InsertMachineInstrInMaps(*MI);
}
567
/// Walk one basic block and insert the EXEC transitions (toExact/toWQM)
/// required by the analysis results. \p isEntry marks the function entry
/// block, which starts in Exact state right after the live-mask save.
void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
                                   bool isEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;

  // Blocks that never need WQM on entry require no transitions.
  if (!(BI.InNeeds & StateWQM))
    return;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!isEntry && !(BI.Needs & StateExact) && BI.OutNeeds != StateExact)
    return;

  DEBUG(dbgs() << "\nProcessing block BB#" << MBB.getNumber() << ":\n");

  unsigned SavedWQMReg = 0;
  // In the entry block EXEC starts as the live mask, so WQM can be recovered
  // from EXEC itself without a saved copy (see file header, improvement (1)).
  bool WQMFromExec = isEntry;
  char State = isEntry ? StateExact : StateWQM;

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  if (isEntry)
    ++II; // Skip the instruction that saves LiveMask

  // First tracks the start of the current run of don't-care instructions;
  // a transition may be inserted anywhere in [First, II].
  MachineBasicBlock::iterator First = IE;
  for (;;) {
    MachineBasicBlock::iterator Next = II;
    char Needs = 0;
    char OutNeeds = 0;

    if (First == IE)
      First = II;

    if (II != IE) {
      MachineInstr &MI = *II;

      if (requiresCorrectState(MI)) {
        auto III = Instructions.find(&MI);
        if (III != Instructions.end()) {
          Needs = III->second.Needs;
          OutNeeds = III->second.OutNeeds;
        }
      }

      // A terminator whose successors all run Exact must itself run Exact.
      if (MI.isTerminator() && !Needs && OutNeeds == StateExact)
        Needs = StateExact;

      // NOTE(review): operand 3 of SI_ELSE appears to flag that EXEC was
      // modified on the else path — confirm against the SI_ELSE pseudo
      // definition in the AMDGPU instruction tables.
      if (MI.getOpcode() == AMDGPU::SI_ELSE && BI.OutNeeds == StateExact)
        MI.getOperand(3).setImm(1);

      ++Next;
    } else {
      // End of basic block
      if (BI.OutNeeds & StateWQM)
        Needs = StateWQM;
      else if (BI.OutNeeds == StateExact)
        Needs = StateExact;
    }

    if (Needs) {
      if (Needs != State) {
        MachineBasicBlock::iterator Before =
            prepareInsertion(MBB, First, II, Needs == StateWQM,
                             Needs == StateExact || WQMFromExec);

        if (Needs == StateExact) {
          // Save WQM only if it will be needed again after this Exact region.
          if (!WQMFromExec && (OutNeeds & StateWQM))
            SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

          toExact(MBB, Before, SavedWQMReg, LiveMaskReg);
        } else {
          assert(WQMFromExec == (SavedWQMReg == 0));

          toWQM(MBB, Before, SavedWQMReg);

          if (SavedWQMReg) {
            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
            SavedWQMReg = 0;
          }
        }

        State = Needs;
      }

      // The current instruction pins the state; restart the flexible window.
      First = IE;
    }

    if (II == IE)
      break;
    II = Next;
  }
}
662
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000663void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) {
664 for (MachineInstr *MI : LiveMaskQueries) {
Matt Arsenault8dff86d2016-07-13 05:55:15 +0000665 const DebugLoc &DL = MI->getDebugLoc();
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000666 unsigned Dest = MI->getOperand(0).getReg();
Nicolai Haehnlee58e0e32016-09-12 16:25:20 +0000667 MachineInstr *Copy =
668 BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
669 .addReg(LiveMaskReg);
670
671 LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000672 MI->eraseFromParent();
673 }
674}
675
Connor Abbott8c217d02017-08-04 18:36:49 +0000676void SIWholeQuadMode::lowerCopyInstrs() {
677 for (MachineInstr *MI : LowerToCopyInstrs)
678 MI->setDesc(TII->get(AMDGPU::COPY));
679}
680
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000681bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +0000682 if (MF.getFunction()->getCallingConv() != CallingConv::AMDGPU_PS)
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000683 return false;
684
685 Instructions.clear();
686 Blocks.clear();
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000687 LiveMaskQueries.clear();
Connor Abbott8c217d02017-08-04 18:36:49 +0000688 LowerToCopyInstrs.clear();
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000689
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000690 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
691
692 TII = ST.getInstrInfo();
693 TRI = &TII->getRegisterInfo();
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000694 MRI = &MF.getRegInfo();
Nicolai Haehnlebef0e902016-08-02 19:17:37 +0000695 LIS = &getAnalysis<LiveIntervals>();
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000696
697 char GlobalFlags = analyzeFunction(MF);
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000698 if (!(GlobalFlags & StateWQM)) {
699 lowerLiveMaskQueries(AMDGPU::EXEC);
700 return !LiveMaskQueries.empty();
701 }
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000702
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000703 // Store a copy of the original live mask when required
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000704 unsigned LiveMaskReg = 0;
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +0000705 {
706 MachineBasicBlock &Entry = MF.front();
707 MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000708
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +0000709 if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) {
710 LiveMaskReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
Nicolai Haehnlee58e0e32016-09-12 16:25:20 +0000711 MachineInstr *MI = BuildMI(Entry, EntryMI, DebugLoc(),
712 TII->get(AMDGPU::COPY), LiveMaskReg)
713 .addReg(AMDGPU::EXEC);
714 LIS->InsertMachineInstrInMaps(*MI);
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +0000715 }
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000716
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +0000717 if (GlobalFlags == StateWQM) {
718 // For a shader that needs only WQM, we can just set it once.
719 BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
720 AMDGPU::EXEC)
721 .addReg(AMDGPU::EXEC);
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000722
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +0000723 lowerLiveMaskQueries(LiveMaskReg);
Connor Abbott8c217d02017-08-04 18:36:49 +0000724 lowerCopyInstrs();
Duncan P. N. Exon Smith4d295112016-07-08 19:16:05 +0000725 // EntryMI may become invalid here
726 return true;
727 }
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000728 }
729
Nicolai Haehnle3bba6a82016-09-03 12:26:38 +0000730 DEBUG(printInfo());
731
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000732 lowerLiveMaskQueries(LiveMaskReg);
Connor Abbott8c217d02017-08-04 18:36:49 +0000733 lowerCopyInstrs();
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000734
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000735 // Handle the general case
Matt Arsenault8dff86d2016-07-13 05:55:15 +0000736 for (auto BII : Blocks)
737 processBlock(*BII.first, LiveMaskReg, BII.first == &*MF.begin());
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000738
Nicolai Haehnlee58e0e32016-09-12 16:25:20 +0000739 // Physical registers like SCC aren't tracked by default anyway, so just
740 // removing the ranges we computed is the simplest option for maintaining
741 // the analysis results.
742 LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));
743
Nicolai Haehnle213e87f2016-03-21 20:28:33 +0000744 return true;
745}