//===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds instructions to enable whole quad mode for pixel
/// shaders, and whole wavefront mode for all programs.
///
/// Whole quad mode is required for derivative computations, but it interferes
/// with shader side effects (stores and atomics). This pass is run on the
/// scheduled machine IR but before register coalescing, so that machine SSA is
/// available for analysis. It ensures that WQM is enabled when necessary, but
/// disabled around stores and atomics.
///
/// When necessary, this pass creates a function prolog
///
///   S_MOV_B64 LiveMask, EXEC
///   S_WQM_B64 EXEC, EXEC
///
/// to enter WQM at the top of the function and surrounds blocks of Exact
/// instructions by
///
///   S_AND_SAVEEXEC_B64 Tmp, LiveMask
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// We also compute when a sequence of instructions requires Whole Wavefront
/// Mode (WWM) and insert instructions to save and restore it:
///
///   S_OR_SAVEEXEC_B64 Tmp, -1
///   ...
///   S_MOV_B64 EXEC, Tmp
///
/// In order to avoid excessive switching during sequences of Exact
/// instructions, the pass first analyzes which instructions must be run in WQM
/// (aka which instructions produce values that lead to derivative
/// computations).
///
/// Basic blocks are always exited in WQM as long as some successor needs WQM.
///
/// There is room for improvement given better control flow analysis:
///
///  (1) at the top level (outside of control flow statements, and as long as
///      kill hasn't been used), one SGPR can be saved by recovering WQM from
///      the LiveMask (this is implemented for the entry block).
///
///  (2) when entire regions (e.g. if-else blocks or entire loops) only
///      consist of exact and don't-care instructions, the switch only has to
///      be done at the entry and exit points rather than potentially in each
///      block of the region.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "si-wqm"

namespace {

enum {
  StateWQM = 0x1,
  StateWWM = 0x2,
  StateExact = 0x4,
};

struct PrintState {
public:
  int State;

  explicit PrintState(int State) : State(State) {}
};

#ifndef NDEBUG
static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
  if (PS.State & StateWQM)
    OS << "WQM";
  if (PS.State & StateWWM) {
    if (PS.State & StateWQM)
      OS << '|';
    OS << "WWM";
  }
  if (PS.State & StateExact) {
    if (PS.State & (StateWQM | StateWWM))
      OS << '|';
    OS << "Exact";
  }

  return OS;
}
#endif

struct InstrInfo {
  char Needs = 0;
  char Disabled = 0;
  char OutNeeds = 0;
};

struct BlockInfo {
  char Needs = 0;
  char InNeeds = 0;
  char OutNeeds = 0;
};

struct WorkItem {
  MachineBasicBlock *MBB = nullptr;
  MachineInstr *MI = nullptr;

  WorkItem() = default;
  WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
  WorkItem(MachineInstr *MI) : MI(MI) {}
};

class SIWholeQuadMode : public MachineFunctionPass {
private:
  CallingConv::ID CallingConv;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  DenseMap<const MachineInstr *, InstrInfo> Instructions;
  DenseMap<MachineBasicBlock *, BlockInfo> Blocks;
  SmallVector<MachineInstr *, 1> LiveMaskQueries;
  SmallVector<MachineInstr *, 4> LowerToCopyInstrs;

  void printInfo();

  void markInstruction(MachineInstr &MI, char Flag,
                       std::vector<WorkItem> &Worklist);
  void markInstructionUses(const MachineInstr &MI, char Flag,
                           std::vector<WorkItem> &Worklist);
  char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
  void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
  void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
  char analyzeFunction(MachineFunction &MF);

  bool requiresCorrectState(const MachineInstr &MI) const;

  MachineBasicBlock::iterator saveSCC(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator Before);
  MachineBasicBlock::iterator
  prepareInsertion(MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
                   MachineBasicBlock::iterator Last, bool PreferLast,
                   bool SaveSCC);
  void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SaveWQM, unsigned LiveMaskReg);
  void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SavedWQM);
  void toWWM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
             unsigned SaveOrig);
  void fromWWM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
               unsigned SavedOrig);
  void processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg, bool isEntry);

  void lowerLiveMaskQueries(unsigned LiveMaskReg);
  void lowerCopyInstrs();

public:
  static char ID;

  SIWholeQuadMode() :
    MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Whole Quad Mode"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIWholeQuadMode::ID = 0;

INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
                    false)

char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;

FunctionPass *llvm::createSIWholeQuadModePass() {
  return new SIWholeQuadMode;
}

#ifndef NDEBUG
LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
  for (const auto &BII : Blocks) {
    dbgs() << "\n"
           << printMBBReference(*BII.first) << ":\n"
           << "  InNeeds = " << PrintState(BII.second.InNeeds)
           << ", Needs = " << PrintState(BII.second.Needs)
           << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";

    for (const MachineInstr &MI : *BII.first) {
      auto III = Instructions.find(&MI);
      if (III == Instructions.end())
        continue;

      dbgs() << "  " << MI << "    Needs = " << PrintState(III->second.Needs)
             << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
    }
  }
}
#endif

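/// Mark \p MI as needing to run in the state(s) given by \p Flag (minus any
/// states that are disabled for this instruction) and, if its needs changed,
/// queue it for another round of propagation.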
void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
                                      std::vector<WorkItem> &Worklist) {
  InstrInfo &II = Instructions[&MI];

  assert(!(Flag & StateExact) && Flag != 0);

  // Remove any disabled states from the flag. The user that required it gets
  // an undefined value in the helper lanes. For example, this can happen if
  // the result of an atomic is used by an instruction that requires WQM, where
  // ignoring the request for WQM is correct as per the relevant specs.
  Flag &= ~II.Disabled;

  // Ignore if the flag is already encompassed by the existing needs, or we
  // just disabled everything.
  if ((II.Needs & Flag) == Flag)
    return;

  II.Needs |= Flag;
  Worklist.push_back(&MI);
}

/// Mark all instructions defining the uses in \p MI with \p Flag.
void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
                                          std::vector<WorkItem> &Worklist) {
  for (const MachineOperand &Use : MI.uses()) {
    if (!Use.isReg() || !Use.isUse())
      continue;

    Register Reg = Use.getReg();

    // Handle physical registers that we need to track; this is mostly relevant
    // for VCC, which can appear as the (implicit) input of a uniform branch,
    // e.g. when a loop counter is stored in a VGPR.
    if (!Register::isVirtualRegister(Reg)) {
      if (Reg == AMDGPU::EXEC || Reg == AMDGPU::EXEC_LO)
        continue;

      for (MCRegUnitIterator RegUnit(Reg, TRI); RegUnit.isValid(); ++RegUnit) {
        LiveRange &LR = LIS->getRegUnit(*RegUnit);
        const VNInfo *Value = LR.Query(LIS->getInstructionIndex(MI)).valueIn();
        if (!Value)
          continue;

        // Since we're in machine SSA, we do not need to track physical
        // registers across basic blocks.
        if (Value->isPHIDef())
          continue;

        markInstruction(*LIS->getInstructionFromIndex(Value->def), Flag,
                        Worklist);
      }

      continue;
    }

    for (MachineInstr &DefMI : MRI->def_instructions(Use.getReg()))
      markInstruction(DefMI, Flag, Worklist);
  }
}

// Scan instructions to determine which ones require an Exact execmask and
// which ones seed WQM requirements.
char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
                                       std::vector<WorkItem> &Worklist) {
  char GlobalFlags = 0;
  bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
  SmallVector<MachineInstr *, 4> SetInactiveInstrs;
  SmallVector<MachineInstr *, 4> SoftWQMInstrs;

  // We need to visit the basic blocks in reverse post-order so that we visit
  // defs before uses, in particular so that we don't accidentally mark an
  // instruction as needing e.g. WQM before visiting it and realizing it needs
  // WQM disabled.
  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
  for (auto BI = RPOT.begin(), BE = RPOT.end(); BI != BE; ++BI) {
    MachineBasicBlock &MBB = **BI;
    BlockInfo &BBI = Blocks[&MBB];

    for (auto II = MBB.begin(), IE = MBB.end(); II != IE; ++II) {
      MachineInstr &MI = *II;
      InstrInfo &III = Instructions[&MI];
      unsigned Opcode = MI.getOpcode();
      char Flags = 0;

      if (TII->isWQM(Opcode)) {
        // Sampling instructions don't need to produce results for all pixels
        // in a quad, they just require all inputs of a quad to have been
        // computed for derivatives.
        markInstructionUses(MI, StateWQM, Worklist);
        GlobalFlags |= StateWQM;
        continue;
      } else if (Opcode == AMDGPU::WQM) {
        // The WQM intrinsic requires its output to have all the helper lanes
        // correct, so we need it to be in WQM.
        Flags = StateWQM;
        LowerToCopyInstrs.push_back(&MI);
      } else if (Opcode == AMDGPU::SOFT_WQM) {
        LowerToCopyInstrs.push_back(&MI);
        SoftWQMInstrs.push_back(&MI);
        continue;
      } else if (Opcode == AMDGPU::WWM) {
        // The WWM intrinsic doesn't make the same guarantee, and it also needs
        // to be executed in WQM or Exact so that its copy doesn't clobber
        // inactive lanes.
        markInstructionUses(MI, StateWWM, Worklist);
        GlobalFlags |= StateWWM;
        LowerToCopyInstrs.push_back(&MI);
        continue;
      } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 ||
                 Opcode == AMDGPU::V_SET_INACTIVE_B64) {
        III.Disabled = StateWWM;
        MachineOperand &Inactive = MI.getOperand(2);
        if (Inactive.isReg()) {
          if (Inactive.isUndef()) {
            LowerToCopyInstrs.push_back(&MI);
          } else {
            Register Reg = Inactive.getReg();
            if (Register::isVirtualRegister(Reg)) {
              for (MachineInstr &DefMI : MRI->def_instructions(Reg))
                markInstruction(DefMI, StateWWM, Worklist);
            }
          }
        }
        SetInactiveInstrs.push_back(&MI);
        continue;
      } else if (TII->isDisableWQM(MI)) {
        BBI.Needs |= StateExact;
        if (!(BBI.InNeeds & StateExact)) {
          BBI.InNeeds |= StateExact;
          Worklist.push_back(&MBB);
        }
        GlobalFlags |= StateExact;
        III.Disabled = StateWQM | StateWWM;
        continue;
      } else {
        if (Opcode == AMDGPU::SI_PS_LIVE) {
          LiveMaskQueries.push_back(&MI);
        } else if (WQMOutputs) {
          // The function is in machine SSA form, which means that physical
          // VGPRs correspond to shader inputs and outputs. Inputs are
          // only used, outputs are only defined.
          for (const MachineOperand &MO : MI.defs()) {
            if (!MO.isReg())
              continue;

            Register Reg = MO.getReg();

            if (!Register::isVirtualRegister(Reg) &&
                TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) {
              Flags = StateWQM;
              break;
            }
          }
        }

        if (!Flags)
          continue;
      }

      markInstruction(MI, Flags, Worklist);
      GlobalFlags |= Flags;
    }
  }

  // Make sure that any SET_INACTIVE instructions are computed in WQM if WQM is
  // ever used anywhere in the function. This implements the corresponding
  // semantics of @llvm.amdgcn.set.inactive.
  // Similarly for SOFT_WQM instructions, implementing @llvm.amdgcn.softwqm.
  if (GlobalFlags & StateWQM) {
    for (MachineInstr *MI : SetInactiveInstrs)
      markInstruction(*MI, StateWQM, Worklist);
    for (MachineInstr *MI : SoftWQMInstrs)
      markInstruction(*MI, StateWQM, Worklist);
  }

  return GlobalFlags;
}

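/// Re-propagate the needs of \p MI: terminators and VMEM stores followed by
/// WQM computations are promoted to WQM themselves, the containing block's
/// needs are updated, and the previous instruction as well as the definitions
/// of \p MI's operands are queued for another visit where their needs grow.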
void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
                                           std::vector<WorkItem>& Worklist) {
  MachineBasicBlock *MBB = MI.getParent();
  InstrInfo II = Instructions[&MI]; // take a copy to prevent dangling references
  BlockInfo &BI = Blocks[MBB];

  // Control flow-type instructions and stores to temporary memory that are
  // followed by WQM computations must themselves be in WQM.
  if ((II.OutNeeds & StateWQM) && !(II.Disabled & StateWQM) &&
      (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
    Instructions[&MI].Needs = StateWQM;
    II.Needs = StateWQM;
  }

  // Propagate to block level
  if (II.Needs & StateWQM) {
    BI.Needs |= StateWQM;
    if (!(BI.InNeeds & StateWQM)) {
      BI.InNeeds |= StateWQM;
      Worklist.push_back(MBB);
    }
  }

  // Propagate backwards within block
  if (MachineInstr *PrevMI = MI.getPrevNode()) {
    char InNeeds = (II.Needs & ~StateWWM) | II.OutNeeds;
    if (!PrevMI->isPHI()) {
      InstrInfo &PrevII = Instructions[PrevMI];
      if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
        PrevII.OutNeeds |= InNeeds;
        Worklist.push_back(PrevMI);
      }
    }
  }

  // Propagate WQM flag to instruction inputs
  assert(!(II.Needs & StateExact));

  if (II.Needs != 0)
    markInstructionUses(MI, II.Needs, Worklist);

  // Ensure we process a block containing WWM, even if it does not require any
  // WQM transitions.
  if (II.Needs & StateWWM)
    BI.Needs |= StateWWM;
}

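/// Re-propagate the needs of \p MBB: its out-needs are pushed into its last
/// instruction, its in-needs into every predecessor's out-needs, and its
/// out-needs into every successor's in-needs.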
void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
                                     std::vector<WorkItem>& Worklist) {
  BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.

  // Propagate through instructions
  if (!MBB.empty()) {
    MachineInstr *LastMI = &*MBB.rbegin();
    InstrInfo &LastII = Instructions[LastMI];
    if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
      LastII.OutNeeds |= BI.OutNeeds;
      Worklist.push_back(LastMI);
    }
  }

  // Predecessor blocks must provide for our WQM/Exact needs.
  for (MachineBasicBlock *Pred : MBB.predecessors()) {
    BlockInfo &PredBI = Blocks[Pred];
    if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
      continue;

    PredBI.OutNeeds |= BI.InNeeds;
    PredBI.InNeeds |= BI.InNeeds;
    Worklist.push_back(Pred);
  }

  // All successors must be prepared to accept the same set of WQM/Exact data.
  for (MachineBasicBlock *Succ : MBB.successors()) {
    BlockInfo &SuccBI = Blocks[Succ];
    if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
      continue;

    SuccBI.InNeeds |= BI.OutNeeds;
    Worklist.push_back(Succ);
  }
}

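/// Run the initial scan and then iterate the worklist to a fixed point.
/// Returns the union of all execution states required anywhere in \p MF.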
char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
  std::vector<WorkItem> Worklist;
  char GlobalFlags = scanInstructions(MF, Worklist);

  while (!Worklist.empty()) {
    WorkItem WI = Worklist.back();
    Worklist.pop_back();

    if (WI.MI)
      propagateInstruction(*WI.MI, Worklist);
    else
      propagateBlock(*WI.MBB, Worklist);
  }

  return GlobalFlags;
}

/// Whether \p MI really requires the exec state computed during analysis.
///
/// Scalar instructions must occasionally be marked WQM for correct propagation
/// (e.g. thread masks leading up to branches), but when it comes to actual
/// execution, they don't care about EXEC.
bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const {
  if (MI.isTerminator())
    return true;

  // Skip instructions that are not affected by EXEC
  if (TII->isScalarUnit(MI))
    return false;

  // Generic instructions such as COPY will either disappear by register
  // coalescing or be lowered to SALU or VALU instructions.
  if (MI.isTransient()) {
    if (MI.getNumExplicitOperands() >= 1) {
      const MachineOperand &Op = MI.getOperand(0);
      if (Op.isReg()) {
        if (TRI->isSGPRReg(*MRI, Op.getReg())) {
          // SGPR instructions are not affected by EXEC
          return false;
        }
      }
    }
  }

  return true;
}

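/// Insert a copy that saves SCC into a fresh SGPR and a copy that restores it,
/// both before \p Before, and return an iterator to the restore copy so that
/// exec mask manipulation can be placed between the two without clobbering
/// SCC.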
MachineBasicBlock::iterator
SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator Before) {
  Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr *Save =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
          .addReg(AMDGPU::SCC);
  MachineInstr *Restore =
      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
          .addReg(SaveReg);

  LIS->InsertMachineInstrInMaps(*Save);
  LIS->InsertMachineInstrInMaps(*Restore);
  LIS->createAndComputeVirtRegInterval(SaveReg);

  return Restore;
}

// Return an iterator in the (inclusive) range [First, Last] at which
// instructions can be safely inserted, keeping in mind that some of the
// instructions we want to add necessarily clobber SCC.
MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
  if (!SaveSCC)
    return PreferLast ? Last : First;

  LiveRange &LR = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));
  auto MBBE = MBB.end();
  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
                                     : LIS->getMBBEndIdx(&MBB);
  SlotIndex LastIdx =
      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
  const LiveRange::Segment *S;

  for (;;) {
    S = LR.getSegmentContaining(Idx);
    if (!S)
      break;

    if (PreferLast) {
      SlotIndex Next = S->start.getBaseIndex();
      if (Next < FirstIdx)
        break;
      Idx = Next;
    } else {
      SlotIndex Next = S->end.getNextIndex().getBaseIndex();
      if (Next > LastIdx)
        break;
      Idx = Next;
    }
  }

  MachineBasicBlock::iterator MBBI;

  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
    MBBI = MI;
  else {
    assert(Idx == LIS->getMBBEndIdx(&MBB));
    MBBI = MBB.end();
  }

  if (S)
    MBBI = saveSCC(MBB, MBBI);

  return MBBI;
}

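/// Switch to Exact mode before \p Before by ANDing EXEC with \p LiveMaskReg,
/// saving the previous (WQM) exec mask into \p SaveWQM when it is non-zero.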
void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SaveWQM, unsigned LiveMaskReg) {
  MachineInstr *MI;

  if (SaveWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ?
           AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64),
           SaveWQM)
           .addReg(LiveMaskReg);
  } else {
    unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ?
           AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64),
           Exec)
           .addReg(Exec)
           .addReg(LiveMaskReg);
  }

  LIS->InsertMachineInstrInMaps(*MI);
}

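/// Switch to WQM before \p Before, either by restoring a previously saved exec
/// mask from \p SavedWQM or, when no save exists, by recomputing it with S_WQM
/// applied to EXEC.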
void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SavedWQM) {
  MachineInstr *MI;

  unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  if (SavedWQM) {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
           .addReg(SavedWQM);
  } else {
    MI = BuildMI(MBB, Before, DebugLoc(), TII->get(ST->isWave32() ?
           AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64),
           Exec)
           .addReg(Exec);
  }

  LIS->InsertMachineInstrInMaps(*MI);
}

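/// Enter WWM before \p Before by emitting the ENTER_WWM pseudo, which saves
/// the current exec mask into \p SaveOrig and enables all lanes (the
/// S_OR_SAVEEXEC with -1 pattern from the file header).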
void SIWholeQuadMode::toWWM(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator Before,
                            unsigned SaveOrig) {
  MachineInstr *MI;

  assert(SaveOrig);
  MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_WWM), SaveOrig)
           .addImm(-1);
  LIS->InsertMachineInstrInMaps(*MI);
}

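/// Leave WWM before \p Before by restoring EXEC from \p SavedOrig, the mask
/// that was saved when WWM was entered.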
void SIWholeQuadMode::fromWWM(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator Before,
                              unsigned SavedOrig) {
  MachineInstr *MI;

  assert(SavedOrig);
  MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_WWM),
               ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC)
           .addReg(SavedOrig);
  LIS->InsertMachineInstrInMaps(*MI);
}

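/// Walk \p MBB and insert the exec mask transitions between Exact, WQM and WWM
/// required by the per-instruction analysis. A transition is only emitted once
/// an instruction actually requires a state incompatible with the current one,
/// and it may be hoisted into the preceding run of instructions that do not
/// care about the state (tracked by FirstWQM/FirstWWM).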
void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, unsigned LiveMaskReg,
                                   bool isEntry) {
  auto BII = Blocks.find(&MBB);
  if (BII == Blocks.end())
    return;

  const BlockInfo &BI = BII->second;

  // This is a non-entry block that is WQM throughout, so no need to do
  // anything.
  if (!isEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact)
    return;

  LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
                    << ":\n");

  unsigned SavedWQMReg = 0;
  unsigned SavedNonWWMReg = 0;
  bool WQMFromExec = isEntry;
  char State = (isEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
  char NonWWMState = 0;
  const TargetRegisterClass *BoolRC = TRI->getBoolRC();

  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
  if (isEntry)
    ++II; // Skip the instruction that saves LiveMask

  // This stores the first instruction where it's safe to switch from WQM to
  // Exact or vice versa.
  MachineBasicBlock::iterator FirstWQM = IE;

  // This stores the first instruction where it's safe to switch from WWM to
  // Exact/WQM or to switch to WWM. It must always be the same as, or after,
  // FirstWQM since if it's safe to switch to/from WWM, it must be safe to
  // switch to/from WQM as well.
  MachineBasicBlock::iterator FirstWWM = IE;
  for (;;) {
    MachineBasicBlock::iterator Next = II;
    char Needs = StateExact | StateWQM; // WWM is disabled by default
    char OutNeeds = 0;

    if (FirstWQM == IE)
      FirstWQM = II;

    if (FirstWWM == IE)
      FirstWWM = II;

    // First, figure out the allowed states (Needs) based on the propagated
    // flags.
    if (II != IE) {
      MachineInstr &MI = *II;

      if (requiresCorrectState(MI)) {
        auto III = Instructions.find(&MI);
        if (III != Instructions.end()) {
          if (III->second.Needs & StateWWM)
            Needs = StateWWM;
          else if (III->second.Needs & StateWQM)
            Needs = StateWQM;
          else
            Needs &= ~III->second.Disabled;
          OutNeeds = III->second.OutNeeds;
        }
      } else {
        // If the instruction doesn't actually need a correct EXEC, then we can
        // safely leave WWM enabled.
        Needs = StateExact | StateWQM | StateWWM;
      }

      if (MI.isTerminator() && OutNeeds == StateExact)
        Needs = StateExact;

      if (MI.getOpcode() == AMDGPU::SI_ELSE && BI.OutNeeds == StateExact)
        MI.getOperand(3).setImm(1);

      ++Next;
    } else {
      // End of basic block
      if (BI.OutNeeds & StateWQM)
        Needs = StateWQM;
      else if (BI.OutNeeds == StateExact)
        Needs = StateExact;
      else
        Needs = StateWQM | StateExact;
    }

    // Now, transition if necessary.
    if (!(Needs & State)) {
      MachineBasicBlock::iterator First;
      if (State == StateWWM || Needs == StateWWM) {
        // We must switch to or from WWM
        First = FirstWWM;
      } else {
        // We only need to switch to/from WQM, so we can use FirstWQM
        First = FirstWQM;
      }

      MachineBasicBlock::iterator Before =
          prepareInsertion(MBB, First, II, Needs == StateWQM,
                           Needs == StateExact || WQMFromExec);

      if (State == StateWWM) {
        assert(SavedNonWWMReg);
        fromWWM(MBB, Before, SavedNonWWMReg);
        State = NonWWMState;
      }

      if (Needs == StateWWM) {
        NonWWMState = State;
        SavedNonWWMReg = MRI->createVirtualRegister(BoolRC);
        toWWM(MBB, Before, SavedNonWWMReg);
        State = StateWWM;
      } else {
        if (State == StateWQM && (Needs & StateExact) && !(Needs & StateWQM)) {
          if (!WQMFromExec && (OutNeeds & StateWQM))
            SavedWQMReg = MRI->createVirtualRegister(BoolRC);

          toExact(MBB, Before, SavedWQMReg, LiveMaskReg);
          State = StateExact;
        } else if (State == StateExact && (Needs & StateWQM) &&
                   !(Needs & StateExact)) {
          assert(WQMFromExec == (SavedWQMReg == 0));

          toWQM(MBB, Before, SavedWQMReg);

          if (SavedWQMReg) {
            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
            SavedWQMReg = 0;
          }
          State = StateWQM;
        } else {
          // We can get here if we transitioned from WWM to a non-WWM state that
          // already matches our needs, but we shouldn't need to do anything.
          assert(Needs & State);
        }
      }
    }

    if (Needs != (StateExact | StateWQM | StateWWM)) {
      if (Needs != (StateExact | StateWQM))
        FirstWQM = IE;
      FirstWWM = IE;
    }

    if (II == IE)
      break;
    II = Next;
  }
}

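/// Replace every collected SI_PS_LIVE live-mask query with a copy of
/// \p LiveMaskReg.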
void SIWholeQuadMode::lowerLiveMaskQueries(unsigned LiveMaskReg) {
  for (MachineInstr *MI : LiveMaskQueries) {
    const DebugLoc &DL = MI->getDebugLoc();
    Register Dest = MI->getOperand(0).getReg();
    MachineInstr *Copy =
        BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
            .addReg(LiveMaskReg);

    LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
    MI->eraseFromParent();
  }
}

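/// Lower the pseudos collected in LowerToCopyInstrs during the scan into plain
/// moves: VGPR destinations become the appropriate mov with an implicit EXEC
/// use, everything else becomes a COPY.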
void SIWholeQuadMode::lowerCopyInstrs() {
  for (MachineInstr *MI : LowerToCopyInstrs) {
    for (unsigned i = MI->getNumExplicitOperands() - 1; i > 1; i--)
      MI->RemoveOperand(i);

    const Register Reg = MI->getOperand(0).getReg();

    if (TRI->isVGPR(*MRI, Reg)) {
      const TargetRegisterClass *regClass = Register::isVirtualRegister(Reg)
                                                ? MRI->getRegClass(Reg)
                                                : TRI->getPhysRegClass(Reg);

      const unsigned MovOp = TII->getMovOpcode(regClass);
      MI->setDesc(TII->get(MovOp));

      // And make it implicitly depend on exec (like all VALU movs should do).
      MI->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    } else {
      MI->setDesc(TII->get(AMDGPU::COPY));
    }
  }
}

bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
  Instructions.clear();
  Blocks.clear();
  LiveMaskQueries.clear();
  LowerToCopyInstrs.clear();
  CallingConv = MF.getFunction().getCallingConv();

  ST = &MF.getSubtarget<GCNSubtarget>();

  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervals>();

  char GlobalFlags = analyzeFunction(MF);
  unsigned LiveMaskReg = 0;
  unsigned Exec = ST->isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  if (!(GlobalFlags & StateWQM)) {
    lowerLiveMaskQueries(Exec);
    if (!(GlobalFlags & StateWWM) && LowerToCopyInstrs.empty())
      return !LiveMaskQueries.empty();
  } else {
    // Store a copy of the original live mask when required
    MachineBasicBlock &Entry = MF.front();
    MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();

    if (GlobalFlags & StateExact || !LiveMaskQueries.empty()) {
      LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
      MachineInstr *MI = BuildMI(Entry, EntryMI, DebugLoc(),
                                 TII->get(AMDGPU::COPY), LiveMaskReg)
                             .addReg(Exec);
      LIS->InsertMachineInstrInMaps(*MI);
    }

    lowerLiveMaskQueries(LiveMaskReg);

    if (GlobalFlags == StateWQM) {
      // For a shader that needs only WQM, we can just set it once.
      BuildMI(Entry, EntryMI, DebugLoc(), TII->get(ST->isWave32() ?
                AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64),
              Exec)
          .addReg(Exec);

      lowerCopyInstrs();
      // EntryMI may become invalid here
      return true;
    }
  }

  LLVM_DEBUG(printInfo());

  lowerCopyInstrs();

  // Handle the general case
  for (auto BII : Blocks)
    processBlock(*BII.first, LiveMaskReg, BII.first == &*MF.begin());

  // Physical registers like SCC aren't tracked by default anyway, so just
  // removing the ranges we computed is the simplest option for maintaining
  // the analysis results.
  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));

  return true;
}