//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (64-bit scalar registers). The pass assumes machine SSA form
// and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
//
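// As an illustrative sketch (simplified MIR, not taken from the pass's actual
// output), a cross-block i1 value such as
//
//   bb.1:
//     %1:vreg_1 = COPY %0:sreg_64      ; condition produced as a lane mask
//   bb.2:
//     %2:sreg_64 = COPY %1:vreg_1      ; consumed as a lane mask again
//
// ends up carried in a 64-bit scalar register after this pass, and merges of
// values from different control-flow paths or loop iterations are built from
// S_AND_B64 / S_ANDN2_B64 / S_OR_B64 against EXEC (see buildMergeLaneMasks).
//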
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static unsigned createLaneMaskReg(MachineFunction &MF);
static unsigned insertUndefLaneMask(MachineBasicBlock &MBB);

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

private:
  MachineFunction *MF = nullptr;
  MachineDominatorTree *DT = nullptr;
  MachinePostDominatorTree *PDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;

  DenseSet<unsigned> ConstrainRegs;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  void lowerCopiesFromI1();
  void lowerPhis();
  void lowerCopiesToI1();
  bool isConstantLaneMask(unsigned Reg, bool &Val) const;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           unsigned DstReg, unsigned PrevReg, unsigned CurReg);
  MachineBasicBlock::iterator
  getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;

  bool isLaneMaskReg(unsigned Reg) const {
    return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
           TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
               ST->getWavefrontSize();
  }
};

/// Helper class that analyzes the incoming values of a phi in the control
/// flow graph to determine where an incoming value can simply be taken as a
/// scalar lane mask as-is, and where it needs to be merged with another,
/// previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
///
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT) : PDT(PDT) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock,
               ArrayRef<MachineBasicBlock *> IncomingBlocks) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (MachineBasicBlock *MBB : IncomingBlocks) {
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      bool Divergent = false;
      for (MachineInstr &MI : MBB->terminators()) {
        if (MI.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO ||
            MI.getOpcode() == AMDGPU::SI_IF ||
            MI.getOpcode() == AMDGPU::SI_ELSE ||
            MI.getOpcode() == AMDGPU::SI_LOOP) {
          Divergent = true;
          break;
        }
      }

      if (Divergent && PDT.dominates(&DefBlock, MBB)) {
        for (MachineBasicBlock *Succ : MBB->successors())
          Stack.push_back(Succ);
      }
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      for (MachineBasicBlock *Succ : MBB->successors())
        Stack.push_back(Succ);
    }

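    // Classify the reachable blocks: a block with no reachable predecessor is
    // a source, i.e. its incoming value can be used as-is. For the remaining
    // blocks, remember their unreachable predecessors so that the caller can
    // seed the SSAUpdater with an available value there.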
    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (llvm::find(Predecessors, UnreachablePred) == Predecessors.end())
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
///
/// TODO: This rule is conservative because it does not check whether the
///       relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
///
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      ArrayRef<MachineBasicBlock *> Blocks = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (MachineBasicBlock *MBB : Blocks)
      Dom = DT.findNearestCommonDominator(Dom, MBB);

    if (!inLoopLevel(*Dom, LoopLevel, Blocks)) {
      SSAUpdater.AddAvailableValue(Dom, insertUndefLaneMask(*Dom));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Blocks))
          SSAUpdater.AddAvailableValue(Pred, insertUndefLaneMask(*Pred));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<MachineBasicBlock *> Blocks) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    if (llvm::find(Blocks, &MBB) != Blocks.end())
      return true;

    return false;
  }

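  // Advance the traversal by one level: on the first call the visited region
  // is just the def block; afterwards VisitedPostDom moves up to its immediate
  // post-dominator and the newly reachable blocks are traversed. Backward
  // edges into the def block update FoundLoopLevel, and the nearest common
  // dominator of everything visited so far is appended to CommonDominators.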
  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

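// Lane masks hold one bit per lane of the wavefront, so they are allocated in
// 64-bit scalar registers.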
static unsigned createLaneMaskReg(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
}

static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned UndefReg = createLaneMaskReg(MF);
  BuildMI(MBB, MBB.getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  MF = &TheMF;
  MRI = &MF->getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();

  lowerCopiesFromI1();
  lowerPhis();
  lowerCopiesToI1();

  for (unsigned Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_64_XEXECRegClass);
  ConstrainRegs.clear();

  return true;
}

void SILowerI1Copies::lowerCopiesFromI1() {
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      unsigned DstReg = MI.getOperand(0).getReg();
      unsigned SrcReg = MI.getOperand(1).getReg();
      if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
          MRI->getRegClass(SrcReg) != &AMDGPU::VReg_1RegClass)
        continue;

      if (isLaneMaskReg(DstReg) ||
          (TargetRegisterInfo::isVirtualRegister(DstReg) &&
           MRI->getRegClass(DstReg) == &AMDGPU::VReg_1RegClass))
        continue;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(TII->getRegisterInfo().getRegSizeInBits(DstReg, *MRI) == 32);
      assert(!MI.getOperand(0).getSubReg());

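      // V_CNDMASK_B32_e64 selects per lane between the two immediates below
      // (0 and -1) based on the corresponding bit of SrcReg, so lanes whose
      // bit is set receive -1 and all others receive 0; the remaining zero
      // immediates are the e64 source modifiers. SrcReg is recorded in
      // ConstrainRegs so that it is later constrained to a class that
      // excludes EXEC.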
      ConstrainRegs.insert(SrcReg);
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
}

void SILowerI1Copies::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT);
  SmallVector<MachineInstr *, 4> DeadPhis;
  SmallVector<MachineBasicBlock *, 4> IncomingBlocks;
  SmallVector<unsigned, 4> IncomingRegs;
  SmallVector<unsigned, 4> IncomingUpdated;
#ifndef NDEBUG
  DenseSet<unsigned> PhiRegisters;
#endif

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB.phis()) {
      unsigned DstReg = MI.getOperand(0).getReg();
      if (MRI->getRegClass(DstReg) != &AMDGPU::VReg_1RegClass)
        continue;

      LLVM_DEBUG(dbgs() << "Lower PHI: " << MI);

      MRI->setRegClass(DstReg, &AMDGPU::SReg_64RegClass);

      // Collect incoming values.
      for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
        assert(i + 1 < MI.getNumOperands());
        unsigned IncomingReg = MI.getOperand(i).getReg();
        MachineBasicBlock *IncomingMBB = MI.getOperand(i + 1).getMBB();
        MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

        if (IncomingDef->getOpcode() == AMDGPU::COPY) {
          IncomingReg = IncomingDef->getOperand(1).getReg();
          assert(isLaneMaskReg(IncomingReg));
          assert(!IncomingDef->getOperand(1).getSubReg());
        } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
          continue;
        } else {
          assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
        }

        IncomingBlocks.push_back(IncomingMBB);
        IncomingRegs.push_back(IncomingReg);
      }

#ifndef NDEBUG
      PhiRegisters.insert(DstReg);
#endif

      // Phis in a loop that are observed outside the loop receive a simple but
      // conservatively correct treatment.
      MachineBasicBlock *PostDomBound = &MBB;
      for (MachineInstr &Use : MRI->use_instructions(DstReg)) {
        PostDomBound =
            PDT->findNearestCommonDominator(PostDomBound, Use.getParent());
      }

      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

      SSAUpdater.Initialize(DstReg);

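      // If a loop was detected (a backward edge is reachable before the common
      // post-dominator of all uses), every incoming value must be merged with
      // the lane mask accumulated in its incoming block. Otherwise,
      // PhiIncomingAnalysis identifies which incoming values can be used as
      // lane masks as-is and which still need a merge.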
      if (FoundLoopLevel) {
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater, IncomingBlocks);

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          IncomingUpdated.push_back(createLaneMaskReg(*MF));
          SSAUpdater.AddAvailableValue(IncomingBlocks[i],
                                       IncomingUpdated.back());
        }

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          MachineBasicBlock &IMBB = *IncomingBlocks[i];
          buildMergeLaneMasks(
              IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
              SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
        }
      } else {
        // The phi is not observed from outside a loop. Use a more accurate
        // lowering.
        PIA.analyze(MBB, IncomingBlocks);

        for (MachineBasicBlock *MBB : PIA.predecessors())
          SSAUpdater.AddAvailableValue(MBB, insertUndefLaneMask(*MBB));

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          MachineBasicBlock &IMBB = *IncomingBlocks[i];
          if (PIA.isSource(IMBB)) {
            IncomingUpdated.push_back(0);
            SSAUpdater.AddAvailableValue(&IMBB, IncomingRegs[i]);
          } else {
            IncomingUpdated.push_back(createLaneMaskReg(*MF));
            SSAUpdater.AddAvailableValue(&IMBB, IncomingUpdated.back());
          }
        }

        for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
          if (!IncomingUpdated[i])
            continue;

          MachineBasicBlock &IMBB = *IncomingBlocks[i];
          buildMergeLaneMasks(
              IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
              SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
        }
      }

      unsigned NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
      if (NewReg != DstReg) {
        MRI->replaceRegWith(NewReg, DstReg);

        // Ensure that DstReg has a single def and mark the old PHI node for
        // deletion.
        MI.getOperand(0).setReg(NewReg);
        DeadPhis.push_back(&MI);
      }

      IncomingBlocks.clear();
      IncomingRegs.clear();
      IncomingUpdated.clear();
    }

    for (MachineInstr *MI : DeadPhis)
      MI->eraseFromParent();
    DeadPhis.clear();
  }
}

void SILowerI1Copies::lowerCopiesToI1() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      unsigned DstReg = MI.getOperand(0).getReg();
      if (!TargetRegisterInfo::isVirtualRegister(DstReg) ||
          MRI->getRegClass(DstReg) != &AMDGPU::VReg_1RegClass)
        continue;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      MRI->setRegClass(DstReg, &AMDGPU::SReg_64RegClass);
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      unsigned SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

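      // A source that is not already a lane mask is a 32-bit value (e.g. from
      // around inline assembly); convert it first: V_CMP_NE_U32 sets a lane's
      // bit iff that lane's value is non-zero.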
      if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
          !isLaneMaskReg(SrcReg)) {
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
        unsigned TmpReg = createLaneMaskReg(*MF);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      MachineBasicBlock *PostDomBound = &MBB;
      for (MachineInstr &Use : MRI->use_instructions(DstReg)) {
        PostDomBound =
            PDT->findNearestCommonDominator(PostDomBound, Use.getParent());
      }

      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
}

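/// Look through copies between lane-mask registers and determine whether \p
/// Reg is a constant lane mask: all-zeros (\p Val = false) or all-ones
/// (\p Val = true), i.e. an S_MOV_B64 of 0 or -1.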
bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != AMDGPU::S_MOV_B64)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}

static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY
  llvm_unreachable("SCC used by terminator but no def in block");
}

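/// Merge \p PrevReg and \p CurReg into \p DstReg so that lanes currently
/// active in EXEC take their bit from \p CurReg while inactive lanes keep
/// their bit from \p PrevReg; conceptually,
///   DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC),
/// with cheaper sequences emitted when either input is a known constant mask.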
void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          const DebugLoc &DL, unsigned DstReg,
                                          unsigned PrevReg, unsigned CurReg) {
  bool PrevVal;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(AMDGPU::EXEC);
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), DstReg)
          .addReg(AMDGPU::EXEC)
          .addImm(-1);
    }
    return;
  }

  unsigned PrevMaskedReg = 0;
  unsigned CurMaskedReg = 0;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ANDN2_B64), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(AMDGPU::EXEC);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), CurMaskedReg)
          .addReg(CurReg)
          .addReg(AMDGPU::EXEC);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ORN2_B64), DstReg)
        .addReg(CurMaskedReg)
        .addReg(AMDGPU::EXEC);
  } else {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_OR_B64), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : (unsigned)AMDGPU::EXEC);
  }
}