//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
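//
// As an illustrative sketch (an added summary; the register names are
// invented, not taken from a test):
//
//   bb.0:
//     %cc:vreg_1 = COPY %cmp      ; %cmp is a lane mask defined by a V_CMP
//     ...
//   bb.1:
//     %use:sreg_64 = COPY %cc:vreg_1
//
// After this pass, %cc lives in a wave-sized scalar register (SReg_32 for
// wave32, SReg_64 for wave64), and merge code is inserted wherever control
// flow can carry lane masks from multiple paths into the same use.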
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static unsigned createLaneMaskReg(MachineFunction &MF);
static unsigned insertUndefLaneMask(MachineBasicBlock &MBB);

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

private:
  bool IsWave32 = false;
  MachineFunction *MF = nullptr;
  MachineDominatorTree *DT = nullptr;
  MachinePostDominatorTree *PDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;

  unsigned ExecReg;
  unsigned MovOp;
  unsigned AndOp;
  unsigned OrOp;
  unsigned XorOp;
  unsigned AndN2Op;
  unsigned OrN2Op;

  DenseSet<unsigned> ConstrainRegs;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  void lowerCopiesFromI1();
  void lowerPhis();
  void lowerCopiesToI1();
  bool isConstantLaneMask(unsigned Reg, bool &Val) const;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           unsigned DstReg, unsigned PrevReg, unsigned CurReg);
  MachineBasicBlock::iterator
  getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;

  bool isVreg1(unsigned Reg) const {
    return Register::isVirtualRegister(Reg) &&
           MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }

  bool isLaneMaskReg(unsigned Reg) const {
    return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
           TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
               ST->getWavefrontSize();
  }
};

/// Helper class that analyzes the relationship between the incoming values of
/// a phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
///
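/// Illustrative example (an added sketch; block names are invented): suppose
/// A ends in a divergent SI_IF with successors B and D, B falls through to D,
/// and D contains a phi with incoming values from A and B. The blocks a wave
/// may reach from the incoming blocks {A, B} before entering D are A and B
/// themselves. A has no predecessor in this set, so its incoming value can be
/// taken as-is; B is reached from A, so its incoming value must be merged
/// with the lane mask that was live out of A.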
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT) : PDT(PDT) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock,
               ArrayRef<MachineBasicBlock *> IncomingBlocks) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (MachineBasicBlock *MBB : IncomingBlocks) {
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      bool Divergent = false;
      for (MachineInstr &MI : MBB->terminators()) {
        if (MI.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO ||
            MI.getOpcode() == AMDGPU::SI_IF ||
            MI.getOpcode() == AMDGPU::SI_ELSE ||
            MI.getOpcode() == AMDGPU::SI_LOOP) {
          Divergent = true;
          break;
        }
      }

      if (Divergent && PDT.dominates(&DefBlock, MBB)) {
        for (MachineBasicBlock *Succ : MBB->successors())
          Stack.push_back(Succ);
      }
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      for (MachineBasicBlock *Succ : MBB->successors())
        Stack.push_back(Succ);
    }

    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (llvm::find(Predecessors, UnreachablePred) == Predecessors.end())
            Predecessors.push_back(UnreachablePred);
        }
      }
      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
///
/// TODO: This rule is conservative because it does not check whether the
///       relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
///
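/// Reading the rule off the diagram above (an added worked example): for a
/// def in B with a use in C, the nearest common post-dominator of B and C is
/// C. The backward edge into B (via B -> A -> B) is reachable from B without
/// passing through C, so findLoop() reports a loop and the def is lowered
/// with bitwise operations.
///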
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def
  // block; level 1 is all blocks reachable including but not going through
  // the def block's IPDOM; etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge
  // is reachable without going through the IPDOM of the def block (if the
  // IPDOM itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      ArrayRef<MachineBasicBlock *> Blocks = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (MachineBasicBlock *MBB : Blocks)
      Dom = DT.findNearestCommonDominator(Dom, MBB);

    if (!inLoopLevel(*Dom, LoopLevel, Blocks)) {
      SSAUpdater.AddAvailableValue(Dom, insertUndefLaneMask(*Dom));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Blocks))
          SSAUpdater.AddAvailableValue(Pred, insertUndefLaneMask(*Pred));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<MachineBasicBlock *> Blocks) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    if (llvm::find(Blocks, &MBB) != Blocks.end())
      return true;

    return false;
  }

  void advanceLevel() {
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

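// Lane masks occupy wave-sized scalar registers: SReg_32 under wave32 and
// SReg_64 under wave64. The two helpers below create such a register and
// materialize an undef lane mask via IMPLICIT_DEF, respectively.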
static unsigned createLaneMaskReg(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass
                                                 : &AMDGPU::SReg_64RegClass);
}

static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned UndefReg = createLaneMaskReg(MF);
  BuildMI(MBB, MBB.getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  MF = &TheMF;
  MRI = &MF->getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  IsWave32 = ST->isWave32();

  if (IsWave32) {
    ExecReg = AMDGPU::EXEC_LO;
    MovOp = AMDGPU::S_MOV_B32;
    AndOp = AMDGPU::S_AND_B32;
    OrOp = AMDGPU::S_OR_B32;
    XorOp = AMDGPU::S_XOR_B32;
    AndN2Op = AMDGPU::S_ANDN2_B32;
    OrN2Op = AMDGPU::S_ORN2_B32;
  } else {
    ExecReg = AMDGPU::EXEC;
    MovOp = AMDGPU::S_MOV_B64;
    AndOp = AMDGPU::S_AND_B64;
    OrOp = AMDGPU::S_OR_B64;
    XorOp = AMDGPU::S_XOR_B64;
    AndN2Op = AMDGPU::S_ANDN2_B64;
    OrN2Op = AMDGPU::S_ORN2_B64;
  }

  lowerCopiesFromI1();
  lowerPhis();
  lowerCopiesToI1();

  for (unsigned Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
  ConstrainRegs.clear();

  return true;
}

#ifndef NDEBUG
static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                Register Reg) {
  unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
  return Size == 1 || Size == 32;
}
#endif

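/// A COPY that moves an i1 value out of a vreg_1 into a VGPR (as can occur
/// around inline assembly) is rewritten as a per-lane select. Roughly, as an
/// illustrative MIR sketch (not taken from a test):
///
///   %dst:vgpr_32 = COPY %src:vreg_1
///
/// becomes
///
///   %dst:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, -1, %src
///
/// so each active lane receives -1 where its mask bit is set and 0 otherwise.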
void SILowerI1Copies::lowerCopiesFromI1() {
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
}

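/// Lower all PHIs that define a vreg_1 register to lane-mask PHIs.
///
/// Incoming values are looked through COPYs from lane-mask registers. If the
/// PHI is observed from outside a loop, every incoming value is merged into
/// the running mask at the end of its incoming block; otherwise,
/// PhiIncomingAnalysis is used so that incoming values are taken as-is
/// wherever the wave cannot have visited another incoming block first.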
void SILowerI1Copies::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT);
  SmallVector<MachineInstr *, 4> Vreg1Phis;
  SmallVector<MachineBasicBlock *, 4> IncomingBlocks;
  SmallVector<unsigned, 4> IncomingRegs;
  SmallVector<unsigned, 4> IncomingUpdated;
#ifndef NDEBUG
  DenseSet<unsigned> PhiRegisters;
#endif

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB.phis()) {
      if (isVreg1(MI.getOperand(0).getReg()))
        Vreg1Phis.push_back(&MI);
    }
  }

  MachineBasicBlock *PrevMBB = nullptr;
  for (MachineInstr *MI : Vreg1Phis) {
    MachineBasicBlock &MBB = *MI->getParent();
    if (&MBB != PrevMBB) {
      LF.initialize(MBB);
      PrevMBB = &MBB;
    }

    LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);

    Register DstReg = MI->getOperand(0).getReg();
    MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                      : &AMDGPU::SReg_64RegClass);

    // Collect incoming values.
    for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
      assert(i + 1 < MI->getNumOperands());
      Register IncomingReg = MI->getOperand(i).getReg();
      MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
      MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

      if (IncomingDef->getOpcode() == AMDGPU::COPY) {
        IncomingReg = IncomingDef->getOperand(1).getReg();
        assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
        assert(!IncomingDef->getOperand(1).getSubReg());
      } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
        continue;
      } else {
        assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
      }

      IncomingBlocks.push_back(IncomingMBB);
      IncomingRegs.push_back(IncomingReg);
    }

#ifndef NDEBUG
    PhiRegisters.insert(DstReg);
#endif

    // Phis in a loop that are observed outside the loop receive a simple but
    // conservatively correct treatment.
    std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
    for (MachineInstr &Use : MRI->use_instructions(DstReg))
      DomBlocks.push_back(Use.getParent());

    MachineBasicBlock *PostDomBound =
        PDT->findNearestCommonDominator(DomBlocks);
    unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

    SSAUpdater.Initialize(DstReg);

    if (FoundLoopLevel) {
      LF.addLoopEntries(FoundLoopLevel, SSAUpdater, IncomingBlocks);

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        IncomingUpdated.push_back(createLaneMaskReg(*MF));
        SSAUpdater.AddAvailableValue(IncomingBlocks[i],
                                     IncomingUpdated.back());
      }

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
      }
    } else {
      // The phi is not observed from outside a loop. Use a more accurate
      // lowering.
      PIA.analyze(MBB, IncomingBlocks);

      for (MachineBasicBlock *MBB : PIA.predecessors())
        SSAUpdater.AddAvailableValue(MBB, insertUndefLaneMask(*MBB));

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        if (PIA.isSource(IMBB)) {
          IncomingUpdated.push_back(0);
          SSAUpdater.AddAvailableValue(&IMBB, IncomingRegs[i]);
        } else {
          IncomingUpdated.push_back(createLaneMaskReg(*MF));
          SSAUpdater.AddAvailableValue(&IMBB, IncomingUpdated.back());
        }
      }

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        if (!IncomingUpdated[i])
          continue;

        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
      }
    }

    unsigned NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
    if (NewReg != DstReg) {
      MRI->replaceRegWith(NewReg, DstReg);
      MI->eraseFromParent();
    }

    IncomingBlocks.clear();
    IncomingRegs.clear();
    IncomingUpdated.clear();
  }
}

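/// Lower IMPLICIT_DEFs and COPYs that define a vreg_1 register by retyping
/// the destination as a lane-mask register. A source that is not already a
/// lane mask (e.g. a plain 32-bit value) is first converted with V_CMP_NE_U32
/// against zero. Defs in a loop that are observed outside the loop are merged
/// across iterations via buildMergeLaneMasks.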
void SILowerI1Copies::lowerCopiesToI1() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                        : &AMDGPU::SReg_64RegClass);
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!Register::isVirtualRegister(SrcReg) ||
          (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
        unsigned TmpReg = createLaneMaskReg(*MF);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addReg(SrcReg)
            .addImm(0);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
      for (MachineInstr &Use : MRI->use_instructions(DstReg))
        DomBlocks.push_back(Use.getParent());

      MachineBasicBlock *PostDomBound =
          PDT->findNearestCommonDominator(DomBlocks);
      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }

    for (MachineInstr *MI : DeadCopies)
      MI->eraseFromParent();
    DeadCopies.clear();
  }
}

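/// Look through COPYs of lane-mask registers and return true if \p Reg is
/// ultimately defined by an S_MOV of the all-zeros (0) or all-ones (-1)
/// immediate; on success, \p Val receives the constant lane value.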
bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Register::isVirtualRegister(Reg))
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != MovOp)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}

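/// Report in \p Def and \p Use whether \p MI defines and/or uses SCC.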
static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY.
  llvm_unreachable("SCC used by terminator but no def in block");
}

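/// Merge the lane mask \p CurReg, which is valid for the lanes that are
/// active at the insertion point \p I, into the previously accumulated mask
/// \p PrevReg:
///
///   DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC)
///
/// Constant all-0/all-1 inputs fold parts of this expression away. An
/// illustrative wave64 expansion of the fully general case (register names
/// are invented for the sketch):
///
///   %prev_masked = S_ANDN2_B64 %prev, $exec
///   %cur_masked  = S_AND_B64 %cur, $exec
///   %dst         = S_OR_B64 %prev_masked, %cur_masked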
void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          const DebugLoc &DL, unsigned DstReg,
                                          unsigned PrevReg, unsigned CurReg) {
  bool PrevVal;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
          .addReg(ExecReg)
          .addImm(-1);
    }
    return;
  }

  unsigned PrevMaskedReg = 0;
  unsigned CurMaskedReg = 0;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
          .addReg(CurReg)
          .addReg(ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
        .addReg(CurMaskedReg)
        .addReg(ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
  }
}