//===-- SIInsertSkips.cpp - Use predicates for control flow ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass inserts branches on the 0 exec mask over divergent
/// branches when it's expected that jumping over the untaken control flow
/// will be cheaper than having every workitem no-op through it.
//
//===----------------------------------------------------------------------===//

17#include "AMDGPU.h"
18#include "AMDGPUSubtarget.h"
19#include "SIInstrInfo.h"
20#include "SIMachineFunctionInfo.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000021#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/CodeGen/MachineBasicBlock.h"
Matt Arsenault78fc9da2016-08-22 19:33:16 +000024#include "llvm/CodeGen/MachineFunction.h"
25#include "llvm/CodeGen/MachineFunctionPass.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000026#include "llvm/CodeGen/MachineInstr.h"
Matt Arsenault78fc9da2016-08-22 19:33:16 +000027#include "llvm/CodeGen/MachineInstrBuilder.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000028#include "llvm/CodeGen/MachineOperand.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/DebugLoc.h"
Matt Arsenault78fc9da2016-08-22 19:33:16 +000031#include "llvm/MC/MCAsmInfo.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000032#include "llvm/Pass.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Target/TargetMachine.h"
35#include <cassert>
36#include <cstdint>
37#include <iterator>
Matt Arsenault78fc9da2016-08-22 19:33:16 +000038
using namespace llvm;

#define DEBUG_TYPE "si-insert-skips"

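// Rough instruction-count budget: jumping over a divergent region is only
// considered worthwhile once the region is at least this many instructions
// long. The cl::opt below makes it overridable from the command line (e.g.
// "llc ... -amdgpu-skip-threshold=4", assuming the invoking tool parses
// cl::opt flags as llc does).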
static cl::opt<unsigned> SkipThresholdFlag(
  "amdgpu-skip-threshold",
  cl::desc("Number of instructions before jumping over divergent control flow"),
  cl::init(12), cl::Hidden);

namespace {

class SIInsertSkips : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  unsigned SkipThreshold = 0;

  bool shouldSkip(const MachineBasicBlock &From,
                  const MachineBasicBlock &To) const;

  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void kill(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  bool skipMaskBranch(MachineInstr &MI, MachineBasicBlock &MBB);

public:
  static char ID;

  SIInsertSkips() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert s_cbranch_execz instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIInsertSkips::ID = 0;

INITIALIZE_PASS(SIInsertSkips, DEBUG_TYPE,
                "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SIInsertSkipsPassID = SIInsertSkips::ID;

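// Note: this pass is registered by ID; there is no create* factory in this
// file. A target pass config would typically schedule it with something like
// the following sketch (not code from this file):
//
//   addPass(&llvm::SIInsertSkipsPassID);

// These opcodes expand to no real machine code, so they are free when
// estimating how many instructions a skip branch would jump over.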
static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

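// Estimate whether the code between From and To is expensive enough (or
// unsafe to execute with EXEC == 0) that branching over it beats letting
// every inactive lane no-op through it.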
bool SIInsertSkips::shouldSkip(const MachineBasicBlock &From,
                               const MachineBasicBlock &To) const {
  if (From.succ_empty())
    return false;

  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();

  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // FIXME: Since this is required for correctness, this should be inserted
      // during SILowerControlFlow.

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      // The destination register of V_READFIRSTLANE/V_READLANE may be used
      // as an operand by some SALU instruction. If the exec mask is zero,
      // the vector instruction defining that register is not executed, so
      // the scalar instruction would operate on undefined data. Therefore
      // we should avoid predicated execution for V_READFIRSTLANE/V_READLANE.
      if ((I->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) ||
          (I->getOpcode() == AMDGPU::V_READLANE_B32)) {
        return true;
      }

      if (I->isInlineAsm()) {
        const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
        const char *AsmStr = I->getOperand(0).getSymbolName();

        // The inline asm length estimate is in bytes, assuming the longest
        // possible instruction encoding.
        uint64_t MaxAsmSize = TII->getInlineAsmLength(AsmStr, *MAI);
        NumInstr += MaxAsmSize / MAI->getMaxInstLength();
      } else {
        ++NumInstr;
      }

      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

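// Pixel shaders only: when the whole wave may be dead after a kill
// (EXEC == 0), fall through into a fresh block that performs a null export
// and terminates the wavefront, instead of executing the rest of the shader.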
bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(MBB, MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions.
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addImm(1)  // vm
    .addImm(0)  // compr
    .addImm(0); // en

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));

  return true;
}

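// Expand a SI_KILL_*_TERMINATOR pseudo in place: a float compare-and-kill
// becomes a V_CMPX instruction (which writes its result directly into EXEC),
// and an i1 kill becomes an S_AND/S_ANDN2 of EXEC, permanently disabling the
// failing lanes.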
void SIInsertSkips::kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {
    unsigned Opcode = 0;

    // The opcodes are inverted because the inline immediate has to be
    // the first operand, e.g. from "x < imm" to "imm > x".
    switch (MI.getOperand(2).getImm()) {
    case ISD::SETOEQ:
    case ISD::SETEQ:
      Opcode = AMDGPU::V_CMPX_EQ_F32_e64;
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opcode = AMDGPU::V_CMPX_LT_F32_e64;
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opcode = AMDGPU::V_CMPX_LE_F32_e64;
      break;
    case ISD::SETOLT:
    case ISD::SETLT:
      Opcode = AMDGPU::V_CMPX_GT_F32_e64;
      break;
    case ISD::SETOLE:
    case ISD::SETLE:
      Opcode = AMDGPU::V_CMPX_GE_F32_e64;
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opcode = AMDGPU::V_CMPX_LG_F32_e64;
      break;
    case ISD::SETO:
      Opcode = AMDGPU::V_CMPX_O_F32_e64;
      break;
    case ISD::SETUO:
      Opcode = AMDGPU::V_CMPX_U_F32_e64;
      break;
    case ISD::SETUEQ:
      Opcode = AMDGPU::V_CMPX_NLG_F32_e64;
      break;
    case ISD::SETUGT:
      Opcode = AMDGPU::V_CMPX_NGE_F32_e64;
      break;
    case ISD::SETUGE:
      Opcode = AMDGPU::V_CMPX_NGT_F32_e64;
      break;
    case ISD::SETULT:
      Opcode = AMDGPU::V_CMPX_NLE_F32_e64;
      break;
    case ISD::SETULE:
      Opcode = AMDGPU::V_CMPX_NLT_F32_e64;
      break;
    case ISD::SETUNE:
      Opcode = AMDGPU::V_CMPX_NEQ_F32_e64;
      break;
    default:
      llvm_unreachable("invalid ISD:SET cond code");
    }

    assert(MI.getOperand(0).isReg());

    MachineInstr *NewMI;
    if (TRI->isVGPR(MBB.getParent()->getRegInfo(),
                    MI.getOperand(0).getReg())) {
      Opcode = AMDGPU::getVOPe32(Opcode);
      NewMI = BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .add(MI.getOperand(1))
          .add(MI.getOperand(0));
    } else {
      NewMI = BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .addReg(AMDGPU::VCC, RegState::Define)
          .addImm(0)  // src0 modifiers
          .add(MI.getOperand(1))
          .addImm(0)  // src1 modifiers
          .add(MI.getOperand(0))
          .addImm(0); // omod
    }
    // Clear isRenamable bit if new opcode requires it to be 0.
    if (NewMI->hasExtraSrcRegAllocReq())
      for (MachineOperand &NewMO : NewMI->uses())
        if (NewMO.isReg() && NewMO.isUse())
          NewMO.setIsRenamable(false);
    break;
  }
  case AMDGPU::SI_KILL_I1_TERMINATOR: {
    const MachineOperand &Op = MI.getOperand(0);
    int64_t KillVal = MI.getOperand(1).getImm();
    assert(KillVal == 0 || KillVal == -1);

    // Kill all threads if Op0 is an immediate and equal to the Kill value.
    if (Op.isImm()) {
      int64_t Imm = Op.getImm();
      assert(Imm == 0 || Imm == -1);

      if (Imm == KillVal)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(0);
      break;
    }

    unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
    BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .add(Op);
    break;
  }
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_TERMINATOR");
  }
}

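// Create an empty block immediately after MBB (and make it a successor) to
// hold the export-null/endpgm sequence built by skipIfDead.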
MachineBasicBlock *SIInsertSkips::insertSkipBlock(
  MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}

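// SI_MASK_BRANCH is the placeholder pseudo left at a divergent branch by
// control flow lowering; it only becomes a real S_CBRANCH_EXECZ here, and
// only when shouldSkip decides the region being jumped over is long enough
// to pay for the branch.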
// Returns true if a branch over the block was inserted.
bool SIInsertSkips::skipMaskBranch(MachineInstr &MI,
                                   MachineBasicBlock &SrcMBB) {
  MachineBasicBlock *DestBB = MI.getOperand(0).getMBB();

  if (!shouldSkip(**SrcMBB.succ_begin(), *DestBB))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator InsPt = std::next(MI.getIterator());

  BuildMI(SrcMBB, InsPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addMBB(DestBB);

  return true;
}

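// Walk the blocks in layout order, tracking still-open divergent branches on
// ExecBranchStack, and expand the pseudos that remain after control flow
// lowering: SI_MASK_BRANCH, SI_KILL_*_TERMINATOR and SI_RETURN_TO_EPILOG.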
bool SIInsertSkips::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  bool HaveKill = false;
  bool MadeChange = false;

  // Track depth of exec mask, divergent branches.
  SmallVector<MachineBasicBlock *, 16> ExecBranchStack;

  MachineFunction::iterator NextBB;

  MachineBasicBlock *EmptyMBBAtEnd = nullptr;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;
    bool HaveSkipBlock = false;

    if (!ExecBranchStack.empty() && ExecBranchStack.back() == &MBB) {
      // Reached convergence point for last divergent branch.
      ExecBranchStack.pop_back();
    }

    if (HaveKill && ExecBranchStack.empty()) {
      HaveKill = false;

      // TODO: Insert skip if exec is 0?
    }

    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_MASK_BRANCH:
        ExecBranchStack.push_back(MI.getOperand(0).getMBB());
        MadeChange |= skipMaskBranch(MI, MBB);
        break;

      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // FIXME: Shouldn't this be handled by BranchFolding?
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          MI.eraseFromParent();
        } else if (HaveSkipBlock) {
          // A skip block has been inserted after the current block, so
          // remove this unconditional branch and let execution skip over
          // the two instructions performing the kill when the exec mask is
          // non-zero.
          MI.eraseFromParent();
        }
        break;

      case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      case AMDGPU::SI_KILL_I1_TERMINATOR:
        MadeChange = true;
        kill(MI);

        if (ExecBranchStack.empty()) {
          if (skipIfDead(MI, *NextBB)) {
            HaveSkipBlock = true;
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else {
          HaveKill = true;
        }

        MI.eraseFromParent();
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        // FIXME: Should move somewhere else
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN_TO_EPILOG is not the last instruction. Add an empty
          // block at the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;

      default:
        break;
      }
    }
  }

  return MadeChange;
}