//===-- SIInsertSkips.cpp - Use predicates for control flow ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass inserts branches on the 0 exec mask over divergent branches
/// when it's expected that jumping over the untaken control flow will be
/// cheaper than having every workitem no-op through it.
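///
/// For example (illustrative only), a divergent region that SILowerControlFlow
/// guarded with SI_MASK_BRANCH gains an explicit skip once it is long enough:
///   s_cbranch_execz BB_join  ; jump over the region if no lanes are active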
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "si-insert-skips"

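// Heuristic threshold: jumping over a region shorter than this many emitted
// instructions is assumed to cost more than letting all-inactive lanes simply
// fall through it.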
static cl::opt<unsigned> SkipThresholdFlag(
  "amdgpu-skip-threshold",
  cl::desc("Number of instructions before jumping over divergent control flow"),
  cl::init(12), cl::Hidden);

namespace {

class SIInsertSkips : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  unsigned SkipThreshold = 0;

  bool shouldSkip(const MachineBasicBlock &From,
                  const MachineBasicBlock &To) const;

  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void kill(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  bool skipMaskBranch(MachineInstr &MI, MachineBasicBlock &MBB);

  bool optimizeVccBranch(MachineInstr &MI) const;

public:
  static char ID;

  SIInsertSkips() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert s_cbranch_execz instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIInsertSkips::ID = 0;

INITIALIZE_PASS(SIInsertSkips, DEBUG_TYPE,
                "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SIInsertSkipsPassID = SIInsertSkips::ID;

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

bool SIInsertSkips::shouldSkip(const MachineBasicBlock &From,
                               const MachineBasicBlock &To) const {
  if (From.succ_empty())
    return false;

  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();

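  // Walk the blocks in layout order from From towards To; this is the region
  // an inserted skip branch would jump over.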
  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // FIXME: Since this is required for correctness, this should be inserted
      // during SILowerControlFlow.

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (TII->hasUnwantedEffectsWhenEXECEmpty(*I))
        return true;

      ++NumInstr;
      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

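  // Only pixel shaders can profitably terminate the wave early: once every
  // lane has been killed, export a null target and end the program instead of
  // executing the remainder of the shader with EXEC == 0.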
  if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(MBB, MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addImm(1)  // vm
    .addImm(0)  // compr
    .addImm(0); // en

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));

  return true;
}

void SIInsertSkips::kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {
    unsigned Opcode = 0;

    // The opcodes are inverted because the inline immediate has to be
    // the first operand, e.g. from "x < imm" to "imm > x"
    switch (MI.getOperand(2).getImm()) {
    case ISD::SETOEQ:
    case ISD::SETEQ:
      Opcode = AMDGPU::V_CMPX_EQ_F32_e64;
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opcode = AMDGPU::V_CMPX_LT_F32_e64;
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opcode = AMDGPU::V_CMPX_LE_F32_e64;
      break;
    case ISD::SETOLT:
    case ISD::SETLT:
      Opcode = AMDGPU::V_CMPX_GT_F32_e64;
      break;
    case ISD::SETOLE:
    case ISD::SETLE:
      Opcode = AMDGPU::V_CMPX_GE_F32_e64;
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opcode = AMDGPU::V_CMPX_LG_F32_e64;
      break;
    case ISD::SETO:
      Opcode = AMDGPU::V_CMPX_O_F32_e64;
      break;
    case ISD::SETUO:
      Opcode = AMDGPU::V_CMPX_U_F32_e64;
      break;
    case ISD::SETUEQ:
      Opcode = AMDGPU::V_CMPX_NLG_F32_e64;
      break;
    case ISD::SETUGT:
      Opcode = AMDGPU::V_CMPX_NGE_F32_e64;
      break;
    case ISD::SETUGE:
      Opcode = AMDGPU::V_CMPX_NGT_F32_e64;
      break;
    case ISD::SETULT:
      Opcode = AMDGPU::V_CMPX_NLE_F32_e64;
      break;
    case ISD::SETULE:
      Opcode = AMDGPU::V_CMPX_NLT_F32_e64;
      break;
    case ISD::SETUNE:
      Opcode = AMDGPU::V_CMPX_NEQ_F32_e64;
      break;
    default:
      llvm_unreachable("invalid ISD:SET cond code");
    }

    assert(MI.getOperand(0).isReg());

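    // V_CMPX writes the lane mask of passing lanes straight to EXEC,
    // deactivating the rest. The VOP2 (_e32) encoding requires src1 to be a
    // VGPR; for an SGPR operand we must keep the VOP3 (_e64) encoding, which
    // carries explicit source-modifier and omod operands.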
    if (TRI->isVGPR(MBB.getParent()->getRegInfo(),
                    MI.getOperand(0).getReg())) {
      Opcode = AMDGPU::getVOPe32(Opcode);
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .add(MI.getOperand(1))
          .add(MI.getOperand(0));
    } else {
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .addReg(AMDGPU::VCC, RegState::Define)
          .addImm(0)  // src0 modifiers
          .add(MI.getOperand(1))
          .addImm(0)  // src1 modifiers
          .add(MI.getOperand(0))
          .addImm(0); // omod
    }
    break;
  }
  case AMDGPU::SI_KILL_I1_TERMINATOR: {
    const MachineOperand &Op = MI.getOperand(0);
    int64_t KillVal = MI.getOperand(1).getImm();
    assert(KillVal == 0 || KillVal == -1);

    // Kill all threads if Op0 is an immediate and equal to the Kill value.
    if (Op.isImm()) {
      int64_t Imm = Op.getImm();
      assert(Imm == 0 || Imm == -1);

      if (Imm == KillVal)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(0);
      break;
    }

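    // Otherwise mask the kill condition into the exec mask:
    //   KillVal == -1:  exec &= ~Op  (kill lanes where Op is set)
    //   KillVal ==  0:  exec &=  Op  (kill lanes where Op is clear)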
    unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
    BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .add(Op);
    break;
  }
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_TERMINATOR");
  }
}

MachineBasicBlock *SIInsertSkips::insertSkipBlock(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}

// Returns true if a branch over the block was inserted.
bool SIInsertSkips::skipMaskBranch(MachineInstr &MI,
                                   MachineBasicBlock &SrcMBB) {
  MachineBasicBlock *DestBB = MI.getOperand(0).getMBB();

  if (!shouldSkip(**SrcMBB.succ_begin(), *DestBB))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator InsPt = std::next(MI.getIterator());

  BuildMI(SrcMBB, InsPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addMBB(DestBB);

  return true;
}

bool SIInsertSkips::optimizeVccBranch(MachineInstr &MI) const {
  // Match:
  // sreg = -1
  // vcc = S_AND_B64 exec, sreg
  // S_CBRANCH_VCC[N]Z
  // =>
  // S_CBRANCH_EXEC[N]Z
  bool Changed = false;
  MachineBasicBlock &MBB = *MI.getParent();
  const unsigned CondReg = AMDGPU::VCC;
  const unsigned ExecReg = AMDGPU::EXEC;
  const unsigned And = AMDGPU::S_AND_B64;

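  // Scan backwards from the branch for the instruction defining vcc; give up
  // after a handful of instructions, if exec is clobbered first, or if the
  // def is not the S_AND_B64 we are looking for.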
  MachineBasicBlock::reverse_iterator A = MI.getReverseIterator(),
                                      E = MBB.rend();
  bool ReadsCond = false;
  unsigned Threshold = 5;
  for (++A ; A != E ; ++A) {
    if (!--Threshold)
      return false;
    if (A->modifiesRegister(ExecReg, TRI))
      return false;
    if (A->modifiesRegister(CondReg, TRI)) {
      if (!A->definesRegister(CondReg, TRI) || A->getOpcode() != And)
        return false;
      break;
    }
    ReadsCond |= A->readsRegister(CondReg, TRI);
  }
  if (A == E)
    return false;

  MachineOperand &Op1 = A->getOperand(1);
  MachineOperand &Op2 = A->getOperand(2);
  if (Op1.getReg() != ExecReg && Op2.isReg() && Op2.getReg() == ExecReg) {
    TII->commuteInstruction(*A);
    Changed = true;
  }
  if (Op1.getReg() != ExecReg)
    return Changed;
  if (Op2.isImm() && Op2.getImm() != -1)
    return Changed;

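  // The other operand must be an all-ones mask: either an inline -1, or a
  // register whose reaching definition is a move of the immediate -1.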
  unsigned SReg = AMDGPU::NoRegister;
  if (Op2.isReg()) {
    SReg = Op2.getReg();
    auto M = std::next(A);
    bool ReadsSreg = false;
    for ( ; M != E ; ++M) {
      if (M->definesRegister(SReg, TRI))
        break;
      if (M->modifiesRegister(SReg, TRI))
        return Changed;
      ReadsSreg |= M->readsRegister(SReg, TRI);
    }
    if (M == E ||
        !M->isMoveImmediate() ||
        !M->getOperand(1).isImm() ||
        M->getOperand(1).getImm() != -1)
      return Changed;
    // If SReg is only read by the AND, fold the -1 immediate into the AND and
    // delete the move.
    if (!ReadsSreg && Op2.isKill()) {
      A->getOperand(2).ChangeToImmediate(-1);
      M->eraseFromParent();
    }
  }

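  // The S_AND_B64 itself is now dead if nothing between it and the branch
  // reads vcc, its scc def is dead, and the branch kills vcc.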
  if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC) &&
      MI.killsRegister(CondReg, TRI))
    A->eraseFromParent();

  bool IsVCCZ = MI.getOpcode() == AMDGPU::S_CBRANCH_VCCZ;
  if (SReg == ExecReg) {
    if (IsVCCZ) {
      MI.eraseFromParent();
      return true;
    }
    MI.setDesc(TII->get(AMDGPU::S_BRANCH));
  } else {
    MI.setDesc(TII->get(IsVCCZ ? AMDGPU::S_CBRANCH_EXECZ
                               : AMDGPU::S_CBRANCH_EXECNZ));
  }

  MI.RemoveOperand(MI.findRegisterUseOperandIdx(CondReg, false /*Kill*/, TRI));
  MI.addImplicitDefUseOperands(*MBB.getParent());

  return true;
}

bool SIInsertSkips::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  bool HaveKill = false;
  bool MadeChange = false;

  // Track depth of exec mask, divergent branches.
  SmallVector<MachineBasicBlock *, 16> ExecBranchStack;

  MachineFunction::iterator NextBB;

  MachineBasicBlock *EmptyMBBAtEnd = nullptr;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;
    bool HaveSkipBlock = false;

    if (!ExecBranchStack.empty() && ExecBranchStack.back() == &MBB) {
      // Reached convergence point for last divergent branch.
      ExecBranchStack.pop_back();
    }

    if (HaveKill && ExecBranchStack.empty()) {
      HaveKill = false;

      // TODO: Insert skip if exec is 0?
    }

    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_MASK_BRANCH:
        ExecBranchStack.push_back(MI.getOperand(0).getMBB());
        MadeChange |= skipMaskBranch(MI, MBB);
        break;

      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // FIXME: Shouldn't this be handled by BranchFolding?
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          MI.eraseFromParent();
        } else if (HaveSkipBlock) {
          // A skip block performing the kill has been inserted after this
          // block. Remove this unconditional branch so that the
          // S_CBRANCH_EXECNZ can skip the two kill instructions when the exec
          // mask is non-zero.
          MI.eraseFromParent();
        }
        break;

      case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      case AMDGPU::SI_KILL_I1_TERMINATOR:
        MadeChange = true;
        kill(MI);

        if (ExecBranchStack.empty()) {
          if (NextBB != BE && skipIfDead(MI, *NextBB)) {
            HaveSkipBlock = true;
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else {
          HaveKill = true;
        }

        MI.eraseFromParent();
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        // FIXME: Should move somewhere else
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN_TO_EPILOG is not the last instruction. Add an empty
          // block at the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;

      case AMDGPU::S_CBRANCH_VCCZ:
      case AMDGPU::S_CBRANCH_VCCNZ:
        MadeChange |= optimizeVccBranch(MI);
        break;

      default:
        break;
      }
    }
  }

  return MadeChange;
}