//===-- SILowerControlFlow.cpp - Use predicates for control flow ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  std::pair<MachineBasicBlock *, MachineBasicBlock *>
  splitBlock(MachineBasicBlock &MBB, MachineBasicBlock::iterator I);

  void splitLoadM0BlockLiveIns(LivePhysRegs &RemainderLiveRegs,
                               const MachineRegisterInfo &MRI,
                               const MachineInstr &MI,
                               MachineBasicBlock &LoopBB,
                               MachineBasicBlock &RemainderBB,
                               unsigned SaveReg,
                               const MachineOperand &IdxReg);

  void emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB, DebugLoc DL,
                              MachineInstr *MovRel,
                              const MachineOperand &IdxReg,
                              int Offset);

  bool loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  std::pair<unsigned, int> computeIndirectRegAndOffset(unsigned VecReg,
                                                       int Offset) const;
  bool indirectSrc(MachineInstr &MI);
  bool indirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

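// Decide whether an explicit skip branch from From to To is worthwhile:
// returns true if the region is at least SkipThreshold instructions long, or
// contains a VCC branch that must not execute while EXEC is zero.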
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {
  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (I->isInlineAsm()) {
        const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
        const char *AsmStr = I->getOperand(0).getSymbolName();

        // inlineasm length estimate is number of bytes assuming the longest
        // instruction.
        uint64_t MaxAsmSize = TII->getInlineAsmLength(AsmStr, *MAI);
        NumInstr += MaxAsmSize / MAI->getMaxInstLength();
      } else {
        ++NumInstr;
      }

      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

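// If the code between From and the To block is long enough to be worth
// branching over, insert an S_CBRANCH_EXECZ that jumps to To when no lanes
// are active.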
void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

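// After a kill in a pixel shader, insert a block that exports to the null
// target and terminates the wavefront when EXEC is zero; lanes that are still
// active branch over it to NextBB. Returns true if the skip block was
// inserted.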
bool SILowerControlFlow::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());
  SkipBB->addSuccessor(&NextBB);

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef);

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));

  return true;
}

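// Lower SI_IF: activate only the lanes for which the condition (VCC operand)
// is set, remember the lanes that still need the other side in Reg, and
// optionally branch over the region when no lanes remain active.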
void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2))
    .addReg(Reg);

  MI.eraseFromParent();
}

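// Lower SI_ELSE: switch to the lanes recorded by the matching SI_IF so the
// else side runs on exactly the lanes that did not execute the then side.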
void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2))
    .addReg(Dst);

  MI.eraseFromParent();
}

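// Lower SI_BREAK: add the currently active lanes (EXEC) to the accumulated
// mask of lanes that have left the loop.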
void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

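// Lower SI_IF_BREAK: add the lanes whose break condition (the VCC operand) is
// set to the accumulated break mask.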
void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

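// Lower SI_ELSE_BREAK: merge the break mask accumulated in the else region
// with the one saved before it.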
void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

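// Lower SI_LOOP: remove the lanes that have broken out (Src) from EXEC and
// branch back to the loop header while any lanes remain active.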
void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

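// Lower SI_END_CF: re-enable the lanes that were turned off when this control
// flow region was entered.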
void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

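// Drop an S_BRANCH that simply falls through to the next block.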
void SILowerControlFlow::Branch(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
  if (MBB == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

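// Lower SI_KILL_TERMINATOR: disable lanes whose source operand is negative,
// either by clearing EXEC outright for a negative immediate or with
// V_CMPX_LE_F32 for a register operand.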
void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative.
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing.
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

// All currently live registers must remain so in the remainder block.
void SILowerControlFlow::splitLoadM0BlockLiveIns(LivePhysRegs &RemainderLiveRegs,
                                                 const MachineRegisterInfo &MRI,
                                                 const MachineInstr &MI,
                                                 MachineBasicBlock &LoopBB,
                                                 MachineBasicBlock &RemainderBB,
                                                 unsigned SaveReg,
                                                 const MachineOperand &IdxReg) {
  // Add reg defined in loop body.
  RemainderLiveRegs.addReg(SaveReg);

  if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) {
    if (!Val->isUndef()) {
      RemainderLiveRegs.addReg(Val->getReg());
      LoopBB.addLiveIn(Val->getReg());
    }
  }

  for (unsigned Reg : RemainderLiveRegs) {
    if (MRI.isAllocatable(Reg))
      RemainderBB.addLiveIn(Reg);
  }

  const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  if (!Src->isUndef())
    LoopBB.addLiveIn(Src->getReg());

  if (!IdxReg.isUndef())
    LoopBB.addLiveIn(IdxReg.getReg());
  LoopBB.sortUniqueLiveIns();
}

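// Emit the body of the waterfall loop: read one lane's index with
// V_READFIRSTLANE, set M0 to it, run MovRel for every lane that holds the
// same index, then repeat until all lanes have been serviced.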
void SILowerControlFlow::emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB,
                                                DebugLoc DL,
                                                MachineInstr *MovRel,
                                                const MachineOperand &IdxReg,
                                                int Offset) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  // Read the next variant into VCC (lower 32 bits) <- also loop target
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Move index from VCC into M0
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
    .addReg(AMDGPU::VCC_LO);

  // Compare the just read M0 value to all possible Idx values
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
    .addReg(AMDGPU::M0)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Update EXEC, save the original EXEC value to VCC
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
    .addReg(AMDGPU::VCC);

  if (Offset != 0) {
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .addReg(AMDGPU::M0)
      .addImm(Offset);
  }

  // Do the actual move
  LoopBB.insert(I, MovRel);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::VCC);

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);
}

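// Create an empty block immediately after MBB and make it a successor; the
// caller fills it in (see skipIfDead).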
MachineBasicBlock *SILowerControlFlow::insertSkipBlock(
  MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}

std::pair<MachineBasicBlock *, MachineBasicBlock *>
SILowerControlFlow::splitBlock(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I) {
  MachineFunction *MF = MBB.getParent();

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessors(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  return std::make_pair(LoopBB, RemainderBB);
}

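// Set M0 to the value needed by the MovRel instruction: copied directly (plus
// Offset) for an SGPR index, or via a waterfall loop over the wavefront for a
// VGPR index.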
// Returns true if a new block was inserted.
bool SILowerControlFlow::loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) {
    if (Offset != 0) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()))
        .addImm(Offset);
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()));
    }

    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return false;
  }

  MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  SaveOp->setIsDead(false);
  unsigned Save = SaveOp->getReg();

  // Reading from a VGPR requires looping over all workitems in the wavefront.
  assert(AMDGPU::SReg_64RegClass.contains(Save) &&
         AMDGPU::VGPR_32RegClass.contains(Idx->getReg()));

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  LivePhysRegs RemainderLiveRegs(TRI);
  RemainderLiveRegs.addLiveOuts(MBB);

  MachineBasicBlock *LoopBB;
  MachineBasicBlock *RemainderBB;

  std::tie(LoopBB, RemainderBB) = splitBlock(MBB, I);

  for (const MachineInstr &Inst : reverse(*RemainderBB))
    RemainderLiveRegs.stepBackward(Inst);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  LoopBB->addSuccessor(RemainderBB);
  LoopBB->addSuccessor(LoopBB);

  splitLoadM0BlockLiveIns(RemainderLiveRegs, MRI, MI, *LoopBB,
                          *RemainderBB, Save, *Idx);

  emitLoadM0FromVGPRLoop(*LoopBB, DL, MovRel, *Idx, Offset);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
  return true;
}

/// \param VecReg The register which holds element zero of the vector being
/// addressed into.
///
/// \param[in] Idx The index operand from the movrel instruction. This must be
/// a register, but may be NoRegister.
///
/// \param[in] Offset As an input, this is the constant offset part of the
/// indirect Index, e.g. v0 = v[VecReg + Offset]. As an output, this is a
/// constant value that needs to be added to the value stored in M0.
std::pair<unsigned, int>
SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg, int Offset) const {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *SuperRC = TRI->getPhysRegClass(VecReg);
  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int NumElts = SuperRC->getSize() / RC->getSize();

  int BaseRegIdx = TRI->getHWRegIndex(SubReg);

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts)
    return std::make_pair(RC->getRegister(BaseRegIdx), Offset);

  int RegIdx = BaseRegIdx + Offset;
  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  unsigned Reg = RC->getRegister(RegIdx);
  return std::make_pair(Reg, Offset);
}

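// Lower SI_INDIRECT_SRC_*: read one element of a vector register, with a
// plain V_MOV for a constant index or V_MOVRELS driven by loadM0 otherwise.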
// Return true if a new block was inserted.
bool SILowerControlFlow::indirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  unsigned Reg;

  std::tie(Reg, Offset) = computeIndirectRegAndOffset(SrcVec->getReg(), Offset);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  if (Idx->getReg() == AMDGPU::NoRegister) {
    // Only had a constant offset, copy the register directly.
    BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(Reg, getUndefRegState(SrcVec->isUndef()));
    MI.eraseFromParent();
    return false;
  }

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
    .addReg(Reg, getUndefRegState(SrcVec->isUndef()))
    .addReg(SrcVec->getReg(), RegState::Implicit);

  return loadM0(MI, MovRel, Offset);
}

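// Lower SI_INDIRECT_DST_*: write one element of a vector register, with a
// plain V_MOV for a constant index or V_MOVRELD driven by loadM0 otherwise.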
// Return true if a new block was inserted.
bool SILowerControlFlow::indirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  unsigned Reg;

  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  std::tie(Reg, Offset) = computeIndirectRegAndOffset(Dst, Offset);

  MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  if (Idx->getReg() == AMDGPU::NoRegister) {
    // Only had a constant offset, copy the register directly.
    BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Reg)
      .addOperand(*Val);
    MI.eraseFromParent();
    return false;
  }

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32), Reg)
    .addReg(Val->getReg(), getUndefRegState(Val->isUndef()))
    .addReg(Dst, RegState::Implicit);

  return loadM0(MI, MovRel, Offset);
}

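// Walk every instruction in the function, replacing the control flow and
// indirect addressing pseudos with real instructions while tracking kill
// depth, EXEC clobbers and FLAT usage.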
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  MachineFunction::iterator NextBB;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      if (I->modifiesRegister(AMDGPU::EXEC, TRI))
        ExecModified = true;

      switch (MI.getOpcode()) {
      default: break;
      case AMDGPU::SI_IF:
        ++Depth;
        If(MI);
        break;

      case AMDGPU::SI_ELSE:
        Else(MI, ExecModified);
        break;

      case AMDGPU::SI_BREAK:
        Break(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        IfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        ElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        ++Depth;
        Loop(MI);
        break;

      case AMDGPU::SI_END_CF:
        if (--Depth == 0 && HaveKill) {
          HaveKill = false;

          if (skipIfDead(MI, *NextBB)) {
            NextBB = std::next(BI);
            BE = MF.end();
            Next = MBB.end();
          }
        }
        EndCf(MI);
        break;

      case AMDGPU::SI_KILL_TERMINATOR:
        if (Depth == 0) {
          if (skipIfDead(MI, *NextBB)) {
            NextBB = std::next(BI);
            BE = MF.end();
            Next = MBB.end();
          }
        } else
          HaveKill = true;
        Kill(MI);
        break;

      case AMDGPU::S_BRANCH:
        Branch(MI);
        break;

      case AMDGPU::SI_INDIRECT_SRC_V1:
      case AMDGPU::SI_INDIRECT_SRC_V2:
      case AMDGPU::SI_INDIRECT_SRC_V4:
      case AMDGPU::SI_INDIRECT_SRC_V8:
      case AMDGPU::SI_INDIRECT_SRC_V16:
        if (indirectSrc(MI)) {
          // The block was split at this point. We can safely skip the middle
          // inserted block to the following which contains the rest of this
          // block's instructions.
          NextBB = std::next(BI);
          BE = MF.end();
          Next = MBB.end();
        }

        break;

      case AMDGPU::SI_INDIRECT_DST_V1:
      case AMDGPU::SI_INDIRECT_DST_V2:
      case AMDGPU::SI_INDIRECT_DST_V4:
      case AMDGPU::SI_INDIRECT_DST_V8:
      case AMDGPU::SI_INDIRECT_DST_V16:
        if (indirectDst(MI)) {
          // The block was split at this point. We can safely skip the middle
          // inserted block to the following which contains the rest of this
          // block's instructions.
          NextBB = std::next(BI);
          BE = MF.end();
          Next = MBB.end();
        }

        break;

      case AMDGPU::SI_RETURN: {
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN is not the last instruction. Add an empty block at
          // the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;
      }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    if (NeedFlat)
      MFI->setHasFlatInstructions(true);
  }

  return true;
}