//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0 // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void splitBlockLiveIns(const MachineBasicBlock &MBB,
                         const MachineInstr &MI,
                         MachineBasicBlock &LoopBB,
                         MachineBasicBlock &RemainderBB,
                         unsigned SaveReg,
                         const MachineOperand &IdxReg);

  void emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB, DebugLoc DL,
                              MachineInstr *MovRel,
                              const MachineOperand &IdxReg,
                              int Offset);

  bool loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  std::pair<unsigned, int> computeIndirectRegAndOffset(unsigned VecReg,
                                                       int Offset) const;
  bool indirectSrc(MachineInstr &MI);
  bool indirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

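// These opcodes are purely bookkeeping and expand to zero machine
// instructions, so they should not count against the skip threshold below.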
static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

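// Decide whether a branch over the blocks between From and To is warranted,
// either because the region is long enough that skipping it is profitable, or
// because it contains a VCC branch that must not execute with EXEC == 0.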
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {
  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (++NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

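// Insert an S_CBRANCH_EXECZ before From that jumps to To when no lanes are
// active, provided shouldSkip decides the jump is worthwhile.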
void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

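// Insert an early-exit sequence for pixel shaders: if EXEC is non-zero, hop
// over the next two instructions; otherwise export to the null target and
// terminate the wavefront, since no lanes can produce results anymore.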
void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getFunction()->getCallingConv() !=
        CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

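// Lower SI_IF: save the current exec mask into Reg, restrict EXEC to the
// lanes where the condition (Vcc) holds, and leave in Reg the mask of lanes
// that still have to run the ELSE side.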
void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2))
    .addReg(Reg);

  MI.eraseFromParent();
}

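// Lower SI_ELSE: using the mask produced by SI_IF, flip EXEC so that exactly
// the lanes which did not execute the IF side become active for the ELSE side.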
void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addOperand(MI.getOperand(2))
    .addReg(Dst);

  MI.eraseFromParent();
}

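// Lower SI_BREAK: merge all currently active lanes into the accumulated mask
// of lanes that have left the loop.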
void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

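// Lower SI_IF_BREAK: add the lanes where the break condition (Vcc) holds to
// the accumulated break mask.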
void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

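// Lower SI_ELSE_BREAK: combine the break mask produced inside the ELSE region
// with the enclosing loop's accumulated break mask.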
void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

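// Lower SI_LOOP: remove the lanes that have broken out (Src) from EXEC and
// branch back to the loop header while any lanes remain active.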
void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

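// Lower SI_END_CF: OR the saved mask back into EXEC, reactivating the lanes
// that were disabled across the control flow region.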
void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

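// An S_BRANCH to the block that immediately follows is a fall-through and can
// simply be removed.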
void SILowerControlFlow::Branch(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
  if (MBB == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

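// Lower SI_KILL: an immediate operand kills either all lanes or none, while a
// register operand disables each lane whose value is negative via V_CMPX.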
void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

// All currently live registers must remain so in the remainder block.
void SILowerControlFlow::splitBlockLiveIns(const MachineBasicBlock &MBB,
                                           const MachineInstr &MI,
                                           MachineBasicBlock &LoopBB,
                                           MachineBasicBlock &RemainderBB,
                                           unsigned SaveReg,
                                           const MachineOperand &IdxReg) {
  LivePhysRegs RemainderLiveRegs(TRI);

  RemainderLiveRegs.addLiveOuts(MBB);
  for (MachineBasicBlock::const_reverse_iterator I = MBB.rbegin(), E(&MI);
       I != E; ++I) {
    RemainderLiveRegs.stepBackward(*I);
  }

  // Add reg defined in loop body.
  RemainderLiveRegs.addReg(SaveReg);

  if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) {
    if (!Val->isUndef()) {
      RemainderLiveRegs.addReg(Val->getReg());
      LoopBB.addLiveIn(Val->getReg());
    }
  }

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  for (unsigned Reg : RemainderLiveRegs) {
    if (MRI.isAllocatable(Reg))
      RemainderBB.addLiveIn(Reg);
  }

  const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  if (!Src->isUndef())
    LoopBB.addLiveIn(Src->getReg());

  if (!IdxReg.isUndef())
    LoopBB.addLiveIn(IdxReg.getReg());
  LoopBB.sortUniqueLiveIns();
}

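// Emit the body of the waterfall loop: read one remaining lane's index with
// V_READFIRSTLANE_B32 into M0, enable exactly the lanes that share that index,
// run MovRel for them, then clear those lanes from EXEC and loop until no
// lanes are left.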
void SILowerControlFlow::emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB,
                                                DebugLoc DL,
                                                MachineInstr *MovRel,
                                                const MachineOperand &IdxReg,
                                                int Offset) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  // Read the next variant into VCC (lower 32 bits) <- also loop target
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Move index from VCC into M0
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
    .addReg(AMDGPU::VCC_LO);

  // Compare the just read M0 value to all possible Idx values
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
    .addReg(AMDGPU::M0)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Update EXEC, save the original EXEC value to VCC
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
    .addReg(AMDGPU::VCC);

  if (Offset != 0) {
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .addReg(AMDGPU::M0)
      .addImm(Offset);
  }

  // Do the actual move
  LoopBB.insert(I, MovRel);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::VCC);

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);
}

// Returns true if a new block was inserted.
bool SILowerControlFlow::loadM0(MachineInstr &MI, MachineInstr *MovRel,
                                int Offset) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) {
    if (Offset != 0) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()))
        .addImm(Offset);
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()));
    }

    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return false;
  }

  MachineFunction &MF = *MBB.getParent();
  MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  SaveOp->setIsDead(false);
  unsigned Save = SaveOp->getReg();

  // Reading from a VGPR requires looping over all workitems in the wavefront.
  assert(AMDGPU::SReg_64RegClass.contains(Save) &&
         AMDGPU::VGPR_32RegClass.contains(Idx->getReg()));

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF.insert(MBBI, LoopBB);
  MF.insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  splitBlockLiveIns(MBB, MI, *LoopBB, *RemainderBB, Save, *Idx);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessors(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
  MBB.addSuccessor(LoopBB);

  emitLoadM0FromVGPRLoop(*LoopBB, DL, MovRel, *Idx, Offset);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
  return true;
}

/// \param VecReg  The register which holds element zero of the vector being
///                addressed into.
/// \param Offset  The constant offset part of the indirect index,
///                e.g. v0 = v[VecReg + Offset].
///
/// \returns The base register to address directly, and the remaining constant
/// offset that still needs to be added to the value stored in M0.
std::pair<unsigned, int>
SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                int Offset) const {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *SuperRC = TRI->getPhysRegClass(VecReg);
  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int NumElts = SuperRC->getSize() / RC->getSize();

  int BaseRegIdx = TRI->getHWRegIndex(SubReg);

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts)
    return std::make_pair(RC->getRegister(BaseRegIdx), Offset);

  int RegIdx = BaseRegIdx + Offset;
  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  unsigned Reg = RC->getRegister(RegIdx);
  return std::make_pair(Reg, Offset);
}

// Return true if a new block was inserted.
bool SILowerControlFlow::indirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  unsigned Reg;

  std::tie(Reg, Offset) = computeIndirectRegAndOffset(SrcVec->getReg(), Offset);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  if (Idx->getReg() == AMDGPU::NoRegister) {
    // Only had a constant offset, copy the register directly.
    BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(Reg, getUndefRegState(SrcVec->isUndef()));
    MI.eraseFromParent();
    return false;
  }

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
    .addReg(Reg, getUndefRegState(SrcVec->isUndef()))
    .addReg(SrcVec->getReg(), RegState::Implicit);

  return loadM0(MI, MovRel, Offset);
}

// Return true if a new block was inserted.
bool SILowerControlFlow::indirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  unsigned Reg;

  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  std::tie(Reg, Offset) = computeIndirectRegAndOffset(Dst, Offset);

  MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  if (Idx->getReg() == AMDGPU::NoRegister) {
    // Only had a constant offset, copy the register directly.
    BuildMI(MBB, MI.getIterator(), DL, TII->get(AMDGPU::V_MOV_B32_e32), Reg)
      .addOperand(*Val);
    MI.eraseFromParent();
    return false;
  }

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32), Reg)
    .addReg(Val->getReg(), getUndefRegState(Val->isUndef()))
    .addReg(Dst, RegState::Implicit);

  return loadM0(MI, MovRel, Offset);
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  MachineFunction::iterator NextBB;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      if (I->modifiesRegister(AMDGPU::EXEC, TRI))
        ExecModified = true;

      switch (MI.getOpcode()) {
      default: break;
      case AMDGPU::SI_IF:
        ++Depth;
        If(MI);
        break;

      case AMDGPU::SI_ELSE:
        Else(MI, ExecModified);
        break;

      case AMDGPU::SI_BREAK:
        Break(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        IfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        ElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        ++Depth;
        Loop(MI);
        break;

      case AMDGPU::SI_END_CF:
        if (--Depth == 0 && HaveKill) {
          SkipIfDead(MI);
          HaveKill = false;
        }
        EndCf(MI);
        break;

      case AMDGPU::SI_KILL:
        if (Depth == 0)
          SkipIfDead(MI);
        else
          HaveKill = true;
        Kill(MI);
        break;

      case AMDGPU::S_BRANCH:
        Branch(MI);
        break;

      case AMDGPU::SI_INDIRECT_SRC_V1:
      case AMDGPU::SI_INDIRECT_SRC_V2:
      case AMDGPU::SI_INDIRECT_SRC_V4:
      case AMDGPU::SI_INDIRECT_SRC_V8:
      case AMDGPU::SI_INDIRECT_SRC_V16:
        if (indirectSrc(MI)) {
          // The block was split at this point. We can safely skip the middle
          // inserted block to the following which contains the rest of this
          // block's instructions.
          NextBB = std::next(BI);
          BE = MF.end();
          Next = MBB.end();
        }

        break;

      case AMDGPU::SI_INDIRECT_DST_V1:
      case AMDGPU::SI_INDIRECT_DST_V2:
      case AMDGPU::SI_INDIRECT_DST_V4:
      case AMDGPU::SI_INDIRECT_DST_V8:
      case AMDGPU::SI_INDIRECT_DST_V16:
        if (indirectDst(MI)) {
          // The block was split at this point. We can safely skip the middle
          // inserted block to the following which contains the rest of this
          // block's instructions.
          NextBB = std::next(BI);
          BE = MF.end();
          Next = MBB.end();
        }

        break;

      case AMDGPU::SI_RETURN: {
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN is not the last instruction. Add an empty block at
          // the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();
        }
        break;
      }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    MFI->setHasFlatInstructions(true);
  }

  return true;
}