//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the Then block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void splitBlockLiveIns(const MachineBasicBlock &MBB,
                         const MachineInstr &MI,
                         MachineBasicBlock &LoopBB,
                         MachineBasicBlock &RemainderBB,
                         unsigned SaveReg,
                         const MachineOperand &IdxReg);

  void emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB, DebugLoc DL,
                              MachineInstr *MovRel,
                              const MachineOperand &IdxReg,
                              int Offset);

  bool loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  bool indirectSrc(MachineInstr &MI);
  bool indirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

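// Conservatively decide whether it is worth inserting a skip branch over the
// blocks between From and To: returns true if the region contains at least
// SkipThreshold real instructions, or a VCC-based branch that must not be
// executed with EXEC = 0 (e.g. a uniform loop nested in divergent control
// flow).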
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {

  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (++NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

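// If the region being skipped is long enough to pay for a branch, insert an
// S_CBRANCH_EXECZ before From that jumps to To when the exec mask is zero.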
void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

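// After a kill in a pixel shader the whole wavefront may be dead. If the exec
// mask is zero, do a null export and terminate the wavefront early instead of
// running the rest of the shader; waves with live lanes skip over the two
// inserted instructions.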
void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

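// Lower SI_IF: S_AND_SAVEEXEC_B64 saves the current exec mask and restricts it
// to the lanes where the condition holds; S_XOR_B64 then leaves the lanes that
// still need to run the ELSE side in the destination SGPR pair for SI_ELSE /
// SI_END_CF to consume.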
void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH), Reg)
    .addOperand(MI.getOperand(2));

  MI.eraseFromParent();
}

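// Lower SI_ELSE: S_OR_SAVEEXEC_B64 saves the incoming exec mask and merges in
// the mask produced by SI_IF, and S_XOR_B64 then switches exec over to the
// lanes that still have to run the ELSE block; the matching SI_END_CF restores
// the full mask. If exec was modified inside the IF block (e.g. when WQM is
// switched off), the saved mask is first ANDed with the current exec.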
void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH), Dst)
    .addOperand(MI.getOperand(2));

  MI.eraseFromParent();
}

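// Lower SI_BREAK: accumulate the lanes that are leaving the loop by ORing the
// current exec mask into the incoming break mask. SI_IF_BREAK and
// SI_ELSE_BREAK below do the same, but OR in a condition mask or a previously
// saved mask instead of exec.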
void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

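// Lower SI_LOOP: remove the lanes that have left the loop (the accumulated
// break mask in Src) from exec, and branch back to the loop header while any
// lanes remain active.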
void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

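// Lower SI_END_CF: re-enable the lanes that were disabled for the just-closed
// control flow region by ORing the saved mask back into exec at the top of the
// join block.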
void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlow::Branch(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
  if (MBB == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

// All currently live registers must remain so in the remainder block.
void SILowerControlFlow::splitBlockLiveIns(const MachineBasicBlock &MBB,
                                           const MachineInstr &MI,
                                           MachineBasicBlock &LoopBB,
                                           MachineBasicBlock &RemainderBB,
                                           unsigned SaveReg,
                                           const MachineOperand &IdxReg) {
  LivePhysRegs RemainderLiveRegs(TRI);

  RemainderLiveRegs.addLiveOuts(MBB);
  for (MachineBasicBlock::const_reverse_iterator I = MBB.rbegin(), E(&MI);
       I != E; ++I) {
    RemainderLiveRegs.stepBackward(*I);
  }

  // Add reg defined in loop body.
  RemainderLiveRegs.addReg(SaveReg);

  if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) {
    if (!Val->isUndef()) {
      RemainderLiveRegs.addReg(Val->getReg());
      LoopBB.addLiveIn(Val->getReg());
    }
  }

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  for (unsigned Reg : RemainderLiveRegs) {
    if (MRI.isAllocatable(Reg))
      RemainderBB.addLiveIn(Reg);
  }

  const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  if (!Src->isUndef())
    LoopBB.addLiveIn(Src->getReg());

  if (!IdxReg.isUndef())
    LoopBB.addLiveIn(IdxReg.getReg());
  LoopBB.sortUniqueLiveIns();
}

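// Emit the body of the waterfall loop used to load M0 from a VGPR index: each
// iteration reads the index from the first active lane, enables exactly the
// lanes that hold the same index, performs the MOVREL, and then clears those
// lanes from exec until every lane has been handled.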
void SILowerControlFlow::emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB,
                                                DebugLoc DL,
                                                MachineInstr *MovRel,
                                                const MachineOperand &IdxReg,
                                                int Offset) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  // Read the next variant into VCC (lower 32 bits) <- also loop target
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Move index from VCC into M0
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
    .addReg(AMDGPU::VCC_LO);

  // Compare the just read M0 value to all possible Idx values
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
    .addReg(AMDGPU::M0)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Update EXEC, save the original EXEC value to VCC
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
    .addReg(AMDGPU::VCC);

  if (Offset) {
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .addReg(AMDGPU::M0)
      .addImm(Offset);
  }

  // Do the actual move
  LoopBB.insert(I, MovRel);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::VCC);

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);
}

// Returns true if a new block was inserted.
bool SILowerControlFlow::loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) {
    if (Offset) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()))
        .addImm(Offset);
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()));
    }

    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return false;
  }

  MachineFunction &MF = *MBB.getParent();
  MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  SaveOp->setIsDead(false);
  unsigned Save = SaveOp->getReg();

  // Reading from a VGPR requires looping over all workitems in the wavefront.
  assert(AMDGPU::SReg_64RegClass.contains(Save) &&
         AMDGPU::VGPR_32RegClass.contains(Idx->getReg()));

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF.insert(MBBI, LoopBB);
  MF.insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  splitBlockLiveIns(MBB, MI, *LoopBB, *RemainderBB, Save, *Idx);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessors(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  emitLoadM0FromVGPRLoop(*LoopBB, DL, MovRel, *Idx, Offset);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
  return true;
}

/// \param @VecReg The register which holds element zero of the vector
///                being addressed into.
/// \param[out] @Reg The base register to use in the indirect addressing instruction.
/// \param[in,out] @Offset As an input, this is the constant offset part of the
///                        indirect Index. e.g. v0 = v[VecReg + Offset]
///                        As an output, this is a constant value that needs
///                        to be added to the value stored in M0.
void SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                     unsigned &Reg,
                                                     int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

// Return true if a new block was inserted.
bool SILowerControlFlow::indirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  int Off = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(SrcVec->getReg(), Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg, getUndefRegState(SrcVec->isUndef()))
      .addReg(SrcVec->getReg(), RegState::Implicit);

  return loadM0(MI, MovRel, Off);
}

// Return true if a new block was inserted.
bool SILowerControlFlow::indirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val->getReg(), getUndefRegState(Val->isUndef()))
      .addReg(Dst, RegState::Implicit);

  return loadM0(MI, MovRel, Off);
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  MachineFunction::iterator NextBB;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      for (const auto &Def : I->defs()) {
        if (Def.isReg() && Def.isDef() && Def.getReg() == AMDGPU::EXEC) {
          ExecModified = true;
          break;
        }
      }

      switch (MI.getOpcode()) {
      default: break;
      case AMDGPU::SI_IF:
        ++Depth;
        If(MI);
        break;

      case AMDGPU::SI_ELSE:
        Else(MI, ExecModified);
        break;

      case AMDGPU::SI_BREAK:
        Break(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        IfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        ElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        ++Depth;
        Loop(MI);
        break;

      case AMDGPU::SI_END_CF:
        if (--Depth == 0 && HaveKill) {
          SkipIfDead(MI);
          HaveKill = false;
        }
        EndCf(MI);
        break;

      case AMDGPU::SI_KILL:
        if (Depth == 0)
          SkipIfDead(MI);
        else
          HaveKill = true;
        Kill(MI);
        break;

      case AMDGPU::S_BRANCH:
        Branch(MI);
        break;

      case AMDGPU::SI_INDIRECT_SRC_V1:
      case AMDGPU::SI_INDIRECT_SRC_V2:
      case AMDGPU::SI_INDIRECT_SRC_V4:
      case AMDGPU::SI_INDIRECT_SRC_V8:
      case AMDGPU::SI_INDIRECT_SRC_V16:
        if (indirectSrc(MI)) {
          // The block was split at this point. We can safely skip the middle
          // inserted block to the following which contains the rest of this
          // block's instructions.
          NextBB = std::next(BI);
          BE = MF.end();
          Next = MBB.end();
        }

        break;

      case AMDGPU::SI_INDIRECT_DST_V1:
      case AMDGPU::SI_INDIRECT_DST_V2:
      case AMDGPU::SI_INDIRECT_DST_V4:
      case AMDGPU::SI_INDIRECT_DST_V8:
      case AMDGPU::SI_INDIRECT_DST_V16:
        if (indirectDst(MI)) {
          // The block was split at this point. We can safely skip the middle
          // inserted block to the following which contains the rest of this
          // block's instructions.
          NextBB = std::next(BI);
          BE = MF.end();
          Next = MBB.end();
        }

        break;

      case AMDGPU::S_ENDPGM: {
        if (MF.getInfo<SIMachineFunctionInfo>()->returnsVoid())
          break;

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // S_ENDPGM is not the last instruction. Add an empty block at
          // the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
        }

        I->eraseFromParent();
        break;
      }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    if (NeedFlat)
      MFI->setHasFlatInstructions(true);
  }

  return true;
}