//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one bit for
/// each vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the Then block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the THEN block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

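// Decide whether a skip branch from From to To is worth emitting: count the
// instructions that would actually execute and return true once the count
// reaches SkipThreshold, or immediately if a VCC branch is found (such a
// branch is never taken when EXEC is zero, so a uniform loop nested inside
// non-uniform control flow must be skipped).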
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {

  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (++NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

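// Insert an S_CBRANCH_EXECZ that jumps over the conditional code when no
// lanes are active, but only if shouldSkip decides the region is long enough
// for the branch to pay off.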
void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

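// For pixel shaders, if every lane has been killed, export to the null
// target and terminate the wavefront early instead of running the rest of
// the program.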
void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

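// Lower SI_IF: restrict EXEC to the lanes where the condition (VCC) is set,
// keep the lanes that still need to run the else-part in the destination
// register, and optionally branch over the block when EXEC becomes zero.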
void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

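// Lower SI_ELSE: using the mask produced by SI_IF, flip EXEC so that only
// the lanes which skipped the then-block run the else-block, and branch over
// it if none remain.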
void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
      .addReg(AMDGPU::EXEC)
      .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

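// Lower SI_BREAK: accumulate the currently active lanes into the mask of
// lanes that have left the loop (Dst = EXEC | Src).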
void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

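// Lower SI_IF_BREAK: add the lanes for which the break condition (VCC) holds
// to the accumulated break mask (Dst = VCC | Src).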
void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

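// Lower SI_ELSE_BREAK: merge the break mask saved on the then-side into the
// one accumulated so far (Dst = Saved | Src).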
void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

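// Lower SI_LOOP: disable the lanes that have broken out of the loop
// (EXEC = EXEC & ~Src) and branch back to the loop header while any lane is
// still active.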
void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

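// Lower SI_END_CF: re-enable the lanes that were switched off when the
// control flow region was entered (EXEC = EXEC | Reg).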
void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

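// A branch to the immediately following block is a fall-through and can be
// removed; any other unconditional branch is left in place.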
void SILowerControlFlow::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

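// Lower SI_KILL: for a negative constant operand clear EXEC entirely,
// otherwise use V_CMPX_LE_F32 to switch off every lane whose operand value
// is negative.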
void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

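// Set up M0 with the index for an indirect (MovRel) access and insert the
// move. A uniform (SGPR) index is written to M0 directly; a divergent (VGPR)
// index requires a waterfall loop that uses V_READFIRSTLANE to handle one
// index value per iteration until all active lanes are covered.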
void SILowerControlFlow::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx)
        .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(AMDGPU::M0)
        .addImm(Offset);
    }

    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);
  }
  MI.eraseFromParent();
}

/// \param VecReg The register which holds element zero of the vector being
///               addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///                 instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///                       indirect index, e.g. v0 = v[VecReg + Offset]. As an
///                       output, this is a constant value that needs to be
///                       added to the value stored in M0.
void SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                     unsigned &Reg,
                                                     int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

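// Lower SI_INDIRECT_SRC_*: read one element of a vector register tuple with
// V_MOVRELS_B32, using the base register and constant offset computed above
// and the runtime index that LoadM0 places in M0.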
void SILowerControlFlow::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

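// Lower SI_INDIRECT_DST_*: write a value into one element of a vector
// register tuple with V_MOVRELD_B32, again with the runtime index supplied
// through M0 by LoadM0.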
void SILowerControlFlow::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

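// Walk every instruction in the function, expanding the control flow and
// indirect-addressing pseudo instructions while tracking kill depth, writes
// to EXEC, and whether any FLAT instructions were seen.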
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      for (const auto &Def : I->defs()) {
        if (Def.isReg() && Def.isDef() && Def.getReg() == AMDGPU::EXEC) {
          ExecModified = true;
          break;
        }
      }

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI, ExecModified);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC_V1:
        case AMDGPU::SI_INDIRECT_SRC_V2:
        case AMDGPU::SI_INDIRECT_SRC_V4:
        case AMDGPU::SI_INDIRECT_SRC_V8:
        case AMDGPU::SI_INDIRECT_SRC_V16:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::S_ENDPGM: {
          if (MF.getInfo<SIMachineFunctionInfo>()->returnsVoid())
            break;

          // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
          // because external bytecode will be appended at the end.
          if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
            // S_ENDPGM is not the last instruction. Add an empty block at
            // the end and jump there.
            if (!EmptyMBBAtEnd) {
              EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
              MF.insert(MF.end(), EmptyMBBAtEnd);
            }

            MBB.addSuccessor(EmptyMBBAtEnd);
            BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
              .addMBB(EmptyMBBAtEnd);
          }

          I->eraseFromParent();
          break;
        }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    MFI->setHasFlatInstructions(true);
  }

  return true;
}