//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single Vector ALU). Typically, for predicates, a Vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one per
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0 // Restore the exec mask for the Then block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {

private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }

};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

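/// \brief Decide whether it is profitable to insert a skip branch from
/// \p From to \p To: walk the chain of fall-through successors and return
/// true once SkipThreshold instructions have been counted. Shorter regions
/// are cheaper to execute with all lanes disabled than to branch around.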
bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

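/// \brief Insert an S_CBRANCH_EXECZ from \p From to \p To, but only if the
/// region in between is long enough for the branch to pay off.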
void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To)
    .addReg(AMDGPU::EXEC);
}

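/// \brief If a pixel shader has killed all of its lanes there is nothing
/// left to compute: when EXEC is zero, export to the NULL target and
/// terminate the wavefront with S_ENDPGM; otherwise skip those two
/// instructions with S_CBRANCH_EXECNZ.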
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3)
    .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

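/// \brief Lower SI_IF. The pseudo expands to:
///
///   Reg = S_AND_SAVEEXEC_B64 Vcc  // Save EXEC, enable only the lanes
///                                 // where the condition holds
///   Reg = S_XOR_B64 EXEC, Reg     // Reg = lanes just disabled; SI_ELSE /
///                                 // SI_END_CF re-enables them later
///
/// plus an optional S_CBRANCH_EXECZ over the THEN block (see Skip).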
void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

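/// \brief Lower SI_ELSE. S_OR_SAVEEXEC_B64 at the top of the block saves the
/// lanes that ran the THEN side into Dst while restoring the full mask, and
/// S_XOR_B64 then disables those lanes so only the ELSE lanes run, again
/// with an optional skip branch over the block.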
void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

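/// \brief Lower SI_BREAK: accumulate the lanes that are leaving the loop
/// into the exit mask with Dst = EXEC | Src.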
void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

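/// \brief Lower SI_IF_BREAK: add the lanes where the break condition holds
/// to the existing exit mask with Dst = Vcc | Src.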
void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

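/// \brief Lower SI_ELSE_BREAK: merge the mask produced by an SI_ELSE region
/// into the exit mask with Dst = Saved | Src.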
void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

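/// \brief Lower SI_LOOP: disable the lanes that have hit a break with
/// EXEC = S_ANDN2_B64 EXEC, Src, then branch back to the loop header with
/// S_CBRANCH_EXECNZ while any lane remains active.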
void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1))
    .addReg(AMDGPU::EXEC);

  MI.eraseFromParent();
}

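/// \brief Lower SI_END_CF: re-enable the lanes that were disabled for the
/// closed control flow region with EXEC = EXEC | Reg, inserted at the top
/// of the join block.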
void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

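/// \brief Drop an S_BRANCH to the immediately following block; the
/// fall-through makes it redundant.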
void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

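/// \brief Lower SI_KILL: deactivate the lanes where the operand is negative.
/// An immediate operand is resolved at compile time (clear all of EXEC or do
/// nothing); otherwise V_CMPX_LE_F32 compares the operand against zero and
/// updates EXEC directly.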
void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  const SIMachineFunctionInfo *MFI
    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  // Kill is only allowed in pixel / geometry shaders.
  assert(MFI->getShaderType() == ShaderType::PIXEL ||
         MFI->getShaderType() == ShaderType::GEOMETRY);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

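/// \brief Load the index needed by \p MovRel into M0 and insert the
/// instruction. A scalar index is moved (plus \p Offset, if any) straight
/// into M0. A VGPR index can differ per lane, so it is serialized:
/// V_READFIRSTLANE_B32 picks one value, all lanes holding that value execute
/// MovRel, and the loop repeats until every lane has been handled.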
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel,
                                    int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx)
        .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(AMDGPU::M0)
        .addImm(Offset);
    }

    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7)
      .addReg(AMDGPU::EXEC);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);

  }
  MI.eraseFromParent();
}

/// \param VecReg The register which holds element zero of the vector being
///               addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///                 instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///                       indirect index, e.g. v0 = v[VecReg + Offset]. As an
///                       output, this is a constant value that needs to be
///                       added to the value stored in M0.
void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
                                                         unsigned &Reg,
                                                         int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

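/// \brief Lower SI_INDIRECT_SRC to a V_MOVRELS_B32 reading v[Reg + M0] into
/// Dst, where M0 is set up from the index operand by LoadM0().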
void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(Reg)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

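/// \brief Lower SI_INDIRECT_DST_* to a V_MOVRELD_B32 writing Val to
/// v[Reg + M0], where M0 is set up from the index operand by LoadM0().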
void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(Reg, RegState::Define)
      .addReg(Val)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

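/// \brief Replace all control flow and indirect addressing pseudo
/// instructions in the function, and emit the whole-quad-mode and flat
/// scratch prologues if any instruction turned out to need them.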
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedWQM = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isWQM(MI.getOpcode()) || TII->isDS(MI.getOpcode()))
        NeedWQM = true;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI.getOpcode()))
        NeedFlat = true;

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;
      }
    }
  }

  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  // FIXME: This seems inappropriate to do here.
  if (NeedFlat && MFI->IsKernel) {
    // Insert the prologue initializing the SGPRs pointing to the scratch space
    // for flat accesses.
    const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

    // TODO: What to use with function calls?

    // FIXME: This is reporting stack size that is used in a scratch buffer
    // rather than registers as well.
    uint64_t StackSizeBytes = FrameInfo->getStackSize();

    int IndirectBegin
      = static_cast<const AMDGPUInstrInfo*>(TII)->getIndirectIndexBegin(MF);
    // Convert register index to 256-byte unit.
    uint64_t StackOffset = IndirectBegin < 0 ? 0 : (4 * IndirectBegin / 256);

    assert((StackSizeBytes < 0xffff) && StackOffset < 0xffff &&
           "Stack limits should be smaller than 16-bits");

    // Initialize the flat scratch register pair.
    // TODO: Can we use one s_mov_b64 here?

    // Offset is in units of 256-bytes.
    MachineBasicBlock &MBB = MF.front();
    DebugLoc NoDL;
    MachineBasicBlock::iterator Start = MBB.getFirstNonPHI();
    const MCInstrDesc &SMovK = TII->get(AMDGPU::S_MOVK_I32);

    assert(isInt<16>(StackOffset) && isInt<16>(StackSizeBytes));

    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_LO)
      .addImm(StackOffset);

    // Documentation says size is "per-thread scratch size in bytes"
    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_HI)
      .addImm(StackSizeBytes);
  }

  return true;
}