//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
/// %sgpr0 = SI_IF %vcc
/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
/// %sgpr0 = SI_ELSE %sgpr0
/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
/// SI_END_CF %sgpr0
///
/// becomes:
///
/// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc  // Save and update the exec mask
/// %sgpr0 = S_XOR_B64 %sgpr0, %exec  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
///
/// label0:
/// %sgpr0 = S_OR_SAVEEXEC_B64 %exec  // Restore the exec mask for the ELSE block
/// %exec = S_XOR_B64 %sgpr0, %exec   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0 // Do the ELSE block
/// label1:
/// %exec = S_OR_B64 %exec, %sgpr0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include <cassert>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  LiveIntervals *LIS = nullptr;
  MachineRegisterInfo *MRI = nullptr;

  void emitIf(MachineInstr &MI);
  void emitElse(MachineInstr &MI);
  void emitIfBreak(MachineInstr &MI);
  void emitLoop(MachineInstr &MI);
  void emitEndCf(MachineInstr &MI);

  void findMaskOperands(MachineInstr &MI, unsigned OpNo,
                        SmallVectorImpl<MachineOperand> &Src) const;

  void combineMasks(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Should preserve the same set that TwoAddressInstructions does.
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreservedID(LiveVariablesID);
    AU.addPreservedID(MachineLoopInfoID);
    AU.addPreservedID(MachineDominatorsID);
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

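// Mark the implicit SCC def of an expanded mask operation as dead or live.
// Assumes operand 3 of \p MI is the implicit-def of SCC, as is the case for
// the S_AND_B64/S_XOR_B64 instructions built below.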
static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
  MachineOperand &ImpDefSCC = MI.getOperand(3);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  ImpDefSCC.setIsDead(IsDead);
}

char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;

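// Returns true if the exec mask saved by \p MI (an SI_IF) has exactly one
// use, the matching SI_END_CF, and no kill terminator can execute on any
// path between them. Such an "if" can keep the full saved exec mask instead
// of only the cleared bits.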
static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI,
                       const SIInstrInfo *TII) {
  unsigned SaveExecReg = MI.getOperand(0).getReg();
  auto U = MRI->use_instr_nodbg_begin(SaveExecReg);

  if (U == MRI->use_instr_nodbg_end() ||
      std::next(U) != MRI->use_instr_nodbg_end() ||
      U->getOpcode() != AMDGPU::SI_END_CF)
    return false;

  // Check for SI_KILL_*_TERMINATOR on path from if to endif.
  // If there is any such terminator, simplifications are not safe.
  auto SMBB = MI.getParent();
  auto EMBB = U->getParent();
  DenseSet<const MachineBasicBlock*> Visited;
  SmallVector<MachineBasicBlock*, 4> Worklist(SMBB->succ_begin(),
                                              SMBB->succ_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (MBB == EMBB || !Visited.insert(MBB).second)
      continue;
    for (auto &Term : MBB->terminators())
      if (TII->isKillTerminator(Term.getOpcode()))
        return false;

    Worklist.append(MBB->succ_begin(), MBB->succ_end());
  }

  return true;
}

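// Lower SI_IF: copy exec, AND it with the branch condition to form the new
// exec mask for the "then" block, store the saved mask (the full original
// mask for a simple if, otherwise just the cleared bits) in the SI_IF
// destination, and emit a mask branch as the block terminator.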
void SILowerControlFlow::emitIf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  MachineOperand &SaveExec = MI.getOperand(0);
  MachineOperand &Cond = MI.getOperand(1);
  assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&
         Cond.getSubReg() == AMDGPU::NoSubRegister);

  unsigned SaveExecReg = SaveExec.getReg();

  MachineOperand &ImpDefSCC = MI.getOperand(4);
  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());

  // If there is only one use of save exec register and that use is SI_END_CF,
  // we can optimize SI_IF by returning the full saved exec mask instead of
  // just cleared bits.
  bool SimpleIf = isSimpleIf(MI, MRI, TII);

  // Add an implicit def of exec to discourage scheduling VALU after this which
  // will interfere with trying to form s_and_saveexec_b64 later.
  unsigned CopyReg = SimpleIf ? SaveExecReg
                       : MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MachineInstr *CopyExec =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);

  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineInstr *And =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
    .addReg(CopyReg)
    .add(Cond);

  setImpSCCDefDead(*And, true);

  MachineInstr *Xor = nullptr;
  if (!SimpleIf) {
    Xor =
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
      .addReg(Tmp)
      .addReg(CopyReg);
    setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
  }

  // Use a copy that is a terminator to get correct spill code placement with
  // the fast register allocator.
  MachineInstr *SetExec =
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
    .addReg(Tmp, RegState::Kill);

  // Insert a pseudo terminator to help keep the verifier happy. This will also
  // be used later when inserting skips.
  MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
                        .add(MI.getOperand(2));

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->InsertMachineInstrInMaps(*CopyExec);

  // Replace the pseudo with the AND so we don't need to fix the live interval
  // for the condition register.
  LIS->ReplaceMachineInstrInMaps(MI, *And);

  if (!SimpleIf)
    LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*SetExec);
  LIS->InsertMachineInstrInMaps(*NewBr);

  LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
  MI.eraseFromParent();

  // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
  // hard to add another def here but I'm not sure how to correctly update the
  // valno.
  LIS->removeInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(SaveExecReg);
  LIS->createAndComputeVirtRegInterval(Tmp);
  if (!SimpleIf)
    LIS->createAndComputeVirtRegInterval(CopyReg);
}

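// Lower SI_ELSE: save the current exec with S_OR_SAVEEXEC over the mask
// produced by SI_IF, XOR exec so that only the lanes which skipped the
// "then" block run the "else" block, and emit a mask branch to the end
// block for the case where no lanes remain.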
void SILowerControlFlow::emitElse(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned DstReg = MI.getOperand(0).getReg();
  assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister);

  bool ExecModified = MI.getOperand(3).getImm() != 0;
  MachineBasicBlock::iterator Start = MBB.begin();

  // We are running before TwoAddressInstructions, and si_else's operands are
  // tied. In order to correctly tie the registers, split this into a copy of
  // the src register as that pass would.
  unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MachineInstr *CopyExec =
    BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
      .add(MI.getOperand(1)); // Saved EXEC

  // This must be inserted before phis and any spill code inserted before the
  // else.
  unsigned SaveReg = ExecModified ?
    MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass) : DstReg;
  MachineInstr *OrSaveExec =
    BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), SaveReg)
    .addReg(CopyReg);

  MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();

  MachineBasicBlock::iterator ElsePt(MI);

  if (ExecModified) {
    MachineInstr *And =
      BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_AND_B64), DstReg)
      .addReg(AMDGPU::EXEC)
      .addReg(SaveReg);

    if (LIS)
      LIS->InsertMachineInstrInMaps(*And);
  }

  MachineInstr *Xor =
    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(DstReg);

  MachineInstr *Branch =
    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
    .addMBB(DestBB);

  if (!LIS) {
    MI.eraseFromParent();
    return;
  }

  LIS->RemoveMachineInstrFromMaps(MI);
  MI.eraseFromParent();

  LIS->InsertMachineInstrInMaps(*CopyExec);
  LIS->InsertMachineInstrInMaps(*OrSaveExec);

  LIS->InsertMachineInstrInMaps(*Xor);
  LIS->InsertMachineInstrInMaps(*Branch);

  // src reg is tied to dst reg.
  LIS->removeInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(DstReg);
  LIS->createAndComputeVirtRegInterval(CopyReg);
  if (ExecModified)
    LIS->createAndComputeVirtRegInterval(SaveReg);

  // Let this be recomputed.
  LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
}

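// Lower SI_IF_BREAK: OR the (possibly exec-masked) break condition into the
// accumulated "loop exit" mask held in the destination register.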
void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  auto Dst = MI.getOperand(0).getReg();

  // Skip ANDing with exec if the break condition is already masked by exec
  // because it is a V_CMP in the same basic block. (We know the break
  // condition operand was an i1 in IR, so if it is a VALU instruction it must
  // be one with a carry-out.)
  bool SkipAnding = false;
  if (MI.getOperand(1).isReg()) {
    if (MachineInstr *Def = MRI->getUniqueVRegDef(MI.getOperand(1).getReg())) {
      SkipAnding = Def->getParent() == MI.getParent()
          && SIInstrInfo::isVALU(*Def);
    }
  }

  // AND the break condition operand with exec, then OR that into the "loop
  // exit" mask.
  MachineInstr *And = nullptr, *Or = nullptr;
  if (!SkipAnding) {
    And = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
          .addReg(AMDGPU::EXEC)
          .add(MI.getOperand(1));
    Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
         .addReg(Dst)
         .add(MI.getOperand(2));
  } else
    Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
         .add(MI.getOperand(1))
         .add(MI.getOperand(2));

  if (LIS) {
    if (And)
      LIS->InsertMachineInstrInMaps(*And);
    LIS->ReplaceMachineInstrInMaps(MI, *Or);
  }

  MI.eraseFromParent();
}

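// Lower SI_LOOP: clear the lanes recorded in the loop exit mask from exec,
// then branch back to the loop header while any lanes remain active.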
void SILowerControlFlow::emitLoop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *AndN2 =
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .add(MI.getOperand(0));

  MachineInstr *Branch =
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
          .add(MI.getOperand(1));

  if (LIS) {
    LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
    LIS->InsertMachineInstrInMaps(*Branch);
  }

  MI.eraseFromParent();
}

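// Lower SI_END_CF: re-enable the lanes disabled by the preceding control
// flow by ORing the saved mask back into exec. The OR is placed at the
// beginning of the join block.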
void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineBasicBlock::iterator InsPt = MBB.begin();
  MachineInstr *NewMI =
      BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .add(MI.getOperand(0));

  if (LIS)
    LIS->ReplaceMachineInstrInMaps(MI, *NewMI);

  MI.eraseFromParent();

  if (LIS)
    LIS->handleMove(*NewMI);
}

// Returns the replacement operands for a logical operation: a single operand
// if the value is exec or not a virtual register, or the two source operands
// if the value was produced by another equivalent operation (or a copy).
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
       SmallVectorImpl<MachineOperand> &Src) const {
  MachineOperand &Op = MI.getOperand(OpNo);
  if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) {
    Src.push_back(Op);
    return;
  }

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getParent() != MI.getParent() ||
      !(Def->isFullCopy() || (Def->getOpcode() == MI.getOpcode())))
    return;

  // Make sure we do not modify exec between def and use.
  // A copy with an implicitly defined exec inserted earlier is an exception;
  // it does not really modify exec.
  for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
    if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
        !(I->isCopy() && I->getOperand(0).getReg() != AMDGPU::EXEC))
      return;

  for (const auto &SrcOp : Def->explicit_operands())
    if (SrcOp.isReg() && SrcOp.isUse() &&
        (TargetRegisterInfo::isVirtualRegister(SrcOp.getReg()) ||
        SrcOp.getReg() == AMDGPU::EXEC))
      Src.push_back(SrcOp);
}

// Search and combine pairs of equivalent instructions, like
// S_AND_B64 x, (S_AND_B64 x, y) => S_AND_B64 x, y
// S_OR_B64  x, (S_OR_B64  x, y) => S_OR_B64  x, y
// One of the operands is the exec mask.
void SILowerControlFlow::combineMasks(MachineInstr &MI) {
  assert(MI.getNumExplicitOperands() == 3);
  SmallVector<MachineOperand, 4> Ops;
  unsigned OpToReplace = 1;
  findMaskOperands(MI, 1, Ops);
  if (Ops.size() == 1) OpToReplace = 2; // First operand can be exec or its copy
  findMaskOperands(MI, 2, Ops);
  if (Ops.size() != 3) return;

  unsigned UniqueOpndIdx;
  if (Ops[0].isIdenticalTo(Ops[1])) UniqueOpndIdx = 2;
  else if (Ops[0].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
  else return;

  unsigned Reg = MI.getOperand(OpToReplace).getReg();
  MI.RemoveOperand(OpToReplace);
  MI.addOperand(Ops[UniqueOpndIdx]);
  if (MRI->use_empty(Reg))
    MRI->getUniqueVRegDef(Reg)->eraseFromParent();
}

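// Walk all blocks and expand the control flow pseudos in place. After each
// expansion the scan backs up to the last unexpanded instruction (or the
// block start) so that newly inserted S_AND_B64/S_OR_B64 mask operations are
// revisited by combineMasks.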
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  // This doesn't actually need LiveIntervals, but we can preserve them.
  LIS = getAnalysisIfAvailable<LiveIntervals>();
  MRI = &MF.getRegInfo();

  MachineFunction::iterator NextBB;
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock::iterator I, Next, Last;

    for (I = MBB.begin(), Last = MBB.end(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_IF:
        emitIf(MI);
        break;

      case AMDGPU::SI_ELSE:
        emitElse(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        emitIfBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        emitLoop(MI);
        break;

      case AMDGPU::SI_END_CF:
        emitEndCf(MI);
        break;

      case AMDGPU::S_AND_B64:
      case AMDGPU::S_OR_B64:
        // Cleanup bit manipulations on exec mask.
        combineMasks(MI);
        Last = I;
        continue;

      default:
        Last = I;
        continue;
      }

      // Replay newly inserted code to combine masks.
      Next = (Last == MBB.end()) ? MBB.begin() : Last;
    }
  }

  return true;
}