//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, after which the
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// that define a <vsrc> register and constrains the definition's register
/// class to <vgpr> if any user of the PHI's result is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr>, the coalescer is
/// unable to perform the COPY removal from the example above, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(false));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {

  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

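// Returns true if MI has at least one virtual register operand whose register
// class contains VGPRs.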
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

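// Returns the (source, destination) register classes of a COPY, using MRI for
// virtual registers and TRI for physical ones.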
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC);
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}

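// Returns true if any incoming value of PHI is defined, possibly through a
// chain of other PHIs, by one of the SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
// pseudos used for structured loop control flow. An illustrative sketch of
// the shape this matches (operand details simplified):
//
//   bb.loop:
//     %phi = PHI %init, %bb.entry, %brk, %bb.latch
//     ...
//     %brk = SI_IF_BREAK %cond, %phi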
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
    assert(DefInstr);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_BREAK:
    case AMDGPU::SI_IF_BREAK:
    case AMDGPU::SI_ELSE_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

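// Returns true if any terminator of MBB modifies the EXEC register; on
// AMDGPU this indicates the block ends in divergent control flow.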
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}

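// Checks whether a VGPR-to-SGPR COPY whose source is defined by a
// move-immediate can instead be rewritten as a scalar move. An illustrative
// sketch of the rewrite the caller performs (operands simplified):
//
//   %vgpr = V_MOV_B32_e32 7
//   %sgpr = COPY %vgpr
// ==>
//   %sgpr = S_MOV_B32 7
//
// On success, SMovOp receives the scalar move opcode and Imm the immediate.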
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

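// Depth-first search over the predecessors of MBB, returning true if
// Predicate holds for any block visited before reaching CutOff. The starting
// block and CutOff itself are never tested.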
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock*> Visited;
  SmallVector<MachineBasicBlock*, 4> Worklist(MBB->pred_begin(),
                                              MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

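// Returns true if any predecessor of MBB, searched transitively, ends in a
// terminator that writes EXEC, meaning control flow reaching MBB may be
// divergent.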
static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}

// Checks if there is a potential path from instruction From to instruction
// To. If CutOff is specified and sits on that path, the portion of the path
// above CutOff is ignored and To is reported as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // Covers the case where From's block dominates To's block, as well as the
  // case where both are in the same block and From comes first.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
         (const MachineBasicBlock *MBB) { return MBB == MBBFrom; });
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
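//
// For example (illustrative):
//
//   bb.1:
//     m0 = S_MOV_B32 -1
//   bb.2:
//     m0 = S_MOV_B32 -1
// ==>
//   bb.0 (a common dominator of bb.1 and bb.2):
//     m0 = S_MOV_B32 -1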
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   MachineDominatorTree &MDT) {
  // List of inits by immediate value.
  typedef std::map<unsigned, std::list<MachineInstr*>> InitListMap;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO: MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check for any possible interference.
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto clobberInterferes =
              [&MDT, From, To](MachineInstr *&Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can be clobbered. This is not an interference only if both
            // From and To are dominated by Clobber and belong to the same
            // block, or if Clobber properly dominates To's block; since To
            // dominates From, Clobber then dominates both from a common
            // dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (any_of(Clobbers, clobberInterferes)) ||
                 (any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           any_of(C.second, clobberInterferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI2->getParent()->getNumber()
                         << " " << *MI2);
            MI2->eraseFromParent();
            Defs.erase(I2++);
            Changed = true;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
                         << " " << *MI1);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            DEBUG(dbgs() << "Erasing from BB#" << MI1->getParent()->getNumber()
                         << " " << *MI1 << "and moving from BB#"
                         << MI2->getParent()->getNumber() << " to BB#"
                         << I->getParent()->getNumber() << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MI1->eraseFromParent();
            Defs.erase(I1++);
            Changed = true;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

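// Entry point. Visits every instruction once and handles the four opcodes
// through which an illegal VGPR-to-SGPR copy can appear or propagate:
// COPY, PHI, REG_SEQUENCE, and INSERT_SUBREG.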
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        if (MI.getNumExplicitOperands() == 5) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (phiHasVGPROperands(MI, MRI, TRI, TII) ||
            !phiHasBreakDef(MI, MRI, Visited)) {
          DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, *MDT);

  return true;
}