//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr> so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr>, the coalescer will
/// be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(true));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

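// Returns true if \p MI has any operand that is a virtual register in a
// register class containing vector (VGPR or AGPR) registers.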
static bool hasVectorOperands(const MachineInstr &MI,
                              const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !Register::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

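// Return the register classes of \p Copy's source and destination operands,
// using MRI for virtual registers and TRI for physical ones.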
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  Register DstReg = Copy.getOperand(0).getReg();
  Register SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg)
                                         ? MRI.getRegClass(SrcReg)
                                         : TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC = Register::isVirtualRegister(DstReg)
                                         ? MRI.getRegClass(DstReg)
                                         : TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

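// Predicates classifying a copy by the vector/scalar direction implied by its
// source and destination register classes. VReg_1 copies are exempt in both
// directions; i1 values held in VReg_1 are lowered separately.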
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVectorRegisters(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVectorRegisters(DstRC);
}

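// Given an SGPR-to-VGPR copy, try to change the destination register class
// back to SGPR. This is only done when every other user of the destination is
// a target instruction in the same block for which the SGPR source operand is
// already legal, so the resulting SGPR-to-SGPR copy can be coalesced away.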
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = Src.getReg();
  if (!Register::isVirtualRegister(SrcReg) ||
      !Register::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  Register DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (Register::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
  bool IsAGPR = TRI->hasAGPRs(DstRC);

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    Register SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    Register TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    if (IsAGPR) {
      const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
      Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
      unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
        AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY;
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
              TmpAReg)
        .addReg(TmpReg, RegState::Kill);
      TmpReg = TmpAReg;
    }

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

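// Returns true if any incoming operand of \p PHI lives in a register class
// containing VGPRs.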
static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    Register Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}

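// Returns true if any incoming value of \p PHI is defined, possibly through
// a chain of PHIs, by a SI_IF_BREAK instruction.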
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    Register Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getVRegDef(Reg);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_IF_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}

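// Check whether \p Copy is a plain COPY of an immediate materialized by
// \p MoveImm. If so, return true and report in \p SMovOp and \p Imm the
// scalar move opcode and immediate value with which the copy can be replaced.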
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

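// Walk the chain of predecessors of \p MBB, stopping at \p CutOff, and return
// true if \p Predicate holds for any visited block.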
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

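// Returns true if \p MBB has a (transitive) predecessor whose terminator
// writes EXEC, i.e. the block can be reached through a divergent branch.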
static bool predsHasDivergentTerminator(MachineBasicBlock *MBB,
                                        const TargetRegisterInfo *TRI) {
  return searchPredecessors(MBB, nullptr, [TRI](MachineBasicBlock *MBB) {
           return hasTerminatorThatModifiesExec(*MBB, *TRI); });
}

// Checks if there is a potential path from instruction From to instruction To.
// If CutOff is specified and sits on that path, we ignore the part of the
// path above it and report To as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // If either From block dominates To block or instructions are in the same
  // block and From is higher.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom]
                            (const MachineBasicBlock *MBB) {
                              return MBB == MBBFrom; });
}

// Return the first non-prologue instruction in the block.
static MachineBasicBlock::iterator
getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
  while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
    ++I;

  return I;
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo *TRI,
                                   MachineDominatorTree &MDT,
                                   const TargetInstrInfo *TII) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  // List of instructions marked for deletion.
  SmallSet<MachineInstr*, 8> MergedInstrs;

  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {

          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, this is not an interference only if both are
            // dominated by Clobber and belong to the same block or if Clobber
            // properly dominates To, given that To >> From, so it dominates
            // both and located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MergedInstrs.insert(MI2);
            Changed = true;
            ++I2;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  // Remove initializations that were merged into another.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    auto I = Defs.begin();
    while (I != Defs.end()) {
      if (MergedInstrs.count(*I)) {
        (*I)->eraseFromParent();
        I = Defs.erase(I);
      } else
        ++I;
    }
  }

  // Try to schedule SGPR initializations as early as possible in the MBB.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    for (auto MI : Defs) {
      auto MBB = MI->getParent();
      MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
      MachineBasicBlock::reverse_iterator B(BoundaryMI);
      // Check if B should actually be a boundary. If not, set the previous
      // instruction as the boundary instead.
      if (!TII->isBasicBlockPrologue(*B))
        B++;

      auto R = std::next(MI->getReverseIterator());
      const unsigned Threshold = 50;
      // Search until B or Threshold for a place to insert the initialization.
      for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
        if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
            TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
          break;

      // Move to directly after R.
      if (&*--R != MI)
        MBB->splice(*R, MBB, MI);
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

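// Entry point. Walk every instruction, rewriting the COPY, PHI, REG_SEQUENCE
// and INSERT_SUBREG patterns described in the file header comment, then try
// to merge and hoist M0 initializations.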
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::SOFT_WQM:
      case AMDGPU::WWM: {
        Register DstReg = MI.getOperand(0).getReg();

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);

        if (!Register::isVirtualRegister(DstReg)) {
          // If the destination register is a physical register there isn't
          // really much we can do to fix this.
          // Some special instructions use M0 as an input. Some even only use
          // the first lane. Insert a readfirstlane and hope for the best.
          if (DstReg == AMDGPU::M0 && TRI->hasVectorRegisters(SrcRC)) {
            Register TmpReg
              = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

            BuildMI(MBB, MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
                .add(MI.getOperand(1));
            MI.getOperand(1).setReg(TmpReg);
          }

          continue;
        }

        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          Register SrcReg = MI.getOperand(1).getReg();
          if (!Register::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI, MDT);
            break;
          }

          MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI, MDT);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        Register Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        bool HasVGPROperand = phiHasVGPROperands(MI, MRI, TRI, TII);
        if (MI.getNumExplicitOperands() == 5 && !HasVGPROperand) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          if (!predsHasDivergentTerminator(MBB0, TRI) &&
              !predsHasDivergentTerminator(MBB1, TRI)) {
            LLVM_DEBUG(dbgs()
                       << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For Example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will Become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be overwritten.

        SmallSet<unsigned, 8> Visited;
        if (HasVGPROperand || !phiHasBreakDef(MI, MRI, Visited)) {
          LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI, MDT);
        }

        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVectorRegisters(TII->getOpRegClass(MI, 0)) ||
            !hasVectorOperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI, MDT);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVectorRegisters(Src0RC) ||
             TRI->hasVectorRegisters(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, MRI, TRI, *MDT, TII);

  return true;
}