//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// that define a <vsrc> register and constrains their definition class to
/// <vgpr> if any user of the PHI's definition register is a vector
/// instruction. If the PHI's definition class is constrained to <vgpr>, the
/// coalescer will be unable to perform the COPY removal from the above
/// example, which ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

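// Return true if \p MI has at least one virtual register operand whose
// register class contains VGPRs.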
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

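// Return the source and destination register classes of \p Copy, mapping any
// physical register to its class via TRI.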
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC =
    TargetRegisterInfo::isVirtualRegister(SrcReg) ?
    MRI.getRegClass(SrcReg) :
    TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC =
    TargetRegisterInfo::isVirtualRegister(DstReg) ?
    MRI.getRegClass(DstReg) :
    TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC);
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
// SGPRx = ...
// SGPRy = REG_SEQUENCE SGPRx, sub0 ...
// VGPRz = COPY SGPRy
//
// ==>
//
// VGPRx = COPY SGPRx
// VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  unsigned DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (TargetRegisterInfo::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy
  //
  // =>
  //
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    unsigned SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

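// Return true if any incoming value of \p PHI lives in a register class that
// contains VGPRs.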
static bool phiHasVGPROperands(const MachineInstr &PHI,
                               const MachineRegisterInfo &MRI,
                               const SIRegisterInfo *TRI,
                               const SIInstrInfo *TII) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (TRI->hasVGPRs(MRI.getRegClass(Reg)))
      return true;
  }
  return false;
}

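// Return true if any incoming value of \p PHI is defined by a SI_BREAK,
// SI_IF_BREAK, or SI_ELSE_BREAK instruction, looking through nested PHIs.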
static bool phiHasBreakDef(const MachineInstr &PHI,
                           const MachineRegisterInfo &MRI,
                           SmallSet<unsigned, 8> &Visited) {
  for (unsigned i = 1; i < PHI.getNumOperands(); i += 2) {
    unsigned Reg = PHI.getOperand(i).getReg();
    if (Visited.count(Reg))
      continue;

    Visited.insert(Reg);

    MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
    assert(DefInstr);
    switch (DefInstr->getOpcode()) {
    default:
      break;
    case AMDGPU::SI_BREAK:
    case AMDGPU::SI_IF_BREAK:
    case AMDGPU::SI_ELSE_BREAK:
      return true;
    case AMDGPU::PHI:
      if (phiHasBreakDef(*DefInstr, MRI, Visited))
        return true;
    }
  }
  return false;
}

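// Return true if one of \p MBB's terminators modifies the EXEC mask, i.e. the
// block does not end in a uniform branch.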
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}

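// Check whether \p Copy copies the result of the immediate move \p MoveImm.
// If the copy can instead be done with a scalar move, return true and report
// the scalar opcode in \p SMovOp and the immediate value in \p Imm.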
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

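  // Scan every instruction, rewriting SGPR-defining COPY, PHI, REG_SEQUENCE,
  // and INSERT_SUBREG instructions whose inputs would otherwise force an
  // illegal VGPR-to-SGPR copy.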
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY: {
        // If the destination register is a physical register there isn't
        // really much we can do to fix this.
        if (!TargetRegisterInfo::isVirtualRegister(MI.getOperand(0).getReg()))
          continue;

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI);
        }

        break;
      }
      case AMDGPU::PHI: {
        unsigned Reg = MI.getOperand(0).getReg();
        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // We don't need to fix the PHI if the common dominator of the
        // two incoming blocks terminates with a uniform branch.
        if (MI.getNumExplicitOperands() == 5) {
          MachineBasicBlock *MBB0 = MI.getOperand(2).getMBB();
          MachineBasicBlock *MBB1 = MI.getOperand(4).getMBB();

          MachineBasicBlock *NCD = MDT->findNearestCommonDominator(MBB0, MBB1);
          if (NCD && !hasTerminatorThatModifiesExec(*NCD, *TRI)) {
            DEBUG(dbgs() << "Not fixing PHI for uniform branch: " << MI << '\n');
            break;
          }
        }

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and all of its operands are
        // SGPRs, we must still move it to the VALU, because the SGPR operands
        // will all end up being assigned the same register, which means there
        // is a potential for a conflict if different threads take different
        // control flow paths.
        //
        // For example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // The one exception to this rule is when one of the operands is
        // defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK instruction.
        // In this case, we know the program will never enter the second block
        // (the loop) without entering the first block (where the condition is
        // computed), so there is no chance for values to be over-written.

        SmallSet<unsigned, 8> Visited;
        if (phiHasVGPROperands(MI, MRI, TRI, TII) ||
            !phiHasBreakDef(MI, MRI, Visited)) {
          DEBUG(dbgs() << "Fixing PHI: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI);
          continue;
        }

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  return true;
}