//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   ...
///   BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if any user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "sgpr-copies"

namespace {

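/// Searches for illegal VGPR to SGPR copies, and for PHI, REG_SEQUENCE, and
/// INSERT_SUBREG instructions whose operands would otherwise force such
/// copies, and moves the offending instructions to the VALU.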
class SIFixSGPRCopies : public MachineFunctionPass {

private:
  static char ID;
  const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
                                                   const MachineRegisterInfo &MRI,
                                                   unsigned Reg,
                                                   unsigned SubReg) const;
  const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
                                                  const MachineRegisterInfo &MRI,
                                                  unsigned Reg,
                                                  unsigned SubReg) const;
  bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
                        const MachineRegisterInfo &MRI) const;

public:
  SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fix SGPR copies";
  }

};

} // End anonymous namespace

char SIFixSGPRCopies::ID = 0;

FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
  return new SIFixSGPRCopies(tm);
}

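/// Return true if any virtual-register operand of \p MI has a register class
/// that contains VGPRs.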
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

/// This function walks the use list of \p Reg, looking through COPY
/// instructions, to determine the register class that \p Reg must have.
/// \return The common subclass of \p Reg's register class and the classes
/// inferred from its COPY uses.
const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
                                                 const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const {
  // The Reg parameter to the function must always be defined by either a PHI
  // or a COPY, therefore it cannot be a physical register.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Reg cannot be a physical register");

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  RC = TRI->getSubRegClass(RC, SubReg);
  for (MachineRegisterInfo::use_instr_iterator
       I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
    switch (I->getOpcode()) {
    case AMDGPU::COPY:
      RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
                                  I->getOperand(0).getReg(),
                                  I->getOperand(0).getSubReg()));
      break;
    }
  }

  return RC;
}

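/// Look through COPY instructions that define \p Reg and return the
/// sub-register class (for \p SubReg) of the first non-COPY definition.
/// Physical registers are resolved through their register class directly.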
const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
                                                 const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
    return TRI->getSubRegClass(RC, SubReg);
  }
  MachineInstr *Def = MRI.getVRegDef(Reg);
  if (Def->getOpcode() != AMDGPU::COPY) {
    return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
  }

  return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
                              Def->getOperand(1).getSubReg());
}

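/// Return true if \p Copy is a copy from a VGPR class into an SGPR class.
/// Copies from physical registers, copies into m0, and copies of VReg_1
/// values are ignored.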
bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
                                       const SIRegisterInfo *TRI,
                                       const MachineRegisterInfo &MRI) const {

  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();
  unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
  const TargetRegisterClass *SrcRC;

  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      DstRC == &AMDGPU::M0RegRegClass ||
      MRI.getRegClass(SrcReg) == &AMDGPU::VReg_1RegClass)
    return false;

  SrcRC = TRI->getSubRegClass(MRI.getRegClass(SrcReg), SrcSubReg);
  return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;
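      // If this is an illegal VGPR -> SGPR copy, fix it by moving the copy
      // to the VALU.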
      if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
        DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
        DEBUG(MI.print(dbgs()));
        TII->moveToVALU(MI);
      }

      switch (MI.getOpcode()) {
      default: continue;
      case AMDGPU::PHI: {
        DEBUG(dbgs() << "Fixing PHI: " << MI);

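        // Constrain each PHI input to the register class implied by its
        // definition, and the PHI result to VReg_32 when its uses allow it.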
        for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
          unsigned Reg = MI.getOperand(i).getReg();
          const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
          MRI.constrainRegClass(Reg, RC);
        }
        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
        if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
          MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
        }

        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands, we
        // must move it to the VALU, because the SGPR operands will all end up
        // being assigned the same register, which means there is a potential
        // for a conflict if different threads take different control flow
        // paths.
        //
        // For example:
        //
        // sgpr0 = def;
        // ...
        // sgpr1 = def;
        // ...
        // sgpr2 = PHI sgpr0, sgpr1
        // use sgpr2;
        //
        // Will become:
        //
        // sgpr2 = def;
        // ...
        // sgpr2 = def;
        // ...
        // use sgpr2
        //
        // FIXME: This is OK if the branching decision is made based on an
        // SGPR value.
        bool SGPRBranch = false;

        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will never enter
        // the second block (the loop) without entering the first block
        // (where the condition is computed), so there is no chance for
        // values to be overwritten.

        bool HasBreakDef = false;
        for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
          unsigned Reg = MI.getOperand(i).getReg();
          if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
            TII->moveToVALU(MI);
            break;
          }
          MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
          assert(DefInstr);
          switch(DefInstr->getOpcode()) {

          case AMDGPU::SI_BREAK:
          case AMDGPU::SI_IF_BREAK:
          case AMDGPU::SI_ELSE_BREAK:
          // If we see a PHI instruction that defines an SGPR, then that PHI
          // instruction has already been considered and should have
          // a *_BREAK as an operand.
          case AMDGPU::PHI:
            HasBreakDef = true;
            break;
          }
        }

        if (!SGPRBranch && !HasBreakDef)
          TII->moveToVALU(MI);
        break;
      }
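      // A REG_SEQUENCE that defines an SGPR-class register from VGPR inputs
      // would require illegal VGPR -> SGPR copies, so it is moved to the
      // VALU as well.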
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI))
          continue;

        DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI);
        break;
      }
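      // Likewise, an INSERT_SUBREG with an SGPR-class destination and a VGPR
      // source operand has to be moved to the VALU.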
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }

  return true;
}