//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
30 "Number of 64-bit instruction reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then doing the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_MAC_F32_e64:
      case AMDGPU::V_MAC_F16_e64:
        if (!isVGPR(Src2, TRI, MRI) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0: all input types are legal for it, so just
  // make sure src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}
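
// Illustrative example (not taken from a specific test): with no modifiers
// set,
//   v_add_f32_e64 v0, v1, v2
// passes these checks and can use the 4-byte shorter encoding
//   v_add_f32_e32 v0, v1, v2
// while v_add_f32_e64 v0, -v1, v2 cannot, because the e32 encoding has no
// source-modifier bits.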

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
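///
/// For example (a simplified, hypothetical SSA sequence):
///   %1 = V_MOV_B32_e32 0x41200000
///   %2 = V_ADD_F32_e32 %1, %0
/// becomes, when %1 has no other use:
///   %2 = V_ADD_F32_e32 0x41200000, %0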
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {

  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (TII->isLiteralConstant(MI, Src0Idx))
    return;

  // Try to fold Src0.
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                             isUInt<32>(MovSrc.getImm()))) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
    foldImmediates(MI, TII, MRI, false);
}

// Copy the kill/undef flags from \p Orig onto the implicit VCC use of \p MI.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {
  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
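///
/// For example, the literal 0x40000000 is not an inline immediate, but its
/// bit reverse is 2, which is, so a materializing move of it can be rewritten
/// as a bitreverse of 2.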
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
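  //
  // For example (assuming a SOPK form exists for the opcode):
  //   s_cmp_lt_i32 s0, 0x4000   ; 8 bytes: opcode plus a literal dword
  // becomes
  //   s_cmpk_lt_i32 s0, 0x4000  ; 4 bytes, immediate in the simm16 field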
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and the compare is initially selected to the unsigned version.
255 if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
Matt Arsenault7ccf6cd2016-09-16 21:41:16 +0000256 bool HasUImm;
257 if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
Matt Arsenault5d8eb252016-09-30 01:50:20 +0000258 if (!HasUImm) {
259 SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
260 AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
Matt Arsenault7ccf6cd2016-09-16 21:41:16 +0000261 }
262
263 MI.setDesc(TII->get(SOPKOpc));
264 }
265
266 return;
267 }
268
269 const MCInstrDesc &NewDesc = TII->get(SOPKOpc);
270
271 if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
272 (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
273 MI.setDesc(NewDesc);
274 }
275}
276
Tom Stellard1aaad692014-07-21 16:55:33 +0000277bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
Andrew Kaylor7de74af2016-04-25 22:23:44 +0000278 if (skipFunction(*MF.getFunction()))
279 return false;
280
Tom Stellard1aaad692014-07-21 16:55:33 +0000281 MachineRegisterInfo &MRI = MF.getRegInfo();
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000282 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
283 const SIInstrInfo *TII = ST.getInstrInfo();
Tom Stellard1aaad692014-07-21 16:55:33 +0000284 const SIRegisterInfo &TRI = TII->getRegisterInfo();
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000285
Tom Stellard1aaad692014-07-21 16:55:33 +0000286 std::vector<unsigned> I1Defs;
287
288 for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
289 BI != BE; ++BI) {
290
291 MachineBasicBlock &MBB = *BI;
292 MachineBasicBlock::iterator I, Next;
293 for (I = MBB.begin(); I != MBB.end(); I = Next) {
294 Next = std::next(I);
295 MachineInstr &MI = *I;
296
Matt Arsenault9a19c242016-03-11 07:42:49 +0000297 if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
298 // If this has a literal constant source that is the same as the
299 // reversed bits of an inline immediate, replace with a bitreverse of
300 // that constant. This saves 4 bytes in the common case of materializing
301 // sign bits.
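        //
        // For example:
        //   v_mov_b32_e32 v0, 0x80000000   ; 8 bytes: opcode plus literal
        // becomes
        //   v_bfrev_b32_e32 v0, 1          ; 4 bytes: inline immediate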

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait.
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
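        //
        // For example, s_nop 1 (wait 2 cycles) followed by s_nop 2 (wait 3
        // cycles) becomes s_nop 4 (wait 5 cycles).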
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure we don't overflow the bounds.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands, since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass cannot know whether the immediate will be
      // free, since it won't know whether the src == dest constraint will end
      // up being satisfied.
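      //
      // A sketch of the rewrite done below when the destination is also the
      // first source:
      //   s_add_i32 s0, s0, 0x4321  =>  s_addk_i32 s0, 0x4321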
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
            Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because there is only one VCC
          // register and it can't cope with sequences which would require
          // multiple copies of VCC, e.g.
          // S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we
          // provide a hint to the register allocator to use VCC and then we
          // will run this pass again after RA and shrink it if it outputs to
          // VCC.
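          //
          // e.g. a virtual register defined by V_CMP_EQ_F32_e64 is hinted to
          // vcc here; if RA honors the hint, the post-RA run rewrites the
          // compare to V_CMP_EQ_F32_e32, which defines vcc implicitly.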
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
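        //
        // e.g. hint the src2 register of a V_CNDMASK_B32_e64 to vcc; if it is
        // allocated to vcc, the post-RA run can emit v_cndmask_b32_e32, which
        // reads vcc implicitly.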
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // We can shrink this instruction.
      DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
      // For VOPC instructions, this is replaced by an implicit def of vcc.
      int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
      if (Op32DstIdx != -1) {
        // dst
        Inst32.addOperand(MI.getOperand(0));
      } else {
        assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
               "Unexpected case");
      }

      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.addOperand(*Src1);

      const MachineOperand *Src2 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src2);
      if (Src2) {
        int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
          Inst32.addOperand(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc. This was already added
          // during the initial BuildMI, so find it to preserve the flags.
          copyFlagsToImplicitVCC(*Inst32, *Src2);
        }
      }

      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}