//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

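/// \returns true if \p MO is a register operand whose virtual or physical
/// register belongs to a VGPR register class.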
static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

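/// \returns true if \p MI is eligible for the 32-bit VOP encoding: no source
/// or output modifiers, src1 (if present) in a VGPR, and a third operand only
/// in the special cases handled below (e.g. v_mac_f32, v_cndmask_b32).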
static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {

  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then doing the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
      default: return false;

      case AMDGPU::V_ADDC_U32_e64:
      case AMDGPU::V_SUBB_U32_e64:
        if (TII->getNamedOperand(MI, AMDGPU::OpName::src1)->isImm())
          return false;
        // Additional verification is needed for sdst/src2.
        return true;

      case AMDGPU::V_MAC_F32_e64:
      case AMDGPU::V_MAC_F16_e64:
        if (!isVGPR(Src2, TRI, MRI) ||
            TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
          return false;
        break;

      case AMDGPU::V_CNDMASK_B32_e64:
        break;
    }
  }

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0; all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
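///
/// For example, assuming %vreg1 has exactly one use:
///   %vreg1 = V_MOV_B32_e32 0x41700000
///   %vreg2 = V_ADD_F32_e32 %vreg1, %vreg0
/// becomes
///   %vreg2 = V_ADD_F32_e32 0x41700000, %vreg0
/// and the now-dead move is erased.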
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {

  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (TII->isLiteralConstant(MI, Src0Idx))
    return;

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg() && MRI.hasOneUse(Src0.getReg())) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && (isInt<32>(MovSrc.getImm()) ||
                             isUInt<32>(MovSrc.getImm()))) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(MI))
    foldImmediates(MI, TII, MRI, false);

}

// Copy the kill/undef flags from \p Orig onto the implicit vcc use of \p MI.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {

  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

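// A "KImm" operand fits in the signed 16-bit immediate field of a SOPK
// instruction but is not already free as an inline constant; e.g. 0x1234
// would need a 32-bit literal in the normal encoding, but s_movk_i32 or
// s_cmpk_* can encode it directly.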
static bool isKImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKUImmOperand(const SIInstrInfo *TII, const MachineOperand &Src) {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

static bool isKImmOrKUImmOperand(const SIInstrInfo *TII,
                                 const MachineOperand &Src,
                                 bool &IsUnsigned) {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
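/// For example, the sign mask 0x80000000 bit-reverses to 1, an inline
/// immediate, so the 4-byte literal can be replaced by a v_bfrev_b32 or
/// s_brev_b32 of the reversed value.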
static bool isReverseInlineImm(const SIInstrInfo *TII,
                               const MachineOperand &Src,
                               int32_t &ReverseImm) {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}

/// Copy the implicit register operands of \p MI that are not part of its
/// instruction definition onto \p NewMI.
static void copyExtraImplicitOps(MachineInstr &NewMI, MachineFunction &MF,
                                 const MachineInstr &MI) {
  for (unsigned i = MI.getDesc().getNumOperands() +
         MI.getDesc().getNumImplicitUses() +
         MI.getDesc().getNumImplicitDefs(), e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

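// Shrink a SOPC compare of a register against a 16-bit immediate to the SOPK
// form when one exists, e.g. s_cmp_lt_i32 s0, 0x1234 (which needs a 32-bit
// literal) becomes s_cmpk_lt_i32 s0, 0x1234, saving 4 bytes.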
static void shrinkScalarCompare(const SIInstrInfo *TII, MachineInstr &MI) {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {
    MI.setDesc(NewDesc);
  }
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test whether we are after regalloc. We only want to do this after
        // other optimizations have run, because this will confuse them.
        // XXX - not exactly a check for a post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(MI.getOperand(0).getReg())) {
          int32_t ReverseImm;
          if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      // Combine adjacent s_nops to use the immediate operand encoding how long
      // to wait. Since the immediate encodes (cycles - 1), combining the
      // immediates N and M re-encodes as N + M + 1:
      //
      // s_nop N
      // s_nop M
      // =>
      // s_nop (N + M + 1)
      if (MI.getOpcode() == AMDGPU::S_NOP &&
          Next != MBB.end() &&
          (*Next).getOpcode() == AMDGPU::S_NOP) {

        MachineInstr &NextMI = *Next;
        // The instruction encodes the amount to wait with an offset of 1,
        // i.e. 0 is wait 1 cycle. Convert both to cycles and then convert back
        // after adding.
        uint8_t Nop0 = MI.getOperand(0).getImm() + 1;
        uint8_t Nop1 = NextMI.getOperand(0).getImm() + 1;

        // Make sure the combined wait doesn't exceed the maximum of 8 cycles
        // the immediate can encode.
        if (Nop0 + Nop1 <= 8) {
          NextMI.getOperand(0).setImm(Nop0 + Nop1 - 1);
          MI.eraseFromParent();
        }

        continue;
      }

      // FIXME: We also need to consider movs of constant operands since
      // immediate operands are not folded if they have more than one use, and
      // the operand folding pass is unaware whether the immediate will be free,
      // since it won't know whether the src == dest constraint will end up
      // being satisfied.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (TargetRegisterInfo::isVirtualRegister(Dest->getReg()) &&
            Src0->isReg()) {
          MRI.setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI.setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

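        // When the destination register is also src0, the SOPK form (whose
        // destination doubles as a source) can encode a small immediate
        // directly, e.g. s_add_i32 s0, s0, 0x1234 -> s_addk_i32 s0, 0x1234.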
        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(TII, *Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(TII, MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() &&
            TargetRegisterInfo::isPhysicalRegister(Dst.getReg())) {
          int32_t ReverseImm;
          if (isKImmOperand(TII, Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(TII, Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because this is only one register and
          // cannot deal with sequences which would require multiple copies of
          // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        unsigned SReg = Src2->getReg();
        if (TargetRegisterInfo::isVirtualRegister(SReg)) {
          MRI.setRegAllocationHint(SReg, 0, AMDGPU::VCC);
          continue;
        }
        if (SReg != AMDGPU::VCC)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      // Check the carry-in operand for v_addc_u32_e64.
      const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::src2);

      if (SDst) {
        if (SDst->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(SDst->getReg()))
            MRI.setRegAllocationHint(SDst->getReg(), 0, AMDGPU::VCC);
          continue;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        if (Src2 && Src2->getReg() != AMDGPU::VCC) {
          if (TargetRegisterInfo::isVirtualRegister(Src2->getReg()))
            MRI.setRegAllocationHint(Src2->getReg(), 0, AMDGPU::VCC);

          continue;
        }
      }

      // We can shrink this instruction
      DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
      // For VOPC instructions, this is replaced by an implicit def of vcc.
      int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
      if (Op32DstIdx != -1) {
        // dst
        Inst32.add(MI.getOperand(0));
      } else {
        assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
               "Unexpected case");
      }

      Inst32.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.add(*Src1);

      if (Src2) {
        int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
          Inst32.add(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc. This was already added
          // during the initial BuildMI, so find it to preserve the flags.
          copyFlagsToImplicitVCC(*Inst32, *Src2);
        }
      }

      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MF, MI);

      MI.eraseFromParent();
      foldImmediates(*Inst32, TII, MRI);

      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');

    }
  }
  return false;
}